Commit 35a2af94c7ce7130ca292c68b1d27fcfdb648f6b
Committed by: Ingo Molnar
1 parent: ebdc195f2e
Exists in: smarc-imx_3.14.28_1.0.0_ga and in 1 other branch

sched/wait: Make the __wait_event*() interface more friendly

Change all __wait_event*() implementations to match the corresponding
wait_event*() signature for convenience.

In particular this does away with the weird 'ret' logic. Since there
are __wait_event*() users this requires we update them too.

Reviewed-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20131002092529.042563462@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Showing 5 changed files with 73 additions and 81 deletions
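The interface change is easiest to read off the call sites below. As a minimal sketch (the wait queue 'wq' and predicate 'done()' here are placeholders, not taken from the patch): the old macros took a caller-supplied result variable as their last argument, while the new ones evaluate to the result, matching wait_event_interruptible():

	/* Before: 'ret' is an output argument written by the macro. */
	int ret = 0;
	__wait_event_interruptible(wq, done(), ret);
	if (ret)		/* -ERESTARTSYS if a signal arrived */
		return ret;

	/* After: the macro itself evaluates to 0 or -ERESTARTSYS. */
	int ret = __wait_event_interruptible(wq, done());
	if (ret)
		return ret;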
arch/mips/kernel/rtlx.c
1 | /* | 1 | /* |
2 | * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. | 2 | * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. |
3 | * Copyright (C) 2005, 06 Ralf Baechle (ralf@linux-mips.org) | 3 | * Copyright (C) 2005, 06 Ralf Baechle (ralf@linux-mips.org) |
4 | * | 4 | * |
5 | * This program is free software; you can distribute it and/or modify it | 5 | * This program is free software; you can distribute it and/or modify it |
6 | * under the terms of the GNU General Public License (Version 2) as | 6 | * under the terms of the GNU General Public License (Version 2) as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | * | 8 | * |
9 | * This program is distributed in the hope it will be useful, but WITHOUT | 9 | * This program is distributed in the hope it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * for more details. | 12 | * for more details. |
13 | * | 13 | * |
14 | * You should have received a copy of the GNU General Public License along | 14 | * You should have received a copy of the GNU General Public License along |
15 | * with this program; if not, write to the Free Software Foundation, Inc., | 15 | * with this program; if not, write to the Free Software Foundation, Inc., |
16 | * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. | 16 | * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. |
17 | * | 17 | * |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/device.h> | 20 | #include <linux/device.h> |
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/fs.h> | 22 | #include <linux/fs.h> |
23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
24 | #include <asm/uaccess.h> | 24 | #include <asm/uaccess.h> |
25 | #include <linux/list.h> | 25 | #include <linux/list.h> |
26 | #include <linux/vmalloc.h> | 26 | #include <linux/vmalloc.h> |
27 | #include <linux/elf.h> | 27 | #include <linux/elf.h> |
28 | #include <linux/seq_file.h> | 28 | #include <linux/seq_file.h> |
29 | #include <linux/syscalls.h> | 29 | #include <linux/syscalls.h> |
30 | #include <linux/moduleloader.h> | 30 | #include <linux/moduleloader.h> |
31 | #include <linux/interrupt.h> | 31 | #include <linux/interrupt.h> |
32 | #include <linux/poll.h> | 32 | #include <linux/poll.h> |
33 | #include <linux/sched.h> | 33 | #include <linux/sched.h> |
34 | #include <linux/wait.h> | 34 | #include <linux/wait.h> |
35 | #include <asm/mipsmtregs.h> | 35 | #include <asm/mipsmtregs.h> |
36 | #include <asm/mips_mt.h> | 36 | #include <asm/mips_mt.h> |
37 | #include <asm/cacheflush.h> | 37 | #include <asm/cacheflush.h> |
38 | #include <linux/atomic.h> | 38 | #include <linux/atomic.h> |
39 | #include <asm/cpu.h> | 39 | #include <asm/cpu.h> |
40 | #include <asm/processor.h> | 40 | #include <asm/processor.h> |
41 | #include <asm/vpe.h> | 41 | #include <asm/vpe.h> |
42 | #include <asm/rtlx.h> | 42 | #include <asm/rtlx.h> |
43 | #include <asm/setup.h> | 43 | #include <asm/setup.h> |
44 | 44 | ||
45 | static struct rtlx_info *rtlx; | 45 | static struct rtlx_info *rtlx; |
46 | static int major; | 46 | static int major; |
47 | static char module_name[] = "rtlx"; | 47 | static char module_name[] = "rtlx"; |
48 | 48 | ||
49 | static struct chan_waitqueues { | 49 | static struct chan_waitqueues { |
50 | wait_queue_head_t rt_queue; | 50 | wait_queue_head_t rt_queue; |
51 | wait_queue_head_t lx_queue; | 51 | wait_queue_head_t lx_queue; |
52 | atomic_t in_open; | 52 | atomic_t in_open; |
53 | struct mutex mutex; | 53 | struct mutex mutex; |
54 | } channel_wqs[RTLX_CHANNELS]; | 54 | } channel_wqs[RTLX_CHANNELS]; |
55 | 55 | ||
56 | static struct vpe_notifications notify; | 56 | static struct vpe_notifications notify; |
57 | static int sp_stopping; | 57 | static int sp_stopping; |
58 | 58 | ||
59 | extern void *vpe_get_shared(int index); | 59 | extern void *vpe_get_shared(int index); |
60 | 60 | ||
61 | static void rtlx_dispatch(void) | 61 | static void rtlx_dispatch(void) |
62 | { | 62 | { |
63 | do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ); | 63 | do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ); |
64 | } | 64 | } |
65 | 65 | ||
66 | 66 | ||
67 | /* Interrupt handler may be called before rtlx_init has otherwise had | 67 | /* Interrupt handler may be called before rtlx_init has otherwise had |
68 | a chance to run. | 68 | a chance to run. |
69 | */ | 69 | */ |
70 | static irqreturn_t rtlx_interrupt(int irq, void *dev_id) | 70 | static irqreturn_t rtlx_interrupt(int irq, void *dev_id) |
71 | { | 71 | { |
72 | unsigned int vpeflags; | 72 | unsigned int vpeflags; |
73 | unsigned long flags; | 73 | unsigned long flags; |
74 | int i; | 74 | int i; |
75 | 75 | ||
76 | /* Ought not to be strictly necessary for SMTC builds */ | 76 | /* Ought not to be strictly necessary for SMTC builds */ |
77 | local_irq_save(flags); | 77 | local_irq_save(flags); |
78 | vpeflags = dvpe(); | 78 | vpeflags = dvpe(); |
79 | set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ); | 79 | set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ); |
80 | irq_enable_hazard(); | 80 | irq_enable_hazard(); |
81 | evpe(vpeflags); | 81 | evpe(vpeflags); |
82 | local_irq_restore(flags); | 82 | local_irq_restore(flags); |
83 | 83 | ||
84 | for (i = 0; i < RTLX_CHANNELS; i++) { | 84 | for (i = 0; i < RTLX_CHANNELS; i++) { |
85 | wake_up(&channel_wqs[i].lx_queue); | 85 | wake_up(&channel_wqs[i].lx_queue); |
86 | wake_up(&channel_wqs[i].rt_queue); | 86 | wake_up(&channel_wqs[i].rt_queue); |
87 | } | 87 | } |
88 | 88 | ||
89 | return IRQ_HANDLED; | 89 | return IRQ_HANDLED; |
90 | } | 90 | } |
91 | 91 | ||
92 | static void __used dump_rtlx(void) | 92 | static void __used dump_rtlx(void) |
93 | { | 93 | { |
94 | int i; | 94 | int i; |
95 | 95 | ||
96 | printk("id 0x%lx state %d\n", rtlx->id, rtlx->state); | 96 | printk("id 0x%lx state %d\n", rtlx->id, rtlx->state); |
97 | 97 | ||
98 | for (i = 0; i < RTLX_CHANNELS; i++) { | 98 | for (i = 0; i < RTLX_CHANNELS; i++) { |
99 | struct rtlx_channel *chan = &rtlx->channel[i]; | 99 | struct rtlx_channel *chan = &rtlx->channel[i]; |
100 | 100 | ||
101 | printk(" rt_state %d lx_state %d buffer_size %d\n", | 101 | printk(" rt_state %d lx_state %d buffer_size %d\n", |
102 | chan->rt_state, chan->lx_state, chan->buffer_size); | 102 | chan->rt_state, chan->lx_state, chan->buffer_size); |
103 | 103 | ||
104 | printk(" rt_read %d rt_write %d\n", | 104 | printk(" rt_read %d rt_write %d\n", |
105 | chan->rt_read, chan->rt_write); | 105 | chan->rt_read, chan->rt_write); |
106 | 106 | ||
107 | printk(" lx_read %d lx_write %d\n", | 107 | printk(" lx_read %d lx_write %d\n", |
108 | chan->lx_read, chan->lx_write); | 108 | chan->lx_read, chan->lx_write); |
109 | 109 | ||
110 | printk(" rt_buffer <%s>\n", chan->rt_buffer); | 110 | printk(" rt_buffer <%s>\n", chan->rt_buffer); |
111 | printk(" lx_buffer <%s>\n", chan->lx_buffer); | 111 | printk(" lx_buffer <%s>\n", chan->lx_buffer); |
112 | } | 112 | } |
113 | } | 113 | } |
114 | 114 | ||
115 | /* call when we have the address of the shared structure from the SP side. */ | 115 | /* call when we have the address of the shared structure from the SP side. */ |
116 | static int rtlx_init(struct rtlx_info *rtlxi) | 116 | static int rtlx_init(struct rtlx_info *rtlxi) |
117 | { | 117 | { |
118 | if (rtlxi->id != RTLX_ID) { | 118 | if (rtlxi->id != RTLX_ID) { |
119 | printk(KERN_ERR "no valid RTLX id at 0x%p 0x%lx\n", | 119 | printk(KERN_ERR "no valid RTLX id at 0x%p 0x%lx\n", |
120 | rtlxi, rtlxi->id); | 120 | rtlxi, rtlxi->id); |
121 | return -ENOEXEC; | 121 | return -ENOEXEC; |
122 | } | 122 | } |
123 | 123 | ||
124 | rtlx = rtlxi; | 124 | rtlx = rtlxi; |
125 | 125 | ||
126 | return 0; | 126 | return 0; |
127 | } | 127 | } |
128 | 128 | ||
129 | /* notifications */ | 129 | /* notifications */ |
130 | static void starting(int vpe) | 130 | static void starting(int vpe) |
131 | { | 131 | { |
132 | int i; | 132 | int i; |
133 | sp_stopping = 0; | 133 | sp_stopping = 0; |
134 | 134 | ||
135 | /* force a reload of rtlx */ | 135 | /* force a reload of rtlx */ |
136 | rtlx=NULL; | 136 | rtlx=NULL; |
137 | 137 | ||
138 | /* wake up any sleeping rtlx_open's */ | 138 | /* wake up any sleeping rtlx_open's */ |
139 | for (i = 0; i < RTLX_CHANNELS; i++) | 139 | for (i = 0; i < RTLX_CHANNELS; i++) |
140 | wake_up_interruptible(&channel_wqs[i].lx_queue); | 140 | wake_up_interruptible(&channel_wqs[i].lx_queue); |
141 | } | 141 | } |
142 | 142 | ||
143 | static void stopping(int vpe) | 143 | static void stopping(int vpe) |
144 | { | 144 | { |
145 | int i; | 145 | int i; |
146 | 146 | ||
147 | sp_stopping = 1; | 147 | sp_stopping = 1; |
148 | for (i = 0; i < RTLX_CHANNELS; i++) | 148 | for (i = 0; i < RTLX_CHANNELS; i++) |
149 | wake_up_interruptible(&channel_wqs[i].lx_queue); | 149 | wake_up_interruptible(&channel_wqs[i].lx_queue); |
150 | } | 150 | } |
151 | 151 | ||
152 | 152 | ||
153 | int rtlx_open(int index, int can_sleep) | 153 | int rtlx_open(int index, int can_sleep) |
154 | { | 154 | { |
155 | struct rtlx_info **p; | 155 | struct rtlx_info **p; |
156 | struct rtlx_channel *chan; | 156 | struct rtlx_channel *chan; |
157 | enum rtlx_state state; | 157 | enum rtlx_state state; |
158 | int ret = 0; | 158 | int ret = 0; |
159 | 159 | ||
160 | if (index >= RTLX_CHANNELS) { | 160 | if (index >= RTLX_CHANNELS) { |
161 | printk(KERN_DEBUG "rtlx_open index out of range\n"); | 161 | printk(KERN_DEBUG "rtlx_open index out of range\n"); |
162 | return -ENOSYS; | 162 | return -ENOSYS; |
163 | } | 163 | } |
164 | 164 | ||
165 | if (atomic_inc_return(&channel_wqs[index].in_open) > 1) { | 165 | if (atomic_inc_return(&channel_wqs[index].in_open) > 1) { |
166 | printk(KERN_DEBUG "rtlx_open channel %d already opened\n", | 166 | printk(KERN_DEBUG "rtlx_open channel %d already opened\n", |
167 | index); | 167 | index); |
168 | ret = -EBUSY; | 168 | ret = -EBUSY; |
169 | goto out_fail; | 169 | goto out_fail; |
170 | } | 170 | } |
171 | 171 | ||
172 | if (rtlx == NULL) { | 172 | if (rtlx == NULL) { |
173 | if( (p = vpe_get_shared(tclimit)) == NULL) { | 173 | if( (p = vpe_get_shared(tclimit)) == NULL) { |
174 | if (can_sleep) { | 174 | if (can_sleep) { |
175 | __wait_event_interruptible(channel_wqs[index].lx_queue, | 175 | ret = __wait_event_interruptible( |
176 | (p = vpe_get_shared(tclimit)), ret); | 176 | channel_wqs[index].lx_queue, |
| | 177 | (p = vpe_get_shared(tclimit))); |
177 | if (ret) | 178 | if (ret) |
178 | goto out_fail; | 179 | goto out_fail; |
179 | } else { | 180 | } else { |
180 | printk(KERN_DEBUG "No SP program loaded, and device " | 181 | printk(KERN_DEBUG "No SP program loaded, and device " |
181 | "opened with O_NONBLOCK\n"); | 182 | "opened with O_NONBLOCK\n"); |
182 | ret = -ENOSYS; | 183 | ret = -ENOSYS; |
183 | goto out_fail; | 184 | goto out_fail; |
184 | } | 185 | } |
185 | } | 186 | } |
186 | 187 | ||
187 | smp_rmb(); | 188 | smp_rmb(); |
188 | if (*p == NULL) { | 189 | if (*p == NULL) { |
189 | if (can_sleep) { | 190 | if (can_sleep) { |
190 | DEFINE_WAIT(wait); | 191 | DEFINE_WAIT(wait); |
191 | 192 | ||
192 | for (;;) { | 193 | for (;;) { |
193 | prepare_to_wait( | 194 | prepare_to_wait( |
194 | &channel_wqs[index].lx_queue, | 195 | &channel_wqs[index].lx_queue, |
195 | &wait, TASK_INTERRUPTIBLE); | 196 | &wait, TASK_INTERRUPTIBLE); |
196 | smp_rmb(); | 197 | smp_rmb(); |
197 | if (*p != NULL) | 198 | if (*p != NULL) |
198 | break; | 199 | break; |
199 | if (!signal_pending(current)) { | 200 | if (!signal_pending(current)) { |
200 | schedule(); | 201 | schedule(); |
201 | continue; | 202 | continue; |
202 | } | 203 | } |
203 | ret = -ERESTARTSYS; | 204 | ret = -ERESTARTSYS; |
204 | goto out_fail; | 205 | goto out_fail; |
205 | } | 206 | } |
206 | finish_wait(&channel_wqs[index].lx_queue, &wait); | 207 | finish_wait(&channel_wqs[index].lx_queue, &wait); |
207 | } else { | 208 | } else { |
208 | pr_err(" *vpe_get_shared is NULL. " | 209 | pr_err(" *vpe_get_shared is NULL. " |
209 | "Has an SP program been loaded?\n"); | 210 | "Has an SP program been loaded?\n"); |
210 | ret = -ENOSYS; | 211 | ret = -ENOSYS; |
211 | goto out_fail; | 212 | goto out_fail; |
212 | } | 213 | } |
213 | } | 214 | } |
214 | 215 | ||
215 | if ((unsigned int)*p < KSEG0) { | 216 | if ((unsigned int)*p < KSEG0) { |
216 | printk(KERN_WARNING "vpe_get_shared returned an " | 217 | printk(KERN_WARNING "vpe_get_shared returned an " |
217 | "invalid pointer maybe an error code %d\n", | 218 | "invalid pointer maybe an error code %d\n", |
218 | (int)*p); | 219 | (int)*p); |
219 | ret = -ENOSYS; | 220 | ret = -ENOSYS; |
220 | goto out_fail; | 221 | goto out_fail; |
221 | } | 222 | } |
222 | 223 | ||
223 | if ((ret = rtlx_init(*p)) < 0) | 224 | if ((ret = rtlx_init(*p)) < 0) |
224 | goto out_ret; | 225 | goto out_ret; |
225 | } | 226 | } |
226 | 227 | ||
227 | chan = &rtlx->channel[index]; | 228 | chan = &rtlx->channel[index]; |
228 | 229 | ||
229 | state = xchg(&chan->lx_state, RTLX_STATE_OPENED); | 230 | state = xchg(&chan->lx_state, RTLX_STATE_OPENED); |
230 | if (state == RTLX_STATE_OPENED) { | 231 | if (state == RTLX_STATE_OPENED) { |
231 | ret = -EBUSY; | 232 | ret = -EBUSY; |
232 | goto out_fail; | 233 | goto out_fail; |
233 | } | 234 | } |
234 | 235 | ||
235 | out_fail: | 236 | out_fail: |
236 | smp_mb(); | 237 | smp_mb(); |
237 | atomic_dec(&channel_wqs[index].in_open); | 238 | atomic_dec(&channel_wqs[index].in_open); |
238 | smp_mb(); | 239 | smp_mb(); |
239 | 240 | ||
240 | out_ret: | 241 | out_ret: |
241 | return ret; | 242 | return ret; |
242 | } | 243 | } |
243 | 244 | ||
244 | int rtlx_release(int index) | 245 | int rtlx_release(int index) |
245 | { | 246 | { |
246 | if (rtlx == NULL) { | 247 | if (rtlx == NULL) { |
247 | pr_err("rtlx_release() with null rtlx\n"); | 248 | pr_err("rtlx_release() with null rtlx\n"); |
248 | return 0; | 249 | return 0; |
249 | } | 250 | } |
250 | rtlx->channel[index].lx_state = RTLX_STATE_UNUSED; | 251 | rtlx->channel[index].lx_state = RTLX_STATE_UNUSED; |
251 | return 0; | 252 | return 0; |
252 | } | 253 | } |
253 | 254 | ||
254 | unsigned int rtlx_read_poll(int index, int can_sleep) | 255 | unsigned int rtlx_read_poll(int index, int can_sleep) |
255 | { | 256 | { |
256 | struct rtlx_channel *chan; | 257 | struct rtlx_channel *chan; |
257 | 258 | ||
258 | if (rtlx == NULL) | 259 | if (rtlx == NULL) |
259 | return 0; | 260 | return 0; |
260 | 261 | ||
261 | chan = &rtlx->channel[index]; | 262 | chan = &rtlx->channel[index]; |
262 | 263 | ||
263 | /* data available to read? */ | 264 | /* data available to read? */ |
264 | if (chan->lx_read == chan->lx_write) { | 265 | if (chan->lx_read == chan->lx_write) { |
265 | if (can_sleep) { | 266 | if (can_sleep) { |
266 | int ret = 0; | 267 | int ret = __wait_event_interruptible( |
267 | | 268 | channel_wqs[index].lx_queue, |
268 | __wait_event_interruptible(channel_wqs[index].lx_queue, | | |
269 | (chan->lx_read != chan->lx_write) || | 269 | (chan->lx_read != chan->lx_write) || |
270 | sp_stopping, ret); | 270 | sp_stopping); |
271 | if (ret) | 271 | if (ret) |
272 | return ret; | 272 | return ret; |
273 | 273 | ||
274 | if (sp_stopping) | 274 | if (sp_stopping) |
275 | return 0; | 275 | return 0; |
276 | } else | 276 | } else |
277 | return 0; | 277 | return 0; |
278 | } | 278 | } |
279 | 279 | ||
280 | return (chan->lx_write + chan->buffer_size - chan->lx_read) | 280 | return (chan->lx_write + chan->buffer_size - chan->lx_read) |
281 | % chan->buffer_size; | 281 | % chan->buffer_size; |
282 | } | 282 | } |
283 | 283 | ||
284 | static inline int write_spacefree(int read, int write, int size) | 284 | static inline int write_spacefree(int read, int write, int size) |
285 | { | 285 | { |
286 | if (read == write) { | 286 | if (read == write) { |
287 | /* | 287 | /* |
288 | * Never fill the buffer completely, so indexes are always | 288 | * Never fill the buffer completely, so indexes are always |
289 | * equal if empty and only empty, or !equal if data available | 289 | * equal if empty and only empty, or !equal if data available |
290 | */ | 290 | */ |
291 | return size - 1; | 291 | return size - 1; |
292 | } | 292 | } |
293 | 293 | ||
294 | return ((read + size - write) % size) - 1; | 294 | return ((read + size - write) % size) - 1; |
295 | } | 295 | } |
296 | 296 | ||
297 | unsigned int rtlx_write_poll(int index) | 297 | unsigned int rtlx_write_poll(int index) |
298 | { | 298 | { |
299 | struct rtlx_channel *chan = &rtlx->channel[index]; | 299 | struct rtlx_channel *chan = &rtlx->channel[index]; |
300 | 300 | ||
301 | return write_spacefree(chan->rt_read, chan->rt_write, | 301 | return write_spacefree(chan->rt_read, chan->rt_write, |
302 | chan->buffer_size); | 302 | chan->buffer_size); |
303 | } | 303 | } |
304 | 304 | ||
305 | ssize_t rtlx_read(int index, void __user *buff, size_t count) | 305 | ssize_t rtlx_read(int index, void __user *buff, size_t count) |
306 | { | 306 | { |
307 | size_t lx_write, fl = 0L; | 307 | size_t lx_write, fl = 0L; |
308 | struct rtlx_channel *lx; | 308 | struct rtlx_channel *lx; |
309 | unsigned long failed; | 309 | unsigned long failed; |
310 | 310 | ||
311 | if (rtlx == NULL) | 311 | if (rtlx == NULL) |
312 | return -ENOSYS; | 312 | return -ENOSYS; |
313 | 313 | ||
314 | lx = &rtlx->channel[index]; | 314 | lx = &rtlx->channel[index]; |
315 | 315 | ||
316 | mutex_lock(&channel_wqs[index].mutex); | 316 | mutex_lock(&channel_wqs[index].mutex); |
317 | smp_rmb(); | 317 | smp_rmb(); |
318 | lx_write = lx->lx_write; | 318 | lx_write = lx->lx_write; |
319 | 319 | ||
320 | /* find out how much in total */ | 320 | /* find out how much in total */ |
321 | count = min(count, | 321 | count = min(count, |
322 | (size_t)(lx_write + lx->buffer_size - lx->lx_read) | 322 | (size_t)(lx_write + lx->buffer_size - lx->lx_read) |
323 | % lx->buffer_size); | 323 | % lx->buffer_size); |
324 | 324 | ||
325 | /* then how much from the read pointer onwards */ | 325 | /* then how much from the read pointer onwards */ |
326 | fl = min(count, (size_t)lx->buffer_size - lx->lx_read); | 326 | fl = min(count, (size_t)lx->buffer_size - lx->lx_read); |
327 | 327 | ||
328 | failed = copy_to_user(buff, lx->lx_buffer + lx->lx_read, fl); | 328 | failed = copy_to_user(buff, lx->lx_buffer + lx->lx_read, fl); |
329 | if (failed) | 329 | if (failed) |
330 | goto out; | 330 | goto out; |
331 | 331 | ||
332 | /* and if there is anything left at the beginning of the buffer */ | 332 | /* and if there is anything left at the beginning of the buffer */ |
333 | if (count - fl) | 333 | if (count - fl) |
334 | failed = copy_to_user(buff + fl, lx->lx_buffer, count - fl); | 334 | failed = copy_to_user(buff + fl, lx->lx_buffer, count - fl); |
335 | 335 | ||
336 | out: | 336 | out: |
337 | count -= failed; | 337 | count -= failed; |
338 | 338 | ||
339 | smp_wmb(); | 339 | smp_wmb(); |
340 | lx->lx_read = (lx->lx_read + count) % lx->buffer_size; | 340 | lx->lx_read = (lx->lx_read + count) % lx->buffer_size; |
341 | smp_wmb(); | 341 | smp_wmb(); |
342 | mutex_unlock(&channel_wqs[index].mutex); | 342 | mutex_unlock(&channel_wqs[index].mutex); |
343 | 343 | ||
344 | return count; | 344 | return count; |
345 | } | 345 | } |
346 | 346 | ||
347 | ssize_t rtlx_write(int index, const void __user *buffer, size_t count) | 347 | ssize_t rtlx_write(int index, const void __user *buffer, size_t count) |
348 | { | 348 | { |
349 | struct rtlx_channel *rt; | 349 | struct rtlx_channel *rt; |
350 | unsigned long failed; | 350 | unsigned long failed; |
351 | size_t rt_read; | 351 | size_t rt_read; |
352 | size_t fl; | 352 | size_t fl; |
353 | 353 | ||
354 | if (rtlx == NULL) | 354 | if (rtlx == NULL) |
355 | return(-ENOSYS); | 355 | return(-ENOSYS); |
356 | 356 | ||
357 | rt = &rtlx->channel[index]; | 357 | rt = &rtlx->channel[index]; |
358 | 358 | ||
359 | mutex_lock(&channel_wqs[index].mutex); | 359 | mutex_lock(&channel_wqs[index].mutex); |
360 | smp_rmb(); | 360 | smp_rmb(); |
361 | rt_read = rt->rt_read; | 361 | rt_read = rt->rt_read; |
362 | 362 | ||
363 | /* total number of bytes to copy */ | 363 | /* total number of bytes to copy */ |
364 | count = min(count, (size_t)write_spacefree(rt_read, rt->rt_write, | 364 | count = min(count, (size_t)write_spacefree(rt_read, rt->rt_write, |
365 | rt->buffer_size)); | 365 | rt->buffer_size)); |
366 | 366 | ||
367 | /* first bit from write pointer to the end of the buffer, or count */ | 367 | /* first bit from write pointer to the end of the buffer, or count */ |
368 | fl = min(count, (size_t) rt->buffer_size - rt->rt_write); | 368 | fl = min(count, (size_t) rt->buffer_size - rt->rt_write); |
369 | 369 | ||
370 | failed = copy_from_user(rt->rt_buffer + rt->rt_write, buffer, fl); | 370 | failed = copy_from_user(rt->rt_buffer + rt->rt_write, buffer, fl); |
371 | if (failed) | 371 | if (failed) |
372 | goto out; | 372 | goto out; |
373 | 373 | ||
374 | /* if there's any left copy to the beginning of the buffer */ | 374 | /* if there's any left copy to the beginning of the buffer */ |
375 | if (count - fl) { | 375 | if (count - fl) { |
376 | failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl); | 376 | failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl); |
377 | } | 377 | } |
378 | 378 | ||
379 | out: | 379 | out: |
380 | count -= failed; | 380 | count -= failed; |
381 | 381 | ||
382 | smp_wmb(); | 382 | smp_wmb(); |
383 | rt->rt_write = (rt->rt_write + count) % rt->buffer_size; | 383 | rt->rt_write = (rt->rt_write + count) % rt->buffer_size; |
384 | smp_wmb(); | 384 | smp_wmb(); |
385 | mutex_unlock(&channel_wqs[index].mutex); | 385 | mutex_unlock(&channel_wqs[index].mutex); |
386 | 386 | ||
387 | return count; | 387 | return count; |
388 | } | 388 | } |
389 | 389 | ||
390 | 390 | ||
391 | static int file_open(struct inode *inode, struct file *filp) | 391 | static int file_open(struct inode *inode, struct file *filp) |
392 | { | 392 | { |
393 | return rtlx_open(iminor(inode), (filp->f_flags & O_NONBLOCK) ? 0 : 1); | 393 | return rtlx_open(iminor(inode), (filp->f_flags & O_NONBLOCK) ? 0 : 1); |
394 | } | 394 | } |
395 | 395 | ||
396 | static int file_release(struct inode *inode, struct file *filp) | 396 | static int file_release(struct inode *inode, struct file *filp) |
397 | { | 397 | { |
398 | return rtlx_release(iminor(inode)); | 398 | return rtlx_release(iminor(inode)); |
399 | } | 399 | } |
400 | 400 | ||
401 | static unsigned int file_poll(struct file *file, poll_table * wait) | 401 | static unsigned int file_poll(struct file *file, poll_table * wait) |
402 | { | 402 | { |
403 | int minor = iminor(file_inode(file)); | 403 | int minor = iminor(file_inode(file)); |
404 | unsigned int mask = 0; | 404 | unsigned int mask = 0; |
405 | 405 | ||
406 | poll_wait(file, &channel_wqs[minor].rt_queue, wait); | 406 | poll_wait(file, &channel_wqs[minor].rt_queue, wait); |
407 | poll_wait(file, &channel_wqs[minor].lx_queue, wait); | 407 | poll_wait(file, &channel_wqs[minor].lx_queue, wait); |
408 | 408 | ||
409 | if (rtlx == NULL) | 409 | if (rtlx == NULL) |
410 | return 0; | 410 | return 0; |
411 | 411 | ||
412 | /* data available to read? */ | 412 | /* data available to read? */ |
413 | if (rtlx_read_poll(minor, 0)) | 413 | if (rtlx_read_poll(minor, 0)) |
414 | mask |= POLLIN | POLLRDNORM; | 414 | mask |= POLLIN | POLLRDNORM; |
415 | 415 | ||
416 | /* space to write */ | 416 | /* space to write */ |
417 | if (rtlx_write_poll(minor)) | 417 | if (rtlx_write_poll(minor)) |
418 | mask |= POLLOUT | POLLWRNORM; | 418 | mask |= POLLOUT | POLLWRNORM; |
419 | 419 | ||
420 | return mask; | 420 | return mask; |
421 | } | 421 | } |
422 | 422 | ||
423 | static ssize_t file_read(struct file *file, char __user * buffer, size_t count, | 423 | static ssize_t file_read(struct file *file, char __user * buffer, size_t count, |
424 | loff_t * ppos) | 424 | loff_t * ppos) |
425 | { | 425 | { |
426 | int minor = iminor(file_inode(file)); | 426 | int minor = iminor(file_inode(file)); |
427 | 427 | ||
428 | /* data available? */ | 428 | /* data available? */ |
429 | if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 0 : 1)) { | 429 | if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 0 : 1)) { |
430 | return 0; // -EAGAIN makes cat whinge | 430 | return 0; // -EAGAIN makes cat whinge |
431 | } | 431 | } |
432 | 432 | ||
433 | return rtlx_read(minor, buffer, count); | 433 | return rtlx_read(minor, buffer, count); |
434 | } | 434 | } |
435 | 435 | ||
436 | static ssize_t file_write(struct file *file, const char __user * buffer, | 436 | static ssize_t file_write(struct file *file, const char __user * buffer, |
437 | size_t count, loff_t * ppos) | 437 | size_t count, loff_t * ppos) |
438 | { | 438 | { |
439 | int minor = iminor(file_inode(file)); | 439 | int minor = iminor(file_inode(file)); |
440 | 440 | ||
441 | /* any space left... */ | 441 | /* any space left... */ |
442 | if (!rtlx_write_poll(minor)) { | 442 | if (!rtlx_write_poll(minor)) { |
443 | int ret = 0; | 443 | int ret; |
444 | 444 | ||
445 | if (file->f_flags & O_NONBLOCK) | 445 | if (file->f_flags & O_NONBLOCK) |
446 | return -EAGAIN; | 446 | return -EAGAIN; |
447 | 447 | ||
448 | __wait_event_interruptible(channel_wqs[minor].rt_queue, | 448 | ret = __wait_event_interruptible(channel_wqs[minor].rt_queue, |
449 | rtlx_write_poll(minor), | 449 | rtlx_write_poll(minor)); |
450 | ret); | | |
451 | if (ret) | 450 | if (ret) |
452 | return ret; | 451 | return ret; |
453 | } | 452 | } |
454 | 453 | ||
455 | return rtlx_write(minor, buffer, count); | 454 | return rtlx_write(minor, buffer, count); |
456 | } | 455 | } |
457 | 456 | ||
458 | static const struct file_operations rtlx_fops = { | 457 | static const struct file_operations rtlx_fops = { |
459 | .owner = THIS_MODULE, | 458 | .owner = THIS_MODULE, |
460 | .open = file_open, | 459 | .open = file_open, |
461 | .release = file_release, | 460 | .release = file_release, |
462 | .write = file_write, | 461 | .write = file_write, |
463 | .read = file_read, | 462 | .read = file_read, |
464 | .poll = file_poll, | 463 | .poll = file_poll, |
465 | .llseek = noop_llseek, | 464 | .llseek = noop_llseek, |
466 | }; | 465 | }; |
467 | 466 | ||
468 | static struct irqaction rtlx_irq = { | 467 | static struct irqaction rtlx_irq = { |
469 | .handler = rtlx_interrupt, | 468 | .handler = rtlx_interrupt, |
470 | .name = "RTLX", | 469 | .name = "RTLX", |
471 | }; | 470 | }; |
472 | 471 | ||
473 | static int rtlx_irq_num = MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ; | 472 | static int rtlx_irq_num = MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ; |
474 | 473 | ||
475 | static char register_chrdev_failed[] __initdata = | 474 | static char register_chrdev_failed[] __initdata = |
476 | KERN_ERR "rtlx_module_init: unable to register device\n"; | 475 | KERN_ERR "rtlx_module_init: unable to register device\n"; |
477 | 476 | ||
478 | static int __init rtlx_module_init(void) | 477 | static int __init rtlx_module_init(void) |
479 | { | 478 | { |
480 | struct device *dev; | 479 | struct device *dev; |
481 | int i, err; | 480 | int i, err; |
482 | 481 | ||
483 | if (!cpu_has_mipsmt) { | 482 | if (!cpu_has_mipsmt) { |
484 | printk("VPE loader: not a MIPS MT capable processor\n"); | 483 | printk("VPE loader: not a MIPS MT capable processor\n"); |
485 | return -ENODEV; | 484 | return -ENODEV; |
486 | } | 485 | } |
487 | 486 | ||
488 | if (tclimit == 0) { | 487 | if (tclimit == 0) { |
489 | printk(KERN_WARNING "No TCs reserved for AP/SP, not " | 488 | printk(KERN_WARNING "No TCs reserved for AP/SP, not " |
490 | "initializing RTLX.\nPass maxtcs=<n> argument as kernel " | 489 | "initializing RTLX.\nPass maxtcs=<n> argument as kernel " |
491 | "argument\n"); | 490 | "argument\n"); |
492 | 491 | ||
493 | return -ENODEV; | 492 | return -ENODEV; |
494 | } | 493 | } |
495 | 494 | ||
496 | major = register_chrdev(0, module_name, &rtlx_fops); | 495 | major = register_chrdev(0, module_name, &rtlx_fops); |
497 | if (major < 0) { | 496 | if (major < 0) { |
498 | printk(register_chrdev_failed); | 497 | printk(register_chrdev_failed); |
499 | return major; | 498 | return major; |
500 | } | 499 | } |
501 | 500 | ||
502 | /* initialise the wait queues */ | 501 | /* initialise the wait queues */ |
503 | for (i = 0; i < RTLX_CHANNELS; i++) { | 502 | for (i = 0; i < RTLX_CHANNELS; i++) { |
504 | init_waitqueue_head(&channel_wqs[i].rt_queue); | 503 | init_waitqueue_head(&channel_wqs[i].rt_queue); |
505 | init_waitqueue_head(&channel_wqs[i].lx_queue); | 504 | init_waitqueue_head(&channel_wqs[i].lx_queue); |
506 | atomic_set(&channel_wqs[i].in_open, 0); | 505 | atomic_set(&channel_wqs[i].in_open, 0); |
507 | mutex_init(&channel_wqs[i].mutex); | 506 | mutex_init(&channel_wqs[i].mutex); |
508 | 507 | ||
509 | dev = device_create(mt_class, NULL, MKDEV(major, i), NULL, | 508 | dev = device_create(mt_class, NULL, MKDEV(major, i), NULL, |
510 | "%s%d", module_name, i); | 509 | "%s%d", module_name, i); |
511 | if (IS_ERR(dev)) { | 510 | if (IS_ERR(dev)) { |
512 | err = PTR_ERR(dev); | 511 | err = PTR_ERR(dev); |
513 | goto out_chrdev; | 512 | goto out_chrdev; |
514 | } | 513 | } |
515 | } | 514 | } |
516 | 515 | ||
517 | /* set up notifiers */ | 516 | /* set up notifiers */ |
518 | notify.start = starting; | 517 | notify.start = starting; |
519 | notify.stop = stopping; | 518 | notify.stop = stopping; |
520 | vpe_notify(tclimit, ¬ify); | 519 | vpe_notify(tclimit, ¬ify); |
521 | 520 | ||
522 | if (cpu_has_vint) | 521 | if (cpu_has_vint) |
523 | set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch); | 522 | set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch); |
524 | else { | 523 | else { |
525 | pr_err("APRP RTLX init on non-vectored-interrupt processor\n"); | 524 | pr_err("APRP RTLX init on non-vectored-interrupt processor\n"); |
526 | err = -ENODEV; | 525 | err = -ENODEV; |
527 | goto out_chrdev; | 526 | goto out_chrdev; |
528 | } | 527 | } |
529 | 528 | ||
530 | rtlx_irq.dev_id = rtlx; | 529 | rtlx_irq.dev_id = rtlx; |
531 | setup_irq(rtlx_irq_num, &rtlx_irq); | 530 | setup_irq(rtlx_irq_num, &rtlx_irq); |
532 | 531 | ||
533 | return 0; | 532 | return 0; |
534 | 533 | ||
535 | out_chrdev: | 534 | out_chrdev: |
536 | for (i = 0; i < RTLX_CHANNELS; i++) | 535 | for (i = 0; i < RTLX_CHANNELS; i++) |
537 | device_destroy(mt_class, MKDEV(major, i)); | 536 | device_destroy(mt_class, MKDEV(major, i)); |
538 | 537 | ||
539 | return err; | 538 | return err; |
540 | } | 539 | } |
541 | 540 | ||
542 | static void __exit rtlx_module_exit(void) | 541 | static void __exit rtlx_module_exit(void) |
543 | { | 542 | { |
544 | int i; | 543 | int i; |
545 | 544 | ||
546 | for (i = 0; i < RTLX_CHANNELS; i++) | 545 | for (i = 0; i < RTLX_CHANNELS; i++) |
547 | device_destroy(mt_class, MKDEV(major, i)); | 546 | device_destroy(mt_class, MKDEV(major, i)); |
548 | 547 | ||
549 | unregister_chrdev(major, module_name); | 548 | unregister_chrdev(major, module_name); |
550 | } | 549 | } |
551 | 550 | ||
552 | module_init(rtlx_module_init); | 551 | module_init(rtlx_module_init); |
553 | module_exit(rtlx_module_exit); | 552 | module_exit(rtlx_module_exit); |
554 | 553 | ||
555 | MODULE_DESCRIPTION("MIPS RTLX"); | 554 | MODULE_DESCRIPTION("MIPS RTLX"); |
556 | MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc."); | 555 | MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc."); |
557 | MODULE_LICENSE("GPL"); | 556 | MODULE_LICENSE("GPL"); |
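All three rtlx.c conversions above follow mechanically from the macro's new shape. include/linux/wait.h is among the five changed files but is not shown in this excerpt; as a hedged sketch built on the ___wait_event() helper introduced earlier in this patch series, the new-style definition looks roughly like:

	/* Sketch: the macro expands to an expression yielding 0 on
	 * success or -ERESTARTSYS if interrupted, so callers can write
	 * ret = __wait_event_interruptible(wq, cond); directly. */
	#define __wait_event_interruptible(wq, condition)		\
		___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,	\
			      schedule())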
include/linux/tty.h
1 | #ifndef _LINUX_TTY_H | 1 | #ifndef _LINUX_TTY_H |
2 | #define _LINUX_TTY_H | 2 | #define _LINUX_TTY_H |
3 | 3 | ||
4 | #include <linux/fs.h> | 4 | #include <linux/fs.h> |
5 | #include <linux/major.h> | 5 | #include <linux/major.h> |
6 | #include <linux/termios.h> | 6 | #include <linux/termios.h> |
7 | #include <linux/workqueue.h> | 7 | #include <linux/workqueue.h> |
8 | #include <linux/tty_driver.h> | 8 | #include <linux/tty_driver.h> |
9 | #include <linux/tty_ldisc.h> | 9 | #include <linux/tty_ldisc.h> |
10 | #include <linux/mutex.h> | 10 | #include <linux/mutex.h> |
11 | #include <linux/tty_flags.h> | 11 | #include <linux/tty_flags.h> |
12 | #include <uapi/linux/tty.h> | 12 | #include <uapi/linux/tty.h> |
13 | #include <linux/rwsem.h> | 13 | #include <linux/rwsem.h> |
14 | #include <linux/llist.h> | 14 | #include <linux/llist.h> |
15 | 15 | ||
16 | 16 | ||
17 | 17 | ||
18 | /* | 18 | /* |
19 | * (Note: the *_driver.minor_start values 1, 64, 128, 192 are | 19 | * (Note: the *_driver.minor_start values 1, 64, 128, 192 are |
20 | * hardcoded at present.) | 20 | * hardcoded at present.) |
21 | */ | 21 | */ |
22 | #define NR_UNIX98_PTY_DEFAULT 4096 /* Default maximum for Unix98 ptys */ | 22 | #define NR_UNIX98_PTY_DEFAULT 4096 /* Default maximum for Unix98 ptys */ |
23 | #define NR_UNIX98_PTY_RESERVE 1024 /* Default reserve for main devpts */ | 23 | #define NR_UNIX98_PTY_RESERVE 1024 /* Default reserve for main devpts */ |
24 | #define NR_UNIX98_PTY_MAX (1 << MINORBITS) /* Absolute limit */ | 24 | #define NR_UNIX98_PTY_MAX (1 << MINORBITS) /* Absolute limit */ |
25 | 25 | ||
26 | /* | 26 | /* |
27 | * This character is the same as _POSIX_VDISABLE: it cannot be used as | 27 | * This character is the same as _POSIX_VDISABLE: it cannot be used as |
28 | * a c_cc[] character, but indicates that a particular special character | 28 | * a c_cc[] character, but indicates that a particular special character |
29 | * isn't in use (eg VINTR has no character etc) | 29 | * isn't in use (eg VINTR has no character etc) |
30 | */ | 30 | */ |
31 | #define __DISABLED_CHAR '\0' | 31 | #define __DISABLED_CHAR '\0' |
32 | 32 | ||
33 | struct tty_buffer { | 33 | struct tty_buffer { |
34 | union { | 34 | union { |
35 | struct tty_buffer *next; | 35 | struct tty_buffer *next; |
36 | struct llist_node free; | 36 | struct llist_node free; |
37 | }; | 37 | }; |
38 | int used; | 38 | int used; |
39 | int size; | 39 | int size; |
40 | int commit; | 40 | int commit; |
41 | int read; | 41 | int read; |
42 | /* Data points here */ | 42 | /* Data points here */ |
43 | unsigned long data[0]; | 43 | unsigned long data[0]; |
44 | }; | 44 | }; |
45 | 45 | ||
46 | static inline unsigned char *char_buf_ptr(struct tty_buffer *b, int ofs) | 46 | static inline unsigned char *char_buf_ptr(struct tty_buffer *b, int ofs) |
47 | { | 47 | { |
48 | return ((unsigned char *)b->data) + ofs; | 48 | return ((unsigned char *)b->data) + ofs; |
49 | } | 49 | } |
50 | 50 | ||
51 | static inline char *flag_buf_ptr(struct tty_buffer *b, int ofs) | 51 | static inline char *flag_buf_ptr(struct tty_buffer *b, int ofs) |
52 | { | 52 | { |
53 | return (char *)char_buf_ptr(b, ofs) + b->size; | 53 | return (char *)char_buf_ptr(b, ofs) + b->size; |
54 | } | 54 | } |
55 | 55 | ||
56 | struct tty_bufhead { | 56 | struct tty_bufhead { |
57 | struct tty_buffer *head; /* Queue head */ | 57 | struct tty_buffer *head; /* Queue head */ |
58 | struct work_struct work; | 58 | struct work_struct work; |
59 | struct mutex lock; | 59 | struct mutex lock; |
60 | atomic_t priority; | 60 | atomic_t priority; |
61 | struct tty_buffer sentinel; | 61 | struct tty_buffer sentinel; |
62 | struct llist_head free; /* Free queue head */ | 62 | struct llist_head free; /* Free queue head */ |
63 | atomic_t memory_used; /* In-use buffers excluding free list */ | 63 | atomic_t memory_used; /* In-use buffers excluding free list */ |
64 | struct tty_buffer *tail; /* Active buffer */ | 64 | struct tty_buffer *tail; /* Active buffer */ |
65 | }; | 65 | }; |
66 | /* | 66 | /* |
67 | * When a break, frame error, or parity error happens, these codes are | 67 | * When a break, frame error, or parity error happens, these codes are |
68 | * stuffed into the flags buffer. | 68 | * stuffed into the flags buffer. |
69 | */ | 69 | */ |
70 | #define TTY_NORMAL 0 | 70 | #define TTY_NORMAL 0 |
71 | #define TTY_BREAK 1 | 71 | #define TTY_BREAK 1 |
72 | #define TTY_FRAME 2 | 72 | #define TTY_FRAME 2 |
73 | #define TTY_PARITY 3 | 73 | #define TTY_PARITY 3 |
74 | #define TTY_OVERRUN 4 | 74 | #define TTY_OVERRUN 4 |
75 | 75 | ||
76 | #define INTR_CHAR(tty) ((tty)->termios.c_cc[VINTR]) | 76 | #define INTR_CHAR(tty) ((tty)->termios.c_cc[VINTR]) |
77 | #define QUIT_CHAR(tty) ((tty)->termios.c_cc[VQUIT]) | 77 | #define QUIT_CHAR(tty) ((tty)->termios.c_cc[VQUIT]) |
78 | #define ERASE_CHAR(tty) ((tty)->termios.c_cc[VERASE]) | 78 | #define ERASE_CHAR(tty) ((tty)->termios.c_cc[VERASE]) |
79 | #define KILL_CHAR(tty) ((tty)->termios.c_cc[VKILL]) | 79 | #define KILL_CHAR(tty) ((tty)->termios.c_cc[VKILL]) |
80 | #define EOF_CHAR(tty) ((tty)->termios.c_cc[VEOF]) | 80 | #define EOF_CHAR(tty) ((tty)->termios.c_cc[VEOF]) |
81 | #define TIME_CHAR(tty) ((tty)->termios.c_cc[VTIME]) | 81 | #define TIME_CHAR(tty) ((tty)->termios.c_cc[VTIME]) |
82 | #define MIN_CHAR(tty) ((tty)->termios.c_cc[VMIN]) | 82 | #define MIN_CHAR(tty) ((tty)->termios.c_cc[VMIN]) |
83 | #define SWTC_CHAR(tty) ((tty)->termios.c_cc[VSWTC]) | 83 | #define SWTC_CHAR(tty) ((tty)->termios.c_cc[VSWTC]) |
84 | #define START_CHAR(tty) ((tty)->termios.c_cc[VSTART]) | 84 | #define START_CHAR(tty) ((tty)->termios.c_cc[VSTART]) |
85 | #define STOP_CHAR(tty) ((tty)->termios.c_cc[VSTOP]) | 85 | #define STOP_CHAR(tty) ((tty)->termios.c_cc[VSTOP]) |
86 | #define SUSP_CHAR(tty) ((tty)->termios.c_cc[VSUSP]) | 86 | #define SUSP_CHAR(tty) ((tty)->termios.c_cc[VSUSP]) |
87 | #define EOL_CHAR(tty) ((tty)->termios.c_cc[VEOL]) | 87 | #define EOL_CHAR(tty) ((tty)->termios.c_cc[VEOL]) |
88 | #define REPRINT_CHAR(tty) ((tty)->termios.c_cc[VREPRINT]) | 88 | #define REPRINT_CHAR(tty) ((tty)->termios.c_cc[VREPRINT]) |
89 | #define DISCARD_CHAR(tty) ((tty)->termios.c_cc[VDISCARD]) | 89 | #define DISCARD_CHAR(tty) ((tty)->termios.c_cc[VDISCARD]) |
90 | #define WERASE_CHAR(tty) ((tty)->termios.c_cc[VWERASE]) | 90 | #define WERASE_CHAR(tty) ((tty)->termios.c_cc[VWERASE]) |
91 | #define LNEXT_CHAR(tty) ((tty)->termios.c_cc[VLNEXT]) | 91 | #define LNEXT_CHAR(tty) ((tty)->termios.c_cc[VLNEXT]) |
92 | #define EOL2_CHAR(tty) ((tty)->termios.c_cc[VEOL2]) | 92 | #define EOL2_CHAR(tty) ((tty)->termios.c_cc[VEOL2]) |
93 | 93 | ||
94 | #define _I_FLAG(tty, f) ((tty)->termios.c_iflag & (f)) | 94 | #define _I_FLAG(tty, f) ((tty)->termios.c_iflag & (f)) |
95 | #define _O_FLAG(tty, f) ((tty)->termios.c_oflag & (f)) | 95 | #define _O_FLAG(tty, f) ((tty)->termios.c_oflag & (f)) |
96 | #define _C_FLAG(tty, f) ((tty)->termios.c_cflag & (f)) | 96 | #define _C_FLAG(tty, f) ((tty)->termios.c_cflag & (f)) |
97 | #define _L_FLAG(tty, f) ((tty)->termios.c_lflag & (f)) | 97 | #define _L_FLAG(tty, f) ((tty)->termios.c_lflag & (f)) |
98 | 98 | ||
99 | #define I_IGNBRK(tty) _I_FLAG((tty), IGNBRK) | 99 | #define I_IGNBRK(tty) _I_FLAG((tty), IGNBRK) |
100 | #define I_BRKINT(tty) _I_FLAG((tty), BRKINT) | 100 | #define I_BRKINT(tty) _I_FLAG((tty), BRKINT) |
101 | #define I_IGNPAR(tty) _I_FLAG((tty), IGNPAR) | 101 | #define I_IGNPAR(tty) _I_FLAG((tty), IGNPAR) |
102 | #define I_PARMRK(tty) _I_FLAG((tty), PARMRK) | 102 | #define I_PARMRK(tty) _I_FLAG((tty), PARMRK) |
103 | #define I_INPCK(tty) _I_FLAG((tty), INPCK) | 103 | #define I_INPCK(tty) _I_FLAG((tty), INPCK) |
104 | #define I_ISTRIP(tty) _I_FLAG((tty), ISTRIP) | 104 | #define I_ISTRIP(tty) _I_FLAG((tty), ISTRIP) |
105 | #define I_INLCR(tty) _I_FLAG((tty), INLCR) | 105 | #define I_INLCR(tty) _I_FLAG((tty), INLCR) |
106 | #define I_IGNCR(tty) _I_FLAG((tty), IGNCR) | 106 | #define I_IGNCR(tty) _I_FLAG((tty), IGNCR) |
107 | #define I_ICRNL(tty) _I_FLAG((tty), ICRNL) | 107 | #define I_ICRNL(tty) _I_FLAG((tty), ICRNL) |
108 | #define I_IUCLC(tty) _I_FLAG((tty), IUCLC) | 108 | #define I_IUCLC(tty) _I_FLAG((tty), IUCLC) |
109 | #define I_IXON(tty) _I_FLAG((tty), IXON) | 109 | #define I_IXON(tty) _I_FLAG((tty), IXON) |
110 | #define I_IXANY(tty) _I_FLAG((tty), IXANY) | 110 | #define I_IXANY(tty) _I_FLAG((tty), IXANY) |
111 | #define I_IXOFF(tty) _I_FLAG((tty), IXOFF) | 111 | #define I_IXOFF(tty) _I_FLAG((tty), IXOFF) |
112 | #define I_IMAXBEL(tty) _I_FLAG((tty), IMAXBEL) | 112 | #define I_IMAXBEL(tty) _I_FLAG((tty), IMAXBEL) |
113 | #define I_IUTF8(tty) _I_FLAG((tty), IUTF8) | 113 | #define I_IUTF8(tty) _I_FLAG((tty), IUTF8) |
114 | 114 | ||
115 | #define O_OPOST(tty) _O_FLAG((tty), OPOST) | 115 | #define O_OPOST(tty) _O_FLAG((tty), OPOST) |
116 | #define O_OLCUC(tty) _O_FLAG((tty), OLCUC) | 116 | #define O_OLCUC(tty) _O_FLAG((tty), OLCUC) |
117 | #define O_ONLCR(tty) _O_FLAG((tty), ONLCR) | 117 | #define O_ONLCR(tty) _O_FLAG((tty), ONLCR) |
118 | #define O_OCRNL(tty) _O_FLAG((tty), OCRNL) | 118 | #define O_OCRNL(tty) _O_FLAG((tty), OCRNL) |
119 | #define O_ONOCR(tty) _O_FLAG((tty), ONOCR) | 119 | #define O_ONOCR(tty) _O_FLAG((tty), ONOCR) |
120 | #define O_ONLRET(tty) _O_FLAG((tty), ONLRET) | 120 | #define O_ONLRET(tty) _O_FLAG((tty), ONLRET) |
121 | #define O_OFILL(tty) _O_FLAG((tty), OFILL) | 121 | #define O_OFILL(tty) _O_FLAG((tty), OFILL) |
122 | #define O_OFDEL(tty) _O_FLAG((tty), OFDEL) | 122 | #define O_OFDEL(tty) _O_FLAG((tty), OFDEL) |
123 | #define O_NLDLY(tty) _O_FLAG((tty), NLDLY) | 123 | #define O_NLDLY(tty) _O_FLAG((tty), NLDLY) |
124 | #define O_CRDLY(tty) _O_FLAG((tty), CRDLY) | 124 | #define O_CRDLY(tty) _O_FLAG((tty), CRDLY) |
125 | #define O_TABDLY(tty) _O_FLAG((tty), TABDLY) | 125 | #define O_TABDLY(tty) _O_FLAG((tty), TABDLY) |
126 | #define O_BSDLY(tty) _O_FLAG((tty), BSDLY) | 126 | #define O_BSDLY(tty) _O_FLAG((tty), BSDLY) |
127 | #define O_VTDLY(tty) _O_FLAG((tty), VTDLY) | 127 | #define O_VTDLY(tty) _O_FLAG((tty), VTDLY) |
128 | #define O_FFDLY(tty) _O_FLAG((tty), FFDLY) | 128 | #define O_FFDLY(tty) _O_FLAG((tty), FFDLY) |
129 | 129 | ||
130 | #define C_BAUD(tty) _C_FLAG((tty), CBAUD) | 130 | #define C_BAUD(tty) _C_FLAG((tty), CBAUD) |
131 | #define C_CSIZE(tty) _C_FLAG((tty), CSIZE) | 131 | #define C_CSIZE(tty) _C_FLAG((tty), CSIZE) |
132 | #define C_CSTOPB(tty) _C_FLAG((tty), CSTOPB) | 132 | #define C_CSTOPB(tty) _C_FLAG((tty), CSTOPB) |
133 | #define C_CREAD(tty) _C_FLAG((tty), CREAD) | 133 | #define C_CREAD(tty) _C_FLAG((tty), CREAD) |
134 | #define C_PARENB(tty) _C_FLAG((tty), PARENB) | 134 | #define C_PARENB(tty) _C_FLAG((tty), PARENB) |
135 | #define C_PARODD(tty) _C_FLAG((tty), PARODD) | 135 | #define C_PARODD(tty) _C_FLAG((tty), PARODD) |
136 | #define C_HUPCL(tty) _C_FLAG((tty), HUPCL) | 136 | #define C_HUPCL(tty) _C_FLAG((tty), HUPCL) |
137 | #define C_CLOCAL(tty) _C_FLAG((tty), CLOCAL) | 137 | #define C_CLOCAL(tty) _C_FLAG((tty), CLOCAL) |
138 | #define C_CIBAUD(tty) _C_FLAG((tty), CIBAUD) | 138 | #define C_CIBAUD(tty) _C_FLAG((tty), CIBAUD) |
139 | #define C_CRTSCTS(tty) _C_FLAG((tty), CRTSCTS) | 139 | #define C_CRTSCTS(tty) _C_FLAG((tty), CRTSCTS) |
140 | 140 | ||
141 | #define L_ISIG(tty) _L_FLAG((tty), ISIG) | 141 | #define L_ISIG(tty) _L_FLAG((tty), ISIG) |
142 | #define L_ICANON(tty) _L_FLAG((tty), ICANON) | 142 | #define L_ICANON(tty) _L_FLAG((tty), ICANON) |
143 | #define L_XCASE(tty) _L_FLAG((tty), XCASE) | 143 | #define L_XCASE(tty) _L_FLAG((tty), XCASE) |
144 | #define L_ECHO(tty) _L_FLAG((tty), ECHO) | 144 | #define L_ECHO(tty) _L_FLAG((tty), ECHO) |
145 | #define L_ECHOE(tty) _L_FLAG((tty), ECHOE) | 145 | #define L_ECHOE(tty) _L_FLAG((tty), ECHOE) |
146 | #define L_ECHOK(tty) _L_FLAG((tty), ECHOK) | 146 | #define L_ECHOK(tty) _L_FLAG((tty), ECHOK) |
147 | #define L_ECHONL(tty) _L_FLAG((tty), ECHONL) | 147 | #define L_ECHONL(tty) _L_FLAG((tty), ECHONL) |
148 | #define L_NOFLSH(tty) _L_FLAG((tty), NOFLSH) | 148 | #define L_NOFLSH(tty) _L_FLAG((tty), NOFLSH) |
149 | #define L_TOSTOP(tty) _L_FLAG((tty), TOSTOP) | 149 | #define L_TOSTOP(tty) _L_FLAG((tty), TOSTOP) |
150 | #define L_ECHOCTL(tty) _L_FLAG((tty), ECHOCTL) | 150 | #define L_ECHOCTL(tty) _L_FLAG((tty), ECHOCTL) |
151 | #define L_ECHOPRT(tty) _L_FLAG((tty), ECHOPRT) | 151 | #define L_ECHOPRT(tty) _L_FLAG((tty), ECHOPRT) |
152 | #define L_ECHOKE(tty) _L_FLAG((tty), ECHOKE) | 152 | #define L_ECHOKE(tty) _L_FLAG((tty), ECHOKE) |
153 | #define L_FLUSHO(tty) _L_FLAG((tty), FLUSHO) | 153 | #define L_FLUSHO(tty) _L_FLAG((tty), FLUSHO) |
154 | #define L_PENDIN(tty) _L_FLAG((tty), PENDIN) | 154 | #define L_PENDIN(tty) _L_FLAG((tty), PENDIN) |
155 | #define L_IEXTEN(tty) _L_FLAG((tty), IEXTEN) | 155 | #define L_IEXTEN(tty) _L_FLAG((tty), IEXTEN) |
156 | #define L_EXTPROC(tty) _L_FLAG((tty), EXTPROC) | 156 | #define L_EXTPROC(tty) _L_FLAG((tty), EXTPROC) |
157 | 157 | ||
158 | struct device; | 158 | struct device; |
159 | struct signal_struct; | 159 | struct signal_struct; |
160 | 160 | ||
161 | /* | 161 | /* |
162 | * Port level information. Each device keeps its own port level information | 162 | * Port level information. Each device keeps its own port level information |
163 | * so provide a common structure for those ports wanting to use common support | 163 | * so provide a common structure for those ports wanting to use common support |
164 | * routines. | 164 | * routines. |
165 | * | 165 | * |
166 | * The tty port has a different lifetime to the tty so must be kept apart. | 166 | * The tty port has a different lifetime to the tty so must be kept apart. |
167 | * In addition be careful as tty -> port mappings are valid for the life | 167 | * In addition be careful as tty -> port mappings are valid for the life |
168 | * of the tty object but in many cases port -> tty mappings are valid only | 168 | * of the tty object but in many cases port -> tty mappings are valid only |
169 | * until a hangup so don't use the wrong path. | 169 | * until a hangup so don't use the wrong path. |
170 | */ | 170 | */ |
171 | 171 | ||
172 | struct tty_port; | 172 | struct tty_port; |
173 | 173 | ||
174 | struct tty_port_operations { | 174 | struct tty_port_operations { |
175 | /* Return 1 if the carrier is raised */ | 175 | /* Return 1 if the carrier is raised */ |
176 | int (*carrier_raised)(struct tty_port *port); | 176 | int (*carrier_raised)(struct tty_port *port); |
177 | /* Control the DTR line */ | 177 | /* Control the DTR line */ |
178 | void (*dtr_rts)(struct tty_port *port, int raise); | 178 | void (*dtr_rts)(struct tty_port *port, int raise); |
179 | /* Called when the last close completes or a hangup finishes | 179 | /* Called when the last close completes or a hangup finishes |
180 | IFF the port was initialized. Do not use to free resources. Called | 180 | IFF the port was initialized. Do not use to free resources. Called |
181 | under the port mutex to serialize against activate/shutdowns */ | 181 | under the port mutex to serialize against activate/shutdowns */ |
182 | void (*shutdown)(struct tty_port *port); | 182 | void (*shutdown)(struct tty_port *port); |
183 | void (*drop)(struct tty_port *port); | 183 | void (*drop)(struct tty_port *port); |
184 | /* Called under the port mutex from tty_port_open, serialized using | 184 | /* Called under the port mutex from tty_port_open, serialized using |
185 | the port mutex */ | 185 | the port mutex */ |
186 | /* FIXME: long term getting the tty argument *out* of this would be | 186 | /* FIXME: long term getting the tty argument *out* of this would be |
187 | good for consoles */ | 187 | good for consoles */ |
188 | int (*activate)(struct tty_port *port, struct tty_struct *tty); | 188 | int (*activate)(struct tty_port *port, struct tty_struct *tty); |
189 | /* Called on the final put of a port */ | 189 | /* Called on the final put of a port */ |
190 | void (*destruct)(struct tty_port *port); | 190 | void (*destruct)(struct tty_port *port); |
191 | }; | 191 | }; |
192 | 192 | ||
193 | struct tty_port { | 193 | struct tty_port { |
194 | struct tty_bufhead buf; /* Locked internally */ | 194 | struct tty_bufhead buf; /* Locked internally */ |
195 | struct tty_struct *tty; /* Back pointer */ | 195 | struct tty_struct *tty; /* Back pointer */ |
196 | struct tty_struct *itty; /* internal back ptr */ | 196 | struct tty_struct *itty; /* internal back ptr */ |
197 | const struct tty_port_operations *ops; /* Port operations */ | 197 | const struct tty_port_operations *ops; /* Port operations */ |
198 | spinlock_t lock; /* Lock protecting tty field */ | 198 | spinlock_t lock; /* Lock protecting tty field */ |
199 | int blocked_open; /* Waiting to open */ | 199 | int blocked_open; /* Waiting to open */ |
200 | int count; /* Usage count */ | 200 | int count; /* Usage count */ |
201 | wait_queue_head_t open_wait; /* Open waiters */ | 201 | wait_queue_head_t open_wait; /* Open waiters */ |
202 | wait_queue_head_t close_wait; /* Close waiters */ | 202 | wait_queue_head_t close_wait; /* Close waiters */ |
203 | wait_queue_head_t delta_msr_wait; /* Modem status change */ | 203 | wait_queue_head_t delta_msr_wait; /* Modem status change */ |
204 | unsigned long flags; /* TTY flags ASY_*/ | 204 | unsigned long flags; /* TTY flags ASY_*/ |
205 | unsigned char console:1, /* port is a console */ | 205 | unsigned char console:1, /* port is a console */ |
206 | low_latency:1; /* direct buffer flush */ | 206 | low_latency:1; /* direct buffer flush */ |
207 | struct mutex mutex; /* Locking */ | 207 | struct mutex mutex; /* Locking */ |
208 | struct mutex buf_mutex; /* Buffer alloc lock */ | 208 | struct mutex buf_mutex; /* Buffer alloc lock */ |
209 | unsigned char *xmit_buf; /* Optional buffer */ | 209 | unsigned char *xmit_buf; /* Optional buffer */ |
210 | unsigned int close_delay; /* Close port delay */ | 210 | unsigned int close_delay; /* Close port delay */ |
211 | unsigned int closing_wait; /* Delay for output */ | 211 | unsigned int closing_wait; /* Delay for output */ |
212 | int drain_delay; /* Set to zero if no pure time | 212 | int drain_delay; /* Set to zero if no pure time |
213 | based drain is needed else | 213 | based drain is needed else |
214 | set to size of fifo */ | 214 | set to size of fifo */ |
215 | struct kref kref; /* Ref counter */ | 215 | struct kref kref; /* Ref counter */ |
216 | }; | 216 | }; |
217 | 217 | ||
218 | /* | 218 | /* |
219 | * Where all of the state associated with a tty is kept while the tty | 219 | * Where all of the state associated with a tty is kept while the tty |
220 | * is open. Since the termios state should be kept even if the tty | 220 | * is open. Since the termios state should be kept even if the tty |
221 | * has been closed --- for things like the baud rate, etc --- it is | 221 | * has been closed --- for things like the baud rate, etc --- it is |
222 | * not stored here, but rather a pointer to the real state is stored | 222 | * not stored here, but rather a pointer to the real state is stored |
223 | * here. Possible the winsize structure should have the same | 223 | * here. Possible the winsize structure should have the same |
224 | * treatment, but (1) the default 80x24 is usually right and (2) it's | 224 | * treatment, but (1) the default 80x24 is usually right and (2) it's |
225 | * most often used by a windowing system, which will set the correct | 225 | * most often used by a windowing system, which will set the correct |
226 | * size each time the window is created or resized anyway. | 226 | * size each time the window is created or resized anyway. |
227 | * - TYT, 9/14/92 | 227 | * - TYT, 9/14/92 |
228 | */ | 228 | */ |
229 | 229 | ||
230 | struct tty_operations; | 230 | struct tty_operations; |
231 | 231 | ||
232 | struct tty_struct { | 232 | struct tty_struct { |
233 | int magic; | 233 | int magic; |
234 | struct kref kref; | 234 | struct kref kref; |
235 | struct device *dev; | 235 | struct device *dev; |
236 | struct tty_driver *driver; | 236 | struct tty_driver *driver; |
237 | const struct tty_operations *ops; | 237 | const struct tty_operations *ops; |
238 | int index; | 238 | int index; |
239 | 239 | ||
240 | /* Protects ldisc changes: Lock tty not pty */ | 240 | /* Protects ldisc changes: Lock tty not pty */ |
241 | struct ld_semaphore ldisc_sem; | 241 | struct ld_semaphore ldisc_sem; |
242 | struct tty_ldisc *ldisc; | 242 | struct tty_ldisc *ldisc; |
243 | 243 | ||
244 | struct mutex atomic_write_lock; | 244 | struct mutex atomic_write_lock; |
245 | struct mutex legacy_mutex; | 245 | struct mutex legacy_mutex; |
246 | struct mutex throttle_mutex; | 246 | struct mutex throttle_mutex; |
247 | struct rw_semaphore termios_rwsem; | 247 | struct rw_semaphore termios_rwsem; |
248 | struct mutex winsize_mutex; | 248 | struct mutex winsize_mutex; |
249 | spinlock_t ctrl_lock; | 249 | spinlock_t ctrl_lock; |
250 | /* Termios values are protected by the termios rwsem */ | 250 | /* Termios values are protected by the termios rwsem */ |
251 | struct ktermios termios, termios_locked; | 251 | struct ktermios termios, termios_locked; |
252 | struct termiox *termiox; /* May be NULL for unsupported */ | 252 | struct termiox *termiox; /* May be NULL for unsupported */ |
253 | char name[64]; | 253 | char name[64]; |
254 | struct pid *pgrp; /* Protected by ctrl lock */ | 254 | struct pid *pgrp; /* Protected by ctrl lock */ |
255 | struct pid *session; | 255 | struct pid *session; |
256 | unsigned long flags; | 256 | unsigned long flags; |
257 | int count; | 257 | int count; |
258 | struct winsize winsize; /* winsize_mutex */ | 258 | struct winsize winsize; /* winsize_mutex */ |
259 | unsigned char stopped:1, hw_stopped:1, flow_stopped:1, packet:1; | 259 | unsigned char stopped:1, hw_stopped:1, flow_stopped:1, packet:1; |
260 | unsigned char ctrl_status; /* ctrl_lock */ | 260 | unsigned char ctrl_status; /* ctrl_lock */ |
261 | unsigned int receive_room; /* Bytes free for queue */ | 261 | unsigned int receive_room; /* Bytes free for queue */ |
262 | int flow_change; | 262 | int flow_change; |
263 | 263 | ||
264 | struct tty_struct *link; | 264 | struct tty_struct *link; |
265 | struct fasync_struct *fasync; | 265 | struct fasync_struct *fasync; |
266 | int alt_speed; /* For magic substitution of 38400 bps */ | 266 | int alt_speed; /* For magic substitution of 38400 bps */ |
267 | wait_queue_head_t write_wait; | 267 | wait_queue_head_t write_wait; |
268 | wait_queue_head_t read_wait; | 268 | wait_queue_head_t read_wait; |
269 | struct work_struct hangup_work; | 269 | struct work_struct hangup_work; |
270 | void *disc_data; | 270 | void *disc_data; |
271 | void *driver_data; | 271 | void *driver_data; |
272 | struct list_head tty_files; | 272 | struct list_head tty_files; |
273 | 273 | ||
274 | #define N_TTY_BUF_SIZE 4096 | 274 | #define N_TTY_BUF_SIZE 4096 |
275 | 275 | ||
276 | unsigned char closing:1; | 276 | unsigned char closing:1; |
277 | unsigned char *write_buf; | 277 | unsigned char *write_buf; |
278 | int write_cnt; | 278 | int write_cnt; |
279 | /* If the tty has a pending do_SAK, queue it here - akpm */ | 279 | /* If the tty has a pending do_SAK, queue it here - akpm */ |
280 | struct work_struct SAK_work; | 280 | struct work_struct SAK_work; |
281 | struct tty_port *port; | 281 | struct tty_port *port; |
282 | }; | 282 | }; |
283 | 283 | ||
284 | /* Each of a tty's open files has private_data pointing to tty_file_private */ | 284 | /* Each of a tty's open files has private_data pointing to tty_file_private */ |
285 | struct tty_file_private { | 285 | struct tty_file_private { |
286 | struct tty_struct *tty; | 286 | struct tty_struct *tty; |
287 | struct file *file; | 287 | struct file *file; |
288 | struct list_head list; | 288 | struct list_head list; |
289 | }; | 289 | }; |
290 | 290 | ||
291 | /* tty magic number */ | 291 | /* tty magic number */ |
292 | #define TTY_MAGIC 0x5401 | 292 | #define TTY_MAGIC 0x5401 |
293 | 293 | ||
294 | /* | 294 | /* |
295 | * These bits are used in the flags field of the tty structure. | 295 | * These bits are used in the flags field of the tty structure. |
296 | * | 296 | * |
297 | * So that interrupts won't be able to mess up the queues, | 297 | * So that interrupts won't be able to mess up the queues, |
298 | * copy_to_cooked must be atomic with respect to itself, as must | 298 | * copy_to_cooked must be atomic with respect to itself, as must |
299 | * tty->write. Thus, you must use the inline functions set_bit() and | 299 | * tty->write. Thus, you must use the inline functions set_bit() and |
300 | * clear_bit() to make things atomic. | 300 | * clear_bit() to make things atomic. |
301 | */ | 301 | */ |
302 | #define TTY_THROTTLED 0 /* Call unthrottle() at threshold min */ | 302 | #define TTY_THROTTLED 0 /* Call unthrottle() at threshold min */ |
303 | #define TTY_IO_ERROR 1 /* Cause an I/O error (may be no ldisc too) */ | 303 | #define TTY_IO_ERROR 1 /* Cause an I/O error (may be no ldisc too) */ |
304 | #define TTY_OTHER_CLOSED 2 /* Other side (if any) has closed */ | 304 | #define TTY_OTHER_CLOSED 2 /* Other side (if any) has closed */ |
305 | #define TTY_EXCLUSIVE 3 /* Exclusive open mode */ | 305 | #define TTY_EXCLUSIVE 3 /* Exclusive open mode */ |
306 | #define TTY_DEBUG 4 /* Debugging */ | 306 | #define TTY_DEBUG 4 /* Debugging */ |
307 | #define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */ | 307 | #define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */ |
308 | #define TTY_CLOSING 7 /* ->close() in progress */ | 308 | #define TTY_CLOSING 7 /* ->close() in progress */ |
309 | #define TTY_LDISC_OPEN 11 /* Line discipline is open */ | 309 | #define TTY_LDISC_OPEN 11 /* Line discipline is open */ |
310 | #define TTY_PTY_LOCK 16 /* pty private */ | 310 | #define TTY_PTY_LOCK 16 /* pty private */ |
311 | #define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */ | 311 | #define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */ |
312 | #define TTY_HUPPED 18 /* Post driver->hangup() */ | 312 | #define TTY_HUPPED 18 /* Post driver->hangup() */ |
313 | #define TTY_HUPPING 21 /* ->hangup() in progress */ | 313 | #define TTY_HUPPING 21 /* ->hangup() in progress */ |
314 | #define TTY_LDISC_HALTED 22 /* Line discipline is halted */ | 314 | #define TTY_LDISC_HALTED 22 /* Line discipline is halted */ |
315 | 315 | ||
316 | #define TTY_WRITE_FLUSH(tty) tty_write_flush((tty)) | 316 | #define TTY_WRITE_FLUSH(tty) tty_write_flush((tty)) |
317 | 317 | ||
318 | /* Values for tty->flow_change */ | 318 | /* Values for tty->flow_change */ |
319 | #define TTY_THROTTLE_SAFE 1 | 319 | #define TTY_THROTTLE_SAFE 1 |
320 | #define TTY_UNTHROTTLE_SAFE 2 | 320 | #define TTY_UNTHROTTLE_SAFE 2 |
321 | 321 | ||
322 | static inline void __tty_set_flow_change(struct tty_struct *tty, int val) | 322 | static inline void __tty_set_flow_change(struct tty_struct *tty, int val) |
323 | { | 323 | { |
324 | tty->flow_change = val; | 324 | tty->flow_change = val; |
325 | } | 325 | } |
326 | 326 | ||
327 | static inline void tty_set_flow_change(struct tty_struct *tty, int val) | 327 | static inline void tty_set_flow_change(struct tty_struct *tty, int val) |
328 | { | 328 | { |
329 | tty->flow_change = val; | 329 | tty->flow_change = val; |
330 | smp_mb(); | 330 | smp_mb(); |
331 | } | 331 | } |
332 | 332 | ||
333 | #ifdef CONFIG_TTY | 333 | #ifdef CONFIG_TTY |
334 | extern void console_init(void); | 334 | extern void console_init(void); |
335 | extern void tty_kref_put(struct tty_struct *tty); | 335 | extern void tty_kref_put(struct tty_struct *tty); |
336 | extern struct pid *tty_get_pgrp(struct tty_struct *tty); | 336 | extern struct pid *tty_get_pgrp(struct tty_struct *tty); |
337 | extern void tty_vhangup_self(void); | 337 | extern void tty_vhangup_self(void); |
338 | extern void disassociate_ctty(int priv); | 338 | extern void disassociate_ctty(int priv); |
339 | extern dev_t tty_devnum(struct tty_struct *tty); | 339 | extern dev_t tty_devnum(struct tty_struct *tty); |
340 | extern void proc_clear_tty(struct task_struct *p); | 340 | extern void proc_clear_tty(struct task_struct *p); |
341 | extern struct tty_struct *get_current_tty(void); | 341 | extern struct tty_struct *get_current_tty(void); |
342 | /* tty_io.c */ | 342 | /* tty_io.c */ |
343 | extern int __init tty_init(void); | 343 | extern int __init tty_init(void); |
344 | #else | 344 | #else |
345 | static inline void console_init(void) | 345 | static inline void console_init(void) |
346 | { } | 346 | { } |
347 | static inline void tty_kref_put(struct tty_struct *tty) | 347 | static inline void tty_kref_put(struct tty_struct *tty) |
348 | { } | 348 | { } |
349 | static inline struct pid *tty_get_pgrp(struct tty_struct *tty) | 349 | static inline struct pid *tty_get_pgrp(struct tty_struct *tty) |
350 | { return NULL; } | 350 | { return NULL; } |
351 | static inline void tty_vhangup_self(void) | 351 | static inline void tty_vhangup_self(void) |
352 | { } | 352 | { } |
353 | static inline void disassociate_ctty(int priv) | 353 | static inline void disassociate_ctty(int priv) |
354 | { } | 354 | { } |
355 | static inline dev_t tty_devnum(struct tty_struct *tty) | 355 | static inline dev_t tty_devnum(struct tty_struct *tty) |
356 | { return 0; } | 356 | { return 0; } |
357 | static inline void proc_clear_tty(struct task_struct *p) | 357 | static inline void proc_clear_tty(struct task_struct *p) |
358 | { } | 358 | { } |
359 | static inline struct tty_struct *get_current_tty(void) | 359 | static inline struct tty_struct *get_current_tty(void) |
360 | { return NULL; } | 360 | { return NULL; } |
361 | /* tty_io.c */ | 361 | /* tty_io.c */ |
362 | static inline int __init tty_init(void) | 362 | static inline int __init tty_init(void) |
363 | { return 0; } | 363 | { return 0; } |
364 | #endif | 364 | #endif |
365 | 365 | ||
366 | extern void tty_write_flush(struct tty_struct *); | 366 | extern void tty_write_flush(struct tty_struct *); |
367 | 367 | ||
368 | extern struct ktermios tty_std_termios; | 368 | extern struct ktermios tty_std_termios; |
369 | 369 | ||
370 | extern int vcs_init(void); | 370 | extern int vcs_init(void); |
371 | 371 | ||
372 | extern struct class *tty_class; | 372 | extern struct class *tty_class; |
373 | 373 | ||
374 | /** | 374 | /** |
375 | * tty_kref_get - get a tty reference | 375 | * tty_kref_get - get a tty reference |
376 | * @tty: tty device | 376 | * @tty: tty device |
377 | * | 377 | * |
378 | * Return a new reference to a tty object. The caller must hold | 378 | * Return a new reference to a tty object. The caller must hold |
379 | * sufficient locks/counts to ensure that its existing reference cannot | 379 | * sufficient locks/counts to ensure that its existing reference cannot |
380 | * go away. | 380 | * go away. |
381 | */ | 381 | */ |
382 | 382 | ||
383 | static inline struct tty_struct *tty_kref_get(struct tty_struct *tty) | 383 | static inline struct tty_struct *tty_kref_get(struct tty_struct *tty) |
384 | { | 384 | { |
385 | if (tty) | 385 | if (tty) |
386 | kref_get(&tty->kref); | 386 | kref_get(&tty->kref); |
387 | return tty; | 387 | return tty; |
388 | } | 388 | } |
389 | 389 | ||
390 | extern int tty_paranoia_check(struct tty_struct *tty, struct inode *inode, | 390 | extern int tty_paranoia_check(struct tty_struct *tty, struct inode *inode, |
391 | const char *routine); | 391 | const char *routine); |
392 | extern char *tty_name(struct tty_struct *tty, char *buf); | 392 | extern char *tty_name(struct tty_struct *tty, char *buf); |
393 | extern void tty_wait_until_sent(struct tty_struct *tty, long timeout); | 393 | extern void tty_wait_until_sent(struct tty_struct *tty, long timeout); |
394 | extern int tty_check_change(struct tty_struct *tty); | 394 | extern int tty_check_change(struct tty_struct *tty); |
395 | extern void stop_tty(struct tty_struct *tty); | 395 | extern void stop_tty(struct tty_struct *tty); |
396 | extern void start_tty(struct tty_struct *tty); | 396 | extern void start_tty(struct tty_struct *tty); |
397 | extern int tty_register_driver(struct tty_driver *driver); | 397 | extern int tty_register_driver(struct tty_driver *driver); |
398 | extern int tty_unregister_driver(struct tty_driver *driver); | 398 | extern int tty_unregister_driver(struct tty_driver *driver); |
399 | extern struct device *tty_register_device(struct tty_driver *driver, | 399 | extern struct device *tty_register_device(struct tty_driver *driver, |
400 | unsigned index, struct device *dev); | 400 | unsigned index, struct device *dev); |
401 | extern struct device *tty_register_device_attr(struct tty_driver *driver, | 401 | extern struct device *tty_register_device_attr(struct tty_driver *driver, |
402 | unsigned index, struct device *device, | 402 | unsigned index, struct device *device, |
403 | void *drvdata, | 403 | void *drvdata, |
404 | const struct attribute_group **attr_grp); | 404 | const struct attribute_group **attr_grp); |
405 | extern void tty_unregister_device(struct tty_driver *driver, unsigned index); | 405 | extern void tty_unregister_device(struct tty_driver *driver, unsigned index); |
406 | extern int tty_read_raw_data(struct tty_struct *tty, unsigned char *bufp, | 406 | extern int tty_read_raw_data(struct tty_struct *tty, unsigned char *bufp, |
407 | int buflen); | 407 | int buflen); |
408 | extern void tty_write_message(struct tty_struct *tty, char *msg); | 408 | extern void tty_write_message(struct tty_struct *tty, char *msg); |
409 | extern int tty_put_char(struct tty_struct *tty, unsigned char c); | 409 | extern int tty_put_char(struct tty_struct *tty, unsigned char c); |
410 | extern int tty_chars_in_buffer(struct tty_struct *tty); | 410 | extern int tty_chars_in_buffer(struct tty_struct *tty); |
411 | extern int tty_write_room(struct tty_struct *tty); | 411 | extern int tty_write_room(struct tty_struct *tty); |
412 | extern void tty_driver_flush_buffer(struct tty_struct *tty); | 412 | extern void tty_driver_flush_buffer(struct tty_struct *tty); |
413 | extern void tty_throttle(struct tty_struct *tty); | 413 | extern void tty_throttle(struct tty_struct *tty); |
414 | extern void tty_unthrottle(struct tty_struct *tty); | 414 | extern void tty_unthrottle(struct tty_struct *tty); |
415 | extern int tty_throttle_safe(struct tty_struct *tty); | 415 | extern int tty_throttle_safe(struct tty_struct *tty); |
416 | extern int tty_unthrottle_safe(struct tty_struct *tty); | 416 | extern int tty_unthrottle_safe(struct tty_struct *tty); |
417 | extern int tty_do_resize(struct tty_struct *tty, struct winsize *ws); | 417 | extern int tty_do_resize(struct tty_struct *tty, struct winsize *ws); |
418 | extern void tty_driver_remove_tty(struct tty_driver *driver, | 418 | extern void tty_driver_remove_tty(struct tty_driver *driver, |
419 | struct tty_struct *tty); | 419 | struct tty_struct *tty); |
420 | extern void tty_free_termios(struct tty_struct *tty); | 420 | extern void tty_free_termios(struct tty_struct *tty); |
421 | extern int is_current_pgrp_orphaned(void); | 421 | extern int is_current_pgrp_orphaned(void); |
422 | extern int is_ignored(int sig); | 422 | extern int is_ignored(int sig); |
423 | extern int tty_signal(int sig, struct tty_struct *tty); | 423 | extern int tty_signal(int sig, struct tty_struct *tty); |
424 | extern void tty_hangup(struct tty_struct *tty); | 424 | extern void tty_hangup(struct tty_struct *tty); |
425 | extern void tty_vhangup(struct tty_struct *tty); | 425 | extern void tty_vhangup(struct tty_struct *tty); |
426 | extern void tty_vhangup_locked(struct tty_struct *tty); | 426 | extern void tty_vhangup_locked(struct tty_struct *tty); |
427 | extern void tty_unhangup(struct file *filp); | 427 | extern void tty_unhangup(struct file *filp); |
428 | extern int tty_hung_up_p(struct file *filp); | 428 | extern int tty_hung_up_p(struct file *filp); |
429 | extern void do_SAK(struct tty_struct *tty); | 429 | extern void do_SAK(struct tty_struct *tty); |
430 | extern void __do_SAK(struct tty_struct *tty); | 430 | extern void __do_SAK(struct tty_struct *tty); |
431 | extern void no_tty(void); | 431 | extern void no_tty(void); |
432 | extern void tty_flush_to_ldisc(struct tty_struct *tty); | 432 | extern void tty_flush_to_ldisc(struct tty_struct *tty); |
433 | extern void tty_buffer_free_all(struct tty_port *port); | 433 | extern void tty_buffer_free_all(struct tty_port *port); |
434 | extern void tty_buffer_flush(struct tty_struct *tty); | 434 | extern void tty_buffer_flush(struct tty_struct *tty); |
435 | extern void tty_buffer_init(struct tty_port *port); | 435 | extern void tty_buffer_init(struct tty_port *port); |
436 | extern speed_t tty_termios_baud_rate(struct ktermios *termios); | 436 | extern speed_t tty_termios_baud_rate(struct ktermios *termios); |
437 | extern speed_t tty_termios_input_baud_rate(struct ktermios *termios); | 437 | extern speed_t tty_termios_input_baud_rate(struct ktermios *termios); |
438 | extern void tty_termios_encode_baud_rate(struct ktermios *termios, | 438 | extern void tty_termios_encode_baud_rate(struct ktermios *termios, |
439 | speed_t ibaud, speed_t obaud); | 439 | speed_t ibaud, speed_t obaud); |
440 | extern void tty_encode_baud_rate(struct tty_struct *tty, | 440 | extern void tty_encode_baud_rate(struct tty_struct *tty, |
441 | speed_t ibaud, speed_t obaud); | 441 | speed_t ibaud, speed_t obaud); |
442 | 442 | ||
443 | /** | 443 | /** |
444 | * tty_get_baud_rate - get tty bit rates | 444 | * tty_get_baud_rate - get tty bit rates |
445 | * @tty: tty to query | 445 | * @tty: tty to query |
446 | * | 446 | * |
447 | * Returns the baud rate as an integer for this terminal. The | 447 | * Returns the baud rate as an integer for this terminal. The |
448 | * termios lock must be held by the caller and the terminal bit | 448 | * termios lock must be held by the caller and the terminal bit |
449 | * flags may be updated. | 449 | * flags may be updated. |
450 | * | 450 | * |
451 | * Locking: none beyond the termios lock the caller already holds | 451 | * Locking: none beyond the termios lock the caller already holds |
452 | */ | 452 | */ |
453 | static inline speed_t tty_get_baud_rate(struct tty_struct *tty) | 453 | static inline speed_t tty_get_baud_rate(struct tty_struct *tty) |
454 | { | 454 | { |
455 | return tty_termios_baud_rate(&tty->termios); | 455 | return tty_termios_baud_rate(&tty->termios); |
456 | } | 456 | } |
457 | 457 | ||
458 | extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old); | 458 | extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old); |
459 | extern int tty_termios_hw_change(struct ktermios *a, struct ktermios *b); | 459 | extern int tty_termios_hw_change(struct ktermios *a, struct ktermios *b); |
460 | extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt); | 460 | extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt); |
461 | 461 | ||
462 | extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *); | 462 | extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *); |
463 | extern void tty_ldisc_deref(struct tty_ldisc *); | 463 | extern void tty_ldisc_deref(struct tty_ldisc *); |
464 | extern struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *); | 464 | extern struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *); |
465 | extern void tty_ldisc_hangup(struct tty_struct *tty); | 465 | extern void tty_ldisc_hangup(struct tty_struct *tty); |
466 | extern const struct file_operations tty_ldiscs_proc_fops; | 466 | extern const struct file_operations tty_ldiscs_proc_fops; |
467 | 467 | ||
468 | extern void tty_wakeup(struct tty_struct *tty); | 468 | extern void tty_wakeup(struct tty_struct *tty); |
469 | extern void tty_ldisc_flush(struct tty_struct *tty); | 469 | extern void tty_ldisc_flush(struct tty_struct *tty); |
470 | 470 | ||
471 | extern long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg); | 471 | extern long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg); |
472 | extern int tty_mode_ioctl(struct tty_struct *tty, struct file *file, | 472 | extern int tty_mode_ioctl(struct tty_struct *tty, struct file *file, |
473 | unsigned int cmd, unsigned long arg); | 473 | unsigned int cmd, unsigned long arg); |
474 | extern int tty_perform_flush(struct tty_struct *tty, unsigned long arg); | 474 | extern int tty_perform_flush(struct tty_struct *tty, unsigned long arg); |
475 | extern void tty_default_fops(struct file_operations *fops); | 475 | extern void tty_default_fops(struct file_operations *fops); |
476 | extern struct tty_struct *alloc_tty_struct(void); | 476 | extern struct tty_struct *alloc_tty_struct(void); |
477 | extern int tty_alloc_file(struct file *file); | 477 | extern int tty_alloc_file(struct file *file); |
478 | extern void tty_add_file(struct tty_struct *tty, struct file *file); | 478 | extern void tty_add_file(struct tty_struct *tty, struct file *file); |
479 | extern void tty_free_file(struct file *file); | 479 | extern void tty_free_file(struct file *file); |
480 | extern void free_tty_struct(struct tty_struct *tty); | 480 | extern void free_tty_struct(struct tty_struct *tty); |
481 | extern void initialize_tty_struct(struct tty_struct *tty, | 481 | extern void initialize_tty_struct(struct tty_struct *tty, |
482 | struct tty_driver *driver, int idx); | 482 | struct tty_driver *driver, int idx); |
483 | extern void deinitialize_tty_struct(struct tty_struct *tty); | 483 | extern void deinitialize_tty_struct(struct tty_struct *tty); |
484 | extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx); | 484 | extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx); |
485 | extern int tty_release(struct inode *inode, struct file *filp); | 485 | extern int tty_release(struct inode *inode, struct file *filp); |
486 | extern int tty_init_termios(struct tty_struct *tty); | 486 | extern int tty_init_termios(struct tty_struct *tty); |
487 | extern int tty_standard_install(struct tty_driver *driver, | 487 | extern int tty_standard_install(struct tty_driver *driver, |
488 | struct tty_struct *tty); | 488 | struct tty_struct *tty); |
489 | 489 | ||
490 | extern struct tty_struct *tty_pair_get_tty(struct tty_struct *tty); | 490 | extern struct tty_struct *tty_pair_get_tty(struct tty_struct *tty); |
491 | extern struct tty_struct *tty_pair_get_pty(struct tty_struct *tty); | 491 | extern struct tty_struct *tty_pair_get_pty(struct tty_struct *tty); |
492 | 492 | ||
493 | extern struct mutex tty_mutex; | 493 | extern struct mutex tty_mutex; |
494 | extern spinlock_t tty_files_lock; | 494 | extern spinlock_t tty_files_lock; |
495 | 495 | ||
496 | extern void tty_write_unlock(struct tty_struct *tty); | 496 | extern void tty_write_unlock(struct tty_struct *tty); |
497 | extern int tty_write_lock(struct tty_struct *tty, int ndelay); | 497 | extern int tty_write_lock(struct tty_struct *tty, int ndelay); |
498 | #define tty_is_writelocked(tty) (mutex_is_locked(&tty->atomic_write_lock)) | 498 | #define tty_is_writelocked(tty) (mutex_is_locked(&tty->atomic_write_lock)) |
499 | 499 | ||
500 | extern void tty_port_init(struct tty_port *port); | 500 | extern void tty_port_init(struct tty_port *port); |
501 | extern void tty_port_link_device(struct tty_port *port, | 501 | extern void tty_port_link_device(struct tty_port *port, |
502 | struct tty_driver *driver, unsigned index); | 502 | struct tty_driver *driver, unsigned index); |
503 | extern struct device *tty_port_register_device(struct tty_port *port, | 503 | extern struct device *tty_port_register_device(struct tty_port *port, |
504 | struct tty_driver *driver, unsigned index, | 504 | struct tty_driver *driver, unsigned index, |
505 | struct device *device); | 505 | struct device *device); |
506 | extern struct device *tty_port_register_device_attr(struct tty_port *port, | 506 | extern struct device *tty_port_register_device_attr(struct tty_port *port, |
507 | struct tty_driver *driver, unsigned index, | 507 | struct tty_driver *driver, unsigned index, |
508 | struct device *device, void *drvdata, | 508 | struct device *device, void *drvdata, |
509 | const struct attribute_group **attr_grp); | 509 | const struct attribute_group **attr_grp); |
510 | extern int tty_port_alloc_xmit_buf(struct tty_port *port); | 510 | extern int tty_port_alloc_xmit_buf(struct tty_port *port); |
511 | extern void tty_port_free_xmit_buf(struct tty_port *port); | 511 | extern void tty_port_free_xmit_buf(struct tty_port *port); |
512 | extern void tty_port_destroy(struct tty_port *port); | 512 | extern void tty_port_destroy(struct tty_port *port); |
513 | extern void tty_port_put(struct tty_port *port); | 513 | extern void tty_port_put(struct tty_port *port); |
514 | 514 | ||
515 | static inline struct tty_port *tty_port_get(struct tty_port *port) | 515 | static inline struct tty_port *tty_port_get(struct tty_port *port) |
516 | { | 516 | { |
517 | if (port) | 517 | if (port) |
518 | kref_get(&port->kref); | 518 | kref_get(&port->kref); |
519 | return port; | 519 | return port; |
520 | } | 520 | } |
521 | 521 | ||
522 | /* Return true if CTS flow control is enabled. */ | 522 | /* Return true if CTS flow control is enabled. */ |
523 | static inline bool tty_port_cts_enabled(struct tty_port *port) | 523 | static inline bool tty_port_cts_enabled(struct tty_port *port) |
524 | { | 524 | { |
525 | return port->flags & ASYNC_CTS_FLOW; | 525 | return port->flags & ASYNC_CTS_FLOW; |
526 | } | 526 | } |
527 | 527 | ||
528 | extern struct tty_struct *tty_port_tty_get(struct tty_port *port); | 528 | extern struct tty_struct *tty_port_tty_get(struct tty_port *port); |
529 | extern void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty); | 529 | extern void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty); |
530 | extern int tty_port_carrier_raised(struct tty_port *port); | 530 | extern int tty_port_carrier_raised(struct tty_port *port); |
531 | extern void tty_port_raise_dtr_rts(struct tty_port *port); | 531 | extern void tty_port_raise_dtr_rts(struct tty_port *port); |
532 | extern void tty_port_lower_dtr_rts(struct tty_port *port); | 532 | extern void tty_port_lower_dtr_rts(struct tty_port *port); |
533 | extern void tty_port_hangup(struct tty_port *port); | 533 | extern void tty_port_hangup(struct tty_port *port); |
534 | extern void tty_port_tty_hangup(struct tty_port *port, bool check_clocal); | 534 | extern void tty_port_tty_hangup(struct tty_port *port, bool check_clocal); |
535 | extern void tty_port_tty_wakeup(struct tty_port *port); | 535 | extern void tty_port_tty_wakeup(struct tty_port *port); |
536 | extern int tty_port_block_til_ready(struct tty_port *port, | 536 | extern int tty_port_block_til_ready(struct tty_port *port, |
537 | struct tty_struct *tty, struct file *filp); | 537 | struct tty_struct *tty, struct file *filp); |
538 | extern int tty_port_close_start(struct tty_port *port, | 538 | extern int tty_port_close_start(struct tty_port *port, |
539 | struct tty_struct *tty, struct file *filp); | 539 | struct tty_struct *tty, struct file *filp); |
540 | extern void tty_port_close_end(struct tty_port *port, struct tty_struct *tty); | 540 | extern void tty_port_close_end(struct tty_port *port, struct tty_struct *tty); |
541 | extern void tty_port_close(struct tty_port *port, | 541 | extern void tty_port_close(struct tty_port *port, |
542 | struct tty_struct *tty, struct file *filp); | 542 | struct tty_struct *tty, struct file *filp); |
543 | extern int tty_port_install(struct tty_port *port, struct tty_driver *driver, | 543 | extern int tty_port_install(struct tty_port *port, struct tty_driver *driver, |
544 | struct tty_struct *tty); | 544 | struct tty_struct *tty); |
545 | extern int tty_port_open(struct tty_port *port, | 545 | extern int tty_port_open(struct tty_port *port, |
546 | struct tty_struct *tty, struct file *filp); | 546 | struct tty_struct *tty, struct file *filp); |
547 | static inline int tty_port_users(struct tty_port *port) | 547 | static inline int tty_port_users(struct tty_port *port) |
548 | { | 548 | { |
549 | return port->count + port->blocked_open; | 549 | return port->count + port->blocked_open; |
550 | } | 550 | } |
551 | 551 | ||
552 | extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc); | 552 | extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc); |
553 | extern int tty_unregister_ldisc(int disc); | 553 | extern int tty_unregister_ldisc(int disc); |
554 | extern int tty_set_ldisc(struct tty_struct *tty, int ldisc); | 554 | extern int tty_set_ldisc(struct tty_struct *tty, int ldisc); |
555 | extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty); | 555 | extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty); |
556 | extern void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty); | 556 | extern void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty); |
557 | extern void tty_ldisc_init(struct tty_struct *tty); | 557 | extern void tty_ldisc_init(struct tty_struct *tty); |
558 | extern void tty_ldisc_deinit(struct tty_struct *tty); | 558 | extern void tty_ldisc_deinit(struct tty_struct *tty); |
559 | extern void tty_ldisc_begin(void); | 559 | extern void tty_ldisc_begin(void); |
560 | 560 | ||
561 | static inline int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p, | 561 | static inline int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p, |
562 | char *f, int count) | 562 | char *f, int count) |
563 | { | 563 | { |
564 | if (ld->ops->receive_buf2) | 564 | if (ld->ops->receive_buf2) |
565 | count = ld->ops->receive_buf2(ld->tty, p, f, count); | 565 | count = ld->ops->receive_buf2(ld->tty, p, f, count); |
566 | else { | 566 | else { |
567 | count = min_t(int, count, ld->tty->receive_room); | 567 | count = min_t(int, count, ld->tty->receive_room); |
568 | if (count) | 568 | if (count) |
569 | ld->ops->receive_buf(ld->tty, p, f, count); | 569 | ld->ops->receive_buf(ld->tty, p, f, count); |
570 | } | 570 | } |
571 | return count; | 571 | return count; |
572 | } | 572 | } |
573 | 573 | ||
574 | 574 | ||
575 | /* n_tty.c */ | 575 | /* n_tty.c */ |
576 | extern struct tty_ldisc_ops tty_ldisc_N_TTY; | 576 | extern struct tty_ldisc_ops tty_ldisc_N_TTY; |
577 | extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops); | 577 | extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops); |
578 | 578 | ||
579 | /* tty_audit.c */ | 579 | /* tty_audit.c */ |
580 | #ifdef CONFIG_AUDIT | 580 | #ifdef CONFIG_AUDIT |
581 | extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, | 581 | extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, |
582 | size_t size, unsigned icanon); | 582 | size_t size, unsigned icanon); |
583 | extern void tty_audit_exit(void); | 583 | extern void tty_audit_exit(void); |
584 | extern void tty_audit_fork(struct signal_struct *sig); | 584 | extern void tty_audit_fork(struct signal_struct *sig); |
585 | extern void tty_audit_tiocsti(struct tty_struct *tty, char ch); | 585 | extern void tty_audit_tiocsti(struct tty_struct *tty, char ch); |
586 | extern void tty_audit_push(struct tty_struct *tty); | 586 | extern void tty_audit_push(struct tty_struct *tty); |
587 | extern int tty_audit_push_current(void); | 587 | extern int tty_audit_push_current(void); |
588 | #else | 588 | #else |
589 | static inline void tty_audit_add_data(struct tty_struct *tty, | 589 | static inline void tty_audit_add_data(struct tty_struct *tty, |
590 | unsigned char *data, size_t size, unsigned icanon) | 590 | unsigned char *data, size_t size, unsigned icanon) |
591 | { | 591 | { |
592 | } | 592 | } |
593 | static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch) | 593 | static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch) |
594 | { | 594 | { |
595 | } | 595 | } |
596 | static inline void tty_audit_exit(void) | 596 | static inline void tty_audit_exit(void) |
597 | { | 597 | { |
598 | } | 598 | } |
599 | static inline void tty_audit_fork(struct signal_struct *sig) | 599 | static inline void tty_audit_fork(struct signal_struct *sig) |
600 | { | 600 | { |
601 | } | 601 | } |
602 | static inline void tty_audit_push(struct tty_struct *tty) | 602 | static inline void tty_audit_push(struct tty_struct *tty) |
603 | { | 603 | { |
604 | } | 604 | } |
605 | static inline int tty_audit_push_current(void) | 605 | static inline int tty_audit_push_current(void) |
606 | { | 606 | { |
607 | return 0; | 607 | return 0; |
608 | } | 608 | } |
609 | #endif | 609 | #endif |
610 | 610 | ||
611 | /* tty_ioctl.c */ | 611 | /* tty_ioctl.c */ |
612 | extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file, | 612 | extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file, |
613 | unsigned int cmd, unsigned long arg); | 613 | unsigned int cmd, unsigned long arg); |
614 | extern long n_tty_compat_ioctl_helper(struct tty_struct *tty, struct file *file, | 614 | extern long n_tty_compat_ioctl_helper(struct tty_struct *tty, struct file *file, |
615 | unsigned int cmd, unsigned long arg); | 615 | unsigned int cmd, unsigned long arg); |
616 | 616 | ||
617 | /* serial.c */ | 617 | /* serial.c */ |
618 | 618 | ||
619 | extern void serial_console_init(void); | 619 | extern void serial_console_init(void); |
620 | 620 | ||
621 | /* pcxx.c */ | 621 | /* pcxx.c */ |
622 | 622 | ||
623 | extern int pcxe_open(struct tty_struct *tty, struct file *filp); | 623 | extern int pcxe_open(struct tty_struct *tty, struct file *filp); |
624 | 624 | ||
625 | /* vt.c */ | 625 | /* vt.c */ |
626 | 626 | ||
627 | extern int vt_ioctl(struct tty_struct *tty, | 627 | extern int vt_ioctl(struct tty_struct *tty, |
628 | unsigned int cmd, unsigned long arg); | 628 | unsigned int cmd, unsigned long arg); |
629 | 629 | ||
630 | extern long vt_compat_ioctl(struct tty_struct *tty, | 630 | extern long vt_compat_ioctl(struct tty_struct *tty, |
631 | unsigned int cmd, unsigned long arg); | 631 | unsigned int cmd, unsigned long arg); |
632 | 632 | ||
633 | /* tty_mutex.c */ | 633 | /* tty_mutex.c */ |
634 | /* functions for preparation of BKL removal */ | 634 | /* functions for preparation of BKL removal */ |
635 | extern void __lockfunc tty_lock(struct tty_struct *tty); | 635 | extern void __lockfunc tty_lock(struct tty_struct *tty); |
636 | extern void __lockfunc tty_unlock(struct tty_struct *tty); | 636 | extern void __lockfunc tty_unlock(struct tty_struct *tty); |
637 | extern void __lockfunc tty_lock_pair(struct tty_struct *tty, | 637 | extern void __lockfunc tty_lock_pair(struct tty_struct *tty, |
638 | struct tty_struct *tty2); | 638 | struct tty_struct *tty2); |
639 | extern void __lockfunc tty_unlock_pair(struct tty_struct *tty, | 639 | extern void __lockfunc tty_unlock_pair(struct tty_struct *tty, |
640 | struct tty_struct *tty2); | 640 | struct tty_struct *tty2); |
641 | 641 | ||
642 | /* | 642 | /* |
643 | * This must be called only from contexts where the BTM is held (like close). | 643 | * This must be called only from contexts where the BTM is held (like close). |
644 | * | 644 | * |
645 | * We need this to ensure nobody waits for us to finish while we are waiting. | 645 | * We need this to ensure nobody waits for us to finish while we are waiting. |
646 | * Without this we were encountering system stalls. | 646 | * Without this we were encountering system stalls. |
647 | * | 647 | * |
648 | * Indeed, this should be removed later, along with the BTM itself. | 648 | * Indeed, this should be removed later, along with the BTM itself. |
649 | * | 649 | * |
650 | * Locking: BTM required. Nobody is allowed to hold port->mutex. | 650 | * Locking: BTM required. Nobody is allowed to hold port->mutex. |
651 | */ | 651 | */ |
652 | static inline void tty_wait_until_sent_from_close(struct tty_struct *tty, | 652 | static inline void tty_wait_until_sent_from_close(struct tty_struct *tty, |
653 | long timeout) | 653 | long timeout) |
654 | { | 654 | { |
655 | tty_unlock(tty); /* tty->ops->close holds the BTM, drop it while waiting */ | 655 | tty_unlock(tty); /* tty->ops->close holds the BTM, drop it while waiting */ |
656 | tty_wait_until_sent(tty, timeout); | 656 | tty_wait_until_sent(tty, timeout); |
657 | tty_lock(tty); | 657 | tty_lock(tty); |
658 | } | 658 | } |
659 | 659 | ||
660 | /* | 660 | /* |
661 | * wait_event_interruptible_tty -- wait for a condition with the tty lock held | 661 | * wait_event_interruptible_tty -- wait for a condition with the tty lock held |
662 | * | 662 | * |
663 | * The condition we are waiting for might take a long time to | 663 | * The condition we are waiting for might take a long time to |
664 | * become true, or might depend on another thread taking the | 664 | * become true, or might depend on another thread taking the |
665 | * BTM. In either case, we need to drop the BTM to guarantee | 665 | * BTM. In either case, we need to drop the BTM to guarantee |
666 | * forward progress. This is a leftover from the conversion | 666 | * forward progress. This is a leftover from the conversion |
667 | * from the BKL and should eventually get removed as the BTM | 667 | * from the BKL and should eventually get removed as the BTM |
668 | * falls out of use. | 668 | * falls out of use. |
669 | * | 669 | * |
670 | * Do not use in new code. | 670 | * Do not use in new code. |
671 | */ | 671 | */ |
672 | #define wait_event_interruptible_tty(tty, wq, condition) \ | 672 | #define wait_event_interruptible_tty(tty, wq, condition) \ |
673 | ({ \ | 673 | ({ \ |
674 | int __ret = 0; \ | 674 | int __ret = 0; \ |
675 | if (!(condition)) { \ | 675 | if (!(condition)) \ |
676 | __wait_event_interruptible_tty(tty, wq, condition, __ret); \ | 676 | __ret = __wait_event_interruptible_tty(tty, wq, \ |
677 | } \ | 677 | condition); \ |
678 | __ret; \ | 678 | __ret; \ |
679 | }) | 679 | }) |
680 | 680 | ||
681 | #define __wait_event_interruptible_tty(tty, wq, condition, ret) \ | 681 | #define __wait_event_interruptible_tty(tty, wq, condition) \ |
682 | ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, ret, \ | 682 | ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \ |
683 | tty_unlock(tty); \ | 683 | tty_unlock(tty); \ |
684 | schedule(); \ | 684 | schedule(); \ |
685 | tty_lock(tty)) | 685 | tty_lock(tty)) |
686 | 686 | ||
687 | #ifdef CONFIG_PROC_FS | 687 | #ifdef CONFIG_PROC_FS |
688 | extern void proc_tty_register_driver(struct tty_driver *); | 688 | extern void proc_tty_register_driver(struct tty_driver *); |
689 | extern void proc_tty_unregister_driver(struct tty_driver *); | 689 | extern void proc_tty_unregister_driver(struct tty_driver *); |
690 | #else | 690 | #else |
691 | static inline void proc_tty_register_driver(struct tty_driver *d) {} | 691 | static inline void proc_tty_register_driver(struct tty_driver *d) {} |
692 | static inline void proc_tty_unregister_driver(struct tty_driver *d) {} | 692 | static inline void proc_tty_unregister_driver(struct tty_driver *d) {} |
693 | #endif | 693 | #endif |
694 | 694 | ||
695 | #endif | 695 | #endif |
696 | 696 |
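The tty hunk above removes the by-reference 'ret' argument: __wait_event_interruptible_tty() now returns its result like the non-underscore wrapper does. A minimal before/after caller sketch (the !tty->flow_stopped condition is an invented placeholder, not taken from this patch):

	/* Before: the underscore helper filled in 'ret' by name. */
	int ret = 0;
	__wait_event_interruptible_tty(tty, tty->write_wait,
				       !tty->flow_stopped, ret);

	/* After: the macro evaluates to the status itself. */
	int ret = __wait_event_interruptible_tty(tty, tty->write_wait,
						 !tty->flow_stopped);
	if (ret)	/* -ERESTARTSYS: interrupted by a signal */
		return ret;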
include/linux/wait.h
1 | #ifndef _LINUX_WAIT_H | 1 | #ifndef _LINUX_WAIT_H |
2 | #define _LINUX_WAIT_H | 2 | #define _LINUX_WAIT_H |
3 | 3 | ||
4 | 4 | ||
5 | #include <linux/list.h> | 5 | #include <linux/list.h> |
6 | #include <linux/stddef.h> | 6 | #include <linux/stddef.h> |
7 | #include <linux/spinlock.h> | 7 | #include <linux/spinlock.h> |
8 | #include <asm/current.h> | 8 | #include <asm/current.h> |
9 | #include <uapi/linux/wait.h> | 9 | #include <uapi/linux/wait.h> |
10 | 10 | ||
11 | typedef struct __wait_queue wait_queue_t; | 11 | typedef struct __wait_queue wait_queue_t; |
12 | typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key); | 12 | typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key); |
13 | int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key); | 13 | int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key); |
14 | 14 | ||
15 | struct __wait_queue { | 15 | struct __wait_queue { |
16 | unsigned int flags; | 16 | unsigned int flags; |
17 | #define WQ_FLAG_EXCLUSIVE 0x01 | 17 | #define WQ_FLAG_EXCLUSIVE 0x01 |
18 | void *private; | 18 | void *private; |
19 | wait_queue_func_t func; | 19 | wait_queue_func_t func; |
20 | struct list_head task_list; | 20 | struct list_head task_list; |
21 | }; | 21 | }; |
22 | 22 | ||
23 | struct wait_bit_key { | 23 | struct wait_bit_key { |
24 | void *flags; | 24 | void *flags; |
25 | int bit_nr; | 25 | int bit_nr; |
26 | #define WAIT_ATOMIC_T_BIT_NR -1 | 26 | #define WAIT_ATOMIC_T_BIT_NR -1 |
27 | }; | 27 | }; |
28 | 28 | ||
29 | struct wait_bit_queue { | 29 | struct wait_bit_queue { |
30 | struct wait_bit_key key; | 30 | struct wait_bit_key key; |
31 | wait_queue_t wait; | 31 | wait_queue_t wait; |
32 | }; | 32 | }; |
33 | 33 | ||
34 | struct __wait_queue_head { | 34 | struct __wait_queue_head { |
35 | spinlock_t lock; | 35 | spinlock_t lock; |
36 | struct list_head task_list; | 36 | struct list_head task_list; |
37 | }; | 37 | }; |
38 | typedef struct __wait_queue_head wait_queue_head_t; | 38 | typedef struct __wait_queue_head wait_queue_head_t; |
39 | 39 | ||
40 | struct task_struct; | 40 | struct task_struct; |
41 | 41 | ||
42 | /* | 42 | /* |
43 | * Macros for declaration and initialisation of the datatypes | 43 | * Macros for declaration and initialisation of the datatypes |
44 | */ | 44 | */ |
45 | 45 | ||
46 | #define __WAITQUEUE_INITIALIZER(name, tsk) { \ | 46 | #define __WAITQUEUE_INITIALIZER(name, tsk) { \ |
47 | .private = tsk, \ | 47 | .private = tsk, \ |
48 | .func = default_wake_function, \ | 48 | .func = default_wake_function, \ |
49 | .task_list = { NULL, NULL } } | 49 | .task_list = { NULL, NULL } } |
50 | 50 | ||
51 | #define DECLARE_WAITQUEUE(name, tsk) \ | 51 | #define DECLARE_WAITQUEUE(name, tsk) \ |
52 | wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk) | 52 | wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk) |
53 | 53 | ||
54 | #define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \ | 54 | #define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \ |
55 | .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ | 55 | .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ |
56 | .task_list = { &(name).task_list, &(name).task_list } } | 56 | .task_list = { &(name).task_list, &(name).task_list } } |
57 | 57 | ||
58 | #define DECLARE_WAIT_QUEUE_HEAD(name) \ | 58 | #define DECLARE_WAIT_QUEUE_HEAD(name) \ |
59 | wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name) | 59 | wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name) |
60 | 60 | ||
61 | #define __WAIT_BIT_KEY_INITIALIZER(word, bit) \ | 61 | #define __WAIT_BIT_KEY_INITIALIZER(word, bit) \ |
62 | { .flags = word, .bit_nr = bit, } | 62 | { .flags = word, .bit_nr = bit, } |
63 | 63 | ||
64 | #define __WAIT_ATOMIC_T_KEY_INITIALIZER(p) \ | 64 | #define __WAIT_ATOMIC_T_KEY_INITIALIZER(p) \ |
65 | { .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, } | 65 | { .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, } |
66 | 66 | ||
67 | extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *); | 67 | extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *); |
68 | 68 | ||
69 | #define init_waitqueue_head(q) \ | 69 | #define init_waitqueue_head(q) \ |
70 | do { \ | 70 | do { \ |
71 | static struct lock_class_key __key; \ | 71 | static struct lock_class_key __key; \ |
72 | \ | 72 | \ |
73 | __init_waitqueue_head((q), #q, &__key); \ | 73 | __init_waitqueue_head((q), #q, &__key); \ |
74 | } while (0) | 74 | } while (0) |
75 | 75 | ||
76 | #ifdef CONFIG_LOCKDEP | 76 | #ifdef CONFIG_LOCKDEP |
77 | # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \ | 77 | # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \ |
78 | ({ init_waitqueue_head(&name); name; }) | 78 | ({ init_waitqueue_head(&name); name; }) |
79 | # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \ | 79 | # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \ |
80 | wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) | 80 | wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) |
81 | #else | 81 | #else |
82 | # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name) | 82 | # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name) |
83 | #endif | 83 | #endif |
84 | 84 | ||
85 | static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p) | 85 | static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p) |
86 | { | 86 | { |
87 | q->flags = 0; | 87 | q->flags = 0; |
88 | q->private = p; | 88 | q->private = p; |
89 | q->func = default_wake_function; | 89 | q->func = default_wake_function; |
90 | } | 90 | } |
91 | 91 | ||
92 | static inline void init_waitqueue_func_entry(wait_queue_t *q, | 92 | static inline void init_waitqueue_func_entry(wait_queue_t *q, |
93 | wait_queue_func_t func) | 93 | wait_queue_func_t func) |
94 | { | 94 | { |
95 | q->flags = 0; | 95 | q->flags = 0; |
96 | q->private = NULL; | 96 | q->private = NULL; |
97 | q->func = func; | 97 | q->func = func; |
98 | } | 98 | } |
99 | 99 | ||
100 | static inline int waitqueue_active(wait_queue_head_t *q) | 100 | static inline int waitqueue_active(wait_queue_head_t *q) |
101 | { | 101 | { |
102 | return !list_empty(&q->task_list); | 102 | return !list_empty(&q->task_list); |
103 | } | 103 | } |
104 | 104 | ||
105 | extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); | 105 | extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); |
106 | extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait); | 106 | extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait); |
107 | extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); | 107 | extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); |
108 | 108 | ||
109 | static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new) | 109 | static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new) |
110 | { | 110 | { |
111 | list_add(&new->task_list, &head->task_list); | 111 | list_add(&new->task_list, &head->task_list); |
112 | } | 112 | } |
113 | 113 | ||
114 | /* | 114 | /* |
115 | * Used for wake-one threads: | 115 | * Used for wake-one threads: |
116 | */ | 116 | */ |
117 | static inline void __add_wait_queue_exclusive(wait_queue_head_t *q, | 117 | static inline void __add_wait_queue_exclusive(wait_queue_head_t *q, |
118 | wait_queue_t *wait) | 118 | wait_queue_t *wait) |
119 | { | 119 | { |
120 | wait->flags |= WQ_FLAG_EXCLUSIVE; | 120 | wait->flags |= WQ_FLAG_EXCLUSIVE; |
121 | __add_wait_queue(q, wait); | 121 | __add_wait_queue(q, wait); |
122 | } | 122 | } |
123 | 123 | ||
124 | static inline void __add_wait_queue_tail(wait_queue_head_t *head, | 124 | static inline void __add_wait_queue_tail(wait_queue_head_t *head, |
125 | wait_queue_t *new) | 125 | wait_queue_t *new) |
126 | { | 126 | { |
127 | list_add_tail(&new->task_list, &head->task_list); | 127 | list_add_tail(&new->task_list, &head->task_list); |
128 | } | 128 | } |
129 | 129 | ||
130 | static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q, | 130 | static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q, |
131 | wait_queue_t *wait) | 131 | wait_queue_t *wait) |
132 | { | 132 | { |
133 | wait->flags |= WQ_FLAG_EXCLUSIVE; | 133 | wait->flags |= WQ_FLAG_EXCLUSIVE; |
134 | __add_wait_queue_tail(q, wait); | 134 | __add_wait_queue_tail(q, wait); |
135 | } | 135 | } |
136 | 136 | ||
137 | static inline void __remove_wait_queue(wait_queue_head_t *head, | 137 | static inline void __remove_wait_queue(wait_queue_head_t *head, |
138 | wait_queue_t *old) | 138 | wait_queue_t *old) |
139 | { | 139 | { |
140 | list_del(&old->task_list); | 140 | list_del(&old->task_list); |
141 | } | 141 | } |
142 | 142 | ||
143 | void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); | 143 | void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); |
144 | void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); | 144 | void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); |
145 | void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, | 145 | void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, |
146 | void *key); | 146 | void *key); |
147 | void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr); | 147 | void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr); |
148 | void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); | 148 | void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); |
149 | void __wake_up_bit(wait_queue_head_t *, void *, int); | 149 | void __wake_up_bit(wait_queue_head_t *, void *, int); |
150 | int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); | 150 | int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); |
151 | int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); | 151 | int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); |
152 | void wake_up_bit(void *, int); | 152 | void wake_up_bit(void *, int); |
153 | void wake_up_atomic_t(atomic_t *); | 153 | void wake_up_atomic_t(atomic_t *); |
154 | int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned); | 154 | int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned); |
155 | int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned); | 155 | int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned); |
156 | int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned); | 156 | int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned); |
157 | wait_queue_head_t *bit_waitqueue(void *, int); | 157 | wait_queue_head_t *bit_waitqueue(void *, int); |
158 | 158 | ||
159 | #define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL) | 159 | #define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL) |
160 | #define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL) | 160 | #define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL) |
161 | #define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL) | 161 | #define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL) |
162 | #define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1) | 162 | #define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1) |
163 | #define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0) | 163 | #define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0) |
164 | 164 | ||
165 | #define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL) | 165 | #define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL) |
166 | #define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL) | 166 | #define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL) |
167 | #define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL) | 167 | #define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL) |
168 | #define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1) | 168 | #define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1) |
169 | 169 | ||
170 | /* | 170 | /* |
171 | * Wakeup macros to be used to report events to the targets. | 171 | * Wakeup macros to be used to report events to the targets. |
172 | */ | 172 | */ |
173 | #define wake_up_poll(x, m) \ | 173 | #define wake_up_poll(x, m) \ |
174 | __wake_up(x, TASK_NORMAL, 1, (void *) (m)) | 174 | __wake_up(x, TASK_NORMAL, 1, (void *) (m)) |
175 | #define wake_up_locked_poll(x, m) \ | 175 | #define wake_up_locked_poll(x, m) \ |
176 | __wake_up_locked_key((x), TASK_NORMAL, (void *) (m)) | 176 | __wake_up_locked_key((x), TASK_NORMAL, (void *) (m)) |
177 | #define wake_up_interruptible_poll(x, m) \ | 177 | #define wake_up_interruptible_poll(x, m) \ |
178 | __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m)) | 178 | __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m)) |
179 | #define wake_up_interruptible_sync_poll(x, m) \ | 179 | #define wake_up_interruptible_sync_poll(x, m) \ |
180 | __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m)) | 180 | __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m)) |
181 | 181 | ||
182 | #define ___wait_cond_timeout(condition, ret) \ | 182 | #define ___wait_cond_timeout(condition) \ |
183 | ({ \ | 183 | ({ \ |
184 | bool __cond = (condition); \ | 184 | bool __cond = (condition); \ |
185 | if (__cond && !ret) \ | 185 | if (__cond && !__ret) \ |
186 | ret = 1; \ | 186 | __ret = 1; \ |
187 | __cond || !ret; \ | 187 | __cond || !__ret; \ |
188 | }) | 188 | }) |
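___wait_cond_timeout() now reads '__ret' straight out of the enclosing ___wait_event() expansion instead of taking it as a parameter. The convention it encodes is unchanged: if the condition became true exactly as the jiffies budget hit zero, report 1 so success is never confused with a timeout. A userspace model of that rule (simplified sketch, no scheduler involved):

	#include <stdbool.h>

	/* Model of the exit test: *remaining plays the role of __ret.
	 * Returns true when the wait loop should stop.
	 */
	static bool cond_timeout(bool cond, long *remaining)
	{
		if (cond && !*remaining)
			*remaining = 1;	/* met at expiry: still success */
		return cond || !*remaining;
	}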
189 | 189 | ||
190 | #define ___wait_signal_pending(state) \ | 190 | #define ___wait_signal_pending(state) \ |
191 | ((state == TASK_INTERRUPTIBLE && signal_pending(current)) || \ | 191 | ((state == TASK_INTERRUPTIBLE && signal_pending(current)) || \ |
192 | (state == TASK_KILLABLE && fatal_signal_pending(current))) | 192 | (state == TASK_KILLABLE && fatal_signal_pending(current))) |
193 | 193 | ||
194 | #define ___wait_nop_ret int ret __always_unused | ||
195 | |||
196 | #define ___wait_event(wq, condition, state, exclusive, ret, cmd) \ | 194 | #define ___wait_event(wq, condition, state, exclusive, ret, cmd) \ |
197 | do { \ | 195 | ({ \ |
198 | __label__ __out; \ | 196 | __label__ __out; \ |
199 | DEFINE_WAIT(__wait); \ | 197 | DEFINE_WAIT(__wait); \ |
198 | long __ret = ret; \ | ||
200 | \ | 199 | \ |
201 | for (;;) { \ | 200 | for (;;) { \ |
202 | if (exclusive) \ | 201 | if (exclusive) \ |
203 | prepare_to_wait_exclusive(&wq, &__wait, state); \ | 202 | prepare_to_wait_exclusive(&wq, &__wait, state); \ |
204 | else \ | 203 | else \ |
205 | prepare_to_wait(&wq, &__wait, state); \ | 204 | prepare_to_wait(&wq, &__wait, state); \ |
206 | \ | 205 | \ |
207 | if (condition) \ | 206 | if (condition) \ |
208 | break; \ | 207 | break; \ |
209 | \ | 208 | \ |
210 | if (___wait_signal_pending(state)) { \ | 209 | if (___wait_signal_pending(state)) { \ |
211 | ret = -ERESTARTSYS; \ | 210 | __ret = -ERESTARTSYS; \ |
212 | if (exclusive) { \ | 211 | if (exclusive) { \ |
213 | abort_exclusive_wait(&wq, &__wait, \ | 212 | abort_exclusive_wait(&wq, &__wait, \ |
214 | state, NULL); \ | 213 | state, NULL); \ |
215 | goto __out; \ | 214 | goto __out; \ |
216 | } \ | 215 | } \ |
217 | break; \ | 216 | break; \ |
218 | } \ | 217 | } \ |
219 | \ | 218 | \ |
220 | cmd; \ | 219 | cmd; \ |
221 | } \ | 220 | } \ |
222 | finish_wait(&wq, &__wait); \ | 221 | finish_wait(&wq, &__wait); \ |
223 | __out: ; \ | 222 | __out: __ret; \ |
224 | } while (0) | 223 | }) |
225 | 224 | ||
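This is the core of the rework: ___wait_event() turns from a do/while statement into a GCC statement expression that seeds a local '__ret' from its 'ret' argument and yields it as the value of the whole expansion, which is what lets the by-reference 'ret' plumbing disappear everywhere else. A standalone illustration of the ({ ... }) idiom it relies on (invented names, GCC extensions assumed):

	/* A statement expression runs its body and evaluates to its
	 * last expression, so a macro can both loop and return a status.
	 */
	#define COUNTDOWN(init)						\
	({								\
		long __ret = (init);					\
		while (__ret > 0)					\
			__ret--;	/* stand-in for schedule() */	\
		__ret;							\
	})

	long left = COUNTDOWN(10);	/* left == 0 */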
226 | #define __wait_event(wq, condition) \ | 225 | #define __wait_event(wq, condition) \ |
227 | ___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \ | 226 | (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ |
228 | ___wait_nop_ret, schedule()) | 227 | schedule()) |
229 | 228 | ||
230 | /** | 229 | /** |
231 | * wait_event - sleep until a condition becomes true | 230 | * wait_event - sleep until a condition becomes true |
232 | * @wq: the waitqueue to wait on | 231 | * @wq: the waitqueue to wait on |
233 | * @condition: a C expression for the event to wait for | 232 | * @condition: a C expression for the event to wait for |
234 | * | 233 | * |
235 | * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the | 234 | * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the |
236 | * @condition evaluates to true. The @condition is checked each time | 235 | * @condition evaluates to true. The @condition is checked each time |
237 | * the waitqueue @wq is woken up. | 236 | * the waitqueue @wq is woken up. |
238 | * | 237 | * |
239 | * wake_up() has to be called after changing any variable that could | 238 | * wake_up() has to be called after changing any variable that could |
240 | * change the result of the wait condition. | 239 | * change the result of the wait condition. |
241 | */ | 240 | */ |
242 | #define wait_event(wq, condition) \ | 241 | #define wait_event(wq, condition) \ |
243 | do { \ | 242 | do { \ |
244 | if (condition) \ | 243 | if (condition) \ |
245 | break; \ | 244 | break; \ |
246 | __wait_event(wq, condition); \ | 245 | __wait_event(wq, condition); \ |
247 | } while (0) | 246 | } while (0) |
248 | 247 | ||
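The wake-up contract in the comment above is the whole API: update the condition first, then wake the queue. A minimal waiter/waker pair under that contract (the 'my_wq'/'my_done' names are invented for illustration):

	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
	static int my_done;

	static void consumer(void)
	{
		/* Sleeps in TASK_UNINTERRUPTIBLE until my_done is set. */
		wait_event(my_wq, my_done);
	}

	static void producer(void)
	{
		my_done = 1;		/* change the condition... */
		wake_up(&my_wq);	/* ...then wake the waiters */
	}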
249 | #define __wait_event_timeout(wq, condition, ret) \ | 248 | #define __wait_event_timeout(wq, condition, timeout) \ |
250 | ___wait_event(wq, ___wait_cond_timeout(condition, ret), \ | 249 | ___wait_event(wq, ___wait_cond_timeout(condition), \ |
251 | TASK_UNINTERRUPTIBLE, 0, ret, \ | 250 | TASK_UNINTERRUPTIBLE, 0, timeout, \ |
252 | ret = schedule_timeout(ret)) | 251 | __ret = schedule_timeout(__ret)) |
253 | 252 | ||
254 | /** | 253 | /** |
255 | * wait_event_timeout - sleep until a condition becomes true or a timeout elapses | 254 | * wait_event_timeout - sleep until a condition becomes true or a timeout elapses |
256 | * @wq: the waitqueue to wait on | 255 | * @wq: the waitqueue to wait on |
257 | * @condition: a C expression for the event to wait for | 256 | * @condition: a C expression for the event to wait for |
258 | * @timeout: timeout, in jiffies | 257 | * @timeout: timeout, in jiffies |
259 | * | 258 | * |
260 | * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the | 259 | * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the |
261 | * @condition evaluates to true. The @condition is checked each time | 260 | * @condition evaluates to true. The @condition is checked each time |
262 | * the waitqueue @wq is woken up. | 261 | * the waitqueue @wq is woken up. |
263 | * | 262 | * |
264 | * wake_up() has to be called after changing any variable that could | 263 | * wake_up() has to be called after changing any variable that could |
265 | * change the result of the wait condition. | 264 | * change the result of the wait condition. |
266 | * | 265 | * |
267 | * The function returns 0 if the @timeout elapsed, or the remaining | 266 | * The function returns 0 if the @timeout elapsed, or the remaining |
268 | * jiffies (at least 1) if the @condition evaluated to %true before | 267 | * jiffies (at least 1) if the @condition evaluated to %true before |
269 | * the @timeout elapsed. | 268 | * the @timeout elapsed. |
270 | */ | 269 | */ |
271 | #define wait_event_timeout(wq, condition, timeout) \ | 270 | #define wait_event_timeout(wq, condition, timeout) \ |
272 | ({ \ | 271 | ({ \ |
273 | long __ret = timeout; \ | 272 | long __ret = timeout; \ |
274 | if (!(condition)) \ | 273 | if (!(condition)) \ |
275 | __wait_event_timeout(wq, condition, __ret); \ | 274 | __ret = __wait_event_timeout(wq, condition, timeout); \ |
276 | __ret; \ | 275 | __ret; \ |
277 | }) | 276 | }) |
278 | 277 | ||
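Note how the caller-side macro now simply assigns, __ret = __wait_event_timeout(wq, condition, timeout), where before it handed __ret in by name. The return convention is as documented: 0 on timeout, otherwise the remaining jiffies (at least 1). A hedged caller sketch (names invented; msecs_to_jiffies() is the usual conversion helper):

	long left = wait_event_timeout(my_wq, my_done,
				       msecs_to_jiffies(500));
	if (!left)
		return -ETIMEDOUT;	/* mapping chosen by this caller */
	/* left >= 1: condition was true with 'left' jiffies to spare */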
279 | #define __wait_event_interruptible(wq, condition, ret) \ | 278 | #define __wait_event_interruptible(wq, condition) \ |
280 | ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, ret, \ | 279 | ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \ |
281 | schedule()) | 280 | schedule()) |
282 | 281 | ||
283 | /** | 282 | /** |
284 | * wait_event_interruptible - sleep until a condition becomes true | 283 | * wait_event_interruptible - sleep until a condition becomes true |
285 | * @wq: the waitqueue to wait on | 284 | * @wq: the waitqueue to wait on |
286 | * @condition: a C expression for the event to wait for | 285 | * @condition: a C expression for the event to wait for |
287 | * | 286 | * |
288 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the | 287 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the |
289 | * @condition evaluates to true or a signal is received. | 288 | * @condition evaluates to true or a signal is received. |
290 | * The @condition is checked each time the waitqueue @wq is woken up. | 289 | * The @condition is checked each time the waitqueue @wq is woken up. |
291 | * | 290 | * |
292 | * wake_up() has to be called after changing any variable that could | 291 | * wake_up() has to be called after changing any variable that could |
293 | * change the result of the wait condition. | 292 | * change the result of the wait condition. |
294 | * | 293 | * |
295 | * The function will return -ERESTARTSYS if it was interrupted by a | 294 | * The function will return -ERESTARTSYS if it was interrupted by a |
296 | * signal and 0 if @condition evaluated to true. | 295 | * signal and 0 if @condition evaluated to true. |
297 | */ | 296 | */ |
298 | #define wait_event_interruptible(wq, condition) \ | 297 | #define wait_event_interruptible(wq, condition) \ |
299 | ({ \ | 298 | ({ \ |
300 | int __ret = 0; \ | 299 | int __ret = 0; \ |
301 | if (!(condition)) \ | 300 | if (!(condition)) \ |
302 | __wait_event_interruptible(wq, condition, __ret); \ | 301 | __ret = __wait_event_interruptible(wq, condition); \ |
303 | __ret; \ | 302 | __ret; \ |
304 | }) | 303 | }) |
305 | 304 | ||
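wait_event_interruptible() only ever returns 0 or -ERESTARTSYS, so callers usually just propagate a nonzero result back toward the signal-delivery path. A minimal sketch (invented names):

	int err = wait_event_interruptible(my_wq, my_done);
	if (err)
		return err;	/* -ERESTARTSYS: a signal arrived first */
	/* my_done is true here */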
306 | #define __wait_event_interruptible_timeout(wq, condition, ret) \ | 305 | #define __wait_event_interruptible_timeout(wq, condition, timeout) \ |
307 | ___wait_event(wq, ___wait_cond_timeout(condition, ret), \ | 306 | ___wait_event(wq, ___wait_cond_timeout(condition), \ |
308 | TASK_INTERRUPTIBLE, 0, ret, \ | 307 | TASK_INTERRUPTIBLE, 0, timeout, \ |
309 | ret = schedule_timeout(ret)) | 308 | __ret = schedule_timeout(__ret)) |
310 | 309 | ||
311 | /** | 310 | /** |
312 | * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses | 311 | * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses |
313 | * @wq: the waitqueue to wait on | 312 | * @wq: the waitqueue to wait on |
314 | * @condition: a C expression for the event to wait for | 313 | * @condition: a C expression for the event to wait for |
315 | * @timeout: timeout, in jiffies | 314 | * @timeout: timeout, in jiffies |
316 | * | 315 | * |
317 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the | 316 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the |
318 | * @condition evaluates to true or a signal is received. | 317 | * @condition evaluates to true or a signal is received. |
319 | * The @condition is checked each time the waitqueue @wq is woken up. | 318 | * The @condition is checked each time the waitqueue @wq is woken up. |
320 | * | 319 | * |
321 | * wake_up() has to be called after changing any variable that could | 320 | * wake_up() has to be called after changing any variable that could |
322 | * change the result of the wait condition. | 321 | * change the result of the wait condition. |
323 | * | 322 | * |
324 | * Returns: | 323 | * Returns: |
325 | * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by | 324 | * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by |
326 | * a signal, or the remaining jiffies (at least 1) if the @condition | 325 | * a signal, or the remaining jiffies (at least 1) if the @condition |
327 | * evaluated to %true before the @timeout elapsed. | 326 | * evaluated to %true before the @timeout elapsed. |
328 | */ | 327 | */ |
329 | #define wait_event_interruptible_timeout(wq, condition, timeout) \ | 328 | #define wait_event_interruptible_timeout(wq, condition, timeout) \ |
330 | ({ \ | 329 | ({ \ |
331 | long __ret = timeout; \ | 330 | long __ret = timeout; \ |
332 | if (!(condition)) \ | 331 | if (!(condition)) \ |
333 | __wait_event_interruptible_timeout(wq, condition, __ret); \ | 332 | __ret = __wait_event_interruptible_timeout(wq, \ |
333 | condition, timeout); \ | ||
334 | __ret; \ | 334 | __ret; \ |
335 | }) | 335 | }) |
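
The three-way return value is easy to misread, so a short decoding sketch (my_wq and my_event remain hypothetical):

	long left = wait_event_interruptible_timeout(my_wq, my_event, HZ);

	if (left == -ERESTARTSYS)
		return left;		/* interrupted by a signal */
	if (left == 0)
		return -ETIMEDOUT;	/* the whole jiffy budget elapsed */
	/* left > 0: condition became true with 'left' jiffies remaining */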
336 | 336 | ||
337 | #define __wait_event_hrtimeout(wq, condition, timeout, state) \ | 337 | #define __wait_event_hrtimeout(wq, condition, timeout, state) \ |
338 | ({ \ | 338 | ({ \ |
339 | int __ret = 0; \ | 339 | int __ret = 0; \ |
340 | struct hrtimer_sleeper __t; \ | 340 | struct hrtimer_sleeper __t; \ |
341 | \ | 341 | \ |
342 | hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \ | 342 | hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \ |
343 | HRTIMER_MODE_REL); \ | 343 | HRTIMER_MODE_REL); \ |
344 | hrtimer_init_sleeper(&__t, current); \ | 344 | hrtimer_init_sleeper(&__t, current); \ |
345 | if ((timeout).tv64 != KTIME_MAX) \ | 345 | if ((timeout).tv64 != KTIME_MAX) \ |
346 | hrtimer_start_range_ns(&__t.timer, timeout, \ | 346 | hrtimer_start_range_ns(&__t.timer, timeout, \ |
347 | current->timer_slack_ns, \ | 347 | current->timer_slack_ns, \ |
348 | HRTIMER_MODE_REL); \ | 348 | HRTIMER_MODE_REL); \ |
349 | \ | 349 | \ |
350 | ___wait_event(wq, condition, state, 0, __ret, \ | 350 | __ret = ___wait_event(wq, condition, state, 0, 0, \ |
351 | if (!__t.task) { \ | 351 | if (!__t.task) { \ |
352 | __ret = -ETIME; \ | 352 | __ret = -ETIME; \ |
353 | break; \ | 353 | break; \ |
354 | } \ | 354 | } \ |
355 | schedule()); \ | 355 | schedule()); \ |
356 | \ | 356 | \ |
357 | hrtimer_cancel(&__t.timer); \ | 357 | hrtimer_cancel(&__t.timer); \ |
358 | destroy_hrtimer_on_stack(&__t.timer); \ | 358 | destroy_hrtimer_on_stack(&__t.timer); \ |
359 | __ret; \ | 359 | __ret; \ |
360 | }) | 360 | }) |
361 | 361 | ||
362 | /** | 362 | /** |
363 | * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses | 363 | * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses |
364 | * @wq: the waitqueue to wait on | 364 | * @wq: the waitqueue to wait on |
365 | * @condition: a C expression for the event to wait for | 365 | * @condition: a C expression for the event to wait for |
366 | * @timeout: timeout, as a ktime_t | 366 | * @timeout: timeout, as a ktime_t |
367 | * | 367 | * |
368 | * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the | 368 | * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the |
369 | * @condition evaluates to true (TASK_UNINTERRUPTIBLE sleeps ignore signals). | 369 | * @condition evaluates to true (TASK_UNINTERRUPTIBLE sleeps ignore signals). |
370 | * The @condition is checked each time the waitqueue @wq is woken up. | 370 | * The @condition is checked each time the waitqueue @wq is woken up. |
371 | * | 371 | * |
372 | * wake_up() has to be called after changing any variable that could | 372 | * wake_up() has to be called after changing any variable that could |
373 | * change the result of the wait condition. | 373 | * change the result of the wait condition. |
374 | * | 374 | * |
375 | * The function returns 0 if @condition became true, or -ETIME if the timeout | 375 | * The function returns 0 if @condition became true, or -ETIME if the timeout |
376 | * elapsed. | 376 | * elapsed. |
377 | */ | 377 | */ |
378 | #define wait_event_hrtimeout(wq, condition, timeout) \ | 378 | #define wait_event_hrtimeout(wq, condition, timeout) \ |
379 | ({ \ | 379 | ({ \ |
380 | int __ret = 0; \ | 380 | int __ret = 0; \ |
381 | if (!(condition)) \ | 381 | if (!(condition)) \ |
382 | __ret = __wait_event_hrtimeout(wq, condition, timeout, \ | 382 | __ret = __wait_event_hrtimeout(wq, condition, timeout, \ |
383 | TASK_UNINTERRUPTIBLE); \ | 383 | TASK_UNINTERRUPTIBLE); \ |
384 | __ret; \ | 384 | __ret; \ |
385 | }) | 385 | }) |
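
Unlike the jiffies-based macros this one takes a ktime_t, so a 10 ms timeout would look roughly like the sketch below (hypothetical names; ktime_set() and NSEC_PER_MSEC are the stock kernel helpers):

	#include <linux/ktime.h>
	#include <linux/time.h>

	int err = wait_event_hrtimeout(my_wq, my_event,
				       ktime_set(0, 10 * NSEC_PER_MSEC));
	if (err == -ETIME)
		return err;	/* the hrtimer fired before my_event was set */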
386 | 386 | ||
387 | /** | 387 | /** |
388 | * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses | 388 | * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses |
389 | * @wq: the waitqueue to wait on | 389 | * @wq: the waitqueue to wait on |
390 | * @condition: a C expression for the event to wait for | 390 | * @condition: a C expression for the event to wait for |
391 | * @timeout: timeout, as a ktime_t | 391 | * @timeout: timeout, as a ktime_t |
392 | * | 392 | * |
393 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the | 393 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the |
394 | * @condition evaluates to true or a signal is received. | 394 | * @condition evaluates to true or a signal is received. |
395 | * The @condition is checked each time the waitqueue @wq is woken up. | 395 | * The @condition is checked each time the waitqueue @wq is woken up. |
396 | * | 396 | * |
397 | * wake_up() has to be called after changing any variable that could | 397 | * wake_up() has to be called after changing any variable that could |
398 | * change the result of the wait condition. | 398 | * change the result of the wait condition. |
399 | * | 399 | * |
400 | * The function returns 0 if @condition became true, -ERESTARTSYS if it was | 400 | * The function returns 0 if @condition became true, -ERESTARTSYS if it was |
401 | * interrupted by a signal, or -ETIME if the timeout elapsed. | 401 | * interrupted by a signal, or -ETIME if the timeout elapsed. |
402 | */ | 402 | */ |
403 | #define wait_event_interruptible_hrtimeout(wq, condition, timeout) \ | 403 | #define wait_event_interruptible_hrtimeout(wq, condition, timeout) \ |
404 | ({ \ | 404 | ({ \ |
405 | long __ret = 0; \ | 405 | long __ret = 0; \ |
406 | if (!(condition)) \ | 406 | if (!(condition)) \ |
407 | __ret = __wait_event_hrtimeout(wq, condition, timeout, \ | 407 | __ret = __wait_event_hrtimeout(wq, condition, timeout, \ |
408 | TASK_INTERRUPTIBLE); \ | 408 | TASK_INTERRUPTIBLE); \ |
409 | __ret; \ | 409 | __ret; \ |
410 | }) | 410 | }) |
411 | 411 | ||
412 | #define __wait_event_interruptible_exclusive(wq, condition, ret) \ | 412 | #define __wait_event_interruptible_exclusive(wq, condition) \ |
413 | ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, ret, \ | 413 | ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \ |
414 | schedule()) | 414 | schedule()) |
415 | 415 | ||
416 | #define wait_event_interruptible_exclusive(wq, condition) \ | 416 | #define wait_event_interruptible_exclusive(wq, condition) \ |
417 | ({ \ | 417 | ({ \ |
418 | int __ret = 0; \ | 418 | int __ret = 0; \ |
419 | if (!(condition)) \ | 419 | if (!(condition)) \ |
420 | __wait_event_interruptible_exclusive(wq, condition, __ret);\ | 420 | __ret = __wait_event_interruptible_exclusive(wq, condition);\ |
421 | __ret; \ | 421 | __ret; \ |
422 | }) | 422 | }) |
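
Exclusive waiters are queued with WQ_FLAG_EXCLUSIVE, so each wake_up() releases at most one of them. A consumer-pool sketch (my_queue_empty() is a hypothetical helper):

	static int my_consumer(void)
	{
		int err;

		/* One wake_up(&my_wq) per queued item wakes exactly one
		 * consumer instead of the whole pool. */
		err = wait_event_interruptible_exclusive(my_wq, !my_queue_empty());
		if (err)
			return err;		/* -ERESTARTSYS */
		/* ... dequeue exactly one item ... */
		return 0;
	}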
423 | 423 | ||
424 | 424 | ||
425 | #define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \ | 425 | #define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \ |
426 | ({ \ | 426 | ({ \ |
427 | int __ret = 0; \ | 427 | int __ret = 0; \ |
428 | DEFINE_WAIT(__wait); \ | 428 | DEFINE_WAIT(__wait); \ |
429 | if (exclusive) \ | 429 | if (exclusive) \ |
430 | __wait.flags |= WQ_FLAG_EXCLUSIVE; \ | 430 | __wait.flags |= WQ_FLAG_EXCLUSIVE; \ |
431 | do { \ | 431 | do { \ |
432 | if (likely(list_empty(&__wait.task_list))) \ | 432 | if (likely(list_empty(&__wait.task_list))) \ |
433 | __add_wait_queue_tail(&(wq), &__wait); \ | 433 | __add_wait_queue_tail(&(wq), &__wait); \ |
434 | set_current_state(TASK_INTERRUPTIBLE); \ | 434 | set_current_state(TASK_INTERRUPTIBLE); \ |
435 | if (signal_pending(current)) { \ | 435 | if (signal_pending(current)) { \ |
436 | __ret = -ERESTARTSYS; \ | 436 | __ret = -ERESTARTSYS; \ |
437 | break; \ | 437 | break; \ |
438 | } \ | 438 | } \ |
439 | if (irq) \ | 439 | if (irq) \ |
440 | spin_unlock_irq(&(wq).lock); \ | 440 | spin_unlock_irq(&(wq).lock); \ |
441 | else \ | 441 | else \ |
442 | spin_unlock(&(wq).lock); \ | 442 | spin_unlock(&(wq).lock); \ |
443 | schedule(); \ | 443 | schedule(); \ |
444 | if (irq) \ | 444 | if (irq) \ |
445 | spin_lock_irq(&(wq).lock); \ | 445 | spin_lock_irq(&(wq).lock); \ |
446 | else \ | 446 | else \ |
447 | spin_lock(&(wq).lock); \ | 447 | spin_lock(&(wq).lock); \ |
448 | } while (!(condition)); \ | 448 | } while (!(condition)); \ |
449 | __remove_wait_queue(&(wq), &__wait); \ | 449 | __remove_wait_queue(&(wq), &__wait); \ |
450 | __set_current_state(TASK_RUNNING); \ | 450 | __set_current_state(TASK_RUNNING); \ |
451 | __ret; \ | 451 | __ret; \ |
452 | }) | 452 | }) |
453 | 453 | ||
454 | 454 | ||
455 | /** | 455 | /** |
456 | * wait_event_interruptible_locked - sleep until a condition gets true | 456 | * wait_event_interruptible_locked - sleep until a condition gets true |
457 | * @wq: the waitqueue to wait on | 457 | * @wq: the waitqueue to wait on |
458 | * @condition: a C expression for the event to wait for | 458 | * @condition: a C expression for the event to wait for |
459 | * | 459 | * |
460 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the | 460 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the |
461 | * @condition evaluates to true or a signal is received. | 461 | * @condition evaluates to true or a signal is received. |
462 | * The @condition is checked each time the waitqueue @wq is woken up. | 462 | * The @condition is checked each time the waitqueue @wq is woken up. |
463 | * | 463 | * |
464 | * It must be called with wq.lock held. The spinlock is unlocked | 464 | * It must be called with wq.lock held. The spinlock is unlocked |
465 | * while sleeping, but @condition is tested with the lock held, and | 465 | * while sleeping, but @condition is tested with the lock held, and |
466 | * the lock is held again when this macro exits. | 466 | * the lock is held again when this macro exits. |
467 | * | 467 | * |
468 | * The lock is locked/unlocked using spin_lock()/spin_unlock() | 468 | * The lock is locked/unlocked using spin_lock()/spin_unlock() |
469 | * functions which must match the way they are locked/unlocked outside | 469 | * functions which must match the way they are locked/unlocked outside |
470 | * of this macro. | 470 | * of this macro. |
471 | * | 471 | * |
472 | * wake_up_locked() has to be called after changing any variable that could | 472 | * wake_up_locked() has to be called after changing any variable that could |
473 | * change the result of the wait condition. | 473 | * change the result of the wait condition. |
474 | * | 474 | * |
475 | * The function will return -ERESTARTSYS if it was interrupted by a | 475 | * The function will return -ERESTARTSYS if it was interrupted by a |
476 | * signal and 0 if @condition evaluated to true. | 476 | * signal and 0 if @condition evaluated to true. |
477 | */ | 477 | */ |
478 | #define wait_event_interruptible_locked(wq, condition) \ | 478 | #define wait_event_interruptible_locked(wq, condition) \ |
479 | ((condition) \ | 479 | ((condition) \ |
480 | ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0)) | 480 | ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0)) |
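
A sketch of the locked calling convention: wq.lock is held on entry, around every test of the condition, and on exit, while the waker updates state and calls wake_up_locked() under the same lock (my_cond is hypothetical):

	/* waiter */
	spin_lock(&my_wq.lock);
	err = wait_event_interruptible_locked(my_wq, my_cond);
	/* my_wq.lock is held here whether err is 0 or -ERESTARTSYS */
	spin_unlock(&my_wq.lock);

	/* waker */
	spin_lock(&my_wq.lock);
	my_cond = 1;
	wake_up_locked(&my_wq);
	spin_unlock(&my_wq.lock);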
481 | 481 | ||
482 | /** | 482 | /** |
483 | * wait_event_interruptible_locked_irq - sleep until a condition gets true | 483 | * wait_event_interruptible_locked_irq - sleep until a condition gets true |
484 | * @wq: the waitqueue to wait on | 484 | * @wq: the waitqueue to wait on |
485 | * @condition: a C expression for the event to wait for | 485 | * @condition: a C expression for the event to wait for |
486 | * | 486 | * |
487 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the | 487 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the |
488 | * @condition evaluates to true or a signal is received. | 488 | * @condition evaluates to true or a signal is received. |
489 | * The @condition is checked each time the waitqueue @wq is woken up. | 489 | * The @condition is checked each time the waitqueue @wq is woken up. |
490 | * | 490 | * |
491 | * It must be called with wq.lock held. The spinlock is unlocked | 491 | * It must be called with wq.lock held. The spinlock is unlocked |
492 | * while sleeping, but @condition is tested with the lock held, and | 492 | * while sleeping, but @condition is tested with the lock held, and |
493 | * the lock is held again when this macro exits. | 493 | * the lock is held again when this macro exits. |
494 | * | 494 | * |
495 | * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq() | 495 | * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq() |
496 | * functions which must match the way they are locked/unlocked outside | 496 | * functions which must match the way they are locked/unlocked outside |
497 | * of this macro. | 497 | * of this macro. |
498 | * | 498 | * |
499 | * wake_up_locked() has to be called after changing any variable that could | 499 | * wake_up_locked() has to be called after changing any variable that could |
500 | * change the result of the wait condition. | 500 | * change the result of the wait condition. |
501 | * | 501 | * |
502 | * The function will return -ERESTARTSYS if it was interrupted by a | 502 | * The function will return -ERESTARTSYS if it was interrupted by a |
503 | * signal and 0 if @condition evaluated to true. | 503 | * signal and 0 if @condition evaluated to true. |
504 | */ | 504 | */ |
505 | #define wait_event_interruptible_locked_irq(wq, condition) \ | 505 | #define wait_event_interruptible_locked_irq(wq, condition) \ |
506 | ((condition) \ | 506 | ((condition) \ |
507 | ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1)) | 507 | ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1)) |
508 | 508 | ||
509 | /** | 509 | /** |
510 | * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true | 510 | * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true |
511 | * @wq: the waitqueue to wait on | 511 | * @wq: the waitqueue to wait on |
512 | * @condition: a C expression for the event to wait for | 512 | * @condition: a C expression for the event to wait for |
513 | * | 513 | * |
514 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the | 514 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the |
515 | * @condition evaluates to true or a signal is received. | 515 | * @condition evaluates to true or a signal is received. |
516 | * The @condition is checked each time the waitqueue @wq is woken up. | 516 | * The @condition is checked each time the waitqueue @wq is woken up. |
517 | * | 517 | * |
518 | * It must be called with wq.lock held. The spinlock is unlocked | 518 | * It must be called with wq.lock held. The spinlock is unlocked |
519 | * while sleeping, but @condition is tested with the lock held, and | 519 | * while sleeping, but @condition is tested with the lock held, and |
520 | * the lock is held again when this macro exits. | 520 | * the lock is held again when this macro exits. |
521 | * | 521 | * |
522 | * The lock is locked/unlocked using spin_lock()/spin_unlock() | 522 | * The lock is locked/unlocked using spin_lock()/spin_unlock() |
523 | * functions which must match the way they are locked/unlocked outside | 523 | * functions which must match the way they are locked/unlocked outside |
524 | * of this macro. | 524 | * of this macro. |
525 | * | 525 | * |
526 | * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag | 526 | * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag |
527 | * set, so if this process is woken up while other processes are | 527 | * set, so if this process is woken up while other processes are |
528 | * waiting on the list, no further processes are considered. | 528 | * waiting on the list, no further processes are considered. |
529 | * | 529 | * |
530 | * wake_up_locked() has to be called after changing any variable that could | 530 | * wake_up_locked() has to be called after changing any variable that could |
531 | * change the result of the wait condition. | 531 | * change the result of the wait condition. |
532 | * | 532 | * |
533 | * The function will return -ERESTARTSYS if it was interrupted by a | 533 | * The function will return -ERESTARTSYS if it was interrupted by a |
534 | * signal and 0 if @condition evaluated to true. | 534 | * signal and 0 if @condition evaluated to true. |
535 | */ | 535 | */ |
536 | #define wait_event_interruptible_exclusive_locked(wq, condition) \ | 536 | #define wait_event_interruptible_exclusive_locked(wq, condition) \ |
537 | ((condition) \ | 537 | ((condition) \ |
538 | ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0)) | 538 | ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0)) |
539 | 539 | ||
540 | /** | 540 | /** |
541 | * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true | 541 | * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true |
542 | * @wq: the waitqueue to wait on | 542 | * @wq: the waitqueue to wait on |
543 | * @condition: a C expression for the event to wait for | 543 | * @condition: a C expression for the event to wait for |
544 | * | 544 | * |
545 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the | 545 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the |
546 | * @condition evaluates to true or a signal is received. | 546 | * @condition evaluates to true or a signal is received. |
547 | * The @condition is checked each time the waitqueue @wq is woken up. | 547 | * The @condition is checked each time the waitqueue @wq is woken up. |
548 | * | 548 | * |
549 | * It must be called with wq.lock held. The spinlock is unlocked | 549 | * It must be called with wq.lock held. The spinlock is unlocked |
550 | * while sleeping, but @condition is tested with the lock held, and | 550 | * while sleeping, but @condition is tested with the lock held, and |
551 | * the lock is held again when this macro exits. | 551 | * the lock is held again when this macro exits. |
552 | * | 552 | * |
553 | * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq() | 553 | * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq() |
554 | * functions which must match the way they are locked/unlocked outside | 554 | * functions which must match the way they are locked/unlocked outside |
555 | * of this macro. | 555 | * of this macro. |
556 | * | 556 | * |
557 | * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag | 557 | * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag |
558 | * set, so if this process is woken up while other processes are | 558 | * set, so if this process is woken up while other processes are |
559 | * waiting on the list, no further processes are considered. | 559 | * waiting on the list, no further processes are considered. |
560 | * | 560 | * |
561 | * wake_up_locked() has to be called after changing any variable that could | 561 | * wake_up_locked() has to be called after changing any variable that could |
562 | * change the result of the wait condition. | 562 | * change the result of the wait condition. |
563 | * | 563 | * |
564 | * The function will return -ERESTARTSYS if it was interrupted by a | 564 | * The function will return -ERESTARTSYS if it was interrupted by a |
565 | * signal and 0 if @condition evaluated to true. | 565 | * signal and 0 if @condition evaluated to true. |
566 | */ | 566 | */ |
567 | #define wait_event_interruptible_exclusive_locked_irq(wq, condition) \ | 567 | #define wait_event_interruptible_exclusive_locked_irq(wq, condition) \ |
568 | ((condition) \ | 568 | ((condition) \ |
569 | ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1)) | 569 | ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1)) |
570 | 570 | ||
571 | 571 | ||
572 | 572 | ||
573 | #define __wait_event_killable(wq, condition, ret) \ | 573 | #define __wait_event_killable(wq, condition) \ |
574 | ___wait_event(wq, condition, TASK_KILLABLE, 0, ret, schedule()) | 574 | ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule()) |
575 | 575 | ||
576 | /** | 576 | /** |
577 | * wait_event_killable - sleep until a condition gets true | 577 | * wait_event_killable - sleep until a condition gets true |
578 | * @wq: the waitqueue to wait on | 578 | * @wq: the waitqueue to wait on |
579 | * @condition: a C expression for the event to wait for | 579 | * @condition: a C expression for the event to wait for |
580 | * | 580 | * |
581 | * The process is put to sleep (TASK_KILLABLE) until the | 581 | * The process is put to sleep (TASK_KILLABLE) until the |
582 | * @condition evaluates to true or a signal is received. | 582 | * @condition evaluates to true or a signal is received. |
583 | * The @condition is checked each time the waitqueue @wq is woken up. | 583 | * The @condition is checked each time the waitqueue @wq is woken up. |
584 | * | 584 | * |
585 | * wake_up() has to be called after changing any variable that could | 585 | * wake_up() has to be called after changing any variable that could |
586 | * change the result of the wait condition. | 586 | * change the result of the wait condition. |
587 | * | 587 | * |
588 | * The function will return -ERESTARTSYS if it was interrupted by a | 588 | * The function will return -ERESTARTSYS if it was interrupted by a |
589 | * signal and 0 if @condition evaluated to true. | 589 | * signal and 0 if @condition evaluated to true. |
590 | */ | 590 | */ |
591 | #define wait_event_killable(wq, condition) \ | 591 | #define wait_event_killable(wq, condition) \ |
592 | ({ \ | 592 | ({ \ |
593 | int __ret = 0; \ | 593 | int __ret = 0; \ |
594 | if (!(condition)) \ | 594 | if (!(condition)) \ |
595 | __wait_event_killable(wq, condition, __ret); \ | 595 | __ret = __wait_event_killable(wq, condition); \ |
596 | __ret; \ | 596 | __ret; \ |
597 | }) | 597 | }) |
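
A sketch contrasting this with plain interruptible sleep: in TASK_KILLABLE only fatal signals (ones that will terminate the task anyway, such as SIGKILL) end the wait, which suits I/O paths that must not be aborted by arbitrary signals (names hypothetical):

	int err = wait_event_killable(my_wq, my_event);
	if (err)
		return err;	/* -ERESTARTSYS: a fatal signal is pending */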
598 | 598 | ||
599 | 599 | ||
600 | #define __wait_event_lock_irq(wq, condition, lock, cmd) \ | 600 | #define __wait_event_lock_irq(wq, condition, lock, cmd) \ |
601 | ___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \ | 601 | (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ |
602 | ___wait_nop_ret, \ | 602 | spin_unlock_irq(&lock); \ |
603 | spin_unlock_irq(&lock); \ | 603 | cmd; \ |
604 | cmd; \ | 604 | schedule(); \ |
605 | schedule(); \ | 605 | spin_lock_irq(&lock)) |
606 | spin_lock_irq(&lock)) | ||
607 | 606 | ||
608 | /** | 607 | /** |
609 | * wait_event_lock_irq_cmd - sleep until a condition gets true. The | 608 | * wait_event_lock_irq_cmd - sleep until a condition gets true. The |
610 | * condition is checked under the lock. This | 609 | * condition is checked under the lock. This |
611 | * is expected to be called with the lock | 610 | * is expected to be called with the lock |
612 | * taken. | 611 | * taken. |
613 | * @wq: the waitqueue to wait on | 612 | * @wq: the waitqueue to wait on |
614 | * @condition: a C expression for the event to wait for | 613 | * @condition: a C expression for the event to wait for |
615 | * @lock: a locked spinlock_t, which will be released before cmd | 614 | * @lock: a locked spinlock_t, which will be released before cmd |
616 | * and schedule() and reacquired afterwards. | 615 | * and schedule() and reacquired afterwards. |
617 | * @cmd: a command which is invoked outside the critical section before | 616 | * @cmd: a command which is invoked outside the critical section before |
618 | * sleep | 617 | * sleep |
619 | * | 618 | * |
620 | * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the | 619 | * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the |
621 | * @condition evaluates to true. The @condition is checked each time | 620 | * @condition evaluates to true. The @condition is checked each time |
622 | * the waitqueue @wq is woken up. | 621 | * the waitqueue @wq is woken up. |
623 | * | 622 | * |
624 | * wake_up() has to be called after changing any variable that could | 623 | * wake_up() has to be called after changing any variable that could |
625 | * change the result of the wait condition. | 624 | * change the result of the wait condition. |
626 | * | 625 | * |
627 | * This is supposed to be called while holding the lock. The lock is | 626 | * This is supposed to be called while holding the lock. The lock is |
628 | * dropped before invoking the cmd and going to sleep and is reacquired | 627 | * dropped before invoking the cmd and going to sleep and is reacquired |
629 | * afterwards. | 628 | * afterwards. |
630 | */ | 629 | */ |
631 | #define wait_event_lock_irq_cmd(wq, condition, lock, cmd) \ | 630 | #define wait_event_lock_irq_cmd(wq, condition, lock, cmd) \ |
632 | do { \ | 631 | do { \ |
633 | if (condition) \ | 632 | if (condition) \ |
634 | break; \ | 633 | break; \ |
635 | __wait_event_lock_irq(wq, condition, lock, cmd); \ | 634 | __wait_event_lock_irq(wq, condition, lock, cmd); \ |
636 | } while (0) | 635 | } while (0) |
637 | 636 | ||
638 | /** | 637 | /** |
639 | * wait_event_lock_irq - sleep until a condition gets true. The | 638 | * wait_event_lock_irq - sleep until a condition gets true. The |
640 | * condition is checked under the lock. This | 639 | * condition is checked under the lock. This |
641 | * is expected to be called with the lock | 640 | * is expected to be called with the lock |
642 | * taken. | 641 | * taken. |
643 | * @wq: the waitqueue to wait on | 642 | * @wq: the waitqueue to wait on |
644 | * @condition: a C expression for the event to wait for | 643 | * @condition: a C expression for the event to wait for |
645 | * @lock: a locked spinlock_t, which will be released before schedule() | 644 | * @lock: a locked spinlock_t, which will be released before schedule() |
646 | * and reacquired afterwards. | 645 | * and reacquired afterwards. |
647 | * | 646 | * |
648 | * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the | 647 | * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the |
649 | * @condition evaluates to true. The @condition is checked each time | 648 | * @condition evaluates to true. The @condition is checked each time |
650 | * the waitqueue @wq is woken up. | 649 | * the waitqueue @wq is woken up. |
651 | * | 650 | * |
652 | * wake_up() has to be called after changing any variable that could | 651 | * wake_up() has to be called after changing any variable that could |
653 | * change the result of the wait condition. | 652 | * change the result of the wait condition. |
654 | * | 653 | * |
655 | * This is supposed to be called while holding the lock. The lock is | 654 | * This is supposed to be called while holding the lock. The lock is |
656 | * dropped before going to sleep and is reacquired afterwards. | 655 | * dropped before going to sleep and is reacquired afterwards. |
657 | */ | 656 | */ |
658 | #define wait_event_lock_irq(wq, condition, lock) \ | 657 | #define wait_event_lock_irq(wq, condition, lock) \ |
659 | do { \ | 658 | do { \ |
660 | if (condition) \ | 659 | if (condition) \ |
661 | break; \ | 660 | break; \ |
662 | __wait_event_lock_irq(wq, condition, lock, ); \ | 661 | __wait_event_lock_irq(wq, condition, lock, ); \ |
663 | } while (0) | 662 | } while (0) |
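
A sketch of the locking discipline this macro enforces: the condition is only ever evaluated with the spinlock held, and the lock is dropped just around schedule() (my_lock and my_count are hypothetical):

	static DEFINE_SPINLOCK(my_lock);
	static unsigned int my_count;

	spin_lock_irq(&my_lock);
	/* my_lock is released around schedule() and reacquired before
	 * every re-check of my_count. */
	wait_event_lock_irq(my_wq, my_count > 0, my_lock);
	my_count--;			/* still under my_lock here */
	spin_unlock_irq(&my_lock);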
664 | 663 | ||
665 | 664 | ||
666 | #define __wait_event_interruptible_lock_irq(wq, condition, lock, ret, cmd) \ | 665 | #define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd) \ |
667 | ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, ret, \ | 666 | ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \ |
668 | spin_unlock_irq(&lock); \ | 667 | spin_unlock_irq(&lock); \ |
669 | cmd; \ | 668 | cmd; \ |
670 | schedule(); \ | 669 | schedule(); \ |
671 | spin_lock_irq(&lock)) | 670 | spin_lock_irq(&lock)) |
672 | 671 | ||
673 | /** | 672 | /** |
674 | * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true. | 673 | * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true. |
675 | * The condition is checked under the lock. This is expected to | 674 | * The condition is checked under the lock. This is expected to |
676 | * be called with the lock taken. | 675 | * be called with the lock taken. |
677 | * @wq: the waitqueue to wait on | 676 | * @wq: the waitqueue to wait on |
678 | * @condition: a C expression for the event to wait for | 677 | * @condition: a C expression for the event to wait for |
679 | * @lock: a locked spinlock_t, which will be released before cmd and | 678 | * @lock: a locked spinlock_t, which will be released before cmd and |
680 | * schedule() and reacquired afterwards. | 679 | * schedule() and reacquired afterwards. |
681 | * @cmd: a command which is invoked outside the critical section before | 680 | * @cmd: a command which is invoked outside the critical section before |
682 | * sleep | 681 | * sleep |
683 | * | 682 | * |
684 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the | 683 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the |
685 | * @condition evaluates to true or a signal is received. The @condition is | 684 | * @condition evaluates to true or a signal is received. The @condition is |
686 | * checked each time the waitqueue @wq is woken up. | 685 | * checked each time the waitqueue @wq is woken up. |
687 | * | 686 | * |
688 | * wake_up() has to be called after changing any variable that could | 687 | * wake_up() has to be called after changing any variable that could |
689 | * change the result of the wait condition. | 688 | * change the result of the wait condition. |
690 | * | 689 | * |
691 | * This is supposed to be called while holding the lock. The lock is | 690 | * This is supposed to be called while holding the lock. The lock is |
692 | * dropped before invoking the cmd and going to sleep and is reacquired | 691 | * dropped before invoking the cmd and going to sleep and is reacquired |
693 | * afterwards. | 692 | * afterwards. |
694 | * | 693 | * |
695 | * The macro will return -ERESTARTSYS if it was interrupted by a signal | 694 | * The macro will return -ERESTARTSYS if it was interrupted by a signal |
696 | * and 0 if @condition evaluated to true. | 695 | * and 0 if @condition evaluated to true. |
697 | */ | 696 | */ |
698 | #define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \ | 697 | #define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \ |
699 | ({ \ | 698 | ({ \ |
700 | int __ret = 0; \ | 699 | int __ret = 0; \ |
701 | \ | ||
702 | if (!(condition)) \ | 700 | if (!(condition)) \ |
703 | __wait_event_interruptible_lock_irq(wq, condition, \ | 701 | __ret = __wait_event_interruptible_lock_irq(wq, \ |
704 | lock, __ret, cmd); \ | 702 | condition, lock, cmd); \ |
705 | __ret; \ | 703 | __ret; \ |
706 | }) | 704 | }) |
707 | 705 | ||
708 | /** | 706 | /** |
709 | * wait_event_interruptible_lock_irq - sleep until a condition gets true. | 707 | * wait_event_interruptible_lock_irq - sleep until a condition gets true. |
710 | * The condition is checked under the lock. This is expected | 708 | * The condition is checked under the lock. This is expected |
711 | * to be called with the lock taken. | 709 | * to be called with the lock taken. |
712 | * @wq: the waitqueue to wait on | 710 | * @wq: the waitqueue to wait on |
713 | * @condition: a C expression for the event to wait for | 711 | * @condition: a C expression for the event to wait for |
714 | * @lock: a locked spinlock_t, which will be released before schedule() | 712 | * @lock: a locked spinlock_t, which will be released before schedule() |
715 | * and reacquired afterwards. | 713 | * and reacquired afterwards. |
716 | * | 714 | * |
717 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the | 715 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the |
718 | * @condition evaluates to true or a signal is received. The @condition is | 716 | * @condition evaluates to true or a signal is received. The @condition is |
719 | * checked each time the waitqueue @wq is woken up. | 717 | * checked each time the waitqueue @wq is woken up. |
720 | * | 718 | * |
721 | * wake_up() has to be called after changing any variable that could | 719 | * wake_up() has to be called after changing any variable that could |
722 | * change the result of the wait condition. | 720 | * change the result of the wait condition. |
723 | * | 721 | * |
724 | * This is supposed to be called while holding the lock. The lock is | 722 | * This is supposed to be called while holding the lock. The lock is |
725 | * dropped before going to sleep and is reacquired afterwards. | 723 | * dropped before going to sleep and is reacquired afterwards. |
726 | * | 724 | * |
727 | * The macro will return -ERESTARTSYS if it was interrupted by a signal | 725 | * The macro will return -ERESTARTSYS if it was interrupted by a signal |
728 | * and 0 if @condition evaluated to true. | 726 | * and 0 if @condition evaluated to true. |
729 | */ | 727 | */ |
730 | #define wait_event_interruptible_lock_irq(wq, condition, lock) \ | 728 | #define wait_event_interruptible_lock_irq(wq, condition, lock) \ |
731 | ({ \ | 729 | ({ \ |
732 | int __ret = 0; \ | 730 | int __ret = 0; \ |
733 | \ | ||
734 | if (!(condition)) \ | 731 | if (!(condition)) \ |
735 | __wait_event_interruptible_lock_irq(wq, condition, \ | 732 | __ret = __wait_event_interruptible_lock_irq(wq, \ |
736 | lock, __ret, ); \ | 733 | condition, lock,); \ |
737 | __ret; \ | 734 | __ret; \ |
738 | }) | 735 | }) |
739 | 736 | ||
740 | #define __wait_event_interruptible_lock_irq_timeout(wq, condition, lock, ret) \ | 737 | #define __wait_event_interruptible_lock_irq_timeout(wq, condition, \ |
741 | ___wait_event(wq, ___wait_cond_timeout(condition, ret), \ | 738 | lock, timeout) \ |
742 | TASK_INTERRUPTIBLE, 0, ret, \ | 739 | ___wait_event(wq, ___wait_cond_timeout(condition), \ |
743 | spin_unlock_irq(&lock); \ | 740 | TASK_INTERRUPTIBLE, 0, timeout, \ |
744 | ret = schedule_timeout(ret); \ | 741 | spin_unlock_irq(&lock); \ |
742 | __ret = schedule_timeout(__ret); \ | ||
745 | spin_lock_irq(&lock)); | 743 | spin_lock_irq(&lock)); |
746 | 744 | ||
747 | /** | 745 | /** |
748 | * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses. | 746 | * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses. |
749 | * The condition is checked under the lock. This is expected | 747 | * The condition is checked under the lock. This is expected |
750 | * to be called with the lock taken. | 748 | * to be called with the lock taken. |
751 | * @wq: the waitqueue to wait on | 749 | * @wq: the waitqueue to wait on |
752 | * @condition: a C expression for the event to wait for | 750 | * @condition: a C expression for the event to wait for |
753 | * @lock: a locked spinlock_t, which will be released before schedule() | 751 | * @lock: a locked spinlock_t, which will be released before schedule() |
754 | * and reacquired afterwards. | 752 | * and reacquired afterwards. |
755 | * @timeout: timeout, in jiffies | 753 | * @timeout: timeout, in jiffies |
756 | * | 754 | * |
757 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the | 755 | * The process is put to sleep (TASK_INTERRUPTIBLE) until the |
758 | * @condition evaluates to true or a signal is received. The @condition is | 756 | * @condition evaluates to true or a signal is received. The @condition is |
759 | * checked each time the waitqueue @wq is woken up. | 757 | * checked each time the waitqueue @wq is woken up. |
760 | * | 758 | * |
761 | * wake_up() has to be called after changing any variable that could | 759 | * wake_up() has to be called after changing any variable that could |
762 | * change the result of the wait condition. | 760 | * change the result of the wait condition. |
763 | * | 761 | * |
764 | * This is supposed to be called while holding the lock. The lock is | 762 | * This is supposed to be called while holding the lock. The lock is |
765 | * dropped before going to sleep and is reacquired afterwards. | 763 | * dropped before going to sleep and is reacquired afterwards. |
766 | * | 764 | * |
767 | * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it | 765 | * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it |
768 | * was interrupted by a signal, or the remaining jiffies (at least 1) | 766 | * was interrupted by a signal, or the remaining jiffies (at least 1) |
769 | * if the @condition evaluated to true before the @timeout elapsed. | 767 | * if the @condition evaluated to true before the @timeout elapsed. |
770 | */ | 768 | */ |
771 | #define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \ | 769 | #define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \ |
772 | timeout) \ | 770 | timeout) \ |
773 | ({ \ | 771 | ({ \ |
774 | int __ret = timeout; \ | 772 | long __ret = timeout; \ |
775 | \ | ||
776 | if (!(condition)) \ | 773 | if (!(condition)) \ |
777 | __wait_event_interruptible_lock_irq_timeout( \ | 774 | __ret = __wait_event_interruptible_lock_irq_timeout( \ |
778 | wq, condition, lock, __ret); \ | 775 | wq, condition, lock, timeout); \ |
779 | __ret; \ | 776 | __ret; \ |
780 | }) | 777 | }) |
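
A sketch decoding all three outcomes of the timeout + lock variant; note the lock is held again on every exit path (names hypothetical):

	long ret;

	spin_lock_irq(&my_lock);
	ret = wait_event_interruptible_lock_irq_timeout(my_wq, my_cond,
							my_lock, HZ);
	spin_unlock_irq(&my_lock);

	if (ret == -ERESTARTSYS)
		return ret;		/* interrupted by a signal */
	if (ret == 0)
		return -ETIMEDOUT;	/* the timeout elapsed */
	/* ret > 0: my_cond became true with 'ret' jiffies remaining */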
781 | 778 | ||
782 | 779 | ||
783 | /* | 780 | /* |
784 | * These are the old interfaces to sleep waiting for an event. | 781 | * These are the old interfaces to sleep waiting for an event. |
785 | * They are racy. DO NOT use them, use the wait_event* interfaces above. | 782 | * They are racy. DO NOT use them, use the wait_event* interfaces above. |
786 | * We plan to remove these interfaces. | 783 | * We plan to remove these interfaces. |
787 | */ | 784 | */ |
788 | extern void sleep_on(wait_queue_head_t *q); | 785 | extern void sleep_on(wait_queue_head_t *q); |
789 | extern long sleep_on_timeout(wait_queue_head_t *q, | 786 | extern long sleep_on_timeout(wait_queue_head_t *q, |
790 | signed long timeout); | 787 | signed long timeout); |
791 | extern void interruptible_sleep_on(wait_queue_head_t *q); | 788 | extern void interruptible_sleep_on(wait_queue_head_t *q); |
792 | extern long interruptible_sleep_on_timeout(wait_queue_head_t *q, | 789 | extern long interruptible_sleep_on_timeout(wait_queue_head_t *q, |
793 | signed long timeout); | 790 | signed long timeout); |
794 | 791 | ||
795 | /* | 792 | /* |
796 | * Waitqueues which are removed from the waitqueue_head at wakeup time | 793 | * Waitqueues which are removed from the waitqueue_head at wakeup time |
797 | */ | 794 | */ |
798 | void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state); | 795 | void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state); |
799 | void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state); | 796 | void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state); |
800 | void finish_wait(wait_queue_head_t *q, wait_queue_t *wait); | 797 | void finish_wait(wait_queue_head_t *q, wait_queue_t *wait); |
801 | void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, | 798 | void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, |
802 | unsigned int mode, void *key); | 799 | unsigned int mode, void *key); |
803 | int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); | 800 | int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); |
804 | int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); | 801 | int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); |
805 | 802 | ||
806 | #define DEFINE_WAIT_FUNC(name, function) \ | 803 | #define DEFINE_WAIT_FUNC(name, function) \ |
807 | wait_queue_t name = { \ | 804 | wait_queue_t name = { \ |
808 | .private = current, \ | 805 | .private = current, \ |
809 | .func = function, \ | 806 | .func = function, \ |
810 | .task_list = LIST_HEAD_INIT((name).task_list), \ | 807 | .task_list = LIST_HEAD_INIT((name).task_list), \ |
811 | } | 808 | } |
812 | 809 | ||
813 | #define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function) | 810 | #define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function) |
814 | 811 | ||
815 | #define DEFINE_WAIT_BIT(name, word, bit) \ | 812 | #define DEFINE_WAIT_BIT(name, word, bit) \ |
816 | struct wait_bit_queue name = { \ | 813 | struct wait_bit_queue name = { \ |
817 | .key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \ | 814 | .key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \ |
818 | .wait = { \ | 815 | .wait = { \ |
819 | .private = current, \ | 816 | .private = current, \ |
820 | .func = wake_bit_function, \ | 817 | .func = wake_bit_function, \ |
821 | .task_list = \ | 818 | .task_list = \ |
822 | LIST_HEAD_INIT((name).wait.task_list), \ | 819 | LIST_HEAD_INIT((name).wait.task_list), \ |
823 | }, \ | 820 | }, \ |
824 | } | 821 | } |
825 | 822 | ||
826 | #define init_wait(wait) \ | 823 | #define init_wait(wait) \ |
827 | do { \ | 824 | do { \ |
828 | (wait)->private = current; \ | 825 | (wait)->private = current; \ |
829 | (wait)->func = autoremove_wake_function; \ | 826 | (wait)->func = autoremove_wake_function; \ |
830 | INIT_LIST_HEAD(&(wait)->task_list); \ | 827 | INIT_LIST_HEAD(&(wait)->task_list); \ |
831 | (wait)->flags = 0; \ | 828 | (wait)->flags = 0; \ |
832 | } while (0) | 829 | } while (0) |
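
The wait_event*() macros above all expand to a loop equivalent to this open-coded pattern; writing it out shows why stores to the condition must precede wake_up() — prepare_to_wait() queues the waiter before the condition is re-checked (my_wq and my_event are hypothetical):

	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
		if (my_event)		/* re-check after being queued */
			break;
		schedule();		/* actually sleep */
	}
	finish_wait(&my_wq, &wait);	/* dequeue and set TASK_RUNNING */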
833 | 830 | ||
834 | /** | 831 | /** |
835 | * wait_on_bit - wait for a bit to be cleared | 832 | * wait_on_bit - wait for a bit to be cleared |
836 | * @word: the word being waited on, a kernel virtual address | 833 | * @word: the word being waited on, a kernel virtual address |
837 | * @bit: the bit of the word being waited on | 834 | * @bit: the bit of the word being waited on |
838 | * @action: the function used to sleep, which may take special actions | 835 | * @action: the function used to sleep, which may take special actions |
839 | * @mode: the task state to sleep in | 836 | * @mode: the task state to sleep in |
840 | * | 837 | * |
841 | * There is a standard hashed waitqueue table for generic use. This | 838 | * There is a standard hashed waitqueue table for generic use. This |
842 | * is the part of the hashtable's accessor API that waits on a bit. | 839 | * is the part of the hashtable's accessor API that waits on a bit. |
843 | * For instance, if one were to have waiters on a bitflag, one would | 840 | * For instance, if one were to have waiters on a bitflag, one would |
844 | * call wait_on_bit() in threads waiting for the bit to clear. | 841 | * call wait_on_bit() in threads waiting for the bit to clear. |
845 | * One uses wait_on_bit() where one is waiting for the bit to clear, | 842 | * One uses wait_on_bit() where one is waiting for the bit to clear, |
846 | * but has no intention of setting it. | 843 | * but has no intention of setting it. |
847 | */ | 844 | */ |
848 | static inline int wait_on_bit(void *word, int bit, | 845 | static inline int wait_on_bit(void *word, int bit, |
849 | int (*action)(void *), unsigned mode) | 846 | int (*action)(void *), unsigned mode) |
850 | { | 847 | { |
851 | if (!test_bit(bit, word)) | 848 | if (!test_bit(bit, word)) |
852 | return 0; | 849 | return 0; |
853 | return out_of_line_wait_on_bit(word, bit, action, mode); | 850 | return out_of_line_wait_on_bit(word, bit, action, mode); |
854 | } | 851 | } |
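
A sketch of the bit-wait pair: the waiter supplies an @action that performs the actual sleep, and the clearing side must order the clear_bit() before the wakeup (my_flags, MY_BUSY and my_wait_action are hypothetical):

	static unsigned long my_flags;
	#define MY_BUSY 0

	static int my_wait_action(void *word)
	{
		schedule();		/* the simplest possible @action */
		return 0;
	}

	/* waiter */
	wait_on_bit(&my_flags, MY_BUSY, my_wait_action, TASK_UNINTERRUPTIBLE);

	/* clearing side */
	clear_bit(MY_BUSY, &my_flags);
	smp_mb__after_clear_bit();	/* make the clear visible first */
	wake_up_bit(&my_flags, MY_BUSY);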
855 | 852 | ||
856 | /** | 853 | /** |
857 | * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it | 854 | * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it |
858 | * @word: the word being waited on, a kernel virtual address | 855 | * @word: the word being waited on, a kernel virtual address |
859 | * @bit: the bit of the word being waited on | 856 | * @bit: the bit of the word being waited on |
860 | * @action: the function used to sleep, which may take special actions | 857 | * @action: the function used to sleep, which may take special actions |
861 | * @mode: the task state to sleep in | 858 | * @mode: the task state to sleep in |
862 | * | 859 | * |
863 | * There is a standard hashed waitqueue table for generic use. This | 860 | * There is a standard hashed waitqueue table for generic use. This |
864 | * is the part of the hashtable's accessor API that waits on a bit | 861 | * is the part of the hashtable's accessor API that waits on a bit |
865 | * when one intends to set it, for instance, trying to lock bitflags. | 862 | * when one intends to set it, for instance, trying to lock bitflags. |
866 | * For instance, if one were to have waiters trying to set a bitflag | 863 | * For instance, if one were to have waiters trying to set a bitflag |
867 | * and waiting for it to clear before setting it, one would call | 864 | * and waiting for it to clear before setting it, one would call |
868 | * wait_on_bit_lock() in threads waiting to be able to set the bit. | 865 | * wait_on_bit_lock() in threads waiting to be able to set the bit. |
869 | * One uses wait_on_bit_lock() where one is waiting for the bit to | 866 | * One uses wait_on_bit_lock() where one is waiting for the bit to |
870 | * clear with the intention of setting it, and when done, clearing it. | 867 | * clear with the intention of setting it, and when done, clearing it. |
871 | */ | 868 | */ |
872 | static inline int wait_on_bit_lock(void *word, int bit, | 869 | static inline int wait_on_bit_lock(void *word, int bit, |
873 | int (*action)(void *), unsigned mode) | 870 | int (*action)(void *), unsigned mode) |
874 | { | 871 | { |
875 | if (!test_and_set_bit(bit, word)) | 872 | if (!test_and_set_bit(bit, word)) |
876 | return 0; | 873 | return 0; |
877 | return out_of_line_wait_on_bit_lock(word, bit, action, mode); | 874 | return out_of_line_wait_on_bit_lock(word, bit, action, mode); |
878 | } | 875 | } |
879 | 876 | ||
880 | /** | 877 | /** |
881 | * wait_on_atomic_t - Wait for an atomic_t to become 0 | 878 | * wait_on_atomic_t - Wait for an atomic_t to become 0 |
882 | * @val: The atomic value being waited on, a kernel virtual address | 879 | * @val: The atomic value being waited on, a kernel virtual address |
883 | * @action: the function used to sleep, which may take special actions | 880 | * @action: the function used to sleep, which may take special actions |
884 | * @mode: the task state to sleep in | 881 | * @mode: the task state to sleep in |
885 | * | 882 | * |
886 | * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for | 883 | * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for |
887 | * the purpose of getting a waitqueue, but we set the key to a bit number | 884 | * the purpose of getting a waitqueue, but we set the key to a bit number |
888 | * outside of the target 'word'. | 885 | * outside of the target 'word'. |
889 | */ | 886 | */ |
890 | static inline | 887 | static inline |
891 | int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode) | 888 | int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode) |
892 | { | 889 | { |
893 | if (atomic_read(val) == 0) | 890 | if (atomic_read(val) == 0) |
894 | return 0; | 891 | return 0; |
895 | return out_of_line_wait_on_atomic_t(val, action, mode); | 892 | return out_of_line_wait_on_atomic_t(val, action, mode); |
896 | } | 893 | } |
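
A refcount-style sketch for this helper; the last user signals through the atomic_t counterpart of wake_up_bit(), wake_up_atomic_t() (my_users and my_atomic_action are hypothetical):

	static atomic_t my_users = ATOMIC_INIT(0);

	static int my_atomic_action(atomic_t *v)
	{
		schedule();
		return 0;
	}

	/* waiter: block until every user is gone */
	wait_on_atomic_t(&my_users, my_atomic_action, TASK_UNINTERRUPTIBLE);

	/* dropping side */
	if (atomic_dec_and_test(&my_users))
		wake_up_atomic_t(&my_users);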
net/irda/af_irda.c
1 | /********************************************************************* | 1 | /********************************************************************* |
2 | * | 2 | * |
3 | * Filename: af_irda.c | 3 | * Filename: af_irda.c |
4 | * Version: 0.9 | 4 | * Version: 0.9 |
5 | * Description: IrDA sockets implementation | 5 | * Description: IrDA sockets implementation |
6 | * Status: Stable | 6 | * Status: Stable |
7 | * Author: Dag Brattli <dagb@cs.uit.no> | 7 | * Author: Dag Brattli <dagb@cs.uit.no> |
8 | * Created at: Sun May 31 10:12:43 1998 | 8 | * Created at: Sun May 31 10:12:43 1998 |
9 | * Modified at: Sat Dec 25 21:10:23 1999 | 9 | * Modified at: Sat Dec 25 21:10:23 1999 |
10 | * Modified by: Dag Brattli <dag@brattli.net> | 10 | * Modified by: Dag Brattli <dag@brattli.net> |
11 | * Sources: af_netroom.c, af_ax25.c, af_rose.c, af_x25.c etc. | 11 | * Sources: af_netroom.c, af_ax25.c, af_rose.c, af_x25.c etc. |
12 | * | 12 | * |
13 | * Copyright (c) 1999 Dag Brattli <dagb@cs.uit.no> | 13 | * Copyright (c) 1999 Dag Brattli <dagb@cs.uit.no> |
14 | * Copyright (c) 1999-2003 Jean Tourrilhes <jt@hpl.hp.com> | 14 | * Copyright (c) 1999-2003 Jean Tourrilhes <jt@hpl.hp.com> |
15 | * All Rights Reserved. | 15 | * All Rights Reserved. |
16 | * | 16 | * |
17 | * This program is free software; you can redistribute it and/or | 17 | * This program is free software; you can redistribute it and/or |
18 | * modify it under the terms of the GNU General Public License as | 18 | * modify it under the terms of the GNU General Public License as |
19 | * published by the Free Software Foundation; either version 2 of | 19 | * published by the Free Software Foundation; either version 2 of |
20 | * the License, or (at your option) any later version. | 20 | * the License, or (at your option) any later version. |
21 | * | 21 | * |
22 | * This program is distributed in the hope that it will be useful, | 22 | * This program is distributed in the hope that it will be useful, |
23 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 23 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
24 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 24 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
25 | * GNU General Public License for more details. | 25 | * GNU General Public License for more details. |
26 | * | 26 | * |
27 | * You should have received a copy of the GNU General Public License | 27 | * You should have received a copy of the GNU General Public License |
28 | * along with this program; if not, write to the Free Software | 28 | * along with this program; if not, write to the Free Software |
29 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, | 29 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, |
30 | * MA 02111-1307 USA | 30 | * MA 02111-1307 USA |
31 | * | 31 | * |
32 | * Linux-IrDA now supports four different types of IrDA sockets: | 32 | * Linux-IrDA now supports four different types of IrDA sockets: |
33 | * | 33 | * |
34 | * o SOCK_STREAM: TinyTP connections with SAR disabled. The | 34 | * o SOCK_STREAM: TinyTP connections with SAR disabled. The |
35 | * max SDU size is 0 for conn. of this type | 35 | * max SDU size is 0 for conn. of this type |
36 | * o SOCK_SEQPACKET: TinyTP connections with SAR enabled. TTP may | 36 | * o SOCK_SEQPACKET: TinyTP connections with SAR enabled. TTP may |
37 | * fragment the messages, but will preserve | 37 | * fragment the messages, but will preserve |
38 | * the message boundaries | 38 | * the message boundaries |
39 | * o SOCK_DGRAM: IRDAPROTO_UNITDATA: TinyTP connections with Unitdata | 39 | * o SOCK_DGRAM: IRDAPROTO_UNITDATA: TinyTP connections with Unitdata |
40 | * (unreliable) transfers | 40 | * (unreliable) transfers |
41 | * IRDAPROTO_ULTRA: Connectionless and unreliable data | 41 | * IRDAPROTO_ULTRA: Connectionless and unreliable data |
42 | * | 42 | * |
43 | ********************************************************************/ | 43 | ********************************************************************/ |
44 | 44 | ||
45 | #include <linux/capability.h> | 45 | #include <linux/capability.h> |
46 | #include <linux/module.h> | 46 | #include <linux/module.h> |
47 | #include <linux/types.h> | 47 | #include <linux/types.h> |
48 | #include <linux/socket.h> | 48 | #include <linux/socket.h> |
49 | #include <linux/sockios.h> | 49 | #include <linux/sockios.h> |
50 | #include <linux/slab.h> | 50 | #include <linux/slab.h> |
51 | #include <linux/init.h> | 51 | #include <linux/init.h> |
52 | #include <linux/net.h> | 52 | #include <linux/net.h> |
53 | #include <linux/irda.h> | 53 | #include <linux/irda.h> |
54 | #include <linux/poll.h> | 54 | #include <linux/poll.h> |
55 | 55 | ||
56 | #include <asm/ioctls.h> /* TIOCOUTQ, TIOCINQ */ | 56 | #include <asm/ioctls.h> /* TIOCOUTQ, TIOCINQ */ |
57 | #include <asm/uaccess.h> | 57 | #include <asm/uaccess.h> |
58 | 58 | ||
59 | #include <net/sock.h> | 59 | #include <net/sock.h> |
60 | #include <net/tcp_states.h> | 60 | #include <net/tcp_states.h> |
61 | 61 | ||
62 | #include <net/irda/af_irda.h> | 62 | #include <net/irda/af_irda.h> |
63 | 63 | ||
64 | static int irda_create(struct net *net, struct socket *sock, int protocol, int kern); | 64 | static int irda_create(struct net *net, struct socket *sock, int protocol, int kern); |
65 | 65 | ||
66 | static const struct proto_ops irda_stream_ops; | 66 | static const struct proto_ops irda_stream_ops; |
67 | static const struct proto_ops irda_seqpacket_ops; | 67 | static const struct proto_ops irda_seqpacket_ops; |
68 | static const struct proto_ops irda_dgram_ops; | 68 | static const struct proto_ops irda_dgram_ops; |
69 | 69 | ||
70 | #ifdef CONFIG_IRDA_ULTRA | 70 | #ifdef CONFIG_IRDA_ULTRA |
71 | static const struct proto_ops irda_ultra_ops; | 71 | static const struct proto_ops irda_ultra_ops; |
72 | #define ULTRA_MAX_DATA 382 | 72 | #define ULTRA_MAX_DATA 382 |
73 | #endif /* CONFIG_IRDA_ULTRA */ | 73 | #endif /* CONFIG_IRDA_ULTRA */ |
74 | 74 | ||
75 | #define IRDA_MAX_HEADER (TTP_MAX_HEADER) | 75 | #define IRDA_MAX_HEADER (TTP_MAX_HEADER) |
76 | 76 | ||
77 | /* | 77 | /* |
78 | * Function irda_data_indication (instance, sap, skb) | 78 | * Function irda_data_indication (instance, sap, skb) |
79 | * | 79 | * |
80 | * Received some data from TinyTP. Just queue it on the receive queue | 80 | * Received some data from TinyTP. Just queue it on the receive queue |
81 | * | 81 | * |
82 | */ | 82 | */ |
83 | static int irda_data_indication(void *instance, void *sap, struct sk_buff *skb) | 83 | static int irda_data_indication(void *instance, void *sap, struct sk_buff *skb) |
84 | { | 84 | { |
85 | struct irda_sock *self; | 85 | struct irda_sock *self; |
86 | struct sock *sk; | 86 | struct sock *sk; |
87 | int err; | 87 | int err; |
88 | 88 | ||
89 | IRDA_DEBUG(3, "%s()\n", __func__); | 89 | IRDA_DEBUG(3, "%s()\n", __func__); |
90 | 90 | ||
91 | self = instance; | 91 | self = instance; |
92 | sk = instance; | 92 | sk = instance; |
93 | 93 | ||
94 | err = sock_queue_rcv_skb(sk, skb); | 94 | err = sock_queue_rcv_skb(sk, skb); |
95 | if (err) { | 95 | if (err) { |
96 | IRDA_DEBUG(1, "%s(), error: no more mem!\n", __func__); | 96 | IRDA_DEBUG(1, "%s(), error: no more mem!\n", __func__); |
97 | self->rx_flow = FLOW_STOP; | 97 | self->rx_flow = FLOW_STOP; |
98 | 98 | ||
99 | /* When we return error, TTP will need to requeue the skb */ | 99 | /* When we return error, TTP will need to requeue the skb */ |
100 | return err; | 100 | return err; |
101 | } | 101 | } |
102 | 102 | ||
103 | return 0; | 103 | return 0; |
104 | } | 104 | } |
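/*
 * Illustration (not part of this file): the other half of the
 * backpressure contract above. Returning non-zero from the
 * data_indication callback makes TinyTP requeue the skb; once the
 * application drains the receive queue, the socket re-opens the flow.
 * A minimal sketch, assuming the irttp_flow_request() helper:
 */
#if 0	/* sketch only */
static void irda_rx_unstall(struct irda_sock *self)
{
	/* Receive queue has room again: ask IrTTP to resume delivery */
	if (self->rx_flow == FLOW_STOP) {
		self->rx_flow = FLOW_START;
		irttp_flow_request(self->tsap, FLOW_START);
	}
}
#endif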
105 | 105 | ||
106 | /* | 106 | /* |
107 | * Function irda_disconnect_indication (instance, sap, reason, skb) | 107 | * Function irda_disconnect_indication (instance, sap, reason, skb) |
108 | * | 108 | * |
109 | * Connection has been closed. Check reason to find out why | 109 | * Connection has been closed. Check reason to find out why |
110 | * | 110 | * |
111 | */ | 111 | */ |
112 | static void irda_disconnect_indication(void *instance, void *sap, | 112 | static void irda_disconnect_indication(void *instance, void *sap, |
113 | LM_REASON reason, struct sk_buff *skb) | 113 | LM_REASON reason, struct sk_buff *skb) |
114 | { | 114 | { |
115 | struct irda_sock *self; | 115 | struct irda_sock *self; |
116 | struct sock *sk; | 116 | struct sock *sk; |
117 | 117 | ||
118 | self = instance; | 118 | self = instance; |
119 | 119 | ||
120 | IRDA_DEBUG(2, "%s(%p)\n", __func__, self); | 120 | IRDA_DEBUG(2, "%s(%p)\n", __func__, self); |
121 | 121 | ||
122 | /* Don't care about it, but let's not leak it */ | 122 | /* Don't care about it, but let's not leak it */ |
123 | if(skb) | 123 | if(skb) |
124 | dev_kfree_skb(skb); | 124 | dev_kfree_skb(skb); |
125 | 125 | ||
126 | sk = instance; | 126 | sk = instance; |
127 | if (sk == NULL) { | 127 | if (sk == NULL) { |
128 | IRDA_DEBUG(0, "%s(%p) : BUG : sk is NULL\n", | 128 | IRDA_DEBUG(0, "%s(%p) : BUG : sk is NULL\n", |
129 | __func__, self); | 129 | __func__, self); |
130 | return; | 130 | return; |
131 | } | 131 | } |
132 | 132 | ||
133 | /* Prevent race conditions with irda_release() and irda_shutdown() */ | 133 | /* Prevent race conditions with irda_release() and irda_shutdown() */ |
134 | bh_lock_sock(sk); | 134 | bh_lock_sock(sk); |
135 | if (!sock_flag(sk, SOCK_DEAD) && sk->sk_state != TCP_CLOSE) { | 135 | if (!sock_flag(sk, SOCK_DEAD) && sk->sk_state != TCP_CLOSE) { |
136 | sk->sk_state = TCP_CLOSE; | 136 | sk->sk_state = TCP_CLOSE; |
137 | sk->sk_shutdown |= SEND_SHUTDOWN; | 137 | sk->sk_shutdown |= SEND_SHUTDOWN; |
138 | 138 | ||
139 | sk->sk_state_change(sk); | 139 | sk->sk_state_change(sk); |
140 | 140 | ||
141 | /* Close our TSAP. | 141 | /* Close our TSAP. |
142 | * If we leave it open, IrLMP puts it back into the list of | 142 | * If we leave it open, IrLMP puts it back into the list of |
143 | * unconnected LSAPs. The problem is that any incoming request | 143 | * unconnected LSAPs. The problem is that any incoming request |
144 | * can then be matched to this socket (and it will be, because | 144 | * can then be matched to this socket (and it will be, because |
145 | * it is at the head of the list). This would prevent any | 145 | * it is at the head of the list). This would prevent any |
146 | * listening socket waiting on the same TSAP to get those | 146 | * listening socket waiting on the same TSAP to get those |
147 | * requests. Some apps forget to close sockets, or hang on to it | 147 | * requests. Some apps forget to close sockets, or hang on to it |
148 | * a bit too long, so we may stay in this dead state long | 148 | * a bit too long, so we may stay in this dead state long |
149 | * enough to be noticed... | 149 | * enough to be noticed... |
150 | * Note : all socket functions do check sk->sk_state, so we are | 150 | * Note : all socket functions do check sk->sk_state, so we are |
151 | * safe... | 151 | * safe... |
152 | * Jean II | 152 | * Jean II |
153 | */ | 153 | */ |
154 | if (self->tsap) { | 154 | if (self->tsap) { |
155 | irttp_close_tsap(self->tsap); | 155 | irttp_close_tsap(self->tsap); |
156 | self->tsap = NULL; | 156 | self->tsap = NULL; |
157 | } | 157 | } |
158 | } | 158 | } |
159 | bh_unlock_sock(sk); | 159 | bh_unlock_sock(sk); |
160 | 160 | ||
161 | /* Note : once we are there, there is not much you want to do | 161 | /* Note : once we are there, there is not much you want to do |
162 | * with the socket anymore, apart from closing it. | 162 | * with the socket anymore, apart from closing it. |
163 | * For example, bind() and connect() won't reset sk->sk_err, | 163 | * For example, bind() and connect() won't reset sk->sk_err, |
164 | * sk->sk_shutdown and sk->sk_flags to valid values... | 164 | * sk->sk_shutdown and sk->sk_flags to valid values... |
165 | * Jean II | 165 | * Jean II |
166 | */ | 166 | */ |
167 | } | 167 | } |
168 | 168 | ||
169 | /* | 169 | /* |
170 | * Function irda_connect_confirm (instance, sap, qos, max_sdu_size, skb) | 170 | * Function irda_connect_confirm (instance, sap, qos, max_sdu_size, skb) |
171 | * | 171 | * |
172 | * Connection has been confirmed by the remote device | 172 | * Connection has been confirmed by the remote device |
173 | * | 173 | * |
174 | */ | 174 | */ |
175 | static void irda_connect_confirm(void *instance, void *sap, | 175 | static void irda_connect_confirm(void *instance, void *sap, |
176 | struct qos_info *qos, | 176 | struct qos_info *qos, |
177 | __u32 max_sdu_size, __u8 max_header_size, | 177 | __u32 max_sdu_size, __u8 max_header_size, |
178 | struct sk_buff *skb) | 178 | struct sk_buff *skb) |
179 | { | 179 | { |
180 | struct irda_sock *self; | 180 | struct irda_sock *self; |
181 | struct sock *sk; | 181 | struct sock *sk; |
182 | 182 | ||
183 | self = instance; | 183 | self = instance; |
184 | 184 | ||
185 | IRDA_DEBUG(2, "%s(%p)\n", __func__, self); | 185 | IRDA_DEBUG(2, "%s(%p)\n", __func__, self); |
186 | 186 | ||
187 | sk = instance; | 187 | sk = instance; |
188 | if (sk == NULL) { | 188 | if (sk == NULL) { |
189 | dev_kfree_skb(skb); | 189 | dev_kfree_skb(skb); |
190 | return; | 190 | return; |
191 | } | 191 | } |
192 | 192 | ||
193 | dev_kfree_skb(skb); | 193 | dev_kfree_skb(skb); |
194 | // Should be ??? skb_queue_tail(&sk->sk_receive_queue, skb); | 194 | // Should be ??? skb_queue_tail(&sk->sk_receive_queue, skb); |
195 | 195 | ||
196 | /* How much header space do we need to reserve */ | 196 | /* How much header space do we need to reserve */ |
197 | self->max_header_size = max_header_size; | 197 | self->max_header_size = max_header_size; |
198 | 198 | ||
199 | /* IrTTP max SDU size in transmit direction */ | 199 | /* IrTTP max SDU size in transmit direction */ |
200 | self->max_sdu_size_tx = max_sdu_size; | 200 | self->max_sdu_size_tx = max_sdu_size; |
201 | 201 | ||
202 | /* Find out what the largest chunk of data that we can transmit is */ | 202 | /* Find out what the largest chunk of data that we can transmit is */ |
203 | switch (sk->sk_type) { | 203 | switch (sk->sk_type) { |
204 | case SOCK_STREAM: | 204 | case SOCK_STREAM: |
205 | if (max_sdu_size != 0) { | 205 | if (max_sdu_size != 0) { |
206 | IRDA_ERROR("%s: max_sdu_size must be 0\n", | 206 | IRDA_ERROR("%s: max_sdu_size must be 0\n", |
207 | __func__); | 207 | __func__); |
208 | return; | 208 | return; |
209 | } | 209 | } |
210 | self->max_data_size = irttp_get_max_seg_size(self->tsap); | 210 | self->max_data_size = irttp_get_max_seg_size(self->tsap); |
211 | break; | 211 | break; |
212 | case SOCK_SEQPACKET: | 212 | case SOCK_SEQPACKET: |
213 | if (max_sdu_size == 0) { | 213 | if (max_sdu_size == 0) { |
214 | IRDA_ERROR("%s: max_sdu_size cannot be 0\n", | 214 | IRDA_ERROR("%s: max_sdu_size cannot be 0\n", |
215 | __func__); | 215 | __func__); |
216 | return; | 216 | return; |
217 | } | 217 | } |
218 | self->max_data_size = max_sdu_size; | 218 | self->max_data_size = max_sdu_size; |
219 | break; | 219 | break; |
220 | default: | 220 | default: |
221 | self->max_data_size = irttp_get_max_seg_size(self->tsap); | 221 | self->max_data_size = irttp_get_max_seg_size(self->tsap); |
222 | } | 222 | } |
223 | 223 | ||
224 | IRDA_DEBUG(2, "%s(), max_data_size=%d\n", __func__, | 224 | IRDA_DEBUG(2, "%s(), max_data_size=%d\n", __func__, |
225 | self->max_data_size); | 225 | self->max_data_size); |
226 | 226 | ||
227 | memcpy(&self->qos_tx, qos, sizeof(struct qos_info)); | 227 | memcpy(&self->qos_tx, qos, sizeof(struct qos_info)); |
228 | 228 | ||
229 | /* We are now connected! */ | 229 | /* We are now connected! */ |
230 | sk->sk_state = TCP_ESTABLISHED; | 230 | sk->sk_state = TCP_ESTABLISHED; |
231 | sk->sk_state_change(sk); | 231 | sk->sk_state_change(sk); |
232 | } | 232 | } |
233 | 233 | ||
234 | /* | 234 | /* |
235 | * Function irda_connect_indication(instance, sap, qos, max_sdu_size, userdata) | 235 | * Function irda_connect_indication(instance, sap, qos, max_sdu_size, userdata) |
236 | * | 236 | * |
237 | * Incoming connection | 237 | * Incoming connection |
238 | * | 238 | * |
239 | */ | 239 | */ |
240 | static void irda_connect_indication(void *instance, void *sap, | 240 | static void irda_connect_indication(void *instance, void *sap, |
241 | struct qos_info *qos, __u32 max_sdu_size, | 241 | struct qos_info *qos, __u32 max_sdu_size, |
242 | __u8 max_header_size, struct sk_buff *skb) | 242 | __u8 max_header_size, struct sk_buff *skb) |
243 | { | 243 | { |
244 | struct irda_sock *self; | 244 | struct irda_sock *self; |
245 | struct sock *sk; | 245 | struct sock *sk; |
246 | 246 | ||
247 | self = instance; | 247 | self = instance; |
248 | 248 | ||
249 | IRDA_DEBUG(2, "%s(%p)\n", __func__, self); | 249 | IRDA_DEBUG(2, "%s(%p)\n", __func__, self); |
250 | 250 | ||
251 | sk = instance; | 251 | sk = instance; |
252 | if (sk == NULL) { | 252 | if (sk == NULL) { |
253 | dev_kfree_skb(skb); | 253 | dev_kfree_skb(skb); |
254 | return; | 254 | return; |
255 | } | 255 | } |
256 | 256 | ||
257 | /* How much header space do we need to reserve */ | 257 | /* How much header space do we need to reserve */ |
258 | self->max_header_size = max_header_size; | 258 | self->max_header_size = max_header_size; |
259 | 259 | ||
260 | /* IrTTP max SDU size in transmit direction */ | 260 | /* IrTTP max SDU size in transmit direction */ |
261 | self->max_sdu_size_tx = max_sdu_size; | 261 | self->max_sdu_size_tx = max_sdu_size; |
262 | 262 | ||
263 | /* Find out what the largest chunk of data that we can transmit is */ | 263 | /* Find out what the largest chunk of data that we can transmit is */ |
264 | switch (sk->sk_type) { | 264 | switch (sk->sk_type) { |
265 | case SOCK_STREAM: | 265 | case SOCK_STREAM: |
266 | if (max_sdu_size != 0) { | 266 | if (max_sdu_size != 0) { |
267 | IRDA_ERROR("%s: max_sdu_size must be 0\n", | 267 | IRDA_ERROR("%s: max_sdu_size must be 0\n", |
268 | __func__); | 268 | __func__); |
269 | kfree_skb(skb); | 269 | kfree_skb(skb); |
270 | return; | 270 | return; |
271 | } | 271 | } |
272 | self->max_data_size = irttp_get_max_seg_size(self->tsap); | 272 | self->max_data_size = irttp_get_max_seg_size(self->tsap); |
273 | break; | 273 | break; |
274 | case SOCK_SEQPACKET: | 274 | case SOCK_SEQPACKET: |
275 | if (max_sdu_size == 0) { | 275 | if (max_sdu_size == 0) { |
276 | IRDA_ERROR("%s: max_sdu_size cannot be 0\n", | 276 | IRDA_ERROR("%s: max_sdu_size cannot be 0\n", |
277 | __func__); | 277 | __func__); |
278 | kfree_skb(skb); | 278 | kfree_skb(skb); |
279 | return; | 279 | return; |
280 | } | 280 | } |
281 | self->max_data_size = max_sdu_size; | 281 | self->max_data_size = max_sdu_size; |
282 | break; | 282 | break; |
283 | default: | 283 | default: |
284 | self->max_data_size = irttp_get_max_seg_size(self->tsap); | 284 | self->max_data_size = irttp_get_max_seg_size(self->tsap); |
285 | } | 285 | } |
286 | 286 | ||
287 | IRDA_DEBUG(2, "%s(), max_data_size=%d\n", __func__, | 287 | IRDA_DEBUG(2, "%s(), max_data_size=%d\n", __func__, |
288 | self->max_data_size); | 288 | self->max_data_size); |
289 | 289 | ||
290 | memcpy(&self->qos_tx, qos, sizeof(struct qos_info)); | 290 | memcpy(&self->qos_tx, qos, sizeof(struct qos_info)); |
291 | 291 | ||
292 | skb_queue_tail(&sk->sk_receive_queue, skb); | 292 | skb_queue_tail(&sk->sk_receive_queue, skb); |
293 | sk->sk_state_change(sk); | 293 | sk->sk_state_change(sk); |
294 | } | 294 | } |
295 | 295 | ||
296 | /* | 296 | /* |
297 | * Function irda_connect_response (handle) | 297 | * Function irda_connect_response (handle) |
298 | * | 298 | * |
299 | * Accept incoming connection | 299 | * Accept incoming connection |
300 | * | 300 | * |
301 | */ | 301 | */ |
302 | static void irda_connect_response(struct irda_sock *self) | 302 | static void irda_connect_response(struct irda_sock *self) |
303 | { | 303 | { |
304 | struct sk_buff *skb; | 304 | struct sk_buff *skb; |
305 | 305 | ||
306 | IRDA_DEBUG(2, "%s()\n", __func__); | 306 | IRDA_DEBUG(2, "%s()\n", __func__); |
307 | 307 | ||
308 | skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER, GFP_KERNEL); | 308 | skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER, GFP_KERNEL); |
309 | if (skb == NULL) { | 309 | if (skb == NULL) { |
310 | IRDA_DEBUG(0, "%s() Unable to allocate sk_buff!\n", | 310 | IRDA_DEBUG(0, "%s() Unable to allocate sk_buff!\n", |
311 | __func__); | 311 | __func__); |
312 | return; | 312 | return; |
313 | } | 313 | } |
314 | 314 | ||
315 | /* Reserve space for MUX_CONTROL and LAP header */ | 315 | /* Reserve space for MUX_CONTROL and LAP header */ |
316 | skb_reserve(skb, IRDA_MAX_HEADER); | 316 | skb_reserve(skb, IRDA_MAX_HEADER); |
317 | 317 | ||
318 | irttp_connect_response(self->tsap, self->max_sdu_size_rx, skb); | 318 | irttp_connect_response(self->tsap, self->max_sdu_size_rx, skb); |
319 | } | 319 | } |
320 | 320 | ||
321 | /* | 321 | /* |
322 | * Function irda_flow_indication (instance, sap, flow) | 322 | * Function irda_flow_indication (instance, sap, flow) |
323 | * | 323 | * |
324 | * Used by TinyTP to tell us if it can accept more data or not | 324 | * Used by TinyTP to tell us if it can accept more data or not |
325 | * | 325 | * |
326 | */ | 326 | */ |
327 | static void irda_flow_indication(void *instance, void *sap, LOCAL_FLOW flow) | 327 | static void irda_flow_indication(void *instance, void *sap, LOCAL_FLOW flow) |
328 | { | 328 | { |
329 | struct irda_sock *self; | 329 | struct irda_sock *self; |
330 | struct sock *sk; | 330 | struct sock *sk; |
331 | 331 | ||
332 | IRDA_DEBUG(2, "%s()\n", __func__); | 332 | IRDA_DEBUG(2, "%s()\n", __func__); |
333 | 333 | ||
334 | self = instance; | 334 | self = instance; |
335 | sk = instance; | 335 | sk = instance; |
336 | BUG_ON(sk == NULL); | 336 | BUG_ON(sk == NULL); |
337 | 337 | ||
338 | switch (flow) { | 338 | switch (flow) { |
339 | case FLOW_STOP: | 339 | case FLOW_STOP: |
340 | IRDA_DEBUG(1, "%s(), IrTTP wants us to slow down\n", | 340 | IRDA_DEBUG(1, "%s(), IrTTP wants us to slow down\n", |
341 | __func__); | 341 | __func__); |
342 | self->tx_flow = flow; | 342 | self->tx_flow = flow; |
343 | break; | 343 | break; |
344 | case FLOW_START: | 344 | case FLOW_START: |
345 | self->tx_flow = flow; | 345 | self->tx_flow = flow; |
346 | IRDA_DEBUG(1, "%s(), IrTTP wants us to start again\n", | 346 | IRDA_DEBUG(1, "%s(), IrTTP wants us to start again\n", |
347 | __func__); | 347 | __func__); |
348 | wake_up_interruptible(sk_sleep(sk)); | 348 | wake_up_interruptible(sk_sleep(sk)); |
349 | break; | 349 | break; |
350 | default: | 350 | default: |
351 | IRDA_DEBUG(0, "%s(), Unknown flow command!\n", __func__); | 351 | IRDA_DEBUG(0, "%s(), Unknown flow command!\n", __func__); |
352 | /* Unknown flow command, better stop */ | 352 | /* Unknown flow command, better stop */ |
353 | self->tx_flow = flow; | 353 | self->tx_flow = flow; |
354 | break; | 354 | break; |
355 | } | 355 | } |
356 | } | 356 | } |
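/*
 * Illustration (not part of this file): how a sender pairs with the
 * wakeup above. A minimal sketch, assuming we sleep on sk_sleep(sk)
 * until IrTTP re-opens the flow; wait_event_interruptible() returns 0
 * once the condition holds and -ERESTARTSYS if a signal arrives first.
 */
#if 0	/* sketch only */
static int irda_wait_for_tx_room(struct irda_sock *self, struct sock *sk)
{
	/* Woken by irda_flow_indication() setting tx_flow = FLOW_START */
	return wait_event_interruptible(*(sk_sleep(sk)),
					self->tx_flow != FLOW_STOP);
}
#endif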
357 | 357 | ||
358 | /* | 358 | /* |
359 | * Function irda_getvalue_confirm (obj_id, value, priv) | 359 | * Function irda_getvalue_confirm (obj_id, value, priv) |
360 | * | 360 | * |
361 | * Got answer from remote LM-IAS, just pass object to requester... | 361 | * Got answer from remote LM-IAS, just pass object to requester... |
362 | * | 362 | * |
363 | * Note : duplicate from above, but we need our own version that | 363 | * Note : duplicate from above, but we need our own version that |
364 | * doesn't touch the dtsap_sel and saves the full value structure... | 364 | * doesn't touch the dtsap_sel and saves the full value structure... |
365 | */ | 365 | */ |
366 | static void irda_getvalue_confirm(int result, __u16 obj_id, | 366 | static void irda_getvalue_confirm(int result, __u16 obj_id, |
367 | struct ias_value *value, void *priv) | 367 | struct ias_value *value, void *priv) |
368 | { | 368 | { |
369 | struct irda_sock *self; | 369 | struct irda_sock *self; |
370 | 370 | ||
371 | self = priv; | 371 | self = priv; |
372 | if (!self) { | 372 | if (!self) { |
373 | IRDA_WARNING("%s: lost myself!\n", __func__); | 373 | IRDA_WARNING("%s: lost myself!\n", __func__); |
374 | return; | 374 | return; |
375 | } | 375 | } |
376 | 376 | ||
377 | IRDA_DEBUG(2, "%s(%p)\n", __func__, self); | 377 | IRDA_DEBUG(2, "%s(%p)\n", __func__, self); |
378 | 378 | ||
379 | /* We probably don't need to make any more queries */ | 379 | /* We probably don't need to make any more queries */ |
380 | iriap_close(self->iriap); | 380 | iriap_close(self->iriap); |
381 | self->iriap = NULL; | 381 | self->iriap = NULL; |
382 | 382 | ||
383 | /* Check if request succeeded */ | 383 | /* Check if request succeeded */ |
384 | if (result != IAS_SUCCESS) { | 384 | if (result != IAS_SUCCESS) { |
385 | IRDA_DEBUG(1, "%s(), IAS query failed! (%d)\n", __func__, | 385 | IRDA_DEBUG(1, "%s(), IAS query failed! (%d)\n", __func__, |
386 | result); | 386 | result); |
387 | 387 | ||
388 | self->errno = result; /* We really need it later */ | 388 | self->errno = result; /* We really need it later */ |
389 | 389 | ||
390 | /* Wake up any processes waiting for result */ | 390 | /* Wake up any processes waiting for result */ |
391 | wake_up_interruptible(&self->query_wait); | 391 | wake_up_interruptible(&self->query_wait); |
392 | 392 | ||
393 | return; | 393 | return; |
394 | } | 394 | } |
395 | 395 | ||
396 | /* Pass the object to the caller (so the caller must delete it) */ | 396 | /* Pass the object to the caller (so the caller must delete it) */ |
397 | self->ias_result = value; | 397 | self->ias_result = value; |
398 | self->errno = 0; | 398 | self->errno = 0; |
399 | 399 | ||
400 | /* Wake up any processes waiting for result */ | 400 | /* Wake up any processes waiting for result */ |
401 | wake_up_interruptible(&self->query_wait); | 401 | wake_up_interruptible(&self->query_wait); |
402 | } | 402 | } |
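/*
 * Illustration (not part of this file): the completion pattern this
 * callback implements. The waiter (irda_find_lsap_sel() below) sets a
 * pessimistic errno, fires the query and sleeps on query_wait; the
 * callback stores the result and errno before waking it, so the waiter
 * always sees consistent state. A minimal sketch of the waiter side:
 */
#if 0	/* sketch only */
	self->errno = -EHOSTUNREACH;	/* default if woken without result */
	iriap_getvaluebyclass_request(self->iriap, self->saddr, self->daddr,
				      name, "IrDA:TinyTP:LsapSel");
	wait_event_interruptible(self->query_wait, self->iriap == NULL);
	if (self->errno)		/* filled in by this callback */
		return self->errno;
#endif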
403 | 403 | ||
404 | /* | 404 | /* |
405 | * Function irda_selective_discovery_indication (discovery) | 405 | * Function irda_selective_discovery_indication (discovery) |
406 | * | 406 | * |
407 | * Got a selective discovery indication from IrLMP. | 407 | * Got a selective discovery indication from IrLMP. |
408 | * | 408 | * |
409 | * IrLMP is telling us that this node is new and matches our hint bit | 409 | * IrLMP is telling us that this node is new and matches our hint bit |
410 | * filter. Wake up any process waiting for answer... | 410 | * filter. Wake up any process waiting for answer... |
411 | */ | 411 | */ |
412 | static void irda_selective_discovery_indication(discinfo_t *discovery, | 412 | static void irda_selective_discovery_indication(discinfo_t *discovery, |
413 | DISCOVERY_MODE mode, | 413 | DISCOVERY_MODE mode, |
414 | void *priv) | 414 | void *priv) |
415 | { | 415 | { |
416 | struct irda_sock *self; | 416 | struct irda_sock *self; |
417 | 417 | ||
418 | IRDA_DEBUG(2, "%s()\n", __func__); | 418 | IRDA_DEBUG(2, "%s()\n", __func__); |
419 | 419 | ||
420 | self = priv; | 420 | self = priv; |
421 | if (!self) { | 421 | if (!self) { |
422 | IRDA_WARNING("%s: lost myself!\n", __func__); | 422 | IRDA_WARNING("%s: lost myself!\n", __func__); |
423 | return; | 423 | return; |
424 | } | 424 | } |
425 | 425 | ||
426 | /* Pass parameter to the caller */ | 426 | /* Pass parameter to the caller */ |
427 | self->cachedaddr = discovery->daddr; | 427 | self->cachedaddr = discovery->daddr; |
428 | 428 | ||
429 | /* Wake up process if it's waiting for device to be discovered */ | 429 | /* Wake up process if it's waiting for device to be discovered */ |
430 | wake_up_interruptible(&self->query_wait); | 430 | wake_up_interruptible(&self->query_wait); |
431 | } | 431 | } |
432 | 432 | ||
433 | /* | 433 | /* |
434 | * Function irda_discovery_timeout (priv) | 434 | * Function irda_discovery_timeout (priv) |
435 | * | 435 | * |
436 | * Timeout in the selective discovery process | 436 | * Timeout in the selective discovery process |
437 | * | 437 | * |
438 | * We were waiting for a node to be discovered, but nothing has come up | 438 | * We were waiting for a node to be discovered, but nothing has come up |
439 | * so far. Wake up the user and tell him that we failed... | 439 | * so far. Wake up the user and tell him that we failed... |
440 | */ | 440 | */ |
441 | static void irda_discovery_timeout(u_long priv) | 441 | static void irda_discovery_timeout(u_long priv) |
442 | { | 442 | { |
443 | struct irda_sock *self; | 443 | struct irda_sock *self; |
444 | 444 | ||
445 | IRDA_DEBUG(2, "%s()\n", __func__); | 445 | IRDA_DEBUG(2, "%s()\n", __func__); |
446 | 446 | ||
447 | self = (struct irda_sock *) priv; | 447 | self = (struct irda_sock *) priv; |
448 | BUG_ON(self == NULL); | 448 | BUG_ON(self == NULL); |
449 | 449 | ||
450 | /* Nothing for the caller */ | 450 | /* Nothing for the caller */ |
451 | self->cachelog = NULL; | 451 | self->cachelog = NULL; |
452 | self->cachedaddr = 0; | 452 | self->cachedaddr = 0; |
453 | self->errno = -ETIME; | 453 | self->errno = -ETIME; |
454 | 454 | ||
455 | /* Wake up process if it's still waiting... */ | 455 | /* Wake up process if it's still waiting... */ |
456 | wake_up_interruptible(&self->query_wait); | 456 | wake_up_interruptible(&self->query_wait); |
457 | } | 457 | } |
458 | 458 | ||
459 | /* | 459 | /* |
460 | * Function irda_open_tsap (self) | 460 | * Function irda_open_tsap (self) |
461 | * | 461 | * |
462 | * Open local Transport Service Access Point (TSAP) | 462 | * Open local Transport Service Access Point (TSAP) |
463 | * | 463 | * |
464 | */ | 464 | */ |
465 | static int irda_open_tsap(struct irda_sock *self, __u8 tsap_sel, char *name) | 465 | static int irda_open_tsap(struct irda_sock *self, __u8 tsap_sel, char *name) |
466 | { | 466 | { |
467 | notify_t notify; | 467 | notify_t notify; |
468 | 468 | ||
469 | if (self->tsap) { | 469 | if (self->tsap) { |
470 | IRDA_DEBUG(0, "%s: busy!\n", __func__); | 470 | IRDA_DEBUG(0, "%s: busy!\n", __func__); |
471 | return -EBUSY; | 471 | return -EBUSY; |
472 | } | 472 | } |
473 | 473 | ||
474 | /* Initialize callbacks to be used by the IrDA stack */ | 474 | /* Initialize callbacks to be used by the IrDA stack */ |
475 | irda_notify_init(¬ify); | 475 | irda_notify_init(¬ify); |
476 | notify.connect_confirm = irda_connect_confirm; | 476 | notify.connect_confirm = irda_connect_confirm; |
477 | notify.connect_indication = irda_connect_indication; | 477 | notify.connect_indication = irda_connect_indication; |
478 | notify.disconnect_indication = irda_disconnect_indication; | 478 | notify.disconnect_indication = irda_disconnect_indication; |
479 | notify.data_indication = irda_data_indication; | 479 | notify.data_indication = irda_data_indication; |
480 | notify.udata_indication = irda_data_indication; | 480 | notify.udata_indication = irda_data_indication; |
481 | notify.flow_indication = irda_flow_indication; | 481 | notify.flow_indication = irda_flow_indication; |
482 | notify.instance = self; | 482 | notify.instance = self; |
483 | strncpy(notify.name, name, NOTIFY_MAX_NAME); | 483 | strncpy(notify.name, name, NOTIFY_MAX_NAME); |
484 | 484 | ||
485 | self->tsap = irttp_open_tsap(tsap_sel, DEFAULT_INITIAL_CREDIT, | 485 | self->tsap = irttp_open_tsap(tsap_sel, DEFAULT_INITIAL_CREDIT, |
486 | ¬ify); | 486 | ¬ify); |
487 | if (self->tsap == NULL) { | 487 | if (self->tsap == NULL) { |
488 | IRDA_DEBUG(0, "%s(), Unable to allocate TSAP!\n", | 488 | IRDA_DEBUG(0, "%s(), Unable to allocate TSAP!\n", |
489 | __func__); | 489 | __func__); |
490 | return -ENOMEM; | 490 | return -ENOMEM; |
491 | } | 491 | } |
492 | /* Remember which TSAP selector we actually got */ | 492 | /* Remember which TSAP selector we actually got */ |
493 | self->stsap_sel = self->tsap->stsap_sel; | 493 | self->stsap_sel = self->tsap->stsap_sel; |
494 | 494 | ||
495 | return 0; | 495 | return 0; |
496 | } | 496 | } |
497 | 497 | ||
498 | /* | 498 | /* |
499 | * Function irda_open_lsap (self) | 499 | * Function irda_open_lsap (self) |
500 | * | 500 | * |
501 | * Open local Link Service Access Point (LSAP). Used for opening Ultra | 501 | * Open local Link Service Access Point (LSAP). Used for opening Ultra |
502 | * sockets | 502 | * sockets |
503 | */ | 503 | */ |
504 | #ifdef CONFIG_IRDA_ULTRA | 504 | #ifdef CONFIG_IRDA_ULTRA |
505 | static int irda_open_lsap(struct irda_sock *self, int pid) | 505 | static int irda_open_lsap(struct irda_sock *self, int pid) |
506 | { | 506 | { |
507 | notify_t notify; | 507 | notify_t notify; |
508 | 508 | ||
509 | if (self->lsap) { | 509 | if (self->lsap) { |
510 | IRDA_WARNING("%s(), busy!\n", __func__); | 510 | IRDA_WARNING("%s(), busy!\n", __func__); |
511 | return -EBUSY; | 511 | return -EBUSY; |
512 | } | 512 | } |
513 | 513 | ||
514 | /* Initialize callbacks to be used by the IrDA stack */ | 514 | /* Initialize callbacks to be used by the IrDA stack */ |
515 | irda_notify_init(¬ify); | 515 | irda_notify_init(¬ify); |
516 | notify.udata_indication = irda_data_indication; | 516 | notify.udata_indication = irda_data_indication; |
517 | notify.instance = self; | 517 | notify.instance = self; |
518 | strncpy(notify.name, "Ultra", NOTIFY_MAX_NAME); | 518 | strncpy(notify.name, "Ultra", NOTIFY_MAX_NAME); |
519 | 519 | ||
520 | self->lsap = irlmp_open_lsap(LSAP_CONNLESS, ¬ify, pid); | 520 | self->lsap = irlmp_open_lsap(LSAP_CONNLESS, ¬ify, pid); |
521 | if (self->lsap == NULL) { | 521 | if (self->lsap == NULL) { |
522 | IRDA_DEBUG( 0, "%s(), Unable to allocate LSAP!\n", __func__); | 522 | IRDA_DEBUG( 0, "%s(), Unable to allocate LSAP!\n", __func__); |
523 | return -ENOMEM; | 523 | return -ENOMEM; |
524 | } | 524 | } |
525 | 525 | ||
526 | return 0; | 526 | return 0; |
527 | } | 527 | } |
528 | #endif /* CONFIG_IRDA_ULTRA */ | 528 | #endif /* CONFIG_IRDA_ULTRA */ |
529 | 529 | ||
530 | /* | 530 | /* |
531 | * Function irda_find_lsap_sel (self, name) | 531 | * Function irda_find_lsap_sel (self, name) |
532 | * | 532 | * |
533 | * Try to look up LSAP selector in remote LM-IAS | 533 | * Try to look up LSAP selector in remote LM-IAS |
534 | * | 534 | * |
535 | * Basically, we start an IAP query, and then go to sleep. When the query | 535 | * Basically, we start an IAP query, and then go to sleep. When the query |
536 | * returns, irda_getvalue_confirm will wake us up, and we can examine the | 536 | * returns, irda_getvalue_confirm will wake us up, and we can examine the |
537 | * result of the query... | 537 | * result of the query... |
538 | * Note that in some cases, the query can fail even before we go to sleep, | 538 | * Note that in some cases, the query can fail even before we go to sleep, |
539 | * creating some races... | 539 | * creating some races... |
540 | */ | 540 | */ |
541 | static int irda_find_lsap_sel(struct irda_sock *self, char *name) | 541 | static int irda_find_lsap_sel(struct irda_sock *self, char *name) |
542 | { | 542 | { |
543 | IRDA_DEBUG(2, "%s(%p, %s)\n", __func__, self, name); | 543 | IRDA_DEBUG(2, "%s(%p, %s)\n", __func__, self, name); |
544 | 544 | ||
545 | if (self->iriap) { | 545 | if (self->iriap) { |
546 | IRDA_WARNING("%s(): busy with a previous query\n", | 546 | IRDA_WARNING("%s(): busy with a previous query\n", |
547 | __func__); | 547 | __func__); |
548 | return -EBUSY; | 548 | return -EBUSY; |
549 | } | 549 | } |
550 | 550 | ||
551 | self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self, | 551 | self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self, |
552 | irda_getvalue_confirm); | 552 | irda_getvalue_confirm); |
553 | if(self->iriap == NULL) | 553 | if(self->iriap == NULL) |
554 | return -ENOMEM; | 554 | return -ENOMEM; |
555 | 555 | ||
556 | /* Treat unexpected wakeup as disconnect */ | 556 | /* Treat unexpected wakeup as disconnect */ |
557 | self->errno = -EHOSTUNREACH; | 557 | self->errno = -EHOSTUNREACH; |
558 | 558 | ||
559 | /* Query remote LM-IAS */ | 559 | /* Query remote LM-IAS */ |
560 | iriap_getvaluebyclass_request(self->iriap, self->saddr, self->daddr, | 560 | iriap_getvaluebyclass_request(self->iriap, self->saddr, self->daddr, |
561 | name, "IrDA:TinyTP:LsapSel"); | 561 | name, "IrDA:TinyTP:LsapSel"); |
562 | 562 | ||
563 | /* Wait for answer, if not yet finished (or failed) */ | 563 | /* Wait for answer, if not yet finished (or failed) */ |
564 | if (wait_event_interruptible(self->query_wait, (self->iriap==NULL))) | 564 | if (wait_event_interruptible(self->query_wait, (self->iriap==NULL))) |
565 | /* Treat signals as disconnect */ | 565 | /* Treat signals as disconnect */ |
566 | return -EHOSTUNREACH; | 566 | return -EHOSTUNREACH; |
567 | 567 | ||
568 | /* Check what happened */ | 568 | /* Check what happened */ |
569 | if (self->errno) | 569 | if (self->errno) |
570 | { | 570 | { |
571 | /* Requested object/attribute doesn't exist */ | 571 | /* Requested object/attribute doesn't exist */ |
572 | if((self->errno == IAS_CLASS_UNKNOWN) || | 572 | if((self->errno == IAS_CLASS_UNKNOWN) || |
573 | (self->errno == IAS_ATTRIB_UNKNOWN)) | 573 | (self->errno == IAS_ATTRIB_UNKNOWN)) |
574 | return -EADDRNOTAVAIL; | 574 | return -EADDRNOTAVAIL; |
575 | else | 575 | else |
576 | return -EHOSTUNREACH; | 576 | return -EHOSTUNREACH; |
577 | } | 577 | } |
578 | 578 | ||
579 | /* Get the remote TSAP selector */ | 579 | /* Get the remote TSAP selector */ |
580 | switch (self->ias_result->type) { | 580 | switch (self->ias_result->type) { |
581 | case IAS_INTEGER: | 581 | case IAS_INTEGER: |
582 | IRDA_DEBUG(4, "%s() int=%d\n", | 582 | IRDA_DEBUG(4, "%s() int=%d\n", |
583 | __func__, self->ias_result->t.integer); | 583 | __func__, self->ias_result->t.integer); |
584 | 584 | ||
585 | if (self->ias_result->t.integer != -1) | 585 | if (self->ias_result->t.integer != -1) |
586 | self->dtsap_sel = self->ias_result->t.integer; | 586 | self->dtsap_sel = self->ias_result->t.integer; |
587 | else | 587 | else |
588 | self->dtsap_sel = 0; | 588 | self->dtsap_sel = 0; |
589 | break; | 589 | break; |
590 | default: | 590 | default: |
591 | self->dtsap_sel = 0; | 591 | self->dtsap_sel = 0; |
592 | IRDA_DEBUG(0, "%s(), bad type!\n", __func__); | 592 | IRDA_DEBUG(0, "%s(), bad type!\n", __func__); |
593 | break; | 593 | break; |
594 | } | 594 | } |
595 | if (self->ias_result) | 595 | if (self->ias_result) |
596 | irias_delete_value(self->ias_result); | 596 | irias_delete_value(self->ias_result); |
597 | 597 | ||
598 | if (self->dtsap_sel) | 598 | if (self->dtsap_sel) |
599 | return 0; | 599 | return 0; |
600 | 600 | ||
601 | return -EADDRNOTAVAIL; | 601 | return -EADDRNOTAVAIL; |
602 | } | 602 | } |
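/*
 * Illustration (not part of this file): roughly what the
 * wait_event_interruptible() above expands to. It re-checks the
 * condition around every sleep, returns 0 once the condition is true
 * and -ERESTARTSYS if a signal is pending, which is why the caller
 * maps any non-zero return to -EHOSTUNREACH. A hand-expanded sketch:
 */
#if 0	/* sketch only */
	DEFINE_WAIT(wait);
	int ret = 0;

	for (;;) {
		prepare_to_wait(&self->query_wait, &wait, TASK_INTERRUPTIBLE);
		if (self->iriap == NULL)	/* the wait condition */
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	finish_wait(&self->query_wait, &wait);
#endif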
603 | 603 | ||
604 | /* | 604 | /* |
605 | * Function irda_discover_daddr_and_lsap_sel (self, name) | 605 | * Function irda_discover_daddr_and_lsap_sel (self, name) |
606 | * | 606 | * |
607 | * This tries to find a device with the requested service. | 607 | * This tries to find a device with the requested service. |
608 | * | 608 | * |
609 | * It basically looks into the discovery log. For each address in the list, | 609 | * It basically looks into the discovery log. For each address in the list, |
610 | * it queries the LM-IAS of the device to find if this device offers | 610 | * it queries the LM-IAS of the device to find if this device offers |
611 | * the requested service. | 611 | * the requested service. |
612 | * If there is more than one node supporting the service, we complain | 612 | * If there is more than one node supporting the service, we complain |
613 | * to the user (they should move devices around). | 613 | * to the user (they should move devices around). |
614 | * Then, we set both the destination address and the lsap selector to point | 614 | * Then, we set both the destination address and the lsap selector to point |
615 | * to the service on the unique device we have found. | 615 | * to the service on the unique device we have found. |
616 | * | 616 | * |
617 | * Note : this function fails if there is more than one device in range, | 617 | * Note : this function fails if there is more than one device in range, |
618 | * because IrLMP doesn't disconnect the LAP when the last LSAP is closed. | 618 | * because IrLMP doesn't disconnect the LAP when the last LSAP is closed. |
619 | * Moreover, we would need to wait for the LAP disconnection... | 619 | * Moreover, we would need to wait for the LAP disconnection... |
620 | */ | 620 | */ |
621 | static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name) | 621 | static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name) |
622 | { | 622 | { |
623 | discinfo_t *discoveries; /* Copy of the discovery log */ | 623 | discinfo_t *discoveries; /* Copy of the discovery log */ |
624 | int number; /* Number of nodes in the log */ | 624 | int number; /* Number of nodes in the log */ |
625 | int i; | 625 | int i; |
626 | int err = -ENETUNREACH; | 626 | int err = -ENETUNREACH; |
627 | __u32 daddr = DEV_ADDR_ANY; /* Address we found the service on */ | 627 | __u32 daddr = DEV_ADDR_ANY; /* Address we found the service on */ |
628 | __u8 dtsap_sel = 0x0; /* TSAP associated with it */ | 628 | __u8 dtsap_sel = 0x0; /* TSAP associated with it */ |
629 | 629 | ||
630 | IRDA_DEBUG(2, "%s(), name=%s\n", __func__, name); | 630 | IRDA_DEBUG(2, "%s(), name=%s\n", __func__, name); |
631 | 631 | ||
632 | /* Ask lmp for the current discovery log | 632 | /* Ask lmp for the current discovery log |
633 | * Note : we have to use irlmp_get_discoveries(), as opposed | 633 | * Note : we have to use irlmp_get_discoveries(), as opposed |
634 | * to playing with the cachelog directly, because while we are | 634 | * to playing with the cachelog directly, because while we are |
635 | * making our ias query, the log might change... */ | 635 | * making our ias query, the log might change... */ |
636 | discoveries = irlmp_get_discoveries(&number, self->mask.word, | 636 | discoveries = irlmp_get_discoveries(&number, self->mask.word, |
637 | self->nslots); | 637 | self->nslots); |
638 | /* Check if we got some results */ | 638 | /* Check if we got some results */ |
639 | if (discoveries == NULL) | 639 | if (discoveries == NULL) |
640 | return -ENETUNREACH; /* No nodes discovered */ | 640 | return -ENETUNREACH; /* No nodes discovered */ |
641 | 641 | ||
642 | /* | 642 | /* |
643 | * Now, check all discovered devices (if any), and connect | 643 | * Now, check all discovered devices (if any), and connect |
644 | * the client only to the services that it is | 644 | * the client only to the services that it is |
645 | * interested in... | 645 | * interested in... |
646 | */ | 646 | */ |
647 | for(i = 0; i < number; i++) { | 647 | for(i = 0; i < number; i++) { |
648 | /* Try the address in the log */ | 648 | /* Try the address in the log */ |
649 | self->daddr = discoveries[i].daddr; | 649 | self->daddr = discoveries[i].daddr; |
650 | self->saddr = 0x0; | 650 | self->saddr = 0x0; |
651 | IRDA_DEBUG(1, "%s(), trying daddr = %08x\n", | 651 | IRDA_DEBUG(1, "%s(), trying daddr = %08x\n", |
652 | __func__, self->daddr); | 652 | __func__, self->daddr); |
653 | 653 | ||
654 | /* Query remote LM-IAS for this service */ | 654 | /* Query remote LM-IAS for this service */ |
655 | err = irda_find_lsap_sel(self, name); | 655 | err = irda_find_lsap_sel(self, name); |
656 | switch (err) { | 656 | switch (err) { |
657 | case 0: | 657 | case 0: |
658 | /* We found the requested service */ | 658 | /* We found the requested service */ |
659 | if(daddr != DEV_ADDR_ANY) { | 659 | if(daddr != DEV_ADDR_ANY) { |
660 | IRDA_DEBUG(1, "%s(), discovered service ''%s'' in two different devices !!!\n", | 660 | IRDA_DEBUG(1, "%s(), discovered service ''%s'' in two different devices !!!\n", |
661 | __func__, name); | 661 | __func__, name); |
662 | self->daddr = DEV_ADDR_ANY; | 662 | self->daddr = DEV_ADDR_ANY; |
663 | kfree(discoveries); | 663 | kfree(discoveries); |
664 | return -ENOTUNIQ; | 664 | return -ENOTUNIQ; |
665 | } | 665 | } |
666 | /* First time we found that one, save it ! */ | 666 | /* First time we found that one, save it ! */ |
667 | daddr = self->daddr; | 667 | daddr = self->daddr; |
668 | dtsap_sel = self->dtsap_sel; | 668 | dtsap_sel = self->dtsap_sel; |
669 | break; | 669 | break; |
670 | case -EADDRNOTAVAIL: | 670 | case -EADDRNOTAVAIL: |
671 | /* Requested service simply doesn't exist on this node */ | 671 | /* Requested service simply doesn't exist on this node */ |
672 | break; | 672 | break; |
673 | default: | 673 | default: |
674 | /* Something bad did happen :-( */ | 674 | /* Something bad did happen :-( */ |
675 | IRDA_DEBUG(0, "%s(), unexpected IAS query failure\n", __func__); | 675 | IRDA_DEBUG(0, "%s(), unexpected IAS query failure\n", __func__); |
676 | self->daddr = DEV_ADDR_ANY; | 676 | self->daddr = DEV_ADDR_ANY; |
677 | kfree(discoveries); | 677 | kfree(discoveries); |
678 | return -EHOSTUNREACH; | 678 | return -EHOSTUNREACH; |
679 | break; | 679 | break; |
680 | } | 680 | } |
681 | } | 681 | } |
682 | /* Cleanup our copy of the discovery log */ | 682 | /* Cleanup our copy of the discovery log */ |
683 | kfree(discoveries); | 683 | kfree(discoveries); |
684 | 684 | ||
685 | /* Check out what we found */ | 685 | /* Check out what we found */ |
686 | if(daddr == DEV_ADDR_ANY) { | 686 | if(daddr == DEV_ADDR_ANY) { |
687 | IRDA_DEBUG(1, "%s(), cannot discover service ''%s'' in any device !!!\n", | 687 | IRDA_DEBUG(1, "%s(), cannot discover service ''%s'' in any device !!!\n", |
688 | __func__, name); | 688 | __func__, name); |
689 | self->daddr = DEV_ADDR_ANY; | 689 | self->daddr = DEV_ADDR_ANY; |
690 | return -EADDRNOTAVAIL; | 690 | return -EADDRNOTAVAIL; |
691 | } | 691 | } |
692 | 692 | ||
693 | /* Revert back to discovered device & service */ | 693 | /* Revert back to discovered device & service */ |
694 | self->daddr = daddr; | 694 | self->daddr = daddr; |
695 | self->saddr = 0x0; | 695 | self->saddr = 0x0; |
696 | self->dtsap_sel = dtsap_sel; | 696 | self->dtsap_sel = dtsap_sel; |
697 | 697 | ||
698 | IRDA_DEBUG(1, "%s(), discovered requested service ''%s'' at address %08x\n", | 698 | IRDA_DEBUG(1, "%s(), discovered requested service ''%s'' at address %08x\n", |
699 | __func__, name, self->daddr); | 699 | __func__, name, self->daddr); |
700 | 700 | ||
701 | return 0; | 701 | return 0; |
702 | } | 702 | } |
703 | 703 | ||
704 | /* | 704 | /* |
705 | * Function irda_getname (sock, uaddr, uaddr_len, peer) | 705 | * Function irda_getname (sock, uaddr, uaddr_len, peer) |
706 | * | 706 | * |
707 | * Return our own, or the peer's, socket address (sockaddr_irda) | 707 | * Return our own, or the peer's, socket address (sockaddr_irda) |
708 | * | 708 | * |
709 | */ | 709 | */ |
710 | static int irda_getname(struct socket *sock, struct sockaddr *uaddr, | 710 | static int irda_getname(struct socket *sock, struct sockaddr *uaddr, |
711 | int *uaddr_len, int peer) | 711 | int *uaddr_len, int peer) |
712 | { | 712 | { |
713 | struct sockaddr_irda saddr; | 713 | struct sockaddr_irda saddr; |
714 | struct sock *sk = sock->sk; | 714 | struct sock *sk = sock->sk; |
715 | struct irda_sock *self = irda_sk(sk); | 715 | struct irda_sock *self = irda_sk(sk); |
716 | 716 | ||
717 | memset(&saddr, 0, sizeof(saddr)); | 717 | memset(&saddr, 0, sizeof(saddr)); |
718 | if (peer) { | 718 | if (peer) { |
719 | if (sk->sk_state != TCP_ESTABLISHED) | 719 | if (sk->sk_state != TCP_ESTABLISHED) |
720 | return -ENOTCONN; | 720 | return -ENOTCONN; |
721 | 721 | ||
722 | saddr.sir_family = AF_IRDA; | 722 | saddr.sir_family = AF_IRDA; |
723 | saddr.sir_lsap_sel = self->dtsap_sel; | 723 | saddr.sir_lsap_sel = self->dtsap_sel; |
724 | saddr.sir_addr = self->daddr; | 724 | saddr.sir_addr = self->daddr; |
725 | } else { | 725 | } else { |
726 | saddr.sir_family = AF_IRDA; | 726 | saddr.sir_family = AF_IRDA; |
727 | saddr.sir_lsap_sel = self->stsap_sel; | 727 | saddr.sir_lsap_sel = self->stsap_sel; |
728 | saddr.sir_addr = self->saddr; | 728 | saddr.sir_addr = self->saddr; |
729 | } | 729 | } |
730 | 730 | ||
731 | IRDA_DEBUG(1, "%s(), tsap_sel = %#x\n", __func__, saddr.sir_lsap_sel); | 731 | IRDA_DEBUG(1, "%s(), tsap_sel = %#x\n", __func__, saddr.sir_lsap_sel); |
732 | IRDA_DEBUG(1, "%s(), addr = %08x\n", __func__, saddr.sir_addr); | 732 | IRDA_DEBUG(1, "%s(), addr = %08x\n", __func__, saddr.sir_addr); |
733 | 733 | ||
734 | /* uaddr_len comes to us uninitialised */ | 734 | /* uaddr_len comes to us uninitialised */ |
735 | *uaddr_len = sizeof (struct sockaddr_irda); | 735 | *uaddr_len = sizeof (struct sockaddr_irda); |
736 | memcpy(uaddr, &saddr, *uaddr_len); | 736 | memcpy(uaddr, &saddr, *uaddr_len); |
737 | 737 | ||
738 | return 0; | 738 | return 0; |
739 | } | 739 | } |
740 | 740 | ||
741 | /* | 741 | /* |
742 | * Function irda_listen (sock, backlog) | 742 | * Function irda_listen (sock, backlog) |
743 | * | 743 | * |
744 | * Just move to the listen state | 744 | * Just move to the listen state |
745 | * | 745 | * |
746 | */ | 746 | */ |
747 | static int irda_listen(struct socket *sock, int backlog) | 747 | static int irda_listen(struct socket *sock, int backlog) |
748 | { | 748 | { |
749 | struct sock *sk = sock->sk; | 749 | struct sock *sk = sock->sk; |
750 | int err = -EOPNOTSUPP; | 750 | int err = -EOPNOTSUPP; |
751 | 751 | ||
752 | IRDA_DEBUG(2, "%s()\n", __func__); | 752 | IRDA_DEBUG(2, "%s()\n", __func__); |
753 | 753 | ||
754 | lock_sock(sk); | 754 | lock_sock(sk); |
755 | 755 | ||
756 | if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) && | 756 | if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) && |
757 | (sk->sk_type != SOCK_DGRAM)) | 757 | (sk->sk_type != SOCK_DGRAM)) |
758 | goto out; | 758 | goto out; |
759 | 759 | ||
760 | if (sk->sk_state != TCP_LISTEN) { | 760 | if (sk->sk_state != TCP_LISTEN) { |
761 | sk->sk_max_ack_backlog = backlog; | 761 | sk->sk_max_ack_backlog = backlog; |
762 | sk->sk_state = TCP_LISTEN; | 762 | sk->sk_state = TCP_LISTEN; |
763 | 763 | ||
764 | err = 0; | 764 | err = 0; |
765 | } | 765 | } |
766 | out: | 766 | out: |
767 | release_sock(sk); | 767 | release_sock(sk); |
768 | 768 | ||
769 | return err; | 769 | return err; |
770 | } | 770 | } |
771 | 771 | ||
772 | /* | 772 | /* |
773 | * Function irda_bind (sock, uaddr, addr_len) | 773 | * Function irda_bind (sock, uaddr, addr_len) |
774 | * | 774 | * |
775 | * Used by servers to register their well known TSAP | 775 | * Used by servers to register their well known TSAP |
776 | * | 776 | * |
777 | */ | 777 | */ |
778 | static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | 778 | static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) |
779 | { | 779 | { |
780 | struct sock *sk = sock->sk; | 780 | struct sock *sk = sock->sk; |
781 | struct sockaddr_irda *addr = (struct sockaddr_irda *) uaddr; | 781 | struct sockaddr_irda *addr = (struct sockaddr_irda *) uaddr; |
782 | struct irda_sock *self = irda_sk(sk); | 782 | struct irda_sock *self = irda_sk(sk); |
783 | int err; | 783 | int err; |
784 | 784 | ||
785 | IRDA_DEBUG(2, "%s(%p)\n", __func__, self); | 785 | IRDA_DEBUG(2, "%s(%p)\n", __func__, self); |
786 | 786 | ||
787 | if (addr_len != sizeof(struct sockaddr_irda)) | 787 | if (addr_len != sizeof(struct sockaddr_irda)) |
788 | return -EINVAL; | 788 | return -EINVAL; |
789 | 789 | ||
790 | lock_sock(sk); | 790 | lock_sock(sk); |
791 | #ifdef CONFIG_IRDA_ULTRA | 791 | #ifdef CONFIG_IRDA_ULTRA |
792 | /* Special care for Ultra sockets */ | 792 | /* Special care for Ultra sockets */ |
793 | if ((sk->sk_type == SOCK_DGRAM) && | 793 | if ((sk->sk_type == SOCK_DGRAM) && |
794 | (sk->sk_protocol == IRDAPROTO_ULTRA)) { | 794 | (sk->sk_protocol == IRDAPROTO_ULTRA)) { |
795 | self->pid = addr->sir_lsap_sel; | 795 | self->pid = addr->sir_lsap_sel; |
796 | err = -EOPNOTSUPP; | 796 | err = -EOPNOTSUPP; |
797 | if (self->pid & 0x80) { | 797 | if (self->pid & 0x80) { |
798 | IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__); | 798 | IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__); |
799 | goto out; | 799 | goto out; |
800 | } | 800 | } |
801 | err = irda_open_lsap(self, self->pid); | 801 | err = irda_open_lsap(self, self->pid); |
802 | if (err < 0) | 802 | if (err < 0) |
803 | goto out; | 803 | goto out; |
804 | 804 | ||
805 | /* Pretend we are connected */ | 805 | /* Pretend we are connected */ |
806 | sock->state = SS_CONNECTED; | 806 | sock->state = SS_CONNECTED; |
807 | sk->sk_state = TCP_ESTABLISHED; | 807 | sk->sk_state = TCP_ESTABLISHED; |
808 | err = 0; | 808 | err = 0; |
809 | 809 | ||
810 | goto out; | 810 | goto out; |
811 | } | 811 | } |
812 | #endif /* CONFIG_IRDA_ULTRA */ | 812 | #endif /* CONFIG_IRDA_ULTRA */ |
813 | 813 | ||
814 | self->ias_obj = irias_new_object(addr->sir_name, jiffies); | 814 | self->ias_obj = irias_new_object(addr->sir_name, jiffies); |
815 | err = -ENOMEM; | 815 | err = -ENOMEM; |
816 | if (self->ias_obj == NULL) | 816 | if (self->ias_obj == NULL) |
817 | goto out; | 817 | goto out; |
818 | 818 | ||
819 | err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name); | 819 | err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name); |
820 | if (err < 0) { | 820 | if (err < 0) { |
821 | irias_delete_object(self->ias_obj); | 821 | irias_delete_object(self->ias_obj); |
822 | self->ias_obj = NULL; | 822 | self->ias_obj = NULL; |
823 | goto out; | 823 | goto out; |
824 | } | 824 | } |
825 | 825 | ||
826 | /* Register with LM-IAS */ | 826 | /* Register with LM-IAS */ |
827 | irias_add_integer_attrib(self->ias_obj, "IrDA:TinyTP:LsapSel", | 827 | irias_add_integer_attrib(self->ias_obj, "IrDA:TinyTP:LsapSel", |
828 | self->stsap_sel, IAS_KERNEL_ATTR); | 828 | self->stsap_sel, IAS_KERNEL_ATTR); |
829 | irias_insert_object(self->ias_obj); | 829 | irias_insert_object(self->ias_obj); |
830 | 830 | ||
831 | err = 0; | 831 | err = 0; |
832 | out: | 832 | out: |
833 | release_sock(sk); | 833 | release_sock(sk); |
834 | return err; | 834 | return err; |
835 | } | 835 | } |
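/*
 * Illustration (not part of this file): the user-space view of the
 * bind() path above. A minimal sketch of a server registering a
 * hypothetical service name "MyService" with LM-IAS and listening:
 */
#if 0	/* sketch only, user-space code */
#include <string.h>
#include <sys/socket.h>
#include <linux/irda.h>

int irda_server_socket(void)
{
	struct sockaddr_irda saddr;
	int fd = socket(AF_IRDA, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;

	memset(&saddr, 0, sizeof(saddr));
	saddr.sir_family = AF_IRDA;
	/* Service name clients resolve via IrDA:TinyTP:LsapSel */
	strncpy(saddr.sir_name, "MyService", sizeof(saddr.sir_name) - 1);

	if (bind(fd, (struct sockaddr *) &saddr, sizeof(saddr)) < 0 ||
	    listen(fd, 8) < 0)
		return -1;

	return fd;
}
#endif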
836 | 836 | ||
837 | /* | 837 | /* |
838 | * Function irda_accept (sock, newsock, flags) | 838 | * Function irda_accept (sock, newsock, flags) |
839 | * | 839 | * |
840 | * Wait for incoming connection | 840 | * Wait for incoming connection |
841 | * | 841 | * |
842 | */ | 842 | */ |
843 | static int irda_accept(struct socket *sock, struct socket *newsock, int flags) | 843 | static int irda_accept(struct socket *sock, struct socket *newsock, int flags) |
844 | { | 844 | { |
845 | struct sock *sk = sock->sk; | 845 | struct sock *sk = sock->sk; |
846 | struct irda_sock *new, *self = irda_sk(sk); | 846 | struct irda_sock *new, *self = irda_sk(sk); |
847 | struct sock *newsk; | 847 | struct sock *newsk; |
848 | struct sk_buff *skb; | 848 | struct sk_buff *skb; |
849 | int err; | 849 | int err; |
850 | 850 | ||
851 | IRDA_DEBUG(2, "%s()\n", __func__); | 851 | IRDA_DEBUG(2, "%s()\n", __func__); |
852 | 852 | ||
853 | err = irda_create(sock_net(sk), newsock, sk->sk_protocol, 0); | 853 | err = irda_create(sock_net(sk), newsock, sk->sk_protocol, 0); |
854 | if (err) | 854 | if (err) |
855 | return err; | 855 | return err; |
856 | 856 | ||
857 | err = -EINVAL; | 857 | err = -EINVAL; |
858 | 858 | ||
859 | lock_sock(sk); | 859 | lock_sock(sk); |
860 | if (sock->state != SS_UNCONNECTED) | 860 | if (sock->state != SS_UNCONNECTED) |
861 | goto out; | 861 | goto out; |
862 | 862 | ||
863 | if ((sk = sock->sk) == NULL) | 863 | if ((sk = sock->sk) == NULL) |
864 | goto out; | 864 | goto out; |
865 | 865 | ||
866 | err = -EOPNOTSUPP; | 866 | err = -EOPNOTSUPP; |
867 | if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) && | 867 | if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) && |
868 | (sk->sk_type != SOCK_DGRAM)) | 868 | (sk->sk_type != SOCK_DGRAM)) |
869 | goto out; | 869 | goto out; |
870 | 870 | ||
871 | err = -EINVAL; | 871 | err = -EINVAL; |
872 | if (sk->sk_state != TCP_LISTEN) | 872 | if (sk->sk_state != TCP_LISTEN) |
873 | goto out; | 873 | goto out; |
874 | 874 | ||
875 | /* | 875 | /* |
876 | * The read queue this time is holding sockets ready to use | 876 | * The read queue this time is holding sockets ready to use |
877 | * hooked into the SABM we saved | 877 | * hooked into the SABM we saved |
878 | */ | 878 | */ |
879 | 879 | ||
880 | /* | 880 | /* |
881 | * We can perform the accept only if there is incoming data | 881 | * We can perform the accept only if there is incoming data |
882 | * on the listening socket. | 882 | * on the listening socket. |
883 | * So, we will block the caller until we receive any data. | 883 | * So, we will block the caller until we receive any data. |
884 | * If the caller was waiting on select() or poll() before | 884 | * If the caller was waiting on select() or poll() before |
885 | * calling us, the data is waiting for us ;-) | 885 | * calling us, the data is waiting for us ;-) |
886 | * Jean II | 886 | * Jean II |
887 | */ | 887 | */ |
888 | while (1) { | 888 | while (1) { |
889 | skb = skb_dequeue(&sk->sk_receive_queue); | 889 | skb = skb_dequeue(&sk->sk_receive_queue); |
890 | if (skb) | 890 | if (skb) |
891 | break; | 891 | break; |
892 | 892 | ||
893 | /* Non blocking operation */ | 893 | /* Non blocking operation */ |
894 | err = -EWOULDBLOCK; | 894 | err = -EWOULDBLOCK; |
895 | if (flags & O_NONBLOCK) | 895 | if (flags & O_NONBLOCK) |
896 | goto out; | 896 | goto out; |
897 | 897 | ||
898 | err = wait_event_interruptible(*(sk_sleep(sk)), | 898 | err = wait_event_interruptible(*(sk_sleep(sk)), |
899 | skb_peek(&sk->sk_receive_queue)); | 899 | skb_peek(&sk->sk_receive_queue)); |
900 | if (err) | 900 | if (err) |
901 | goto out; | 901 | goto out; |
902 | } | 902 | } |
903 | 903 | ||
904 | newsk = newsock->sk; | 904 | newsk = newsock->sk; |
905 | err = -EIO; | 905 | err = -EIO; |
906 | if (newsk == NULL) | 906 | if (newsk == NULL) |
907 | goto out; | 907 | goto out; |
908 | 908 | ||
909 | newsk->sk_state = TCP_ESTABLISHED; | 909 | newsk->sk_state = TCP_ESTABLISHED; |
910 | 910 | ||
911 | new = irda_sk(newsk); | 911 | new = irda_sk(newsk); |
912 | 912 | ||
913 | /* Now attach up the new socket */ | 913 | /* Now attach up the new socket */ |
914 | new->tsap = irttp_dup(self->tsap, new); | 914 | new->tsap = irttp_dup(self->tsap, new); |
915 | err = -EPERM; /* value does not seem to make sense. -arnd */ | 915 | err = -EPERM; /* value does not seem to make sense. -arnd */ |
916 | if (!new->tsap) { | 916 | if (!new->tsap) { |
917 | IRDA_DEBUG(0, "%s(), dup failed!\n", __func__); | 917 | IRDA_DEBUG(0, "%s(), dup failed!\n", __func__); |
918 | kfree_skb(skb); | 918 | kfree_skb(skb); |
919 | goto out; | 919 | goto out; |
920 | } | 920 | } |
921 | 921 | ||
922 | new->stsap_sel = new->tsap->stsap_sel; | 922 | new->stsap_sel = new->tsap->stsap_sel; |
923 | new->dtsap_sel = new->tsap->dtsap_sel; | 923 | new->dtsap_sel = new->tsap->dtsap_sel; |
924 | new->saddr = irttp_get_saddr(new->tsap); | 924 | new->saddr = irttp_get_saddr(new->tsap); |
925 | new->daddr = irttp_get_daddr(new->tsap); | 925 | new->daddr = irttp_get_daddr(new->tsap); |
926 | 926 | ||
927 | new->max_sdu_size_tx = self->max_sdu_size_tx; | 927 | new->max_sdu_size_tx = self->max_sdu_size_tx; |
928 | new->max_sdu_size_rx = self->max_sdu_size_rx; | 928 | new->max_sdu_size_rx = self->max_sdu_size_rx; |
929 | new->max_data_size = self->max_data_size; | 929 | new->max_data_size = self->max_data_size; |
930 | new->max_header_size = self->max_header_size; | 930 | new->max_header_size = self->max_header_size; |
931 | 931 | ||
932 | memcpy(&new->qos_tx, &self->qos_tx, sizeof(struct qos_info)); | 932 | memcpy(&new->qos_tx, &self->qos_tx, sizeof(struct qos_info)); |
933 | 933 | ||
934 | /* Clean up the original one to keep it in listen state */ | 934 | /* Clean up the original one to keep it in listen state */ |
935 | irttp_listen(self->tsap); | 935 | irttp_listen(self->tsap); |
936 | 936 | ||
937 | kfree_skb(skb); | 937 | kfree_skb(skb); |
938 | sk->sk_ack_backlog--; | 938 | sk->sk_ack_backlog--; |
939 | 939 | ||
940 | newsock->state = SS_CONNECTED; | 940 | newsock->state = SS_CONNECTED; |
941 | 941 | ||
942 | irda_connect_response(new); | 942 | irda_connect_response(new); |
943 | err = 0; | 943 | err = 0; |
944 | out: | 944 | out: |
945 | release_sock(sk); | 945 | release_sock(sk); |
946 | return err; | 946 | return err; |
947 | } | 947 | } |
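/*
 * Illustration (not part of this file): the user-space consequence of
 * the blocking loop above. On a non-blocking listener the -EWOULDBLOCK
 * branch is taken instead of sleeping. A minimal sketch:
 */
#if 0	/* sketch only, user-space code */
#include <errno.h>
#include <fcntl.h>
#include <sys/socket.h>

int irda_try_accept(int listen_fd)
{
	int flags = fcntl(listen_fd, F_GETFL, 0);
	int client;

	fcntl(listen_fd, F_SETFL, flags | O_NONBLOCK);

	client = accept(listen_fd, NULL, NULL);
	if (client < 0 && (errno == EWOULDBLOCK || errno == EAGAIN))
		return 0;	/* nothing queued yet; poll() and retry */

	return client;		/* accepted fd, or -1 on a real error */
}
#endif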
948 | 948 | ||
949 | /* | 949 | /* |
950 | * Function irda_connect (sock, uaddr, addr_len, flags) | 950 | * Function irda_connect (sock, uaddr, addr_len, flags) |
951 | * | 951 | * |
952 | * Connect to a IrDA device | 952 | * Connect to a IrDA device |
953 | * | 953 | * |
954 | * The main difference with a "standard" connect is that with IrDA we need | 954 | * The main difference with a "standard" connect is that with IrDA we need |
955 | * to resolve the service name into a TSAP selector (in TCP, port number | 955 | * to resolve the service name into a TSAP selector (in TCP, port number |
956 | * doesn't have to be resolved). | 956 | * doesn't have to be resolved). |
957 | * Because of this service name resolution, we can offer "auto-connect", | 957 | * Because of this service name resolution, we can offer "auto-connect", |
958 | * where we connect to a service without specifying a destination address. | 958 | * where we connect to a service without specifying a destination address. |
959 | * | 959 | * |
960 | * Note : by consulting "errno", the user space caller may learn the cause | 960 | * Note : by consulting "errno", the user space caller may learn the cause |
961 | * of the failure. Most of them are visible in the function, others may come | 961 | * of the failure. Most of them are visible in the function, others may come |
962 | * from subroutines called and are listed here : | 962 | * from subroutines called and are listed here : |
963 | * o EBUSY : already processing a connect | 963 | * o EBUSY : already processing a connect |
964 | * o EHOSTUNREACH : bad addr->sir_addr argument | 964 | * o EHOSTUNREACH : bad addr->sir_addr argument |
965 | * o EADDRNOTAVAIL : bad addr->sir_name argument | 965 | * o EADDRNOTAVAIL : bad addr->sir_name argument |
966 | * o ENOTUNIQ : more than one node has addr->sir_name (auto-connect) | 966 | * o ENOTUNIQ : more than one node has addr->sir_name (auto-connect) |
967 | * o ENETUNREACH : no node found on the network (auto-connect) | 967 | * o ENETUNREACH : no node found on the network (auto-connect) |
968 | */ | 968 | */ |
969 | static int irda_connect(struct socket *sock, struct sockaddr *uaddr, | 969 | static int irda_connect(struct socket *sock, struct sockaddr *uaddr, |
970 | int addr_len, int flags) | 970 | int addr_len, int flags) |
971 | { | 971 | { |
972 | struct sock *sk = sock->sk; | 972 | struct sock *sk = sock->sk; |
973 | struct sockaddr_irda *addr = (struct sockaddr_irda *) uaddr; | 973 | struct sockaddr_irda *addr = (struct sockaddr_irda *) uaddr; |
974 | struct irda_sock *self = irda_sk(sk); | 974 | struct irda_sock *self = irda_sk(sk); |
975 | int err; | 975 | int err; |
976 | 976 | ||
977 | IRDA_DEBUG(2, "%s(%p)\n", __func__, self); | 977 | IRDA_DEBUG(2, "%s(%p)\n", __func__, self); |
978 | 978 | ||
979 | lock_sock(sk); | 979 | lock_sock(sk); |
980 | /* Don't allow connect for Ultra sockets */ | 980 | /* Don't allow connect for Ultra sockets */ |
981 | err = -ESOCKTNOSUPPORT; | 981 | err = -ESOCKTNOSUPPORT; |
982 | if ((sk->sk_type == SOCK_DGRAM) && (sk->sk_protocol == IRDAPROTO_ULTRA)) | 982 | if ((sk->sk_type == SOCK_DGRAM) && (sk->sk_protocol == IRDAPROTO_ULTRA)) |
983 | goto out; | 983 | goto out; |
984 | 984 | ||
985 | if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) { | 985 | if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) { |
986 | sock->state = SS_CONNECTED; | 986 | sock->state = SS_CONNECTED; |
987 | err = 0; | 987 | err = 0; |
988 | goto out; /* Connect completed during a ERESTARTSYS event */ | 988 | goto out; /* Connect completed during a ERESTARTSYS event */ |
989 | } | 989 | } |
990 | 990 | ||
991 | if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) { | 991 | if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) { |
992 | sock->state = SS_UNCONNECTED; | 992 | sock->state = SS_UNCONNECTED; |
993 | err = -ECONNREFUSED; | 993 | err = -ECONNREFUSED; |
994 | goto out; | 994 | goto out; |
995 | } | 995 | } |
996 | 996 | ||
997 | err = -EISCONN; /* No reconnect on a seqpacket socket */ | 997 | err = -EISCONN; /* No reconnect on a seqpacket socket */ |
998 | if (sk->sk_state == TCP_ESTABLISHED) | 998 | if (sk->sk_state == TCP_ESTABLISHED) |
999 | goto out; | 999 | goto out; |
1000 | 1000 | ||
1001 | sk->sk_state = TCP_CLOSE; | 1001 | sk->sk_state = TCP_CLOSE; |
1002 | sock->state = SS_UNCONNECTED; | 1002 | sock->state = SS_UNCONNECTED; |
1003 | 1003 | ||
1004 | err = -EINVAL; | 1004 | err = -EINVAL; |
1005 | if (addr_len != sizeof(struct sockaddr_irda)) | 1005 | if (addr_len != sizeof(struct sockaddr_irda)) |
1006 | goto out; | 1006 | goto out; |
1007 | 1007 | ||
1008 | /* Check if user supplied any destination device address */ | 1008 | /* Check if user supplied any destination device address */ |
1009 | if ((!addr->sir_addr) || (addr->sir_addr == DEV_ADDR_ANY)) { | 1009 | if ((!addr->sir_addr) || (addr->sir_addr == DEV_ADDR_ANY)) { |
1010 | /* Try to find one suitable */ | 1010 | /* Try to find one suitable */ |
1011 | err = irda_discover_daddr_and_lsap_sel(self, addr->sir_name); | 1011 | err = irda_discover_daddr_and_lsap_sel(self, addr->sir_name); |
1012 | if (err) { | 1012 | if (err) { |
1013 | IRDA_DEBUG(0, "%s(), auto-connect failed!\n", __func__); | 1013 | IRDA_DEBUG(0, "%s(), auto-connect failed!\n", __func__); |
1014 | goto out; | 1014 | goto out; |
1015 | } | 1015 | } |
1016 | } else { | 1016 | } else { |
1017 | /* Use the one provided by the user */ | 1017 | /* Use the one provided by the user */ |
1018 | self->daddr = addr->sir_addr; | 1018 | self->daddr = addr->sir_addr; |
1019 | IRDA_DEBUG(1, "%s(), daddr = %08x\n", __func__, self->daddr); | 1019 | IRDA_DEBUG(1, "%s(), daddr = %08x\n", __func__, self->daddr); |
1020 | 1020 | ||
1021 | /* If we don't have a valid service name, we assume the | 1021 | /* If we don't have a valid service name, we assume the |
1022 | * user wants to connect on a specific LSAP. Prevent | 1022 | * user wants to connect on a specific LSAP. Prevent |
1023 | * the use of invalid LSAPs (IrLMP 1.1 p10). Jean II */ | 1023 | * the use of invalid LSAPs (IrLMP 1.1 p10). Jean II */ |
1024 | if((addr->sir_name[0] != '\0') || | 1024 | if((addr->sir_name[0] != '\0') || |
1025 | (addr->sir_lsap_sel >= 0x70)) { | 1025 | (addr->sir_lsap_sel >= 0x70)) { |
1026 | /* Query remote LM-IAS using service name */ | 1026 | /* Query remote LM-IAS using service name */ |
1027 | err = irda_find_lsap_sel(self, addr->sir_name); | 1027 | err = irda_find_lsap_sel(self, addr->sir_name); |
1028 | if (err) { | 1028 | if (err) { |
1029 | IRDA_DEBUG(0, "%s(), connect failed!\n", __func__); | 1029 | IRDA_DEBUG(0, "%s(), connect failed!\n", __func__); |
1030 | goto out; | 1030 | goto out; |
1031 | } | 1031 | } |
1032 | } else { | 1032 | } else { |
1033 | /* Directly connect to the remote LSAP | 1033 | /* Directly connect to the remote LSAP |
1034 | * specified by the sir_lsap field. | 1034 | * specified by the sir_lsap field. |
1035 | * Please use with caution; in IrDA, LSAPs are | 1035 | * Please use with caution; in IrDA, LSAPs are |
1036 | * dynamic and there is no "well-known" LSAP. */ | 1036 | * dynamic and there is no "well-known" LSAP. */ |
1037 | self->dtsap_sel = addr->sir_lsap_sel; | 1037 | self->dtsap_sel = addr->sir_lsap_sel; |
1038 | } | 1038 | } |
1039 | } | 1039 | } |
1040 | 1040 | ||
1041 | /* Check if we have opened a local TSAP */ | 1041 | /* Check if we have opened a local TSAP */ |
1042 | if (!self->tsap) | 1042 | if (!self->tsap) |
1043 | irda_open_tsap(self, LSAP_ANY, addr->sir_name); | 1043 | irda_open_tsap(self, LSAP_ANY, addr->sir_name); |
1044 | 1044 | ||
1045 | /* Move to connecting socket, start sending Connect Requests */ | 1045 | /* Move to connecting socket, start sending Connect Requests */ |
1046 | sock->state = SS_CONNECTING; | 1046 | sock->state = SS_CONNECTING; |
1047 | sk->sk_state = TCP_SYN_SENT; | 1047 | sk->sk_state = TCP_SYN_SENT; |
1048 | 1048 | ||
1049 | /* Connect to remote device */ | 1049 | /* Connect to remote device */ |
1050 | err = irttp_connect_request(self->tsap, self->dtsap_sel, | 1050 | err = irttp_connect_request(self->tsap, self->dtsap_sel, |
1051 | self->saddr, self->daddr, NULL, | 1051 | self->saddr, self->daddr, NULL, |
1052 | self->max_sdu_size_rx, NULL); | 1052 | self->max_sdu_size_rx, NULL); |
1053 | if (err) { | 1053 | if (err) { |
1054 | IRDA_DEBUG(0, "%s(), connect failed!\n", __func__); | 1054 | IRDA_DEBUG(0, "%s(), connect failed!\n", __func__); |
1055 | goto out; | 1055 | goto out; |
1056 | } | 1056 | } |
1057 | 1057 | ||
1058 | /* Now the loop */ | 1058 | /* Now the loop */ |
1059 | err = -EINPROGRESS; | 1059 | err = -EINPROGRESS; |
1060 | if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) | 1060 | if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) |
1061 | goto out; | 1061 | goto out; |
1062 | 1062 | ||
1063 | err = -ERESTARTSYS; | 1063 | err = -ERESTARTSYS; |
1064 | if (wait_event_interruptible(*(sk_sleep(sk)), | 1064 | if (wait_event_interruptible(*(sk_sleep(sk)), |
1065 | (sk->sk_state != TCP_SYN_SENT))) | 1065 | (sk->sk_state != TCP_SYN_SENT))) |
1066 | goto out; | 1066 | goto out; |
1067 | 1067 | ||
1068 | if (sk->sk_state != TCP_ESTABLISHED) { | 1068 | if (sk->sk_state != TCP_ESTABLISHED) { |
1069 | sock->state = SS_UNCONNECTED; | 1069 | sock->state = SS_UNCONNECTED; |
1070 | if (sk->sk_prot->disconnect(sk, flags)) | 1070 | if (sk->sk_prot->disconnect(sk, flags)) |
1071 | sock->state = SS_DISCONNECTING; | 1071 | sock->state = SS_DISCONNECTING; |
1072 | err = sock_error(sk); | 1072 | err = sock_error(sk); |
1073 | if (!err) | 1073 | if (!err) |
1074 | err = -ECONNRESET; | 1074 | err = -ECONNRESET; |
1075 | goto out; | 1075 | goto out; |
1076 | } | 1076 | } |
1077 | 1077 | ||
1078 | sock->state = SS_CONNECTED; | 1078 | sock->state = SS_CONNECTED; |
1079 | 1079 | ||
1080 | /* At this point, IrLMP has assigned our source address */ | 1080 | /* At this point, IrLMP has assigned our source address */ |
1081 | self->saddr = irttp_get_saddr(self->tsap); | 1081 | self->saddr = irttp_get_saddr(self->tsap); |
1082 | err = 0; | 1082 | err = 0; |
1083 | out: | 1083 | out: |
1084 | release_sock(sk); | 1084 | release_sock(sk); |
1085 | return err; | 1085 | return err; |
1086 | } | 1086 | } |
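
For orientation, here is a minimal user-space sketch of the auto-connect path handled above. It assumes the <linux/irda.h> uapi header; the service name is a caller-supplied placeholder. Leaving sir_addr zeroed makes the kernel discover the peer by service name, and the errno values listed in the comment before irda_connect() come back through connect(2):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/irda.h>

    static int connect_irda_service(const char *name)
    {
            struct sockaddr_irda addr;
            int fd = socket(AF_IRDA, SOCK_STREAM, 0);

            if (fd < 0)
                    return -1;

            memset(&addr, 0, sizeof(addr));
            addr.sir_family = AF_IRDA;
            addr.sir_addr = 0;      /* zero (or DEV_ADDR_ANY): auto-connect by name */
            strncpy(addr.sir_name, name, sizeof(addr.sir_name) - 1);

            if (connect(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
                    perror("connect");      /* e.g. ENETUNREACH, ENOTUNIQ as above */
                    close(fd);
                    return -1;
            }
            return fd;
    }

So connect_irda_service("OBEX"), say, would resolve any discoverable peer advertising that service name and connect to it.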
1087 | 1087 | ||
1088 | static struct proto irda_proto = { | 1088 | static struct proto irda_proto = { |
1089 | .name = "IRDA", | 1089 | .name = "IRDA", |
1090 | .owner = THIS_MODULE, | 1090 | .owner = THIS_MODULE, |
1091 | .obj_size = sizeof(struct irda_sock), | 1091 | .obj_size = sizeof(struct irda_sock), |
1092 | }; | 1092 | }; |
1093 | 1093 | ||
1094 | /* | 1094 | /* |
1095 | * Function irda_create (sock, protocol) | 1095 | * Function irda_create (sock, protocol) |
1096 | * | 1096 | * |
1097 | * Create IrDA socket | 1097 | * Create IrDA socket |
1098 | * | 1098 | * |
1099 | */ | 1099 | */ |
1100 | static int irda_create(struct net *net, struct socket *sock, int protocol, | 1100 | static int irda_create(struct net *net, struct socket *sock, int protocol, |
1101 | int kern) | 1101 | int kern) |
1102 | { | 1102 | { |
1103 | struct sock *sk; | 1103 | struct sock *sk; |
1104 | struct irda_sock *self; | 1104 | struct irda_sock *self; |
1105 | 1105 | ||
1106 | IRDA_DEBUG(2, "%s()\n", __func__); | 1106 | IRDA_DEBUG(2, "%s()\n", __func__); |
1107 | 1107 | ||
1108 | if (net != &init_net) | 1108 | if (net != &init_net) |
1109 | return -EAFNOSUPPORT; | 1109 | return -EAFNOSUPPORT; |
1110 | 1110 | ||
1111 | /* Check for valid socket type */ | 1111 | /* Check for valid socket type */ |
1112 | switch (sock->type) { | 1112 | switch (sock->type) { |
1113 | case SOCK_STREAM: /* For TTP connections with SAR disabled */ | 1113 | case SOCK_STREAM: /* For TTP connections with SAR disabled */ |
1114 | case SOCK_SEQPACKET: /* For TTP connections with SAR enabled */ | 1114 | case SOCK_SEQPACKET: /* For TTP connections with SAR enabled */ |
1115 | case SOCK_DGRAM: /* For TTP Unitdata or LMP Ultra transfers */ | 1115 | case SOCK_DGRAM: /* For TTP Unitdata or LMP Ultra transfers */ |
1116 | break; | 1116 | break; |
1117 | default: | 1117 | default: |
1118 | return -ESOCKTNOSUPPORT; | 1118 | return -ESOCKTNOSUPPORT; |
1119 | } | 1119 | } |
1120 | 1120 | ||
1121 | /* Allocate networking socket */ | 1121 | /* Allocate networking socket */ |
1122 | sk = sk_alloc(net, PF_IRDA, GFP_KERNEL, &irda_proto); | 1122 | sk = sk_alloc(net, PF_IRDA, GFP_KERNEL, &irda_proto); |
1123 | if (sk == NULL) | 1123 | if (sk == NULL) |
1124 | return -ENOMEM; | 1124 | return -ENOMEM; |
1125 | 1125 | ||
1126 | self = irda_sk(sk); | 1126 | self = irda_sk(sk); |
1127 | IRDA_DEBUG(2, "%s() : self is %p\n", __func__, self); | 1127 | IRDA_DEBUG(2, "%s() : self is %p\n", __func__, self); |
1128 | 1128 | ||
1129 | init_waitqueue_head(&self->query_wait); | 1129 | init_waitqueue_head(&self->query_wait); |
1130 | 1130 | ||
1131 | switch (sock->type) { | 1131 | switch (sock->type) { |
1132 | case SOCK_STREAM: | 1132 | case SOCK_STREAM: |
1133 | sock->ops = &irda_stream_ops; | 1133 | sock->ops = &irda_stream_ops; |
1134 | self->max_sdu_size_rx = TTP_SAR_DISABLE; | 1134 | self->max_sdu_size_rx = TTP_SAR_DISABLE; |
1135 | break; | 1135 | break; |
1136 | case SOCK_SEQPACKET: | 1136 | case SOCK_SEQPACKET: |
1137 | sock->ops = &irda_seqpacket_ops; | 1137 | sock->ops = &irda_seqpacket_ops; |
1138 | self->max_sdu_size_rx = TTP_SAR_UNBOUND; | 1138 | self->max_sdu_size_rx = TTP_SAR_UNBOUND; |
1139 | break; | 1139 | break; |
1140 | case SOCK_DGRAM: | 1140 | case SOCK_DGRAM: |
1141 | switch (protocol) { | 1141 | switch (protocol) { |
1142 | #ifdef CONFIG_IRDA_ULTRA | 1142 | #ifdef CONFIG_IRDA_ULTRA |
1143 | case IRDAPROTO_ULTRA: | 1143 | case IRDAPROTO_ULTRA: |
1144 | sock->ops = &irda_ultra_ops; | 1144 | sock->ops = &irda_ultra_ops; |
1145 | /* Initialise now, because we may send on unbound | 1145 | /* Initialise now, because we may send on unbound |
1146 | * sockets. Jean II */ | 1146 | * sockets. Jean II */ |
1147 | self->max_data_size = ULTRA_MAX_DATA - LMP_PID_HEADER; | 1147 | self->max_data_size = ULTRA_MAX_DATA - LMP_PID_HEADER; |
1148 | self->max_header_size = IRDA_MAX_HEADER + LMP_PID_HEADER; | 1148 | self->max_header_size = IRDA_MAX_HEADER + LMP_PID_HEADER; |
1149 | break; | 1149 | break; |
1150 | #endif /* CONFIG_IRDA_ULTRA */ | 1150 | #endif /* CONFIG_IRDA_ULTRA */ |
1151 | case IRDAPROTO_UNITDATA: | 1151 | case IRDAPROTO_UNITDATA: |
1152 | sock->ops = &irda_dgram_ops; | 1152 | sock->ops = &irda_dgram_ops; |
1153 | /* We let Unitdata conn. be like seqpack conn. */ | 1153 | /* We let Unitdata conn. be like seqpack conn. */ |
1154 | self->max_sdu_size_rx = TTP_SAR_UNBOUND; | 1154 | self->max_sdu_size_rx = TTP_SAR_UNBOUND; |
1155 | break; | 1155 | break; |
1156 | default: | 1156 | default: |
1157 | sk_free(sk); | 1157 | sk_free(sk); |
1158 | return -ESOCKTNOSUPPORT; | 1158 | return -ESOCKTNOSUPPORT; |
1159 | } | 1159 | } |
1160 | break; | 1160 | break; |
1161 | default: | 1161 | default: |
1162 | sk_free(sk); | 1162 | sk_free(sk); |
1163 | return -ESOCKTNOSUPPORT; | 1163 | return -ESOCKTNOSUPPORT; |
1164 | } | 1164 | } |
1165 | 1165 | ||
1166 | /* Initialise networking socket struct */ | 1166 | /* Initialise networking socket struct */ |
1167 | sock_init_data(sock, sk); /* Note : set sk->sk_refcnt to 1 */ | 1167 | sock_init_data(sock, sk); /* Note : set sk->sk_refcnt to 1 */ |
1168 | sk->sk_family = PF_IRDA; | 1168 | sk->sk_family = PF_IRDA; |
1169 | sk->sk_protocol = protocol; | 1169 | sk->sk_protocol = protocol; |
1170 | 1170 | ||
1171 | /* Register as a client with IrLMP */ | 1171 | /* Register as a client with IrLMP */ |
1172 | self->ckey = irlmp_register_client(0, NULL, NULL, NULL); | 1172 | self->ckey = irlmp_register_client(0, NULL, NULL, NULL); |
1173 | self->mask.word = 0xffff; | 1173 | self->mask.word = 0xffff; |
1174 | self->rx_flow = self->tx_flow = FLOW_START; | 1174 | self->rx_flow = self->tx_flow = FLOW_START; |
1175 | self->nslots = DISCOVERY_DEFAULT_SLOTS; | 1175 | self->nslots = DISCOVERY_DEFAULT_SLOTS; |
1176 | self->daddr = DEV_ADDR_ANY; /* Until we get connected */ | 1176 | self->daddr = DEV_ADDR_ANY; /* Until we get connected */ |
1177 | self->saddr = 0x0; /* so IrLMP assigns us any link */ | 1177 | self->saddr = 0x0; /* so IrLMP assigns us any link */ |
1178 | return 0; | 1178 | return 0; |
1179 | } | 1179 | } |
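
The type/protocol matrix above maps one-to-one onto socket(2) arguments. A hedged sketch (IRDAPROTO_ULTRA comes from <linux/irda.h>; the SOCK_DGRAM/Ultra combination is only accepted when the kernel was built with CONFIG_IRDA_ULTRA):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/irda.h>

    int main(void)
    {
            /* TinyTP with SAR disabled: stream semantics */
            int st = socket(AF_IRDA, SOCK_STREAM, 0);
            /* TinyTP with SAR enabled: sequenced-packet semantics */
            int sp = socket(AF_IRDA, SOCK_SEQPACKET, 0);
            /* Connectionless Ultra; errno ESOCKTNOSUPPORT without CONFIG_IRDA_ULTRA */
            int ul = socket(AF_IRDA, SOCK_DGRAM, IRDAPROTO_ULTRA);

            printf("stream=%d seqpacket=%d ultra=%d\n", st, sp, ul);
            if (st >= 0) close(st);
            if (sp >= 0) close(sp);
            if (ul >= 0) close(ul);
            return 0;
    }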
1180 | 1180 | ||
1181 | /* | 1181 | /* |
1182 | * Function irda_destroy_socket (self) | 1182 | * Function irda_destroy_socket (self) |
1183 | * | 1183 | * |
1184 | * Destroy socket | 1184 | * Destroy socket |
1185 | * | 1185 | * |
1186 | */ | 1186 | */ |
1187 | static void irda_destroy_socket(struct irda_sock *self) | 1187 | static void irda_destroy_socket(struct irda_sock *self) |
1188 | { | 1188 | { |
1189 | IRDA_DEBUG(2, "%s(%p)\n", __func__, self); | 1189 | IRDA_DEBUG(2, "%s(%p)\n", __func__, self); |
1190 | 1190 | ||
1191 | /* Unregister with IrLMP */ | 1191 | /* Unregister with IrLMP */ |
1192 | irlmp_unregister_client(self->ckey); | 1192 | irlmp_unregister_client(self->ckey); |
1193 | irlmp_unregister_service(self->skey); | 1193 | irlmp_unregister_service(self->skey); |
1194 | 1194 | ||
1195 | /* Unregister with LM-IAS */ | 1195 | /* Unregister with LM-IAS */ |
1196 | if (self->ias_obj) { | 1196 | if (self->ias_obj) { |
1197 | irias_delete_object(self->ias_obj); | 1197 | irias_delete_object(self->ias_obj); |
1198 | self->ias_obj = NULL; | 1198 | self->ias_obj = NULL; |
1199 | } | 1199 | } |
1200 | 1200 | ||
1201 | if (self->iriap) { | 1201 | if (self->iriap) { |
1202 | iriap_close(self->iriap); | 1202 | iriap_close(self->iriap); |
1203 | self->iriap = NULL; | 1203 | self->iriap = NULL; |
1204 | } | 1204 | } |
1205 | 1205 | ||
1206 | if (self->tsap) { | 1206 | if (self->tsap) { |
1207 | irttp_disconnect_request(self->tsap, NULL, P_NORMAL); | 1207 | irttp_disconnect_request(self->tsap, NULL, P_NORMAL); |
1208 | irttp_close_tsap(self->tsap); | 1208 | irttp_close_tsap(self->tsap); |
1209 | self->tsap = NULL; | 1209 | self->tsap = NULL; |
1210 | } | 1210 | } |
1211 | #ifdef CONFIG_IRDA_ULTRA | 1211 | #ifdef CONFIG_IRDA_ULTRA |
1212 | if (self->lsap) { | 1212 | if (self->lsap) { |
1213 | irlmp_close_lsap(self->lsap); | 1213 | irlmp_close_lsap(self->lsap); |
1214 | self->lsap = NULL; | 1214 | self->lsap = NULL; |
1215 | } | 1215 | } |
1216 | #endif /* CONFIG_IRDA_ULTRA */ | 1216 | #endif /* CONFIG_IRDA_ULTRA */ |
1217 | } | 1217 | } |
1218 | 1218 | ||
1219 | /* | 1219 | /* |
1220 | * Function irda_release (sock) | 1220 | * Function irda_release (sock) |
1221 | */ | 1221 | */ |
1222 | static int irda_release(struct socket *sock) | 1222 | static int irda_release(struct socket *sock) |
1223 | { | 1223 | { |
1224 | struct sock *sk = sock->sk; | 1224 | struct sock *sk = sock->sk; |
1225 | 1225 | ||
1226 | IRDA_DEBUG(2, "%s()\n", __func__); | 1226 | IRDA_DEBUG(2, "%s()\n", __func__); |
1227 | 1227 | ||
1228 | if (sk == NULL) | 1228 | if (sk == NULL) |
1229 | return 0; | 1229 | return 0; |
1230 | 1230 | ||
1231 | lock_sock(sk); | 1231 | lock_sock(sk); |
1232 | sk->sk_state = TCP_CLOSE; | 1232 | sk->sk_state = TCP_CLOSE; |
1233 | sk->sk_shutdown |= SEND_SHUTDOWN; | 1233 | sk->sk_shutdown |= SEND_SHUTDOWN; |
1234 | sk->sk_state_change(sk); | 1234 | sk->sk_state_change(sk); |
1235 | 1235 | ||
1236 | /* Destroy IrDA socket */ | 1236 | /* Destroy IrDA socket */ |
1237 | irda_destroy_socket(irda_sk(sk)); | 1237 | irda_destroy_socket(irda_sk(sk)); |
1238 | 1238 | ||
1239 | sock_orphan(sk); | 1239 | sock_orphan(sk); |
1240 | sock->sk = NULL; | 1240 | sock->sk = NULL; |
1241 | release_sock(sk); | 1241 | release_sock(sk); |
1242 | 1242 | ||
1243 | /* Purge queues (see sock_init_data()) */ | 1243 | /* Purge queues (see sock_init_data()) */ |
1244 | skb_queue_purge(&sk->sk_receive_queue); | 1244 | skb_queue_purge(&sk->sk_receive_queue); |
1245 | 1245 | ||
1246 | /* Destroy networking socket if we are the last reference on it, | 1246 | /* Destroy networking socket if we are the last reference on it, |
1247 | * i.e. if(sk->sk_refcnt == 0) -> sk_free(sk) */ | 1247 | * i.e. if(sk->sk_refcnt == 0) -> sk_free(sk) */ |
1248 | sock_put(sk); | 1248 | sock_put(sk); |
1249 | 1249 | ||
1250 | /* Notes on socket locking and deallocation... - Jean II | 1250 | /* Notes on socket locking and deallocation... - Jean II |
1251 | * In theory we should put pairs of sock_hold() / sock_put() to | 1251 | * In theory we should put pairs of sock_hold() / sock_put() to |
1252 | * prevent the socket from being destroyed whenever there is an | 1252 | * prevent the socket from being destroyed whenever there is an |
1253 | * outstanding request or outstanding incoming packet or event. | 1253 | * outstanding request or outstanding incoming packet or event. |
1254 | * | 1254 | * |
1255 | * 1) This may include IAS request, both in connect and getsockopt. | 1255 | * 1) This may include IAS request, both in connect and getsockopt. |
1256 | * Unfortunately, the situation is a bit more messy than it looks, | 1256 | * Unfortunately, the situation is a bit more messy than it looks, |
1257 | * because we close iriap and kfree(self) above. | 1257 | * because we close iriap and kfree(self) above. |
1258 | * | 1258 | * |
1259 | * 2) This may include selective discovery in getsockopt. | 1259 | * 2) This may include selective discovery in getsockopt. |
1260 | * Same stuff as above, irlmp registration and self are gone. | 1260 | * Same stuff as above, irlmp registration and self are gone. |
1261 | * | 1261 | * |
1262 | * Probably 1 and 2 may not matter, because it's all triggered | 1262 | * Probably 1 and 2 may not matter, because it's all triggered |
1263 | * by a process and the socket layer already prevents the | 1263 | * by a process and the socket layer already prevents the |
1264 | * socket from going away while a process is holding it, through | 1264 | * socket from going away while a process is holding it, through |
1265 | * sockfd_put() and fput()... | 1265 | * sockfd_put() and fput()... |
1266 | * | 1266 | * |
1267 | * 3) This may include deferred TSAP closure. In particular, | 1267 | * 3) This may include deferred TSAP closure. In particular, |
1268 | * we may receive a late irda_disconnect_indication() | 1268 | * we may receive a late irda_disconnect_indication() |
1269 | * Fortunately, (tsap_cb *)->close_pend should protect us | 1269 | * Fortunately, (tsap_cb *)->close_pend should protect us |
1270 | * from that. | 1270 | * from that. |
1271 | * | 1271 | * |
1272 | * I did some testing on SMP, and it looks solid. And the socket | 1272 | * I did some testing on SMP, and it looks solid. And the socket |
1273 | * memory leak is now gone... - Jean II | 1273 | * memory leak is now gone... - Jean II |
1274 | */ | 1274 | */ |
1275 | 1275 | ||
1276 | return 0; | 1276 | return 0; |
1277 | } | 1277 | } |
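
The sock_hold()/sock_put() pairing the comment discusses is the generic discipline for pinning a socket across asynchronous work; a schematic sketch of that discipline (illustrative only, not code from this file):

    static void async_user_of_sk(struct sock *sk)
    {
            sock_hold(sk);  /* refcount++ : sk cannot be freed under the async user */
            /* ... hand sk to a timer or callback; when that work finishes: */
            sock_put(sk);   /* refcount-- : the final put frees sk via sk_free() */
    }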
1278 | 1278 | ||
1279 | /* | 1279 | /* |
1280 | * Function irda_sendmsg (iocb, sock, msg, len) | 1280 | * Function irda_sendmsg (iocb, sock, msg, len) |
1281 | * | 1281 | * |
1282 | * Send message down to TinyTP. This function is used for both STREAM and | 1282 | * Send message down to TinyTP. This function is used for both STREAM and |
1283 | * SEQPACK services. This is possible since it forces the client to | 1283 | * SEQPACK services. This is possible since it forces the client to |
1284 | * fragment the message if necessary | 1284 | * fragment the message if necessary |
1285 | */ | 1285 | */ |
1286 | static int irda_sendmsg(struct kiocb *iocb, struct socket *sock, | 1286 | static int irda_sendmsg(struct kiocb *iocb, struct socket *sock, |
1287 | struct msghdr *msg, size_t len) | 1287 | struct msghdr *msg, size_t len) |
1288 | { | 1288 | { |
1289 | struct sock *sk = sock->sk; | 1289 | struct sock *sk = sock->sk; |
1290 | struct irda_sock *self; | 1290 | struct irda_sock *self; |
1291 | struct sk_buff *skb; | 1291 | struct sk_buff *skb; |
1292 | int err = -EPIPE; | 1292 | int err = -EPIPE; |
1293 | 1293 | ||
1294 | IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); | 1294 | IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); |
1295 | 1295 | ||
1296 | /* Note : socket.c set MSG_EOR on SEQPACKET sockets */ | 1296 | /* Note : socket.c set MSG_EOR on SEQPACKET sockets */ |
1297 | if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT | | 1297 | if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT | |
1298 | MSG_NOSIGNAL)) { | 1298 | MSG_NOSIGNAL)) { |
1299 | return -EINVAL; | 1299 | return -EINVAL; |
1300 | } | 1300 | } |
1301 | 1301 | ||
1302 | lock_sock(sk); | 1302 | lock_sock(sk); |
1303 | 1303 | ||
1304 | if (sk->sk_shutdown & SEND_SHUTDOWN) | 1304 | if (sk->sk_shutdown & SEND_SHUTDOWN) |
1305 | goto out_err; | 1305 | goto out_err; |
1306 | 1306 | ||
1307 | if (sk->sk_state != TCP_ESTABLISHED) { | 1307 | if (sk->sk_state != TCP_ESTABLISHED) { |
1308 | err = -ENOTCONN; | 1308 | err = -ENOTCONN; |
1309 | goto out; | 1309 | goto out; |
1310 | } | 1310 | } |
1311 | 1311 | ||
1312 | self = irda_sk(sk); | 1312 | self = irda_sk(sk); |
1313 | 1313 | ||
1314 | /* Check if IrTTP wants us to slow down */ | 1314 | /* Check if IrTTP wants us to slow down */ |
1315 | 1315 | ||
1316 | if (wait_event_interruptible(*(sk_sleep(sk)), | 1316 | if (wait_event_interruptible(*(sk_sleep(sk)), |
1317 | (self->tx_flow != FLOW_STOP || sk->sk_state != TCP_ESTABLISHED))) { | 1317 | (self->tx_flow != FLOW_STOP || sk->sk_state != TCP_ESTABLISHED))) { |
1318 | err = -ERESTARTSYS; | 1318 | err = -ERESTARTSYS; |
1319 | goto out; | 1319 | goto out; |
1320 | } | 1320 | } |
1321 | 1321 | ||
1322 | /* Check if we are still connected */ | 1322 | /* Check if we are still connected */ |
1323 | if (sk->sk_state != TCP_ESTABLISHED) { | 1323 | if (sk->sk_state != TCP_ESTABLISHED) { |
1324 | err = -ENOTCONN; | 1324 | err = -ENOTCONN; |
1325 | goto out; | 1325 | goto out; |
1326 | } | 1326 | } |
1327 | 1327 | ||
1328 | /* Check that we don't send out too big frames */ | 1328 | /* Check that we don't send out too big frames */ |
1329 | if (len > self->max_data_size) { | 1329 | if (len > self->max_data_size) { |
1330 | IRDA_DEBUG(2, "%s(), Chopping frame from %zd to %d bytes!\n", | 1330 | IRDA_DEBUG(2, "%s(), Chopping frame from %zd to %d bytes!\n", |
1331 | __func__, len, self->max_data_size); | 1331 | __func__, len, self->max_data_size); |
1332 | len = self->max_data_size; | 1332 | len = self->max_data_size; |
1333 | } | 1333 | } |
1334 | 1334 | ||
1335 | skb = sock_alloc_send_skb(sk, len + self->max_header_size + 16, | 1335 | skb = sock_alloc_send_skb(sk, len + self->max_header_size + 16, |
1336 | msg->msg_flags & MSG_DONTWAIT, &err); | 1336 | msg->msg_flags & MSG_DONTWAIT, &err); |
1337 | if (!skb) | 1337 | if (!skb) |
1338 | goto out_err; | 1338 | goto out_err; |
1339 | 1339 | ||
1340 | skb_reserve(skb, self->max_header_size + 16); | 1340 | skb_reserve(skb, self->max_header_size + 16); |
1341 | skb_reset_transport_header(skb); | 1341 | skb_reset_transport_header(skb); |
1342 | skb_put(skb, len); | 1342 | skb_put(skb, len); |
1343 | err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); | 1343 | err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); |
1344 | if (err) { | 1344 | if (err) { |
1345 | kfree_skb(skb); | 1345 | kfree_skb(skb); |
1346 | goto out_err; | 1346 | goto out_err; |
1347 | } | 1347 | } |
1348 | 1348 | ||
1349 | /* | 1349 | /* |
1350 | * Just send the message to TinyTP, and let it deal with possible | 1350 | * Just send the message to TinyTP, and let it deal with possible |
1351 | * errors. No need to duplicate all that here | 1351 | * errors. No need to duplicate all that here |
1352 | */ | 1352 | */ |
1353 | err = irttp_data_request(self->tsap, skb); | 1353 | err = irttp_data_request(self->tsap, skb); |
1354 | if (err) { | 1354 | if (err) { |
1355 | IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); | 1355 | IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); |
1356 | goto out_err; | 1356 | goto out_err; |
1357 | } | 1357 | } |
1358 | 1358 | ||
1359 | release_sock(sk); | 1359 | release_sock(sk); |
1360 | /* Tell client how much data we actually sent */ | 1360 | /* Tell client how much data we actually sent */ |
1361 | return len; | 1361 | return len; |
1362 | 1362 | ||
1363 | out_err: | 1363 | out_err: |
1364 | err = sk_stream_error(sk, msg->msg_flags, err); | 1364 | err = sk_stream_error(sk, msg->msg_flags, err); |
1365 | out: | 1365 | out: |
1366 | release_sock(sk); | 1366 | release_sock(sk); |
1367 | return err; | 1367 | return err; |
1368 | 1368 | ||
1369 | } | 1369 | } |
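
The wait_event_interruptible() call above is exactly the pattern whose internals this commit series reworks. Roughly, the macro expands to a loop of this shape (a simplified sketch of include/linux/wait.h, where wq and condition stand for the macro arguments; the real expansion differs in detail):

    long __ret = 0;
    DEFINE_WAIT(__wait);

    for (;;) {
            prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);
            if (condition)                          /* re-checked after every wakeup */
                    break;
            if (signal_pending(current)) {          /* interruptible: a signal aborts */
                    __ret = -ERESTARTSYS;
                    break;
            }
            schedule();                             /* actually go to sleep */
    }
    finish_wait(&wq, &__wait);
    /* __ret is the value wait_event_interruptible() evaluates to */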
1370 | 1370 | ||
1371 | /* | 1371 | /* |
1372 | * Function irda_recvmsg_dgram (iocb, sock, msg, size, flags) | 1372 | * Function irda_recvmsg_dgram (iocb, sock, msg, size, flags) |
1373 | * | 1373 | * |
1374 | * Try to receive message and copy it to user. The frame is discarded | 1374 | * Try to receive message and copy it to user. The frame is discarded |
1375 | * after being read, regardless of how much the user actually read | 1375 | * after being read, regardless of how much the user actually read |
1376 | */ | 1376 | */ |
1377 | static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, | 1377 | static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, |
1378 | struct msghdr *msg, size_t size, int flags) | 1378 | struct msghdr *msg, size_t size, int flags) |
1379 | { | 1379 | { |
1380 | struct sock *sk = sock->sk; | 1380 | struct sock *sk = sock->sk; |
1381 | struct irda_sock *self = irda_sk(sk); | 1381 | struct irda_sock *self = irda_sk(sk); |
1382 | struct sk_buff *skb; | 1382 | struct sk_buff *skb; |
1383 | size_t copied; | 1383 | size_t copied; |
1384 | int err; | 1384 | int err; |
1385 | 1385 | ||
1386 | IRDA_DEBUG(4, "%s()\n", __func__); | 1386 | IRDA_DEBUG(4, "%s()\n", __func__); |
1387 | 1387 | ||
1388 | msg->msg_namelen = 0; | 1388 | msg->msg_namelen = 0; |
1389 | 1389 | ||
1390 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, | 1390 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, |
1391 | flags & MSG_DONTWAIT, &err); | 1391 | flags & MSG_DONTWAIT, &err); |
1392 | if (!skb) | 1392 | if (!skb) |
1393 | return err; | 1393 | return err; |
1394 | 1394 | ||
1395 | skb_reset_transport_header(skb); | 1395 | skb_reset_transport_header(skb); |
1396 | copied = skb->len; | 1396 | copied = skb->len; |
1397 | 1397 | ||
1398 | if (copied > size) { | 1398 | if (copied > size) { |
1399 | IRDA_DEBUG(2, "%s(), Received truncated frame (%zd > %zd)!\n", | 1399 | IRDA_DEBUG(2, "%s(), Received truncated frame (%zd > %zd)!\n", |
1400 | __func__, copied, size); | 1400 | __func__, copied, size); |
1401 | copied = size; | 1401 | copied = size; |
1402 | msg->msg_flags |= MSG_TRUNC; | 1402 | msg->msg_flags |= MSG_TRUNC; |
1403 | } | 1403 | } |
1404 | skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); | 1404 | skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); |
1405 | 1405 | ||
1406 | skb_free_datagram(sk, skb); | 1406 | skb_free_datagram(sk, skb); |
1407 | 1407 | ||
1408 | /* | 1408 | /* |
1409 | * Check if we have previously stopped IrTTP and we now | 1409 | * Check if we have previously stopped IrTTP and we now |
1410 | * have more free space in our rx_queue. If so, tell IrTTP | 1410 | * have more free space in our rx_queue. If so, tell IrTTP |
1411 | * to start delivering frames again before our rx_queue gets | 1411 | * to start delivering frames again before our rx_queue gets |
1412 | * empty | 1412 | * empty |
1413 | */ | 1413 | */ |
1414 | if (self->rx_flow == FLOW_STOP) { | 1414 | if (self->rx_flow == FLOW_STOP) { |
1415 | if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) { | 1415 | if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) { |
1416 | IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __func__); | 1416 | IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __func__); |
1417 | self->rx_flow = FLOW_START; | 1417 | self->rx_flow = FLOW_START; |
1418 | irttp_flow_request(self->tsap, FLOW_START); | 1418 | irttp_flow_request(self->tsap, FLOW_START); |
1419 | } | 1419 | } |
1420 | } | 1420 | } |
1421 | 1421 | ||
1422 | return copied; | 1422 | return copied; |
1423 | } | 1423 | } |
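
The restart test above is a power-of-two threshold in disguise: rmem_alloc << 2 <= rcvbuf is the same as rmem_alloc <= rcvbuf / 4, so IrTTP is restarted once the receive queue has drained below a quarter of the socket buffer (with a 64 KiB sk_rcvbuf, for instance, below 16 KiB). The same condition written without the shift:

    if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 4) {
            self->rx_flow = FLOW_START;
            irttp_flow_request(self->tsap, FLOW_START);
    }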
1424 | 1424 | ||
1425 | /* | 1425 | /* |
1426 | * Function irda_recvmsg_stream (iocb, sock, msg, size, flags) | 1426 | * Function irda_recvmsg_stream (iocb, sock, msg, size, flags) |
1427 | */ | 1427 | */ |
1428 | static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock, | 1428 | static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock, |
1429 | struct msghdr *msg, size_t size, int flags) | 1429 | struct msghdr *msg, size_t size, int flags) |
1430 | { | 1430 | { |
1431 | struct sock *sk = sock->sk; | 1431 | struct sock *sk = sock->sk; |
1432 | struct irda_sock *self = irda_sk(sk); | 1432 | struct irda_sock *self = irda_sk(sk); |
1433 | int noblock = flags & MSG_DONTWAIT; | 1433 | int noblock = flags & MSG_DONTWAIT; |
1434 | size_t copied = 0; | 1434 | size_t copied = 0; |
1435 | int target, err; | 1435 | int target, err; |
1436 | long timeo; | 1436 | long timeo; |
1437 | 1437 | ||
1438 | IRDA_DEBUG(3, "%s()\n", __func__); | 1438 | IRDA_DEBUG(3, "%s()\n", __func__); |
1439 | 1439 | ||
1440 | if ((err = sock_error(sk)) < 0) | 1440 | if ((err = sock_error(sk)) < 0) |
1441 | return err; | 1441 | return err; |
1442 | 1442 | ||
1443 | if (sock->flags & __SO_ACCEPTCON) | 1443 | if (sock->flags & __SO_ACCEPTCON) |
1444 | return -EINVAL; | 1444 | return -EINVAL; |
1445 | 1445 | ||
1446 | err = -EOPNOTSUPP; | 1446 | err = -EOPNOTSUPP; |
1447 | if (flags & MSG_OOB) | 1447 | if (flags & MSG_OOB) |
1448 | return -EOPNOTSUPP; | 1448 | return -EOPNOTSUPP; |
1449 | 1449 | ||
1450 | err = 0; | 1450 | err = 0; |
1451 | target = sock_rcvlowat(sk, flags & MSG_WAITALL, size); | 1451 | target = sock_rcvlowat(sk, flags & MSG_WAITALL, size); |
1452 | timeo = sock_rcvtimeo(sk, noblock); | 1452 | timeo = sock_rcvtimeo(sk, noblock); |
1453 | 1453 | ||
1454 | msg->msg_namelen = 0; | 1454 | msg->msg_namelen = 0; |
1455 | 1455 | ||
1456 | do { | 1456 | do { |
1457 | int chunk; | 1457 | int chunk; |
1458 | struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue); | 1458 | struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue); |
1459 | 1459 | ||
1460 | if (skb == NULL) { | 1460 | if (skb == NULL) { |
1461 | DEFINE_WAIT(wait); | 1461 | DEFINE_WAIT(wait); |
1462 | err = 0; | 1462 | err = 0; |
1463 | 1463 | ||
1464 | if (copied >= target) | 1464 | if (copied >= target) |
1465 | break; | 1465 | break; |
1466 | 1466 | ||
1467 | prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | 1467 | prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
1468 | 1468 | ||
1469 | /* | 1469 | /* |
1470 | * POSIX 1003.1g mandates this order. | 1470 | * POSIX 1003.1g mandates this order. |
1471 | */ | 1471 | */ |
1472 | err = sock_error(sk); | 1472 | err = sock_error(sk); |
1473 | if (err) | 1473 | if (err) |
1474 | ; | 1474 | ; |
1475 | else if (sk->sk_shutdown & RCV_SHUTDOWN) | 1475 | else if (sk->sk_shutdown & RCV_SHUTDOWN) |
1476 | ; | 1476 | ; |
1477 | else if (noblock) | 1477 | else if (noblock) |
1478 | err = -EAGAIN; | 1478 | err = -EAGAIN; |
1479 | else if (signal_pending(current)) | 1479 | else if (signal_pending(current)) |
1480 | err = sock_intr_errno(timeo); | 1480 | err = sock_intr_errno(timeo); |
1481 | else if (sk->sk_state != TCP_ESTABLISHED) | 1481 | else if (sk->sk_state != TCP_ESTABLISHED) |
1482 | err = -ENOTCONN; | 1482 | err = -ENOTCONN; |
1483 | else if (skb_peek(&sk->sk_receive_queue) == NULL) | 1483 | else if (skb_peek(&sk->sk_receive_queue) == NULL) |
1484 | /* Put the process to sleep until data arrives */ | 1484 | /* Put the process to sleep until data arrives */ |
1485 | schedule(); | 1485 | schedule(); |
1486 | 1486 | ||
1487 | finish_wait(sk_sleep(sk), &wait); | 1487 | finish_wait(sk_sleep(sk), &wait); |
1488 | 1488 | ||
1489 | if (err) | 1489 | if (err) |
1490 | return err; | 1490 | return err; |
1491 | if (sk->sk_shutdown & RCV_SHUTDOWN) | 1491 | if (sk->sk_shutdown & RCV_SHUTDOWN) |
1492 | break; | 1492 | break; |
1493 | 1493 | ||
1494 | continue; | 1494 | continue; |
1495 | } | 1495 | } |
1496 | 1496 | ||
1497 | chunk = min_t(unsigned int, skb->len, size); | 1497 | chunk = min_t(unsigned int, skb->len, size); |
1498 | if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) { | 1498 | if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) { |
1499 | skb_queue_head(&sk->sk_receive_queue, skb); | 1499 | skb_queue_head(&sk->sk_receive_queue, skb); |
1500 | if (copied == 0) | 1500 | if (copied == 0) |
1501 | copied = -EFAULT; | 1501 | copied = -EFAULT; |
1502 | break; | 1502 | break; |
1503 | } | 1503 | } |
1504 | copied += chunk; | 1504 | copied += chunk; |
1505 | size -= chunk; | 1505 | size -= chunk; |
1506 | 1506 | ||
1507 | /* Mark read part of skb as used */ | 1507 | /* Mark read part of skb as used */ |
1508 | if (!(flags & MSG_PEEK)) { | 1508 | if (!(flags & MSG_PEEK)) { |
1509 | skb_pull(skb, chunk); | 1509 | skb_pull(skb, chunk); |
1510 | 1510 | ||
1511 | /* put the skb back if we didn't use it up.. */ | 1511 | /* put the skb back if we didn't use it up.. */ |
1512 | if (skb->len) { | 1512 | if (skb->len) { |
1513 | IRDA_DEBUG(1, "%s(), back on q!\n", | 1513 | IRDA_DEBUG(1, "%s(), back on q!\n", |
1514 | __func__); | 1514 | __func__); |
1515 | skb_queue_head(&sk->sk_receive_queue, skb); | 1515 | skb_queue_head(&sk->sk_receive_queue, skb); |
1516 | break; | 1516 | break; |
1517 | } | 1517 | } |
1518 | 1518 | ||
1519 | kfree_skb(skb); | 1519 | kfree_skb(skb); |
1520 | } else { | 1520 | } else { |
1521 | IRDA_DEBUG(0, "%s() questionable!?\n", __func__); | 1521 | IRDA_DEBUG(0, "%s() questionable!?\n", __func__); |
1522 | 1522 | ||
1523 | /* put message back and return */ | 1523 | /* put message back and return */ |
1524 | skb_queue_head(&sk->sk_receive_queue, skb); | 1524 | skb_queue_head(&sk->sk_receive_queue, skb); |
1525 | break; | 1525 | break; |
1526 | } | 1526 | } |
1527 | } while (size); | 1527 | } while (size); |
1528 | 1528 | ||
1529 | /* | 1529 | /* |
1530 | * Check if we have previously stopped IrTTP and we now | 1530 | * Check if we have previously stopped IrTTP and we now |
1531 | * have more free space in our rx_queue. If so, tell IrTTP | 1531 | * have more free space in our rx_queue. If so, tell IrTTP |
1532 | * to start delivering frames again before our rx_queue gets | 1532 | * to start delivering frames again before our rx_queue gets |
1533 | * empty | 1533 | * empty |
1534 | */ | 1534 | */ |
1535 | if (self->rx_flow == FLOW_STOP) { | 1535 | if (self->rx_flow == FLOW_STOP) { |
1536 | if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) { | 1536 | if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) { |
1537 | IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __func__); | 1537 | IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __func__); |
1538 | self->rx_flow = FLOW_START; | 1538 | self->rx_flow = FLOW_START; |
1539 | irttp_flow_request(self->tsap, FLOW_START); | 1539 | irttp_flow_request(self->tsap, FLOW_START); |
1540 | } | 1540 | } |
1541 | } | 1541 | } |
1542 | 1542 | ||
1543 | return copied; | 1543 | return copied; |
1544 | } | 1544 | } |
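
The open-coded prepare_to_wait_exclusive() loop above is the style of code the wait_event*() macros exist to replace; it presumably stays open-coded because every wake-up reason maps to a distinct errno, which a single condition expression handles poorly. One way to make the POSIX-mandated ordering explicit is a condition helper (a hypothetical refactor, not part of this commit):

    static bool rx_ready_or_error(struct sock *sk, int noblock, long timeo, int *err)
    {
            *err = sock_error(sk);                  /* 1. pending socket error     */
            if (*err)
                    return true;
            if (sk->sk_shutdown & RCV_SHUTDOWN)     /* 2. peer shut us down        */
                    return true;
            if (noblock) {                          /* 3. caller said don't block  */
                    *err = -EAGAIN;
                    return true;
            }
            if (signal_pending(current)) {          /* 4. interrupted by a signal  */
                    *err = sock_intr_errno(timeo);
                    return true;
            }
            if (sk->sk_state != TCP_ESTABLISHED) {  /* 5. connection went away     */
                    *err = -ENOTCONN;
                    return true;
            }
            /* 6. finally: stop waiting once data is queued */
            return skb_peek(&sk->sk_receive_queue) != NULL;
    }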
1545 | 1545 | ||
1546 | /* | 1546 | /* |
1547 | * Function irda_sendmsg_dgram (iocb, sock, msg, len) | 1547 | * Function irda_sendmsg_dgram (iocb, sock, msg, len) |
1548 | * | 1548 | * |
1549 | * Send message down to TinyTP for the unreliable sequenced | 1549 | * Send message down to TinyTP for the unreliable sequenced |
1550 | * packet service... | 1550 | * packet service... |
1551 | * | 1551 | * |
1552 | */ | 1552 | */ |
1553 | static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock, | 1553 | static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock, |
1554 | struct msghdr *msg, size_t len) | 1554 | struct msghdr *msg, size_t len) |
1555 | { | 1555 | { |
1556 | struct sock *sk = sock->sk; | 1556 | struct sock *sk = sock->sk; |
1557 | struct irda_sock *self; | 1557 | struct irda_sock *self; |
1558 | struct sk_buff *skb; | 1558 | struct sk_buff *skb; |
1559 | int err; | 1559 | int err; |
1560 | 1560 | ||
1561 | IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); | 1561 | IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); |
1562 | 1562 | ||
1563 | if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) | 1563 | if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) |
1564 | return -EINVAL; | 1564 | return -EINVAL; |
1565 | 1565 | ||
1566 | lock_sock(sk); | 1566 | lock_sock(sk); |
1567 | 1567 | ||
1568 | if (sk->sk_shutdown & SEND_SHUTDOWN) { | 1568 | if (sk->sk_shutdown & SEND_SHUTDOWN) { |
1569 | send_sig(SIGPIPE, current, 0); | 1569 | send_sig(SIGPIPE, current, 0); |
1570 | err = -EPIPE; | 1570 | err = -EPIPE; |
1571 | goto out; | 1571 | goto out; |
1572 | } | 1572 | } |
1573 | 1573 | ||
1574 | err = -ENOTCONN; | 1574 | err = -ENOTCONN; |
1575 | if (sk->sk_state != TCP_ESTABLISHED) | 1575 | if (sk->sk_state != TCP_ESTABLISHED) |
1576 | goto out; | 1576 | goto out; |
1577 | 1577 | ||
1578 | self = irda_sk(sk); | 1578 | self = irda_sk(sk); |
1579 | 1579 | ||
1580 | /* | 1580 | /* |
1581 | * Check that we don't send out too big frames. This is an unreliable | 1581 | * Check that we don't send out too big frames. This is an unreliable |
1582 | * service, so we have no fragmentation and no coalescence | 1582 | * service, so we have no fragmentation and no coalescence |
1583 | */ | 1583 | */ |
1584 | if (len > self->max_data_size) { | 1584 | if (len > self->max_data_size) { |
1585 | IRDA_DEBUG(0, "%s(), Warning, too much data! " | 1585 | IRDA_DEBUG(0, "%s(), Warning, too much data! " |
1586 | "Chopping frame from %zd to %d bytes!\n", | 1586 | "Chopping frame from %zd to %d bytes!\n", |
1587 | __func__, len, self->max_data_size); | 1587 | __func__, len, self->max_data_size); |
1588 | len = self->max_data_size; | 1588 | len = self->max_data_size; |
1589 | } | 1589 | } |
1590 | 1590 | ||
1591 | skb = sock_alloc_send_skb(sk, len + self->max_header_size, | 1591 | skb = sock_alloc_send_skb(sk, len + self->max_header_size, |
1592 | msg->msg_flags & MSG_DONTWAIT, &err); | 1592 | msg->msg_flags & MSG_DONTWAIT, &err); |
1593 | err = -ENOBUFS; | 1593 | err = -ENOBUFS; |
1594 | if (!skb) | 1594 | if (!skb) |
1595 | goto out; | 1595 | goto out; |
1596 | 1596 | ||
1597 | skb_reserve(skb, self->max_header_size); | 1597 | skb_reserve(skb, self->max_header_size); |
1598 | skb_reset_transport_header(skb); | 1598 | skb_reset_transport_header(skb); |
1599 | 1599 | ||
1600 | IRDA_DEBUG(4, "%s(), appending user data\n", __func__); | 1600 | IRDA_DEBUG(4, "%s(), appending user data\n", __func__); |
1601 | skb_put(skb, len); | 1601 | skb_put(skb, len); |
1602 | err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); | 1602 | err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); |
1603 | if (err) { | 1603 | if (err) { |
1604 | kfree_skb(skb); | 1604 | kfree_skb(skb); |
1605 | goto out; | 1605 | goto out; |
1606 | } | 1606 | } |
1607 | 1607 | ||
1608 | /* | 1608 | /* |
1609 | * Just send the message to TinyTP, and let it deal with possible | 1609 | * Just send the message to TinyTP, and let it deal with possible |
1610 | * errors. No need to duplicate all that here | 1610 | * errors. No need to duplicate all that here |
1611 | */ | 1611 | */ |
1612 | err = irttp_udata_request(self->tsap, skb); | 1612 | err = irttp_udata_request(self->tsap, skb); |
1613 | if (err) { | 1613 | if (err) { |
1614 | IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); | 1614 | IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); |
1615 | goto out; | 1615 | goto out; |
1616 | } | 1616 | } |
1617 | 1617 | ||
1618 | release_sock(sk); | 1618 | release_sock(sk); |
1619 | return len; | 1619 | return len; |
1620 | 1620 | ||
1621 | out: | 1621 | out: |
1622 | release_sock(sk); | 1622 | release_sock(sk); |
1623 | return err; | 1623 | return err; |
1624 | } | 1624 | } |
1625 | 1625 | ||
1626 | /* | 1626 | /* |
1627 | * Function irda_sendmsg_ultra (iocb, sock, msg, len) | 1627 | * Function irda_sendmsg_ultra (iocb, sock, msg, len) |
1628 | * | 1628 | * |
1629 | * Send message down to IrLMP for the unreliable Ultra | 1629 | * Send message down to IrLMP for the unreliable Ultra |
1630 | * packet service... | 1630 | * packet service... |
1631 | */ | 1631 | */ |
1632 | #ifdef CONFIG_IRDA_ULTRA | 1632 | #ifdef CONFIG_IRDA_ULTRA |
1633 | static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock, | 1633 | static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock, |
1634 | struct msghdr *msg, size_t len) | 1634 | struct msghdr *msg, size_t len) |
1635 | { | 1635 | { |
1636 | struct sock *sk = sock->sk; | 1636 | struct sock *sk = sock->sk; |
1637 | struct irda_sock *self; | 1637 | struct irda_sock *self; |
1638 | __u8 pid = 0; | 1638 | __u8 pid = 0; |
1639 | int bound = 0; | 1639 | int bound = 0; |
1640 | struct sk_buff *skb; | 1640 | struct sk_buff *skb; |
1641 | int err; | 1641 | int err; |
1642 | 1642 | ||
1643 | IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); | 1643 | IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); |
1644 | 1644 | ||
1645 | err = -EINVAL; | 1645 | err = -EINVAL; |
1646 | if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) | 1646 | if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) |
1647 | return -EINVAL; | 1647 | return -EINVAL; |
1648 | 1648 | ||
1649 | lock_sock(sk); | 1649 | lock_sock(sk); |
1650 | 1650 | ||
1651 | err = -EPIPE; | 1651 | err = -EPIPE; |
1652 | if (sk->sk_shutdown & SEND_SHUTDOWN) { | 1652 | if (sk->sk_shutdown & SEND_SHUTDOWN) { |
1653 | send_sig(SIGPIPE, current, 0); | 1653 | send_sig(SIGPIPE, current, 0); |
1654 | goto out; | 1654 | goto out; |
1655 | } | 1655 | } |
1656 | 1656 | ||
1657 | self = irda_sk(sk); | 1657 | self = irda_sk(sk); |
1658 | 1658 | ||
1659 | /* Check if an address was specified with sendto. Jean II */ | 1659 | /* Check if an address was specified with sendto. Jean II */ |
1660 | if (msg->msg_name) { | 1660 | if (msg->msg_name) { |
1661 | struct sockaddr_irda *addr = (struct sockaddr_irda *) msg->msg_name; | 1661 | struct sockaddr_irda *addr = (struct sockaddr_irda *) msg->msg_name; |
1662 | err = -EINVAL; | 1662 | err = -EINVAL; |
1663 | /* Check address, extract pid. Jean II */ | 1663 | /* Check address, extract pid. Jean II */ |
1664 | if (msg->msg_namelen < sizeof(*addr)) | 1664 | if (msg->msg_namelen < sizeof(*addr)) |
1665 | goto out; | 1665 | goto out; |
1666 | if (addr->sir_family != AF_IRDA) | 1666 | if (addr->sir_family != AF_IRDA) |
1667 | goto out; | 1667 | goto out; |
1668 | 1668 | ||
1669 | pid = addr->sir_lsap_sel; | 1669 | pid = addr->sir_lsap_sel; |
1670 | if (pid & 0x80) { | 1670 | if (pid & 0x80) { |
1671 | IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__); | 1671 | IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__); |
1672 | err = -EOPNOTSUPP; | 1672 | err = -EOPNOTSUPP; |
1673 | goto out; | 1673 | goto out; |
1674 | } | 1674 | } |
1675 | } else { | 1675 | } else { |
1676 | /* Check that the socket is properly bound to an Ultra | 1676 | /* Check that the socket is properly bound to an Ultra |
1677 | * port. Jean II */ | 1677 | * port. Jean II */ |
1678 | if ((self->lsap == NULL) || | 1678 | if ((self->lsap == NULL) || |
1679 | (sk->sk_state != TCP_ESTABLISHED)) { | 1679 | (sk->sk_state != TCP_ESTABLISHED)) { |
1680 | IRDA_DEBUG(0, "%s(), socket not bound to Ultra PID.\n", | 1680 | IRDA_DEBUG(0, "%s(), socket not bound to Ultra PID.\n", |
1681 | __func__); | 1681 | __func__); |
1682 | err = -ENOTCONN; | 1682 | err = -ENOTCONN; |
1683 | goto out; | 1683 | goto out; |
1684 | } | 1684 | } |
1685 | /* Use PID from socket */ | 1685 | /* Use PID from socket */ |
1686 | bound = 1; | 1686 | bound = 1; |
1687 | } | 1687 | } |
1688 | 1688 | ||
1689 | /* | 1689 | /* |
1690 | * Check that we don't send out too big frames. This is an unreliable | 1690 | * Check that we don't send out too big frames. This is an unreliable |
1691 | * service, so we have no fragmentation and no coalescence | 1691 | * service, so we have no fragmentation and no coalescence |
1692 | */ | 1692 | */ |
1693 | if (len > self->max_data_size) { | 1693 | if (len > self->max_data_size) { |
1694 | IRDA_DEBUG(0, "%s(), Warning, too much data! " | 1694 | IRDA_DEBUG(0, "%s(), Warning, too much data! " |
1695 | "Chopping frame from %zd to %d bytes!\n", | 1695 | "Chopping frame from %zd to %d bytes!\n", |
1696 | __func__, len, self->max_data_size); | 1696 | __func__, len, self->max_data_size); |
1697 | len = self->max_data_size; | 1697 | len = self->max_data_size; |
1698 | } | 1698 | } |
1699 | 1699 | ||
1700 | skb = sock_alloc_send_skb(sk, len + self->max_header_size, | 1700 | skb = sock_alloc_send_skb(sk, len + self->max_header_size, |
1701 | msg->msg_flags & MSG_DONTWAIT, &err); | 1701 | msg->msg_flags & MSG_DONTWAIT, &err); |
1702 | err = -ENOBUFS; | 1702 | err = -ENOBUFS; |
1703 | if (!skb) | 1703 | if (!skb) |
1704 | goto out; | 1704 | goto out; |
1705 | 1705 | ||
1706 | skb_reserve(skb, self->max_header_size); | 1706 | skb_reserve(skb, self->max_header_size); |
1707 | skb_reset_transport_header(skb); | 1707 | skb_reset_transport_header(skb); |
1708 | 1708 | ||
1709 | IRDA_DEBUG(4, "%s(), appending user data\n", __func__); | 1709 | IRDA_DEBUG(4, "%s(), appending user data\n", __func__); |
1710 | skb_put(skb, len); | 1710 | skb_put(skb, len); |
1711 | err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); | 1711 | err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); |
1712 | if (err) { | 1712 | if (err) { |
1713 | kfree_skb(skb); | 1713 | kfree_skb(skb); |
1714 | goto out; | 1714 | goto out; |
1715 | } | 1715 | } |
1716 | 1716 | ||
1717 | err = irlmp_connless_data_request((bound ? self->lsap : NULL), | 1717 | err = irlmp_connless_data_request((bound ? self->lsap : NULL), |
1718 | skb, pid); | 1718 | skb, pid); |
1719 | if (err) | 1719 | if (err) |
1720 | IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); | 1720 | IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); |
1721 | out: | 1721 | out: |
1722 | release_sock(sk); | 1722 | release_sock(sk); |
1723 | return err ? : len; | 1723 | return err ? : len; |
1724 | } | 1724 | } |
1725 | #endif /* CONFIG_IRDA_ULTRA */ | 1725 | #endif /* CONFIG_IRDA_ULTRA */ |
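
From user space, the destination PID of an unbound Ultra socket travels in sir_lsap_sel and must have bit 7 clear, exactly as checked above. A sketch (the PID value 0x42 is purely illustrative):

    #include <string.h>
    #include <sys/socket.h>
    #include <linux/irda.h>

    static ssize_t ultra_send(int fd, const void *buf, size_t len)
    {
            struct sockaddr_irda dst;

            memset(&dst, 0, sizeof(dst));
            dst.sir_family = AF_IRDA;
            dst.sir_lsap_sel = 0x42;        /* Ultra PID; a set 0x80 bit => EOPNOTSUPP */

            /* fd comes from socket(AF_IRDA, SOCK_DGRAM, IRDAPROTO_ULTRA) */
            return sendto(fd, buf, len, 0, (struct sockaddr *) &dst, sizeof(dst));
    }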
1726 | 1726 | ||
1727 | /* | 1727 | /* |
1728 | * Function irda_shutdown (sock, how) | 1728 | * Function irda_shutdown (sock, how) |
1729 | */ | 1729 | */ |
1730 | static int irda_shutdown(struct socket *sock, int how) | 1730 | static int irda_shutdown(struct socket *sock, int how) |
1731 | { | 1731 | { |
1732 | struct sock *sk = sock->sk; | 1732 | struct sock *sk = sock->sk; |
1733 | struct irda_sock *self = irda_sk(sk); | 1733 | struct irda_sock *self = irda_sk(sk); |
1734 | 1734 | ||
1735 | IRDA_DEBUG(1, "%s(%p)\n", __func__, self); | 1735 | IRDA_DEBUG(1, "%s(%p)\n", __func__, self); |
1736 | 1736 | ||
1737 | lock_sock(sk); | 1737 | lock_sock(sk); |
1738 | 1738 | ||
1739 | sk->sk_state = TCP_CLOSE; | 1739 | sk->sk_state = TCP_CLOSE; |
1740 | sk->sk_shutdown |= SEND_SHUTDOWN; | 1740 | sk->sk_shutdown |= SEND_SHUTDOWN; |
1741 | sk->sk_state_change(sk); | 1741 | sk->sk_state_change(sk); |
1742 | 1742 | ||
1743 | if (self->iriap) { | 1743 | if (self->iriap) { |
1744 | iriap_close(self->iriap); | 1744 | iriap_close(self->iriap); |
1745 | self->iriap = NULL; | 1745 | self->iriap = NULL; |
1746 | } | 1746 | } |
1747 | 1747 | ||
1748 | if (self->tsap) { | 1748 | if (self->tsap) { |
1749 | irttp_disconnect_request(self->tsap, NULL, P_NORMAL); | 1749 | irttp_disconnect_request(self->tsap, NULL, P_NORMAL); |
1750 | irttp_close_tsap(self->tsap); | 1750 | irttp_close_tsap(self->tsap); |
1751 | self->tsap = NULL; | 1751 | self->tsap = NULL; |
1752 | } | 1752 | } |
1753 | 1753 | ||
1754 | /* A few cleanups so the socket looks as good as new... */ | 1754 | /* A few cleanups so the socket looks as good as new... */ |
1755 | self->rx_flow = self->tx_flow = FLOW_START; /* needed ??? */ | 1755 | self->rx_flow = self->tx_flow = FLOW_START; /* needed ??? */ |
1756 | self->daddr = DEV_ADDR_ANY; /* Until we get re-connected */ | 1756 | self->daddr = DEV_ADDR_ANY; /* Until we get re-connected */ |
1757 | self->saddr = 0x0; /* so IrLMP assigns us any link */ | 1757 | self->saddr = 0x0; /* so IrLMP assigns us any link */ |
1758 | 1758 | ||
1759 | release_sock(sk); | 1759 | release_sock(sk); |
1760 | 1760 | ||
1761 | return 0; | 1761 | return 0; |
1762 | } | 1762 | } |
1763 | 1763 | ||
1764 | /* | 1764 | /* |
1765 | * Function irda_poll (file, sock, wait) | 1765 | * Function irda_poll (file, sock, wait) |
1766 | */ | 1766 | */ |
1767 | static unsigned int irda_poll(struct file * file, struct socket *sock, | 1767 | static unsigned int irda_poll(struct file * file, struct socket *sock, |
1768 | poll_table *wait) | 1768 | poll_table *wait) |
1769 | { | 1769 | { |
1770 | struct sock *sk = sock->sk; | 1770 | struct sock *sk = sock->sk; |
1771 | struct irda_sock *self = irda_sk(sk); | 1771 | struct irda_sock *self = irda_sk(sk); |
1772 | unsigned int mask; | 1772 | unsigned int mask; |
1773 | 1773 | ||
1774 | IRDA_DEBUG(4, "%s()\n", __func__); | 1774 | IRDA_DEBUG(4, "%s()\n", __func__); |
1775 | 1775 | ||
1776 | poll_wait(file, sk_sleep(sk), wait); | 1776 | poll_wait(file, sk_sleep(sk), wait); |
1777 | mask = 0; | 1777 | mask = 0; |
1778 | 1778 | ||
1779 | /* Exceptional events? */ | 1779 | /* Exceptional events? */ |
1780 | if (sk->sk_err) | 1780 | if (sk->sk_err) |
1781 | mask |= POLLERR; | 1781 | mask |= POLLERR; |
1782 | if (sk->sk_shutdown & RCV_SHUTDOWN) { | 1782 | if (sk->sk_shutdown & RCV_SHUTDOWN) { |
1783 | IRDA_DEBUG(0, "%s(), POLLHUP\n", __func__); | 1783 | IRDA_DEBUG(0, "%s(), POLLHUP\n", __func__); |
1784 | mask |= POLLHUP; | 1784 | mask |= POLLHUP; |
1785 | } | 1785 | } |
1786 | 1786 | ||
1787 | /* Readable? */ | 1787 | /* Readable? */ |
1788 | if (!skb_queue_empty(&sk->sk_receive_queue)) { | 1788 | if (!skb_queue_empty(&sk->sk_receive_queue)) { |
1789 | IRDA_DEBUG(4, "Socket is readable\n"); | 1789 | IRDA_DEBUG(4, "Socket is readable\n"); |
1790 | mask |= POLLIN | POLLRDNORM; | 1790 | mask |= POLLIN | POLLRDNORM; |
1791 | } | 1791 | } |
1792 | 1792 | ||
1793 | /* Connection-based sockets need to check for termination and startup */ | 1793 | /* Connection-based sockets need to check for termination and startup */ |
1794 | switch (sk->sk_type) { | 1794 | switch (sk->sk_type) { |
1795 | case SOCK_STREAM: | 1795 | case SOCK_STREAM: |
1796 | if (sk->sk_state == TCP_CLOSE) { | 1796 | if (sk->sk_state == TCP_CLOSE) { |
1797 | IRDA_DEBUG(0, "%s(), POLLHUP\n", __func__); | 1797 | IRDA_DEBUG(0, "%s(), POLLHUP\n", __func__); |
1798 | mask |= POLLHUP; | 1798 | mask |= POLLHUP; |
1799 | } | 1799 | } |
1800 | 1800 | ||
1801 | if (sk->sk_state == TCP_ESTABLISHED) { | 1801 | if (sk->sk_state == TCP_ESTABLISHED) { |
1802 | if ((self->tx_flow == FLOW_START) && | 1802 | if ((self->tx_flow == FLOW_START) && |
1803 | sock_writeable(sk)) | 1803 | sock_writeable(sk)) |
1804 | { | 1804 | { |
1805 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; | 1805 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; |
1806 | } | 1806 | } |
1807 | } | 1807 | } |
1808 | break; | 1808 | break; |
1809 | case SOCK_SEQPACKET: | 1809 | case SOCK_SEQPACKET: |
1810 | if ((self->tx_flow == FLOW_START) && | 1810 | if ((self->tx_flow == FLOW_START) && |
1811 | sock_writeable(sk)) | 1811 | sock_writeable(sk)) |
1812 | { | 1812 | { |
1813 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; | 1813 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; |
1814 | } | 1814 | } |
1815 | break; | 1815 | break; |
1816 | case SOCK_DGRAM: | 1816 | case SOCK_DGRAM: |
1817 | if (sock_writeable(sk)) | 1817 | if (sock_writeable(sk)) |
1818 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; | 1818 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; |
1819 | break; | 1819 | break; |
1820 | default: | 1820 | default: |
1821 | break; | 1821 | break; |
1822 | } | 1822 | } |
1823 | 1823 | ||
1824 | return mask; | 1824 | return mask; |
1825 | } | 1825 | } |
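
Read through poll(2), the masks above mean: POLLHUP for a dead or shut-down connection, POLLIN while frames are queued, and POLLOUT only while IrTTP flow control is open. A small sketch that waits for a flow-controlled socket to become writable again:

    #include <poll.h>

    static int wait_writable(int fd)
    {
            struct pollfd pfd = { .fd = fd, .events = POLLOUT };

            /* Blocks while tx_flow == FLOW_STOP; IrTTP restarting flow wakes us */
            if (poll(&pfd, 1, -1) < 0)
                    return -1;
            return (pfd.revents & (POLLERR | POLLHUP)) ? -1 : 0;
    }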
1826 | 1826 | ||
1827 | /* | 1827 | /* |
1828 | * Function irda_ioctl (sock, cmd, arg) | 1828 | * Function irda_ioctl (sock, cmd, arg) |
1829 | */ | 1829 | */ |
1830 | static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | 1830 | static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) |
1831 | { | 1831 | { |
1832 | struct sock *sk = sock->sk; | 1832 | struct sock *sk = sock->sk; |
1833 | int err; | 1833 | int err; |
1834 | 1834 | ||
1835 | IRDA_DEBUG(4, "%s(), cmd=%#x\n", __func__, cmd); | 1835 | IRDA_DEBUG(4, "%s(), cmd=%#x\n", __func__, cmd); |
1836 | 1836 | ||
1837 | err = -EINVAL; | 1837 | err = -EINVAL; |
1838 | switch (cmd) { | 1838 | switch (cmd) { |
1839 | case TIOCOUTQ: { | 1839 | case TIOCOUTQ: { |
1840 | long amount; | 1840 | long amount; |
1841 | 1841 | ||
1842 | amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); | 1842 | amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); |
1843 | if (amount < 0) | 1843 | if (amount < 0) |
1844 | amount = 0; | 1844 | amount = 0; |
1845 | err = put_user(amount, (unsigned int __user *)arg); | 1845 | err = put_user(amount, (unsigned int __user *)arg); |
1846 | break; | 1846 | break; |
1847 | } | 1847 | } |
1848 | 1848 | ||
1849 | case TIOCINQ: { | 1849 | case TIOCINQ: { |
1850 | struct sk_buff *skb; | 1850 | struct sk_buff *skb; |
1851 | long amount = 0L; | 1851 | long amount = 0L; |
1852 | /* These two are safe on a single CPU system as only user tasks fiddle here */ | 1852 | /* These two are safe on a single CPU system as only user tasks fiddle here */ |
1853 | if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) | 1853 | if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) |
1854 | amount = skb->len; | 1854 | amount = skb->len; |
1855 | err = put_user(amount, (unsigned int __user *)arg); | 1855 | err = put_user(amount, (unsigned int __user *)arg); |
1856 | break; | 1856 | break; |
1857 | } | 1857 | } |
1858 | 1858 | ||
1859 | case SIOCGSTAMP: | 1859 | case SIOCGSTAMP: |
1860 | if (sk != NULL) | 1860 | if (sk != NULL) |
1861 | err = sock_get_timestamp(sk, (struct timeval __user *)arg); | 1861 | err = sock_get_timestamp(sk, (struct timeval __user *)arg); |
1862 | break; | 1862 | break; |
1863 | 1863 | ||
1864 | case SIOCGIFADDR: | 1864 | case SIOCGIFADDR: |
1865 | case SIOCSIFADDR: | 1865 | case SIOCSIFADDR: |
1866 | case SIOCGIFDSTADDR: | 1866 | case SIOCGIFDSTADDR: |
1867 | case SIOCSIFDSTADDR: | 1867 | case SIOCSIFDSTADDR: |
1868 | case SIOCGIFBRDADDR: | 1868 | case SIOCGIFBRDADDR: |
1869 | case SIOCSIFBRDADDR: | 1869 | case SIOCSIFBRDADDR: |
1870 | case SIOCGIFNETMASK: | 1870 | case SIOCGIFNETMASK: |
1871 | case SIOCSIFNETMASK: | 1871 | case SIOCSIFNETMASK: |
1872 | case SIOCGIFMETRIC: | 1872 | case SIOCGIFMETRIC: |
1873 | case SIOCSIFMETRIC: | 1873 | case SIOCSIFMETRIC: |
1874 | break; | 1874 | break; |
1875 | default: | 1875 | default: |
1876 | IRDA_DEBUG(1, "%s(), doing device ioctl!\n", __func__); | 1876 | IRDA_DEBUG(1, "%s(), doing device ioctl!\n", __func__); |
1877 | err = -ENOIOCTLCMD; | 1877 | err = -ENOIOCTLCMD; |
1878 | } | 1878 | } |
1879 | 1879 | ||
1880 | return err; | 1880 | return err; |
1881 | } | 1881 | } |
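
Note the slightly unusual semantics of the two queue ioctls here: TIOCOUTQ reports the free space left in the send buffer (not the bytes queued), and TIOCINQ reports the length of only the first queued frame. A usage sketch:

    #include <stdio.h>
    #include <sys/ioctl.h>

    static void irda_queue_stats(int fd)
    {
            int out_room = 0, in_first = 0;

            /* Free space in the send buffer: sk_sndbuf minus bytes in flight */
            ioctl(fd, TIOCOUTQ, &out_room);
            /* Length of the first frame waiting in the receive queue, else 0 */
            ioctl(fd, TIOCINQ, &in_first);
            printf("sndbuf room = %d, next rx frame = %d\n", out_room, in_first);
    }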
1882 | 1882 | ||
1883 | #ifdef CONFIG_COMPAT | 1883 | #ifdef CONFIG_COMPAT |
1884 | /* | 1884 | /* |
1885 | * Function irda_compat_ioctl (sock, cmd, arg) | 1885 | * Function irda_compat_ioctl (sock, cmd, arg) |
1886 | */ | 1886 | */ |
1887 | static int irda_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | 1887 | static int irda_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) |
1888 | { | 1888 | { |
1889 | /* | 1889 | /* |
1890 | * All of IrDA's ioctls are standard ones. | 1890 | * All of IrDA's ioctls are standard ones. |
1891 | */ | 1891 | */ |
1892 | return -ENOIOCTLCMD; | 1892 | return -ENOIOCTLCMD; |
1893 | } | 1893 | } |
1894 | #endif | 1894 | #endif |
1895 | 1895 | ||
1896 | /* | 1896 | /* |
1897 | * Function irda_setsockopt (sock, level, optname, optval, optlen) | 1897 | * Function irda_setsockopt (sock, level, optname, optval, optlen) |
1898 | * | 1898 | * |
1899 | * Set some options for the socket | 1899 | * Set some options for the socket |
1900 | * | 1900 | * |
1901 | */ | 1901 | */ |
1902 | static int irda_setsockopt(struct socket *sock, int level, int optname, | 1902 | static int irda_setsockopt(struct socket *sock, int level, int optname, |
1903 | char __user *optval, unsigned int optlen) | 1903 | char __user *optval, unsigned int optlen) |
1904 | { | 1904 | { |
1905 | struct sock *sk = sock->sk; | 1905 | struct sock *sk = sock->sk; |
1906 | struct irda_sock *self = irda_sk(sk); | 1906 | struct irda_sock *self = irda_sk(sk); |
1907 | struct irda_ias_set *ias_opt; | 1907 | struct irda_ias_set *ias_opt; |
1908 | struct ias_object *ias_obj; | 1908 | struct ias_object *ias_obj; |
1909 | struct ias_attrib * ias_attr; /* Attribute in IAS object */ | 1909 | struct ias_attrib * ias_attr; /* Attribute in IAS object */ |
1910 | int opt, free_ias = 0, err = 0; | 1910 | int opt, free_ias = 0, err = 0; |
1911 | 1911 | ||
1912 | IRDA_DEBUG(2, "%s(%p)\n", __func__, self); | 1912 | IRDA_DEBUG(2, "%s(%p)\n", __func__, self); |
1913 | 1913 | ||
1914 | if (level != SOL_IRLMP) | 1914 | if (level != SOL_IRLMP) |
1915 | return -ENOPROTOOPT; | 1915 | return -ENOPROTOOPT; |
1916 | 1916 | ||
1917 | lock_sock(sk); | 1917 | lock_sock(sk); |
1918 | 1918 | ||
1919 | switch (optname) { | 1919 | switch (optname) { |
1920 | case IRLMP_IAS_SET: | 1920 | case IRLMP_IAS_SET: |
1921 | /* The user wants to add an attribute to an existing IAS object | 1921 | /* The user wants to add an attribute to an existing IAS object |
1922 | * (in the IAS database) or to create a new object with this | 1922 | * (in the IAS database) or to create a new object with this |
1923 | * attribute. | 1923 | * attribute. |
1924 | * We first query IAS to know whether the object exists, and then | 1924 | * We first query IAS to know whether the object exists, and then |
1925 | * create the right attribute... | 1925 | * create the right attribute... |
1926 | */ | 1926 | */ |
1927 | 1927 | ||
1928 | if (optlen != sizeof(struct irda_ias_set)) { | 1928 | if (optlen != sizeof(struct irda_ias_set)) { |
1929 | err = -EINVAL; | 1929 | err = -EINVAL; |
1930 | goto out; | 1930 | goto out; |
1931 | } | 1931 | } |
1932 | 1932 | ||
1933 | ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC); | 1933 | ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC); |
1934 | if (ias_opt == NULL) { | 1934 | if (ias_opt == NULL) { |
1935 | err = -ENOMEM; | 1935 | err = -ENOMEM; |
1936 | goto out; | 1936 | goto out; |
1937 | } | 1937 | } |
1938 | 1938 | ||
1939 | /* Copy query to the driver. */ | 1939 | /* Copy query to the driver. */ |
1940 | if (copy_from_user(ias_opt, optval, optlen)) { | 1940 | if (copy_from_user(ias_opt, optval, optlen)) { |
1941 | kfree(ias_opt); | 1941 | kfree(ias_opt); |
1942 | err = -EFAULT; | 1942 | err = -EFAULT; |
1943 | goto out; | 1943 | goto out; |
1944 | } | 1944 | } |
1945 | 1945 | ||
1946 | /* Find the object we target. | 1946 | /* Find the object we target. |
1947 | * If the user gives us an empty string, we use the object | 1947 | * If the user gives us an empty string, we use the object |
1948 | * associated with this socket. This works around | 1948 | * associated with this socket. This works around |
1949 | * duplicated class names - Jean II */ | 1949 | * duplicated class names - Jean II */ |
1950 | if(ias_opt->irda_class_name[0] == '\0') { | 1950 | if(ias_opt->irda_class_name[0] == '\0') { |
1951 | if(self->ias_obj == NULL) { | 1951 | if(self->ias_obj == NULL) { |
1952 | kfree(ias_opt); | 1952 | kfree(ias_opt); |
1953 | err = -EINVAL; | 1953 | err = -EINVAL; |
1954 | goto out; | 1954 | goto out; |
1955 | } | 1955 | } |
1956 | ias_obj = self->ias_obj; | 1956 | ias_obj = self->ias_obj; |
1957 | } else | 1957 | } else |
1958 | ias_obj = irias_find_object(ias_opt->irda_class_name); | 1958 | ias_obj = irias_find_object(ias_opt->irda_class_name); |
1959 | 1959 | ||
1960 | /* Only ROOT can mess with the global IAS database. | 1960 | /* Only ROOT can mess with the global IAS database. |
1961 | * Users can only add attributes to the object associated | 1961 | * Users can only add attributes to the object associated |
1962 | * with the socket they own - Jean II */ | 1962 | * with the socket they own - Jean II */ |
1963 | if((!capable(CAP_NET_ADMIN)) && | 1963 | if((!capable(CAP_NET_ADMIN)) && |
1964 | ((ias_obj == NULL) || (ias_obj != self->ias_obj))) { | 1964 | ((ias_obj == NULL) || (ias_obj != self->ias_obj))) { |
1965 | kfree(ias_opt); | 1965 | kfree(ias_opt); |
1966 | err = -EPERM; | 1966 | err = -EPERM; |
1967 | goto out; | 1967 | goto out; |
1968 | } | 1968 | } |
1969 | 1969 | ||
1970 | /* If the object doesn't exist, create it */ | 1970 | /* If the object doesn't exist, create it */ |
1971 | if(ias_obj == (struct ias_object *) NULL) { | 1971 | if(ias_obj == (struct ias_object *) NULL) { |
1972 | /* Create a new object */ | 1972 | /* Create a new object */ |
1973 | ias_obj = irias_new_object(ias_opt->irda_class_name, | 1973 | ias_obj = irias_new_object(ias_opt->irda_class_name, |
1974 | jiffies); | 1974 | jiffies); |
1975 | if (ias_obj == NULL) { | 1975 | if (ias_obj == NULL) { |
1976 | kfree(ias_opt); | 1976 | kfree(ias_opt); |
1977 | err = -ENOMEM; | 1977 | err = -ENOMEM; |
1978 | goto out; | 1978 | goto out; |
1979 | } | 1979 | } |
1980 | free_ias = 1; | 1980 | free_ias = 1; |
1981 | } | 1981 | } |
1982 | 1982 | ||
1983 | /* Do we have the attribute already? */ | 1983 | /* Do we have the attribute already? */ |
1984 | if(irias_find_attrib(ias_obj, ias_opt->irda_attrib_name)) { | 1984 | if(irias_find_attrib(ias_obj, ias_opt->irda_attrib_name)) { |
1985 | kfree(ias_opt); | 1985 | kfree(ias_opt); |
1986 | if (free_ias) { | 1986 | if (free_ias) { |
1987 | kfree(ias_obj->name); | 1987 | kfree(ias_obj->name); |
1988 | kfree(ias_obj); | 1988 | kfree(ias_obj); |
1989 | } | 1989 | } |
1990 | err = -EINVAL; | 1990 | err = -EINVAL; |
1991 | goto out; | 1991 | goto out; |
1992 | } | 1992 | } |
1993 | 1993 | ||
1994 | /* Look at the type */ | 1994 | /* Look at the type */ |
1995 | switch(ias_opt->irda_attrib_type) { | 1995 | switch(ias_opt->irda_attrib_type) { |
1996 | case IAS_INTEGER: | 1996 | case IAS_INTEGER: |
1997 | /* Add an integer attribute */ | 1997 | /* Add an integer attribute */ |
1998 | irias_add_integer_attrib( | 1998 | irias_add_integer_attrib( |
1999 | ias_obj, | 1999 | ias_obj, |
2000 | ias_opt->irda_attrib_name, | 2000 | ias_opt->irda_attrib_name, |
2001 | ias_opt->attribute.irda_attrib_int, | 2001 | ias_opt->attribute.irda_attrib_int, |
2002 | IAS_USER_ATTR); | 2002 | IAS_USER_ATTR); |
2003 | break; | 2003 | break; |
2004 | case IAS_OCT_SEQ: | 2004 | case IAS_OCT_SEQ: |
2005 | /* Check length */ | 2005 | /* Check length */ |
2006 | if(ias_opt->attribute.irda_attrib_octet_seq.len > | 2006 | if(ias_opt->attribute.irda_attrib_octet_seq.len > |
2007 | IAS_MAX_OCTET_STRING) { | 2007 | IAS_MAX_OCTET_STRING) { |
2008 | kfree(ias_opt); | 2008 | kfree(ias_opt); |
2009 | if (free_ias) { | 2009 | if (free_ias) { |
2010 | kfree(ias_obj->name); | 2010 | kfree(ias_obj->name); |
2011 | kfree(ias_obj); | 2011 | kfree(ias_obj); |
2012 | } | 2012 | } |
2013 | 2013 | ||
2014 | err = -EINVAL; | 2014 | err = -EINVAL; |
2015 | goto out; | 2015 | goto out; |
2016 | } | 2016 | } |
2017 | /* Add an octet sequence attribute */ | 2017 | /* Add an octet sequence attribute */ |
2018 | irias_add_octseq_attrib( | 2018 | irias_add_octseq_attrib( |
2019 | ias_obj, | 2019 | ias_obj, |
2020 | ias_opt->irda_attrib_name, | 2020 | ias_opt->irda_attrib_name, |
2021 | ias_opt->attribute.irda_attrib_octet_seq.octet_seq, | 2021 | ias_opt->attribute.irda_attrib_octet_seq.octet_seq, |
2022 | ias_opt->attribute.irda_attrib_octet_seq.len, | 2022 | ias_opt->attribute.irda_attrib_octet_seq.len, |
2023 | IAS_USER_ATTR); | 2023 | IAS_USER_ATTR); |
2024 | break; | 2024 | break; |
2025 | case IAS_STRING: | 2025 | case IAS_STRING: |
2026 | /* Should check charset & co */ | 2026 | /* Should check charset & co */ |
2027 | /* Check length */ | 2027 | /* Check length */ |
2028 | /* The length is encoded in a __u8, and | 2028 | /* The length is encoded in a __u8, and |
2029 | * IAS_MAX_STRING == 256, so there is no way | 2029 | * IAS_MAX_STRING == 256, so there is no way |
2030 | * userspace can pass us a string too large. | 2030 | * userspace can pass us a string too large. |
2031 | * Jean II */ | 2031 | * Jean II */ |
2032 | /* NULL terminate the string (avoid troubles) */ | 2032 | /* NULL terminate the string (avoid troubles) */ |
2033 | ias_opt->attribute.irda_attrib_string.string[ias_opt->attribute.irda_attrib_string.len] = '\0'; | 2033 | ias_opt->attribute.irda_attrib_string.string[ias_opt->attribute.irda_attrib_string.len] = '\0'; |
2034 | /* Add a string attribute */ | 2034 | /* Add a string attribute */ |
2035 | irias_add_string_attrib( | 2035 | irias_add_string_attrib( |
2036 | ias_obj, | 2036 | ias_obj, |
2037 | ias_opt->irda_attrib_name, | 2037 | ias_opt->irda_attrib_name, |
2038 | ias_opt->attribute.irda_attrib_string.string, | 2038 | ias_opt->attribute.irda_attrib_string.string, |
2039 | IAS_USER_ATTR); | 2039 | IAS_USER_ATTR); |
2040 | break; | 2040 | break; |
2041 | default: | 2041 | default: |
2042 | kfree(ias_opt); | 2042 | kfree(ias_opt); |
2043 | if (free_ias) { | 2043 | if (free_ias) { |
2044 | kfree(ias_obj->name); | 2044 | kfree(ias_obj->name); |
2045 | kfree(ias_obj); | 2045 | kfree(ias_obj); |
2046 | } | 2046 | } |
2047 | err = -EINVAL; | 2047 | err = -EINVAL; |
2048 | goto out; | 2048 | goto out; |
2049 | } | 2049 | } |
2050 | irias_insert_object(ias_obj); | 2050 | irias_insert_object(ias_obj); |
2051 | kfree(ias_opt); | 2051 | kfree(ias_opt); |
2052 | break; | 2052 | break; |
2053 | case IRLMP_IAS_DEL: | 2053 | case IRLMP_IAS_DEL: |
2054 | /* The user wants to delete an object from our local IAS | 2054 | /* The user wants to delete an object from our local IAS |
2055 | * database. We just need to query the IAS, check that the | 2055 | * database. We just need to query the IAS, check that the |
2056 | * object is not owned by the kernel, and delete it. | 2056 | * object is not owned by the kernel, and delete it. |
2057 | */ | 2057 | */ |
2058 | 2058 | ||
2059 | if (optlen != sizeof(struct irda_ias_set)) { | 2059 | if (optlen != sizeof(struct irda_ias_set)) { |
2060 | err = -EINVAL; | 2060 | err = -EINVAL; |
2061 | goto out; | 2061 | goto out; |
2062 | } | 2062 | } |
2063 | 2063 | ||
2064 | ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC); | 2064 | ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC); |
2065 | if (ias_opt == NULL) { | 2065 | if (ias_opt == NULL) { |
2066 | err = -ENOMEM; | 2066 | err = -ENOMEM; |
2067 | goto out; | 2067 | goto out; |
2068 | } | 2068 | } |
2069 | 2069 | ||
2070 | /* Copy query to the driver. */ | 2070 | /* Copy query to the driver. */ |
2071 | if (copy_from_user(ias_opt, optval, optlen)) { | 2071 | if (copy_from_user(ias_opt, optval, optlen)) { |
2072 | kfree(ias_opt); | 2072 | kfree(ias_opt); |
2073 | err = -EFAULT; | 2073 | err = -EFAULT; |
2074 | goto out; | 2074 | goto out; |
2075 | } | 2075 | } |
2076 | 2076 | ||
2077 | /* Find the object we target. | 2077 | /* Find the object we target. |
2078 | * If the user gives us an empty string, we use the object | 2078 | * If the user gives us an empty string, we use the object |
2079 | * associated with this socket. This works around | 2079 | * associated with this socket. This works around |
2080 | * duplicated class names - Jean II */ | 2080 | * duplicated class names - Jean II */ |
2081 | if(ias_opt->irda_class_name[0] == '\0') | 2081 | if(ias_opt->irda_class_name[0] == '\0') |
2082 | ias_obj = self->ias_obj; | 2082 | ias_obj = self->ias_obj; |
2083 | else | 2083 | else |
2084 | ias_obj = irias_find_object(ias_opt->irda_class_name); | 2084 | ias_obj = irias_find_object(ias_opt->irda_class_name); |
2085 | if(ias_obj == (struct ias_object *) NULL) { | 2085 | if(ias_obj == (struct ias_object *) NULL) { |
2086 | kfree(ias_opt); | 2086 | kfree(ias_opt); |
2087 | err = -EINVAL; | 2087 | err = -EINVAL; |
2088 | goto out; | 2088 | goto out; |
2089 | } | 2089 | } |
2090 | 2090 | ||
2091 | /* Only ROOT can mess with the global IAS database. | 2091 | /* Only ROOT can mess with the global IAS database. |
2092 | * Users can only delete attributes from the object associated | 2092 | * Users can only delete attributes from the object associated |
2093 | * with the socket they own - Jean II */ | 2093 | * with the socket they own - Jean II */ |
2094 | if((!capable(CAP_NET_ADMIN)) && | 2094 | if((!capable(CAP_NET_ADMIN)) && |
2095 | ((ias_obj == NULL) || (ias_obj != self->ias_obj))) { | 2095 | ((ias_obj == NULL) || (ias_obj != self->ias_obj))) { |
2096 | kfree(ias_opt); | 2096 | kfree(ias_opt); |
2097 | err = -EPERM; | 2097 | err = -EPERM; |
2098 | goto out; | 2098 | goto out; |
2099 | } | 2099 | } |
2100 | 2100 | ||
2101 | /* Find the attribute (in the object) we target */ | 2101 | /* Find the attribute (in the object) we target */ |
2102 | ias_attr = irias_find_attrib(ias_obj, | 2102 | ias_attr = irias_find_attrib(ias_obj, |
2103 | ias_opt->irda_attrib_name); | 2103 | ias_opt->irda_attrib_name); |
2104 | if(ias_attr == (struct ias_attrib *) NULL) { | 2104 | if(ias_attr == (struct ias_attrib *) NULL) { |
2105 | kfree(ias_opt); | 2105 | kfree(ias_opt); |
2106 | err = -EINVAL; | 2106 | err = -EINVAL; |
2107 | goto out; | 2107 | goto out; |
2108 | } | 2108 | } |
2109 | 2109 | ||
2110 | /* Check that user space owns the object */ | 2110 | /* Check that user space owns the object */ |
2111 | if(ias_attr->value->owner != IAS_USER_ATTR) { | 2111 | if(ias_attr->value->owner != IAS_USER_ATTR) { |
2112 | IRDA_DEBUG(1, "%s(), attempting to delete a kernel attribute\n", __func__); | 2112 | IRDA_DEBUG(1, "%s(), attempting to delete a kernel attribute\n", __func__); |
2113 | kfree(ias_opt); | 2113 | kfree(ias_opt); |
2114 | err = -EPERM; | 2114 | err = -EPERM; |
2115 | goto out; | 2115 | goto out; |
2116 | } | 2116 | } |
2117 | 2117 | ||
2118 | /* Remove the attribute (and maybe the object) */ | 2118 | /* Remove the attribute (and maybe the object) */ |
2119 | irias_delete_attrib(ias_obj, ias_attr, 1); | 2119 | irias_delete_attrib(ias_obj, ias_attr, 1); |
2120 | kfree(ias_opt); | 2120 | kfree(ias_opt); |
2121 | break; | 2121 | break; |
2122 | case IRLMP_MAX_SDU_SIZE: | 2122 | case IRLMP_MAX_SDU_SIZE: |
2123 | if (optlen < sizeof(int)) { | 2123 | if (optlen < sizeof(int)) { |
2124 | err = -EINVAL; | 2124 | err = -EINVAL; |
2125 | goto out; | 2125 | goto out; |
2126 | } | 2126 | } |
2127 | 2127 | ||
2128 | if (get_user(opt, (int __user *)optval)) { | 2128 | if (get_user(opt, (int __user *)optval)) { |
2129 | err = -EFAULT; | 2129 | err = -EFAULT; |
2130 | goto out; | 2130 | goto out; |
2131 | } | 2131 | } |
2132 | 2132 | ||
2133 | /* Only possible for a seqpacket service (TTP with SAR) */ | 2133 | /* Only possible for a seqpacket service (TTP with SAR) */ |
2134 | if (sk->sk_type != SOCK_SEQPACKET) { | 2134 | if (sk->sk_type != SOCK_SEQPACKET) { |
2135 | IRDA_DEBUG(2, "%s(), setting max_sdu_size = %d\n", | 2135 | IRDA_DEBUG(2, "%s(), setting max_sdu_size = %d\n", |
2136 | __func__, opt); | 2136 | __func__, opt); |
2137 | self->max_sdu_size_rx = opt; | 2137 | self->max_sdu_size_rx = opt; |
2138 | } else { | 2138 | } else { |
2139 | IRDA_WARNING("%s: not allowed to set MAXSDUSIZE for this socket type!\n", | 2139 | IRDA_WARNING("%s: not allowed to set MAXSDUSIZE for this socket type!\n", |
2140 | __func__); | 2140 | __func__); |
2141 | err = -ENOPROTOOPT; | 2141 | err = -ENOPROTOOPT; |
2142 | goto out; | 2142 | goto out; |
2143 | } | 2143 | } |
2144 | break; | 2144 | break; |
2145 | case IRLMP_HINTS_SET: | 2145 | case IRLMP_HINTS_SET: |
2146 | if (optlen < sizeof(int)) { | 2146 | if (optlen < sizeof(int)) { |
2147 | err = -EINVAL; | 2147 | err = -EINVAL; |
2148 | goto out; | 2148 | goto out; |
2149 | } | 2149 | } |
2150 | 2150 | ||
2151 | /* The input is really a (__u8 hints[2]), easier as an int */ | 2151 | /* The input is really a (__u8 hints[2]), easier as an int */ |
2152 | if (get_user(opt, (int __user *)optval)) { | 2152 | if (get_user(opt, (int __user *)optval)) { |
2153 | err = -EFAULT; | 2153 | err = -EFAULT; |
2154 | goto out; | 2154 | goto out; |
2155 | } | 2155 | } |
2156 | 2156 | ||
2157 | /* Unregister any old registration */ | 2157 | /* Unregister any old registration */ |
2158 | if (self->skey) | 2158 | if (self->skey) |
2159 | irlmp_unregister_service(self->skey); | 2159 | irlmp_unregister_service(self->skey); |
2160 | 2160 | ||
2161 | self->skey = irlmp_register_service((__u16) opt); | 2161 | self->skey = irlmp_register_service((__u16) opt); |
2162 | break; | 2162 | break; |
2163 | case IRLMP_HINT_MASK_SET: | 2163 | case IRLMP_HINT_MASK_SET: |
2164 | /* As opposed to the previous case which sets the hint bits | 2164 | /* As opposed to the previous case which sets the hint bits |
2165 | * that we advertise, this one sets the filter we use when | 2165 | * that we advertise, this one sets the filter we use when |
2166 | * making a discovery (nodes which don't match any hint | 2166 | * making a discovery (nodes which don't match any hint |
2167 | * bit in the mask are not reported). | 2167 | * bit in the mask are not reported). |
2168 | */ | 2168 | */ |
2169 | if (optlen < sizeof(int)) { | 2169 | if (optlen < sizeof(int)) { |
2170 | err = -EINVAL; | 2170 | err = -EINVAL; |
2171 | goto out; | 2171 | goto out; |
2172 | } | 2172 | } |
2173 | 2173 | ||
2174 | /* The input is really a (__u8 hints[2]), easier as an int */ | 2174 | /* The input is really a (__u8 hints[2]), easier as an int */ |
2175 | if (get_user(opt, (int __user *)optval)) { | 2175 | if (get_user(opt, (int __user *)optval)) { |
2176 | err = -EFAULT; | 2176 | err = -EFAULT; |
2177 | goto out; | 2177 | goto out; |
2178 | } | 2178 | } |
2179 | 2179 | ||
2180 | /* Set the new hint mask */ | 2180 | /* Set the new hint mask */ |
2181 | self->mask.word = (__u16) opt; | 2181 | self->mask.word = (__u16) opt; |
2182 | /* Mask out extension bits */ | 2182 | /* Mask out extension bits */ |
2183 | self->mask.word &= 0x7f7f; | 2183 | self->mask.word &= 0x7f7f; |
2184 | /* Check if no bits */ | 2184 | /* Check if no bits */ |
2185 | if(!self->mask.word) | 2185 | if(!self->mask.word) |
2186 | self->mask.word = 0xFFFF; | 2186 | self->mask.word = 0xFFFF; |
2187 | 2187 | ||
2188 | break; | 2188 | break; |
2189 | default: | 2189 | default: |
2190 | err = -ENOPROTOOPT; | 2190 | err = -ENOPROTOOPT; |
2191 | break; | 2191 | break; |
2192 | } | 2192 | } |
2193 | 2193 | ||
2194 | out: | 2194 | out: |
2195 | release_sock(sk); | 2195 | release_sock(sk); |
2196 | 2196 | ||
2197 | return err; | 2197 | return err; |
2198 | } | 2198 | } |
2199 | 2199 | ||
2200 | /* | 2200 | /* |
2201 | * Function irda_extract_ias_value(ias_opt, ias_value) | 2201 | * Function irda_extract_ias_value(ias_opt, ias_value) |
2202 | * | 2202 | * |
2203 | * Translate internal IAS value structure to the user space representation | 2203 | * Translate internal IAS value structure to the user space representation |
2204 | * | 2204 | * |
2205 | * The external representation of IAS values, as we exchange them with | 2205 | * The external representation of IAS values, as we exchange them with |
2206 | * user space programs, is quite different from the internal representation, | 2206 | * user space programs, is quite different from the internal representation, |
2207 | * as stored in the IAS database (because we need a flat structure for | 2207 | * as stored in the IAS database (because we need a flat structure for |
2208 | * crossing the kernel boundary). | 2208 | * crossing the kernel boundary). |
2209 | * This function transforms the latter into the former. We also check | 2209 | * This function transforms the latter into the former. We also check |
2210 | * that the value type is valid. | 2210 | * that the value type is valid. |
2211 | */ | 2211 | */ |
2212 | static int irda_extract_ias_value(struct irda_ias_set *ias_opt, | 2212 | static int irda_extract_ias_value(struct irda_ias_set *ias_opt, |
2213 | struct ias_value *ias_value) | 2213 | struct ias_value *ias_value) |
2214 | { | 2214 | { |
2215 | /* Look at the type */ | 2215 | /* Look at the type */ |
2216 | switch (ias_value->type) { | 2216 | switch (ias_value->type) { |
2217 | case IAS_INTEGER: | 2217 | case IAS_INTEGER: |
2218 | /* Copy the integer */ | 2218 | /* Copy the integer */ |
2219 | ias_opt->attribute.irda_attrib_int = ias_value->t.integer; | 2219 | ias_opt->attribute.irda_attrib_int = ias_value->t.integer; |
2220 | break; | 2220 | break; |
2221 | case IAS_OCT_SEQ: | 2221 | case IAS_OCT_SEQ: |
2222 | /* Set length */ | 2222 | /* Set length */ |
2223 | ias_opt->attribute.irda_attrib_octet_seq.len = ias_value->len; | 2223 | ias_opt->attribute.irda_attrib_octet_seq.len = ias_value->len; |
2224 | /* Copy over */ | 2224 | /* Copy over */ |
2225 | memcpy(ias_opt->attribute.irda_attrib_octet_seq.octet_seq, | 2225 | memcpy(ias_opt->attribute.irda_attrib_octet_seq.octet_seq, |
2226 | ias_value->t.oct_seq, ias_value->len); | 2226 | ias_value->t.oct_seq, ias_value->len); |
2227 | break; | 2227 | break; |
2228 | case IAS_STRING: | 2228 | case IAS_STRING: |
2229 | /* Set length */ | 2229 | /* Set length */ |
2230 | ias_opt->attribute.irda_attrib_string.len = ias_value->len; | 2230 | ias_opt->attribute.irda_attrib_string.len = ias_value->len; |
2231 | ias_opt->attribute.irda_attrib_string.charset = ias_value->charset; | 2231 | ias_opt->attribute.irda_attrib_string.charset = ias_value->charset; |
2232 | /* Copy over */ | 2232 | /* Copy over */ |
2233 | memcpy(ias_opt->attribute.irda_attrib_string.string, | 2233 | memcpy(ias_opt->attribute.irda_attrib_string.string, |
2234 | ias_value->t.string, ias_value->len); | 2234 | ias_value->t.string, ias_value->len); |
2235 | /* NULL terminate the string (avoid troubles) */ | 2235 | /* NULL terminate the string (avoid troubles) */ |
2236 | ias_opt->attribute.irda_attrib_string.string[ias_value->len] = '\0'; | 2236 | ias_opt->attribute.irda_attrib_string.string[ias_value->len] = '\0'; |
2237 | break; | 2237 | break; |
2238 | case IAS_MISSING: | 2238 | case IAS_MISSING: |
2239 | default: | 2239 | default: |
2240 | return -EINVAL; | 2240 | return -EINVAL; |
2241 | } | 2241 | } |
2242 | 2242 | ||
2243 | /* Copy type over */ | 2243 | /* Copy type over */ |
2244 | ias_opt->irda_attrib_type = ias_value->type; | 2244 | ias_opt->irda_attrib_type = ias_value->type; |
2245 | 2245 | ||
2246 | return 0; | 2246 | return 0; |
2247 | } | 2247 | } |
2248 | 2248 | ||
2249 | /* | 2249 | /* |
2250 | * Function irda_getsockopt (sock, level, optname, optval, optlen) | 2250 | * Function irda_getsockopt (sock, level, optname, optval, optlen) |
2251 | */ | 2251 | */ |
2252 | static int irda_getsockopt(struct socket *sock, int level, int optname, | 2252 | static int irda_getsockopt(struct socket *sock, int level, int optname, |
2253 | char __user *optval, int __user *optlen) | 2253 | char __user *optval, int __user *optlen) |
2254 | { | 2254 | { |
2255 | struct sock *sk = sock->sk; | 2255 | struct sock *sk = sock->sk; |
2256 | struct irda_sock *self = irda_sk(sk); | 2256 | struct irda_sock *self = irda_sk(sk); |
2257 | struct irda_device_list list; | 2257 | struct irda_device_list list; |
2258 | struct irda_device_info *discoveries; | 2258 | struct irda_device_info *discoveries; |
2259 | struct irda_ias_set * ias_opt; /* IAS get/query params */ | 2259 | struct irda_ias_set * ias_opt; /* IAS get/query params */ |
2260 | struct ias_object * ias_obj; /* Object in IAS */ | 2260 | struct ias_object * ias_obj; /* Object in IAS */ |
2261 | struct ias_attrib * ias_attr; /* Attribute in IAS object */ | 2261 | struct ias_attrib * ias_attr; /* Attribute in IAS object */ |
2262 | int daddr = DEV_ADDR_ANY; /* Dest address for IAS queries */ | 2262 | int daddr = DEV_ADDR_ANY; /* Dest address for IAS queries */ |
2263 | int val = 0; | 2263 | int val = 0; |
2264 | int len = 0; | 2264 | int len = 0; |
2265 | int err = 0; | 2265 | int err = 0; |
2266 | int offset, total; | 2266 | int offset, total; |
2267 | 2267 | ||
2268 | IRDA_DEBUG(2, "%s(%p)\n", __func__, self); | 2268 | IRDA_DEBUG(2, "%s(%p)\n", __func__, self); |
2269 | 2269 | ||
2270 | if (level != SOL_IRLMP) | 2270 | if (level != SOL_IRLMP) |
2271 | return -ENOPROTOOPT; | 2271 | return -ENOPROTOOPT; |
2272 | 2272 | ||
2273 | if (get_user(len, optlen)) | 2273 | if (get_user(len, optlen)) |
2274 | return -EFAULT; | 2274 | return -EFAULT; |
2275 | 2275 | ||
2276 | if(len < 0) | 2276 | if(len < 0) |
2277 | return -EINVAL; | 2277 | return -EINVAL; |
2278 | 2278 | ||
2279 | lock_sock(sk); | 2279 | lock_sock(sk); |
2280 | 2280 | ||
2281 | switch (optname) { | 2281 | switch (optname) { |
2282 | case IRLMP_ENUMDEVICES: | 2282 | case IRLMP_ENUMDEVICES: |
2283 | 2283 | ||
2284 | /* Offset to first device entry */ | 2284 | /* Offset to first device entry */ |
2285 | offset = sizeof(struct irda_device_list) - | 2285 | offset = sizeof(struct irda_device_list) - |
2286 | sizeof(struct irda_device_info); | 2286 | sizeof(struct irda_device_info); |
2287 | 2287 | ||
2288 | if (len < offset) { | 2288 | if (len < offset) { |
2289 | err = -EINVAL; | 2289 | err = -EINVAL; |
2290 | goto out; | 2290 | goto out; |
2291 | } | 2291 | } |
2292 | 2292 | ||
2293 | /* Ask lmp for the current discovery log */ | 2293 | /* Ask lmp for the current discovery log */ |
2294 | discoveries = irlmp_get_discoveries(&list.len, self->mask.word, | 2294 | discoveries = irlmp_get_discoveries(&list.len, self->mask.word, |
2295 | self->nslots); | 2295 | self->nslots); |
2296 | /* Check if we got some results */ | 2296 | /* Check if we got some results */ |
2297 | if (discoveries == NULL) { | 2297 | if (discoveries == NULL) { |
2298 | err = -EAGAIN; | 2298 | err = -EAGAIN; |
2299 | goto out; /* Didn't find any devices */ | 2299 | goto out; /* Didn't find any devices */ |
2300 | } | 2300 | } |
2301 | 2301 | ||
2302 | /* Write total list length back to client */ | 2302 | /* Write total list length back to client */ |
2303 | if (copy_to_user(optval, &list, offset)) | 2303 | if (copy_to_user(optval, &list, offset)) |
2304 | err = -EFAULT; | 2304 | err = -EFAULT; |
2305 | 2305 | ||
2306 | /* Copy the list itself - watch for overflow */ | 2306 | /* Copy the list itself - watch for overflow */ |
2307 | if (list.len > 2048) { | 2307 | if (list.len > 2048) { |
2308 | err = -EINVAL; | 2308 | err = -EINVAL; |
2309 | goto bed; | 2309 | goto bed; |
2310 | } | 2310 | } |
2311 | total = offset + (list.len * sizeof(struct irda_device_info)); | 2311 | total = offset + (list.len * sizeof(struct irda_device_info)); |
2312 | if (total > len) | 2312 | if (total > len) |
2313 | total = len; | 2313 | total = len; |
2314 | if (copy_to_user(optval+offset, discoveries, total - offset)) | 2314 | if (copy_to_user(optval+offset, discoveries, total - offset)) |
2315 | err = -EFAULT; | 2315 | err = -EFAULT; |
2316 | 2316 | ||
2317 | /* Write total number of bytes used back to client */ | 2317 | /* Write total number of bytes used back to client */ |
2318 | if (put_user(total, optlen)) | 2318 | if (put_user(total, optlen)) |
2319 | err = -EFAULT; | 2319 | err = -EFAULT; |
2320 | bed: | 2320 | bed: |
2321 | /* Free up our buffer */ | 2321 | /* Free up our buffer */ |
2322 | kfree(discoveries); | 2322 | kfree(discoveries); |
2323 | break; | 2323 | break; |
2324 | case IRLMP_MAX_SDU_SIZE: | 2324 | case IRLMP_MAX_SDU_SIZE: |
2325 | val = self->max_data_size; | 2325 | val = self->max_data_size; |
2326 | len = sizeof(int); | 2326 | len = sizeof(int); |
2327 | if (put_user(len, optlen)) { | 2327 | if (put_user(len, optlen)) { |
2328 | err = -EFAULT; | 2328 | err = -EFAULT; |
2329 | goto out; | 2329 | goto out; |
2330 | } | 2330 | } |
2331 | 2331 | ||
2332 | if (copy_to_user(optval, &val, len)) { | 2332 | if (copy_to_user(optval, &val, len)) { |
2333 | err = -EFAULT; | 2333 | err = -EFAULT; |
2334 | goto out; | 2334 | goto out; |
2335 | } | 2335 | } |
2336 | 2336 | ||
2337 | break; | 2337 | break; |
2338 | case IRLMP_IAS_GET: | 2338 | case IRLMP_IAS_GET: |
2339 | /* The user wants an object from our local IAS database. | 2339 | /* The user wants an object from our local IAS database. |
2340 | * We just need to query the IAS and return the value | 2340 | * We just need to query the IAS and return the value |
2341 | * that we found */ | 2341 | * that we found */ |
2342 | 2342 | ||
2343 | /* Check that the user has allocated the right space for us */ | 2343 | /* Check that the user has allocated the right space for us */ |
2344 | if (len != sizeof(struct irda_ias_set)) { | 2344 | if (len != sizeof(struct irda_ias_set)) { |
2345 | err = -EINVAL; | 2345 | err = -EINVAL; |
2346 | goto out; | 2346 | goto out; |
2347 | } | 2347 | } |
2348 | 2348 | ||
2349 | ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC); | 2349 | ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC); |
2350 | if (ias_opt == NULL) { | 2350 | if (ias_opt == NULL) { |
2351 | err = -ENOMEM; | 2351 | err = -ENOMEM; |
2352 | goto out; | 2352 | goto out; |
2353 | } | 2353 | } |
2354 | 2354 | ||
2355 | /* Copy query to the driver. */ | 2355 | /* Copy query to the driver. */ |
2356 | if (copy_from_user(ias_opt, optval, len)) { | 2356 | if (copy_from_user(ias_opt, optval, len)) { |
2357 | kfree(ias_opt); | 2357 | kfree(ias_opt); |
2358 | err = -EFAULT; | 2358 | err = -EFAULT; |
2359 | goto out; | 2359 | goto out; |
2360 | } | 2360 | } |
2361 | 2361 | ||
2362 | /* Find the object we target. | 2362 | /* Find the object we target. |
2363 | * If the user gives us an empty string, we use the object | 2363 | * If the user gives us an empty string, we use the object |
2364 | * associated with this socket. This works around | 2364 | * associated with this socket. This works around |
2365 | * duplicated class names - Jean II */ | 2365 | * duplicated class names - Jean II */ |
2366 | if(ias_opt->irda_class_name[0] == '\0') | 2366 | if(ias_opt->irda_class_name[0] == '\0') |
2367 | ias_obj = self->ias_obj; | 2367 | ias_obj = self->ias_obj; |
2368 | else | 2368 | else |
2369 | ias_obj = irias_find_object(ias_opt->irda_class_name); | 2369 | ias_obj = irias_find_object(ias_opt->irda_class_name); |
2370 | if(ias_obj == (struct ias_object *) NULL) { | 2370 | if(ias_obj == (struct ias_object *) NULL) { |
2371 | kfree(ias_opt); | 2371 | kfree(ias_opt); |
2372 | err = -EINVAL; | 2372 | err = -EINVAL; |
2373 | goto out; | 2373 | goto out; |
2374 | } | 2374 | } |
2375 | 2375 | ||
2376 | /* Find the attribute (in the object) we target */ | 2376 | /* Find the attribute (in the object) we target */ |
2377 | ias_attr = irias_find_attrib(ias_obj, | 2377 | ias_attr = irias_find_attrib(ias_obj, |
2378 | ias_opt->irda_attrib_name); | 2378 | ias_opt->irda_attrib_name); |
2379 | if(ias_attr == (struct ias_attrib *) NULL) { | 2379 | if(ias_attr == (struct ias_attrib *) NULL) { |
2380 | kfree(ias_opt); | 2380 | kfree(ias_opt); |
2381 | err = -EINVAL; | 2381 | err = -EINVAL; |
2382 | goto out; | 2382 | goto out; |
2383 | } | 2383 | } |
2384 | 2384 | ||
2385 | /* Translate from internal to user structure */ | 2385 | /* Translate from internal to user structure */ |
2386 | err = irda_extract_ias_value(ias_opt, ias_attr->value); | 2386 | err = irda_extract_ias_value(ias_opt, ias_attr->value); |
2387 | if(err) { | 2387 | if(err) { |
2388 | kfree(ias_opt); | 2388 | kfree(ias_opt); |
2389 | goto out; | 2389 | goto out; |
2390 | } | 2390 | } |
2391 | 2391 | ||
2392 | /* Copy reply to the user */ | 2392 | /* Copy reply to the user */ |
2393 | if (copy_to_user(optval, ias_opt, | 2393 | if (copy_to_user(optval, ias_opt, |
2394 | sizeof(struct irda_ias_set))) { | 2394 | sizeof(struct irda_ias_set))) { |
2395 | kfree(ias_opt); | 2395 | kfree(ias_opt); |
2396 | err = -EFAULT; | 2396 | err = -EFAULT; |
2397 | goto out; | 2397 | goto out; |
2398 | } | 2398 | } |
2399 | /* Note: no need to put optlen, we checked it */ | 2399 | /* Note: no need to put optlen, we checked it */ |
2400 | kfree(ias_opt); | 2400 | kfree(ias_opt); |
2401 | break; | 2401 | break; |
2402 | case IRLMP_IAS_QUERY: | 2402 | case IRLMP_IAS_QUERY: |
2403 | /* The user wants an object from a remote IAS database. | 2403 | /* The user wants an object from a remote IAS database. |
2404 | * We need to use IAP to query the remote database and | 2404 | * We need to use IAP to query the remote database and |
2405 | * then wait for the answer to come back. */ | 2405 | * then wait for the answer to come back. */ |
2406 | 2406 | ||
2407 | /* Check that the user has allocated the right space for us */ | 2407 | /* Check that the user has allocated the right space for us */ |
2408 | if (len != sizeof(struct irda_ias_set)) { | 2408 | if (len != sizeof(struct irda_ias_set)) { |
2409 | err = -EINVAL; | 2409 | err = -EINVAL; |
2410 | goto out; | 2410 | goto out; |
2411 | } | 2411 | } |
2412 | 2412 | ||
2413 | ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC); | 2413 | ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC); |
2414 | if (ias_opt == NULL) { | 2414 | if (ias_opt == NULL) { |
2415 | err = -ENOMEM; | 2415 | err = -ENOMEM; |
2416 | goto out; | 2416 | goto out; |
2417 | } | 2417 | } |
2418 | 2418 | ||
2419 | /* Copy query to the driver. */ | 2419 | /* Copy query to the driver. */ |
2420 | if (copy_from_user(ias_opt, optval, len)) { | 2420 | if (copy_from_user(ias_opt, optval, len)) { |
2421 | kfree(ias_opt); | 2421 | kfree(ias_opt); |
2422 | err = -EFAULT; | 2422 | err = -EFAULT; |
2423 | goto out; | 2423 | goto out; |
2424 | } | 2424 | } |
2425 | 2425 | ||
2426 | /* At this point, there are two cases... | 2426 | /* At this point, there are two cases... |
2427 | * 1) the socket is connected - that's the easy case, we | 2427 | * 1) the socket is connected - that's the easy case, we |
2428 | * just query the device we are connected to... | 2428 | * just query the device we are connected to... |
2429 | * 2) the socket is not connected - the user doesn't want | 2429 | * 2) the socket is not connected - the user doesn't want |
2430 | * to connect and/or may not have a valid service name | 2430 | * to connect and/or may not have a valid service name |
2431 | * (so can't create a fake connection). In this case, | 2431 | * (so can't create a fake connection). In this case, |
2432 | * we assume that the user passes us a valid destination | 2432 | * we assume that the user passes us a valid destination |
2433 | * address in the requesting structure... | 2433 | * address in the requesting structure... |
2434 | */ | 2434 | */ |
2435 | if(self->daddr != DEV_ADDR_ANY) { | 2435 | if(self->daddr != DEV_ADDR_ANY) { |
2436 | /* We are connected - reuse known daddr */ | 2436 | /* We are connected - reuse known daddr */ |
2437 | daddr = self->daddr; | 2437 | daddr = self->daddr; |
2438 | } else { | 2438 | } else { |
2439 | /* We are not connected, we must specify a valid | 2439 | /* We are not connected, we must specify a valid |
2440 | * destination address */ | 2440 | * destination address */ |
2441 | daddr = ias_opt->daddr; | 2441 | daddr = ias_opt->daddr; |
2442 | if((!daddr) || (daddr == DEV_ADDR_ANY)) { | 2442 | if((!daddr) || (daddr == DEV_ADDR_ANY)) { |
2443 | kfree(ias_opt); | 2443 | kfree(ias_opt); |
2444 | err = -EINVAL; | 2444 | err = -EINVAL; |
2445 | goto out; | 2445 | goto out; |
2446 | } | 2446 | } |
2447 | } | 2447 | } |
2448 | 2448 | ||
2449 | /* Check that we can proceed with IAP */ | 2449 | /* Check that we can proceed with IAP */ |
2450 | if (self->iriap) { | 2450 | if (self->iriap) { |
2451 | IRDA_WARNING("%s: busy with a previous query\n", | 2451 | IRDA_WARNING("%s: busy with a previous query\n", |
2452 | __func__); | 2452 | __func__); |
2453 | kfree(ias_opt); | 2453 | kfree(ias_opt); |
2454 | err = -EBUSY; | 2454 | err = -EBUSY; |
2455 | goto out; | 2455 | goto out; |
2456 | } | 2456 | } |
2457 | 2457 | ||
2458 | self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self, | 2458 | self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self, |
2459 | irda_getvalue_confirm); | 2459 | irda_getvalue_confirm); |
2460 | 2460 | ||
2461 | if (self->iriap == NULL) { | 2461 | if (self->iriap == NULL) { |
2462 | kfree(ias_opt); | 2462 | kfree(ias_opt); |
2463 | err = -ENOMEM; | 2463 | err = -ENOMEM; |
2464 | goto out; | 2464 | goto out; |
2465 | } | 2465 | } |
2466 | 2466 | ||
2467 | /* Treat unexpected wakeup as disconnect */ | 2467 | /* Treat unexpected wakeup as disconnect */ |
2468 | self->errno = -EHOSTUNREACH; | 2468 | self->errno = -EHOSTUNREACH; |
2469 | 2469 | ||
2470 | /* Query remote LM-IAS */ | 2470 | /* Query remote LM-IAS */ |
2471 | iriap_getvaluebyclass_request(self->iriap, | 2471 | iriap_getvaluebyclass_request(self->iriap, |
2472 | self->saddr, daddr, | 2472 | self->saddr, daddr, |
2473 | ias_opt->irda_class_name, | 2473 | ias_opt->irda_class_name, |
2474 | ias_opt->irda_attrib_name); | 2474 | ias_opt->irda_attrib_name); |
2475 | 2475 | ||
2476 | /* Wait for answer, if not yet finished (or failed) */ | 2476 | /* Wait for answer, if not yet finished (or failed) */ |
2477 | if (wait_event_interruptible(self->query_wait, | 2477 | if (wait_event_interruptible(self->query_wait, |
2478 | (self->iriap == NULL))) { | 2478 | (self->iriap == NULL))) { |
2479 | /* pending request uses a copy of the ias_opt content, | 2479 | /* pending request uses a copy of the ias_opt content, |
2480 | * so we can free it regardless! */ | 2480 | * so we can free it regardless! */ |
2481 | kfree(ias_opt); | 2481 | kfree(ias_opt); |
2482 | /* Treat signals as disconnect */ | 2482 | /* Treat signals as disconnect */ |
2483 | err = -EHOSTUNREACH; | 2483 | err = -EHOSTUNREACH; |
2484 | goto out; | 2484 | goto out; |
2485 | } | 2485 | } |
2486 | 2486 | ||
2487 | /* Check what happened */ | 2487 | /* Check what happened */ |
2488 | if (self->errno) | 2488 | if (self->errno) |
2489 | { | 2489 | { |
2490 | kfree(ias_opt); | 2490 | kfree(ias_opt); |
2491 | /* Requested object/attribute doesn't exist */ | 2491 | /* Requested object/attribute doesn't exist */ |
2492 | if((self->errno == IAS_CLASS_UNKNOWN) || | 2492 | if((self->errno == IAS_CLASS_UNKNOWN) || |
2493 | (self->errno == IAS_ATTRIB_UNKNOWN)) | 2493 | (self->errno == IAS_ATTRIB_UNKNOWN)) |
2494 | err = -EADDRNOTAVAIL; | 2494 | err = -EADDRNOTAVAIL; |
2495 | else | 2495 | else |
2496 | err = -EHOSTUNREACH; | 2496 | err = -EHOSTUNREACH; |
2497 | 2497 | ||
2498 | goto out; | 2498 | goto out; |
2499 | } | 2499 | } |
2500 | 2500 | ||
2501 | /* Translate from internal to user structure */ | 2501 | /* Translate from internal to user structure */ |
2502 | err = irda_extract_ias_value(ias_opt, self->ias_result); | 2502 | err = irda_extract_ias_value(ias_opt, self->ias_result); |
2503 | if (self->ias_result) | 2503 | if (self->ias_result) |
2504 | irias_delete_value(self->ias_result); | 2504 | irias_delete_value(self->ias_result); |
2505 | if (err) { | 2505 | if (err) { |
2506 | kfree(ias_opt); | 2506 | kfree(ias_opt); |
2507 | goto out; | 2507 | goto out; |
2508 | } | 2508 | } |
2509 | 2509 | ||
2510 | /* Copy reply to the user */ | 2510 | /* Copy reply to the user */ |
2511 | if (copy_to_user(optval, ias_opt, | 2511 | if (copy_to_user(optval, ias_opt, |
2512 | sizeof(struct irda_ias_set))) { | 2512 | sizeof(struct irda_ias_set))) { |
2513 | kfree(ias_opt); | 2513 | kfree(ias_opt); |
2514 | err = -EFAULT; | 2514 | err = -EFAULT; |
2515 | goto out; | 2515 | goto out; |
2516 | } | 2516 | } |
2517 | /* Note: no need to put optlen, we checked it */ | 2517 | /* Note: no need to put optlen, we checked it */ |
2518 | kfree(ias_opt); | 2518 | kfree(ias_opt); |
2519 | break; | 2519 | break; |
2520 | case IRLMP_WAITDEVICE: | 2520 | case IRLMP_WAITDEVICE: |
2521 | /* This function is just another way of seeing life ;-) | 2521 | /* This function is just another way of seeing life ;-) |
2522 | * IRLMP_ENUMDEVICES assumes that you have a static network, | 2522 | * IRLMP_ENUMDEVICES assumes that you have a static network, |
2523 | * and that you just want to pick one of the devices present. | 2523 | * and that you just want to pick one of the devices present. |
2524 | * On the other hand, in here we assume that no device is | 2524 | * On the other hand, in here we assume that no device is |
2525 | * present and that at some point in the future a device will | 2525 | * present and that at some point in the future a device will |
2526 | * come into range. When this device arrives, we just wake | 2526 | * come into range. When this device arrives, we just wake |
2527 | * up the caller, so that they have time to connect to it before | 2527 | * up the caller, so that they have time to connect to it before |
2528 | * the device goes away... | 2528 | * the device goes away... |
2529 | * Note: once the node has been discovered for more than a | 2529 | * Note: once the node has been discovered for more than a |
2530 | * few seconds, it won't trigger this function, unless it | 2530 | * few seconds, it won't trigger this function, unless it |
2531 | * goes away and comes back, or changes its hint bits (so we | 2531 | * goes away and comes back, or changes its hint bits (so we |
2532 | * might call it IRLMP_WAITNEWDEVICE). | 2532 | * might call it IRLMP_WAITNEWDEVICE). |
2533 | */ | 2533 | */ |
2534 | 2534 | ||
2535 | /* Check that the user is passing us an int */ | 2535 | /* Check that the user is passing us an int */ |
2536 | if (len != sizeof(int)) { | 2536 | if (len != sizeof(int)) { |
2537 | err = -EINVAL; | 2537 | err = -EINVAL; |
2538 | goto out; | 2538 | goto out; |
2539 | } | 2539 | } |
2540 | /* Get timeout in ms (max time we block the caller) */ | 2540 | /* Get timeout in ms (max time we block the caller) */ |
2541 | if (get_user(val, (int __user *)optval)) { | 2541 | if (get_user(val, (int __user *)optval)) { |
2542 | err = -EFAULT; | 2542 | err = -EFAULT; |
2543 | goto out; | 2543 | goto out; |
2544 | } | 2544 | } |
2545 | 2545 | ||
2546 | /* Tell IrLMP we want to be notified */ | 2546 | /* Tell IrLMP we want to be notified */ |
2547 | irlmp_update_client(self->ckey, self->mask.word, | 2547 | irlmp_update_client(self->ckey, self->mask.word, |
2548 | irda_selective_discovery_indication, | 2548 | irda_selective_discovery_indication, |
2549 | NULL, (void *) self); | 2549 | NULL, (void *) self); |
2550 | 2550 | ||
2551 | /* Do some discovery (and also return cached results) */ | 2551 | /* Do some discovery (and also return cached results) */ |
2552 | irlmp_discovery_request(self->nslots); | 2552 | irlmp_discovery_request(self->nslots); |
2553 | 2553 | ||
2554 | /* Wait until a node is discovered */ | 2554 | /* Wait until a node is discovered */ |
2555 | if (!self->cachedaddr) { | 2555 | if (!self->cachedaddr) { |
2556 | IRDA_DEBUG(1, "%s(), nothing discovered yet, going to sleep...\n", __func__); | 2556 | IRDA_DEBUG(1, "%s(), nothing discovered yet, going to sleep...\n", __func__); |
2557 | 2557 | ||
2558 | /* Set watchdog timer to expire in <val> ms. */ | 2558 | /* Set watchdog timer to expire in <val> ms. */ |
2559 | self->errno = 0; | 2559 | self->errno = 0; |
2560 | setup_timer(&self->watchdog, irda_discovery_timeout, | 2560 | setup_timer(&self->watchdog, irda_discovery_timeout, |
2561 | (unsigned long)self); | 2561 | (unsigned long)self); |
2562 | mod_timer(&self->watchdog, | 2562 | mod_timer(&self->watchdog, |
2563 | jiffies + msecs_to_jiffies(val)); | 2563 | jiffies + msecs_to_jiffies(val)); |
2564 | 2564 | ||
2565 | /* Wait for IR-LMP to call us back */ | 2565 | /* Wait for IR-LMP to call us back */ |
2566 | __wait_event_interruptible(self->query_wait, | 2566 | err = __wait_event_interruptible(self->query_wait, |
2567 | (self->cachedaddr != 0 || self->errno == -ETIME), | 2567 | (self->cachedaddr != 0 || self->errno == -ETIME)); |
2568 | err); | ||
2569 | 2568 | ||
2570 | /* If watchdog is still activated, kill it! */ | 2569 | /* If watchdog is still activated, kill it! */ |
2571 | del_timer(&(self->watchdog)); | 2570 | del_timer(&(self->watchdog)); |
2572 | 2571 | ||
2573 | IRDA_DEBUG(1, "%s(), ...waking up !\n", __func__); | 2572 | IRDA_DEBUG(1, "%s(), ...waking up !\n", __func__); |
2574 | 2573 | ||
2575 | if (err != 0) | 2574 | if (err != 0) |
2576 | goto out; | 2575 | goto out; |
2577 | } | 2576 | } |
2578 | else | 2577 | else |
2579 | IRDA_DEBUG(1, "%s(), found immediately !\n", | 2578 | IRDA_DEBUG(1, "%s(), found immediately !\n", |
2580 | __func__); | 2579 | __func__); |
2581 | 2580 | ||
2582 | /* Tell IrLMP that we have been notified */ | 2581 | /* Tell IrLMP that we have been notified */ |
2583 | irlmp_update_client(self->ckey, self->mask.word, | 2582 | irlmp_update_client(self->ckey, self->mask.word, |
2584 | NULL, NULL, NULL); | 2583 | NULL, NULL, NULL); |
2585 | 2584 | ||
2586 | /* Check if we got some results */ | 2585 | /* Check if we got some results */ |
2587 | if (!self->cachedaddr) { | 2586 | if (!self->cachedaddr) { |
2588 | err = -EAGAIN; /* Didn't find any devices */ | 2587 | err = -EAGAIN; /* Didn't find any devices */ |
2589 | goto out; | 2588 | goto out; |
2590 | } | 2589 | } |
2591 | daddr = self->cachedaddr; | 2590 | daddr = self->cachedaddr; |
2592 | /* Cleanup */ | 2591 | /* Cleanup */ |
2593 | self->cachedaddr = 0; | 2592 | self->cachedaddr = 0; |
2594 | 2593 | ||
2595 | /* We return the daddr of the device that triggered the | 2594 | /* We return the daddr of the device that triggered the |
2596 | * wakeup. As irlmp passes us only the new devices, we | 2595 | * wakeup. As irlmp passes us only the new devices, we |
2597 | * are sure that it's not an old device. | 2596 | * are sure that it's not an old device. |
2598 | * If the user wants more details, they should query | 2597 | * If the user wants more details, they should query |
2599 | * the whole discovery log and pick one device... | 2598 | * the whole discovery log and pick one device... |
2600 | */ | 2599 | */ |
2601 | if (put_user(daddr, (int __user *)optval)) { | 2600 | if (put_user(daddr, (int __user *)optval)) { |
2602 | err = -EFAULT; | 2601 | err = -EFAULT; |
2603 | goto out; | 2602 | goto out; |
2604 | } | 2603 | } |
2605 | 2604 | ||
2606 | break; | 2605 | break; |
2607 | default: | 2606 | default: |
2608 | err = -ENOPROTOOPT; | 2607 | err = -ENOPROTOOPT; |
2609 | } | 2608 | } |
2610 | 2609 | ||
2611 | out: | 2610 | out: |
2612 | 2611 | ||
2613 | release_sock(sk); | 2612 | release_sock(sk); |
2614 | 2613 | ||
2615 | return err; | 2614 | return err; |
2616 | } | 2615 | } |
2617 | 2616 | ||
2618 | static const struct net_proto_family irda_family_ops = { | 2617 | static const struct net_proto_family irda_family_ops = { |
2619 | .family = PF_IRDA, | 2618 | .family = PF_IRDA, |
2620 | .create = irda_create, | 2619 | .create = irda_create, |
2621 | .owner = THIS_MODULE, | 2620 | .owner = THIS_MODULE, |
2622 | }; | 2621 | }; |
2623 | 2622 | ||
2624 | static const struct proto_ops irda_stream_ops = { | 2623 | static const struct proto_ops irda_stream_ops = { |
2625 | .family = PF_IRDA, | 2624 | .family = PF_IRDA, |
2626 | .owner = THIS_MODULE, | 2625 | .owner = THIS_MODULE, |
2627 | .release = irda_release, | 2626 | .release = irda_release, |
2628 | .bind = irda_bind, | 2627 | .bind = irda_bind, |
2629 | .connect = irda_connect, | 2628 | .connect = irda_connect, |
2630 | .socketpair = sock_no_socketpair, | 2629 | .socketpair = sock_no_socketpair, |
2631 | .accept = irda_accept, | 2630 | .accept = irda_accept, |
2632 | .getname = irda_getname, | 2631 | .getname = irda_getname, |
2633 | .poll = irda_poll, | 2632 | .poll = irda_poll, |
2634 | .ioctl = irda_ioctl, | 2633 | .ioctl = irda_ioctl, |
2635 | #ifdef CONFIG_COMPAT | 2634 | #ifdef CONFIG_COMPAT |
2636 | .compat_ioctl = irda_compat_ioctl, | 2635 | .compat_ioctl = irda_compat_ioctl, |
2637 | #endif | 2636 | #endif |
2638 | .listen = irda_listen, | 2637 | .listen = irda_listen, |
2639 | .shutdown = irda_shutdown, | 2638 | .shutdown = irda_shutdown, |
2640 | .setsockopt = irda_setsockopt, | 2639 | .setsockopt = irda_setsockopt, |
2641 | .getsockopt = irda_getsockopt, | 2640 | .getsockopt = irda_getsockopt, |
2642 | .sendmsg = irda_sendmsg, | 2641 | .sendmsg = irda_sendmsg, |
2643 | .recvmsg = irda_recvmsg_stream, | 2642 | .recvmsg = irda_recvmsg_stream, |
2644 | .mmap = sock_no_mmap, | 2643 | .mmap = sock_no_mmap, |
2645 | .sendpage = sock_no_sendpage, | 2644 | .sendpage = sock_no_sendpage, |
2646 | }; | 2645 | }; |
2647 | 2646 | ||
2648 | static const struct proto_ops irda_seqpacket_ops = { | 2647 | static const struct proto_ops irda_seqpacket_ops = { |
2649 | .family = PF_IRDA, | 2648 | .family = PF_IRDA, |
2650 | .owner = THIS_MODULE, | 2649 | .owner = THIS_MODULE, |
2651 | .release = irda_release, | 2650 | .release = irda_release, |
2652 | .bind = irda_bind, | 2651 | .bind = irda_bind, |
2653 | .connect = irda_connect, | 2652 | .connect = irda_connect, |
2654 | .socketpair = sock_no_socketpair, | 2653 | .socketpair = sock_no_socketpair, |
2655 | .accept = irda_accept, | 2654 | .accept = irda_accept, |
2656 | .getname = irda_getname, | 2655 | .getname = irda_getname, |
2657 | .poll = datagram_poll, | 2656 | .poll = datagram_poll, |
2658 | .ioctl = irda_ioctl, | 2657 | .ioctl = irda_ioctl, |
2659 | #ifdef CONFIG_COMPAT | 2658 | #ifdef CONFIG_COMPAT |
2660 | .compat_ioctl = irda_compat_ioctl, | 2659 | .compat_ioctl = irda_compat_ioctl, |
2661 | #endif | 2660 | #endif |
2662 | .listen = irda_listen, | 2661 | .listen = irda_listen, |
2663 | .shutdown = irda_shutdown, | 2662 | .shutdown = irda_shutdown, |
2664 | .setsockopt = irda_setsockopt, | 2663 | .setsockopt = irda_setsockopt, |
2665 | .getsockopt = irda_getsockopt, | 2664 | .getsockopt = irda_getsockopt, |
2666 | .sendmsg = irda_sendmsg, | 2665 | .sendmsg = irda_sendmsg, |
2667 | .recvmsg = irda_recvmsg_dgram, | 2666 | .recvmsg = irda_recvmsg_dgram, |
2668 | .mmap = sock_no_mmap, | 2667 | .mmap = sock_no_mmap, |
2669 | .sendpage = sock_no_sendpage, | 2668 | .sendpage = sock_no_sendpage, |
2670 | }; | 2669 | }; |
2671 | 2670 | ||
2672 | static const struct proto_ops irda_dgram_ops = { | 2671 | static const struct proto_ops irda_dgram_ops = { |
2673 | .family = PF_IRDA, | 2672 | .family = PF_IRDA, |
2674 | .owner = THIS_MODULE, | 2673 | .owner = THIS_MODULE, |
2675 | .release = irda_release, | 2674 | .release = irda_release, |
2676 | .bind = irda_bind, | 2675 | .bind = irda_bind, |
2677 | .connect = irda_connect, | 2676 | .connect = irda_connect, |
2678 | .socketpair = sock_no_socketpair, | 2677 | .socketpair = sock_no_socketpair, |
2679 | .accept = irda_accept, | 2678 | .accept = irda_accept, |
2680 | .getname = irda_getname, | 2679 | .getname = irda_getname, |
2681 | .poll = datagram_poll, | 2680 | .poll = datagram_poll, |
2682 | .ioctl = irda_ioctl, | 2681 | .ioctl = irda_ioctl, |
2683 | #ifdef CONFIG_COMPAT | 2682 | #ifdef CONFIG_COMPAT |
2684 | .compat_ioctl = irda_compat_ioctl, | 2683 | .compat_ioctl = irda_compat_ioctl, |
2685 | #endif | 2684 | #endif |
2686 | .listen = irda_listen, | 2685 | .listen = irda_listen, |
2687 | .shutdown = irda_shutdown, | 2686 | .shutdown = irda_shutdown, |
2688 | .setsockopt = irda_setsockopt, | 2687 | .setsockopt = irda_setsockopt, |
2689 | .getsockopt = irda_getsockopt, | 2688 | .getsockopt = irda_getsockopt, |
2690 | .sendmsg = irda_sendmsg_dgram, | 2689 | .sendmsg = irda_sendmsg_dgram, |
2691 | .recvmsg = irda_recvmsg_dgram, | 2690 | .recvmsg = irda_recvmsg_dgram, |
2692 | .mmap = sock_no_mmap, | 2691 | .mmap = sock_no_mmap, |
2693 | .sendpage = sock_no_sendpage, | 2692 | .sendpage = sock_no_sendpage, |
2694 | }; | 2693 | }; |
2695 | 2694 | ||
2696 | #ifdef CONFIG_IRDA_ULTRA | 2695 | #ifdef CONFIG_IRDA_ULTRA |
2697 | static const struct proto_ops irda_ultra_ops = { | 2696 | static const struct proto_ops irda_ultra_ops = { |
2698 | .family = PF_IRDA, | 2697 | .family = PF_IRDA, |
2699 | .owner = THIS_MODULE, | 2698 | .owner = THIS_MODULE, |
2700 | .release = irda_release, | 2699 | .release = irda_release, |
2701 | .bind = irda_bind, | 2700 | .bind = irda_bind, |
2702 | .connect = sock_no_connect, | 2701 | .connect = sock_no_connect, |
2703 | .socketpair = sock_no_socketpair, | 2702 | .socketpair = sock_no_socketpair, |
2704 | .accept = sock_no_accept, | 2703 | .accept = sock_no_accept, |
2705 | .getname = irda_getname, | 2704 | .getname = irda_getname, |
2706 | .poll = datagram_poll, | 2705 | .poll = datagram_poll, |
2707 | .ioctl = irda_ioctl, | 2706 | .ioctl = irda_ioctl, |
2708 | #ifdef CONFIG_COMPAT | 2707 | #ifdef CONFIG_COMPAT |
2709 | .compat_ioctl = irda_compat_ioctl, | 2708 | .compat_ioctl = irda_compat_ioctl, |
2710 | #endif | 2709 | #endif |
2711 | .listen = sock_no_listen, | 2710 | .listen = sock_no_listen, |
2712 | .shutdown = irda_shutdown, | 2711 | .shutdown = irda_shutdown, |
2713 | .setsockopt = irda_setsockopt, | 2712 | .setsockopt = irda_setsockopt, |
2714 | .getsockopt = irda_getsockopt, | 2713 | .getsockopt = irda_getsockopt, |
2715 | .sendmsg = irda_sendmsg_ultra, | 2714 | .sendmsg = irda_sendmsg_ultra, |
2716 | .recvmsg = irda_recvmsg_dgram, | 2715 | .recvmsg = irda_recvmsg_dgram, |
2717 | .mmap = sock_no_mmap, | 2716 | .mmap = sock_no_mmap, |
2718 | .sendpage = sock_no_sendpage, | 2717 | .sendpage = sock_no_sendpage, |
2719 | }; | 2718 | }; |
2720 | #endif /* CONFIG_IRDA_ULTRA */ | 2719 | #endif /* CONFIG_IRDA_ULTRA */ |
2721 | 2720 | ||
2722 | /* | 2721 | /* |
2723 | * Function irsock_init (pro) | 2722 | * Function irsock_init (pro) |
2724 | * | 2723 | * |
2725 | * Initialize IrDA protocol | 2724 | * Initialize IrDA protocol |
2726 | * | 2725 | * |
2727 | */ | 2726 | */ |
2728 | int __init irsock_init(void) | 2727 | int __init irsock_init(void) |
2729 | { | 2728 | { |
2730 | int rc = proto_register(&irda_proto, 0); | 2729 | int rc = proto_register(&irda_proto, 0); |
2731 | 2730 | ||
2732 | if (rc == 0) | 2731 | if (rc == 0) |
2733 | rc = sock_register(&irda_family_ops); | 2732 | rc = sock_register(&irda_family_ops); |
2734 | 2733 | ||
2735 | return rc; | 2734 | return rc; |
2736 | } | 2735 | } |
2737 | 2736 | ||
2738 | /* | 2737 | /* |
2739 | * Function irsock_cleanup (void) | 2738 | * Function irsock_cleanup (void) |
2740 | * | 2739 | * |
2741 | * Remove IrDA protocol | 2740 | * Remove IrDA protocol |
2742 | * | 2741 | * |
2743 | */ | 2742 | */ |
2744 | void irsock_cleanup(void) | 2743 | void irsock_cleanup(void) |
2745 | { | 2744 | { |
2746 | sock_unregister(PF_IRDA); | 2745 | sock_unregister(PF_IRDA); |
2747 | proto_unregister(&irda_proto); | 2746 | proto_unregister(&irda_proto); |
2748 | } | 2747 | } |
2749 | 2748 |
net/netfilter/ipvs/ip_vs_sync.c
1 | /* | 1 | /* |
2 | * IPVS An implementation of the IP virtual server support for the | 2 | * IPVS An implementation of the IP virtual server support for the |
3 | * LINUX operating system. IPVS is now implemented as a module | 3 | * LINUX operating system. IPVS is now implemented as a module |
4 | * over the NetFilter framework. IPVS can be used to build a | 4 | * over the NetFilter framework. IPVS can be used to build a |
5 | * high-performance and highly available server based on a | 5 | * high-performance and highly available server based on a |
6 | * cluster of servers. | 6 | * cluster of servers. |
7 | * | 7 | * |
8 | * Version 1 is capable of handling both version 0 and 1 messages. | 8 | * Version 1 is capable of handling both version 0 and 1 messages. |
9 | * Version 0 is the plain old format. | 9 | * Version 0 is the plain old format. |
10 | * Note: Version 0 receivers will just drop Ver 1 messages. | 10 | * Note: Version 0 receivers will just drop Ver 1 messages. |
11 | * Version 1 is capable of handling IPv6, persistence data, | 11 | * Version 1 is capable of handling IPv6, persistence data, |
12 | * time-outs, and firewall marks. | 12 | * time-outs, and firewall marks. |
13 | * In ver. 1, "ip_vs_sync_conn_options" will be sent in network order. | 13 | * In ver. 1, "ip_vs_sync_conn_options" will be sent in network order. |
14 | * Ver. 0 can be turned on by sysctl -w net.ipv4.vs.sync_version=0 | 14 | * Ver. 0 can be turned on by sysctl -w net.ipv4.vs.sync_version=0 |
15 | * | 15 | * |
16 | * Definitions: Message: a complete datagram | 16 | * Definitions: Message: a complete datagram |
17 | * Sync_conn: a part of a Message | 17 | * Sync_conn: a part of a Message |
18 | * Param Data: an option to a Sync_conn. | 18 | * Param Data: an option to a Sync_conn. |
19 | * | 19 | * |
20 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> | 20 | * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> |
21 | * | 21 | * |
22 | * ip_vs_sync: sync connection info from master load balancer to backups | 22 | * ip_vs_sync: sync connection info from master load balancer to backups |
23 | * through multicast | 23 | * through multicast |
24 | * | 24 | * |
25 | * Changes: | 25 | * Changes: |
26 | * Alexandre Cassen : Added master & backup support at a time. | 26 | * Alexandre Cassen : Added master & backup support at a time. |
27 | * Alexandre Cassen : Added SyncID support for incoming sync | 27 | * Alexandre Cassen : Added SyncID support for incoming sync |
28 | * messages filtering. | 28 | * messages filtering. |
29 | * Justin Ossevoort : Fix endian problem on sync message size. | 29 | * Justin Ossevoort : Fix endian problem on sync message size. |
30 | * Hans Schillstrom : Added Version 1: i.e. IPv6, | 30 | * Hans Schillstrom : Added Version 1: i.e. IPv6, |
31 | * Persistence support, fwmark and time-out. | 31 | * Persistence support, fwmark and time-out. |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #define KMSG_COMPONENT "IPVS" | 34 | #define KMSG_COMPONENT "IPVS" |
35 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | 35 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
36 | 36 | ||
37 | #include <linux/module.h> | 37 | #include <linux/module.h> |
38 | #include <linux/slab.h> | 38 | #include <linux/slab.h> |
39 | #include <linux/inetdevice.h> | 39 | #include <linux/inetdevice.h> |
40 | #include <linux/net.h> | 40 | #include <linux/net.h> |
41 | #include <linux/completion.h> | 41 | #include <linux/completion.h> |
42 | #include <linux/delay.h> | 42 | #include <linux/delay.h> |
43 | #include <linux/skbuff.h> | 43 | #include <linux/skbuff.h> |
44 | #include <linux/in.h> | 44 | #include <linux/in.h> |
45 | #include <linux/igmp.h> /* for ip_mc_join_group */ | 45 | #include <linux/igmp.h> /* for ip_mc_join_group */ |
46 | #include <linux/udp.h> | 46 | #include <linux/udp.h> |
47 | #include <linux/err.h> | 47 | #include <linux/err.h> |
48 | #include <linux/kthread.h> | 48 | #include <linux/kthread.h> |
49 | #include <linux/wait.h> | 49 | #include <linux/wait.h> |
50 | #include <linux/kernel.h> | 50 | #include <linux/kernel.h> |
51 | 51 | ||
52 | #include <asm/unaligned.h> /* Used for ntoh_seq and hton_seq */ | 52 | #include <asm/unaligned.h> /* Used for ntoh_seq and hton_seq */ |
53 | 53 | ||
54 | #include <net/ip.h> | 54 | #include <net/ip.h> |
55 | #include <net/sock.h> | 55 | #include <net/sock.h> |
56 | 56 | ||
57 | #include <net/ip_vs.h> | 57 | #include <net/ip_vs.h> |
58 | 58 | ||
59 | #define IP_VS_SYNC_GROUP 0xe0000051 /* multicast addr - 224.0.0.81 */ | 59 | #define IP_VS_SYNC_GROUP 0xe0000051 /* multicast addr - 224.0.0.81 */ |
60 | #define IP_VS_SYNC_PORT 8848 /* multicast port */ | 60 | #define IP_VS_SYNC_PORT 8848 /* multicast port */ |
61 | 61 | ||
62 | #define SYNC_PROTO_VER 1 /* Protocol version in header */ | 62 | #define SYNC_PROTO_VER 1 /* Protocol version in header */ |
63 | 63 | ||
64 | static struct lock_class_key __ipvs_sync_key; | 64 | static struct lock_class_key __ipvs_sync_key; |
65 | /* | 65 | /* |
66 | * IPVS sync connection entry | 66 | * IPVS sync connection entry |
67 | * Version 0, i.e. original version. | 67 | * Version 0, i.e. original version. |
68 | */ | 68 | */ |
69 | struct ip_vs_sync_conn_v0 { | 69 | struct ip_vs_sync_conn_v0 { |
70 | __u8 reserved; | 70 | __u8 reserved; |
71 | 71 | ||
72 | /* Protocol, addresses and port numbers */ | 72 | /* Protocol, addresses and port numbers */ |
73 | __u8 protocol; /* Which protocol (TCP/UDP) */ | 73 | __u8 protocol; /* Which protocol (TCP/UDP) */ |
74 | __be16 cport; | 74 | __be16 cport; |
75 | __be16 vport; | 75 | __be16 vport; |
76 | __be16 dport; | 76 | __be16 dport; |
77 | __be32 caddr; /* client address */ | 77 | __be32 caddr; /* client address */ |
78 | __be32 vaddr; /* virtual address */ | 78 | __be32 vaddr; /* virtual address */ |
79 | __be32 daddr; /* destination address */ | 79 | __be32 daddr; /* destination address */ |
80 | 80 | ||
81 | /* Flags and state transition */ | 81 | /* Flags and state transition */ |
82 | __be16 flags; /* status flags */ | 82 | __be16 flags; /* status flags */ |
83 | __be16 state; /* state info */ | 83 | __be16 state; /* state info */ |
84 | 84 | ||
85 | /* The sequence options start here */ | 85 | /* The sequence options start here */ |
86 | }; | 86 | }; |
87 | 87 | ||
88 | struct ip_vs_sync_conn_options { | 88 | struct ip_vs_sync_conn_options { |
89 | struct ip_vs_seq in_seq; /* incoming seq. struct */ | 89 | struct ip_vs_seq in_seq; /* incoming seq. struct */ |
90 | struct ip_vs_seq out_seq; /* outgoing seq. struct */ | 90 | struct ip_vs_seq out_seq; /* outgoing seq. struct */ |
91 | }; | 91 | }; |
92 | 92 | ||
93 | /* | 93 | /* |
94 | Sync Connection format (sync_conn) | 94 | Sync Connection format (sync_conn) |
95 | 95 | ||
96 | 0 1 2 3 | 96 | 0 1 2 3 |
97 | 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 | 97 | 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 |
98 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 98 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
99 | | Type | Protocol | Ver. | Size | | 99 | | Type | Protocol | Ver. | Size | |
100 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 100 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
101 | | Flags | | 101 | | Flags | |
102 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 102 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
103 | | State | cport | | 103 | | State | cport | |
104 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 104 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
105 | | vport | dport | | 105 | | vport | dport | |
106 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 106 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
107 | | fwmark | | 107 | | fwmark | |
108 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 108 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
109 | | timeout (in sec.) | | 109 | | timeout (in sec.) | |
110 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 110 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
111 | | ... | | 111 | | ... | |
112 | | IP-Addresses (v4 or v6) | | 112 | | IP-Addresses (v4 or v6) | |
113 | | ... | | 113 | | ... | |
114 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 114 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
115 | Optional Parameters. | 115 | Optional Parameters. |
116 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 116 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
117 | | Param. Type | Param. Length | Param. data | | 117 | | Param. Type | Param. Length | Param. data | |
118 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | 118 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | |
119 | | ... | | 119 | | ... | |
120 | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 120 | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
121 | | | Param Type | Param. Length | | 121 | | | Param Type | Param. Length | |
122 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 122 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
123 | | Param data | | 123 | | Param data | |
124 | | Last Param data should be padded for 32 bit alignment | | 124 | | Last Param data should be padded for 32 bit alignment | |
125 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 125 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
126 | */ | 126 | */ |
127 | 127 | ||
128 | /* | 128 | /* |
129 | * Type 0, IPv4 sync connection format | 129 | * Type 0, IPv4 sync connection format |
130 | */ | 130 | */ |
131 | struct ip_vs_sync_v4 { | 131 | struct ip_vs_sync_v4 { |
132 | __u8 type; | 132 | __u8 type; |
133 | __u8 protocol; /* Which protocol (TCP/UDP) */ | 133 | __u8 protocol; /* Which protocol (TCP/UDP) */ |
134 | __be16 ver_size; /* Version msb 4 bits */ | 134 | __be16 ver_size; /* Version msb 4 bits */ |
135 | /* Flags and state transition */ | 135 | /* Flags and state transition */ |
136 | __be32 flags; /* status flags */ | 136 | __be32 flags; /* status flags */ |
137 | __be16 state; /* state info */ | 137 | __be16 state; /* state info */ |
138 | /* Protocol, addresses and port numbers */ | 138 | /* Protocol, addresses and port numbers */ |
139 | __be16 cport; | 139 | __be16 cport; |
140 | __be16 vport; | 140 | __be16 vport; |
141 | __be16 dport; | 141 | __be16 dport; |
142 | __be32 fwmark; /* Firewall mark from skb */ | 142 | __be32 fwmark; /* Firewall mark from skb */ |
143 | __be32 timeout; /* cp timeout */ | 143 | __be32 timeout; /* cp timeout */ |
144 | __be32 caddr; /* client address */ | 144 | __be32 caddr; /* client address */ |
145 | __be32 vaddr; /* virtual address */ | 145 | __be32 vaddr; /* virtual address */ |
146 | __be32 daddr; /* destination address */ | 146 | __be32 daddr; /* destination address */ |
147 | /* The sequence options start here */ | 147 | /* The sequence options start here */ |
148 | /* PE data padded to 32bit alignment after seq. options */ | 148 | /* PE data padded to 32bit alignment after seq. options */ |
149 | }; | 149 | }; |
150 | /* | 150 | /* |
151 | * Type 2 messages IPv6 | 151 | * Type 2 messages IPv6 |
152 | */ | 152 | */ |
153 | struct ip_vs_sync_v6 { | 153 | struct ip_vs_sync_v6 { |
154 | __u8 type; | 154 | __u8 type; |
155 | __u8 protocol; /* Which protocol (TCP/UDP) */ | 155 | __u8 protocol; /* Which protocol (TCP/UDP) */ |
156 | __be16 ver_size; /* Version msb 4 bits */ | 156 | __be16 ver_size; /* Version msb 4 bits */ |
157 | /* Flags and state transition */ | 157 | /* Flags and state transition */ |
158 | __be32 flags; /* status flags */ | 158 | __be32 flags; /* status flags */ |
159 | __be16 state; /* state info */ | 159 | __be16 state; /* state info */ |
160 | /* Protocol, addresses and port numbers */ | 160 | /* Protocol, addresses and port numbers */ |
161 | __be16 cport; | 161 | __be16 cport; |
162 | __be16 vport; | 162 | __be16 vport; |
163 | __be16 dport; | 163 | __be16 dport; |
164 | __be32 fwmark; /* Firewall mark from skb */ | 164 | __be32 fwmark; /* Firewall mark from skb */ |
165 | __be32 timeout; /* cp timeout */ | 165 | __be32 timeout; /* cp timeout */ |
166 | struct in6_addr caddr; /* client address */ | 166 | struct in6_addr caddr; /* client address */ |
167 | struct in6_addr vaddr; /* virtual address */ | 167 | struct in6_addr vaddr; /* virtual address */ |
168 | struct in6_addr daddr; /* destination address */ | 168 | struct in6_addr daddr; /* destination address */ |
169 | /* The sequence options start here */ | 169 | /* The sequence options start here */ |
170 | /* PE data padded to 32bit alignment after seq. options */ | 170 | /* PE data padded to 32bit alignment after seq. options */ |
171 | }; | 171 | }; |
172 | 172 | ||
173 | union ip_vs_sync_conn { | 173 | union ip_vs_sync_conn { |
174 | struct ip_vs_sync_v4 v4; | 174 | struct ip_vs_sync_v4 v4; |
175 | struct ip_vs_sync_v6 v6; | 175 | struct ip_vs_sync_v6 v6; |
176 | }; | 176 | }; |
177 | 177 | ||
178 | /* Bits in Type field in above */ | 178 | /* Bits in Type field in above */ |
179 | #define STYPE_INET6 0 | 179 | #define STYPE_INET6 0 |
180 | #define STYPE_F_INET6 (1 << STYPE_INET6) | 180 | #define STYPE_F_INET6 (1 << STYPE_INET6) |
181 | 181 | ||
182 | #define SVER_SHIFT 12 /* Shift to get version */ | 182 | #define SVER_SHIFT 12 /* Shift to get version */ |
183 | #define SVER_MASK 0x0fff /* Mask to strip version */ | 183 | #define SVER_MASK 0x0fff /* Mask to strip version */ |
184 | 184 | ||
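The "Ver. | Size" field in the diagrams above is a single 16-bit quantity: the top 4 bits carry the entry version and the low 12 bits the entry size, split with SVER_SHIFT/SVER_MASK. A minimal userspace-style sketch of the packing (illustrative helpers, not part of this patch):

	#include <stdint.h>
	#include <arpa/inet.h>	/* htons()/ntohs() */

	#define SVER_SHIFT 12
	#define SVER_MASK  0x0fff

	/* Pack version (msb 4 bits) and size (low 12 bits) into ver_size. */
	static uint16_t pack_ver_size(unsigned int ver, unsigned int size)
	{
		return htons((ver << SVER_SHIFT) | (size & SVER_MASK));
	}

	static unsigned int unpack_ver(uint16_t vs)  { return ntohs(vs) >> SVER_SHIFT; }
	static unsigned int unpack_size(uint16_t vs) { return ntohs(vs) & SVER_MASK; }

Note that ip_vs_sync_conn() further down stores htons(len & SVER_MASK), i.e. entry version 0, matching the "/* Version 0 */" comment there.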
185 | #define IPVS_OPT_SEQ_DATA 1 | 185 | #define IPVS_OPT_SEQ_DATA 1 |
186 | #define IPVS_OPT_PE_DATA 2 | 186 | #define IPVS_OPT_PE_DATA 2 |
187 | #define IPVS_OPT_PE_NAME 3 | 187 | #define IPVS_OPT_PE_NAME 3 |
188 | #define IPVS_OPT_PARAM 7 | 188 | #define IPVS_OPT_PARAM 7 |
189 | 189 | ||
190 | #define IPVS_OPT_F_SEQ_DATA (1 << (IPVS_OPT_SEQ_DATA-1)) | 190 | #define IPVS_OPT_F_SEQ_DATA (1 << (IPVS_OPT_SEQ_DATA-1)) |
191 | #define IPVS_OPT_F_PE_DATA (1 << (IPVS_OPT_PE_DATA-1)) | 191 | #define IPVS_OPT_F_PE_DATA (1 << (IPVS_OPT_PE_DATA-1)) |
192 | #define IPVS_OPT_F_PE_NAME (1 << (IPVS_OPT_PE_NAME-1)) | 192 | #define IPVS_OPT_F_PE_NAME (1 << (IPVS_OPT_PE_NAME-1)) |
193 | #define IPVS_OPT_F_PARAM (1 << (IPVS_OPT_PARAM-1)) | 193 | #define IPVS_OPT_F_PARAM (1 << (IPVS_OPT_PARAM-1)) |
194 | 194 | ||
195 | struct ip_vs_sync_thread_data { | 195 | struct ip_vs_sync_thread_data { |
196 | struct net *net; | 196 | struct net *net; |
197 | struct socket *sock; | 197 | struct socket *sock; |
198 | char *buf; | 198 | char *buf; |
199 | int id; | 199 | int id; |
200 | }; | 200 | }; |
201 | 201 | ||
202 | /* Version 0 definition of packet sizes */ | 202 | /* Version 0 definition of packet sizes */ |
203 | #define SIMPLE_CONN_SIZE (sizeof(struct ip_vs_sync_conn_v0)) | 203 | #define SIMPLE_CONN_SIZE (sizeof(struct ip_vs_sync_conn_v0)) |
204 | #define FULL_CONN_SIZE \ | 204 | #define FULL_CONN_SIZE \ |
205 | (sizeof(struct ip_vs_sync_conn_v0) + sizeof(struct ip_vs_sync_conn_options)) | 205 | (sizeof(struct ip_vs_sync_conn_v0) + sizeof(struct ip_vs_sync_conn_options)) |
206 | 206 | ||
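Assuming the usual no-hole layout of the naturally aligned fields above, these sizes work out to SIMPLE_CONN_SIZE == 24 bytes and FULL_CONN_SIZE == 48 bytes (each ip_vs_seq is three __u32s). A compile-time check one could add (a sketch, not in the patch):

	_Static_assert(sizeof(struct ip_vs_sync_conn_v0) == 24,
		       "v0 sync entry must stay 24 bytes on the wire");
	_Static_assert(sizeof(struct ip_vs_sync_conn_options) == 24,
		       "seq options must stay 24 bytes on the wire");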
207 | 207 | ||
208 | /* | 208 | /* |
209 | The master multicasts messages (Datagrams) to the backup load balancers | 209 | The master multicasts messages (Datagrams) to the backup load balancers |
210 | in the following format. | 210 | in the following format. |
211 | 211 | ||
212 | Version 1: | 212 | Version 1: |
213 | Note: the first byte should be zero, so ver 0 receivers will drop the packet. | 213 | Note: the first byte should be zero, so ver 0 receivers will drop the packet. |
214 | 214 | ||
215 | 0 1 2 3 | 215 | 0 1 2 3 |
216 | 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 | 216 | 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 |
217 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 217 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
218 | | 0 | SyncID | Size | | 218 | | 0 | SyncID | Size | |
219 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 219 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
220 | | Count Conns | Version | Reserved, set to Zero | | 220 | | Count Conns | Version | Reserved, set to Zero | |
221 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 221 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
222 | | | | 222 | | | |
223 | | IPVS Sync Connection (1) | | 223 | | IPVS Sync Connection (1) | |
224 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 224 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
225 | | . | | 225 | | . | |
226 | ~ . ~ | 226 | ~ . ~ |
227 | | . | | 227 | | . | |
228 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 228 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
229 | | | | 229 | | | |
230 | | IPVS Sync Connection (n) | | 230 | | IPVS Sync Connection (n) | |
231 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 231 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
232 | 232 | ||
233 | Version 0 Header | 233 | Version 0 Header |
234 | 0 1 2 3 | 234 | 0 1 2 3 |
235 | 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 | 235 | 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 |
236 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 236 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
237 | | Count Conns | SyncID | Size | | 237 | | Count Conns | SyncID | Size | |
238 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 238 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
239 | | IPVS Sync Connection (1) | | 239 | | IPVS Sync Connection (1) | |
240 | */ | 240 | */ |
241 | 241 | ||
242 | #define SYNC_MESG_HEADER_LEN 4 | 242 | #define SYNC_MESG_HEADER_LEN 4 |
243 | #define MAX_CONNS_PER_SYNCBUFF 255 /* nr_conns in ip_vs_sync_mesg is 8 bit */ | 243 | #define MAX_CONNS_PER_SYNCBUFF 255 /* nr_conns in ip_vs_sync_mesg is 8 bit */ |
244 | 244 | ||
245 | /* Version 0 header */ | 245 | /* Version 0 header */ |
246 | struct ip_vs_sync_mesg_v0 { | 246 | struct ip_vs_sync_mesg_v0 { |
247 | __u8 nr_conns; | 247 | __u8 nr_conns; |
248 | __u8 syncid; | 248 | __u8 syncid; |
249 | __be16 size; | 249 | __be16 size; |
250 | 250 | ||
251 | /* ip_vs_sync_conn entries start here */ | 251 | /* ip_vs_sync_conn entries start here */ |
252 | }; | 252 | }; |
253 | 253 | ||
254 | /* Version 1 header */ | 254 | /* Version 1 header */ |
255 | struct ip_vs_sync_mesg { | 255 | struct ip_vs_sync_mesg { |
256 | __u8 reserved; /* must be zero */ | 256 | __u8 reserved; /* must be zero */ |
257 | __u8 syncid; | 257 | __u8 syncid; |
258 | __be16 size; | 258 | __be16 size; |
259 | __u8 nr_conns; | 259 | __u8 nr_conns; |
260 | __s8 version; /* SYNC_PROTO_VER */ | 260 | __s8 version; /* SYNC_PROTO_VER */ |
261 | __u16 spare; | 261 | __u16 spare; |
262 | /* ip_vs_sync_conn entries start here */ | 262 | /* ip_vs_sync_conn entries start here */ |
263 | }; | 263 | }; |
264 | 264 | ||
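Comparing the two headers above shows why the scheme is backward compatible: byte 0 is nr_conns in Version 0 but the always-zero reserved field in Version 1, so a v0 backup reads a v1 message as "zero connections" and drops it. A sketch of the check a receiver can make (illustrative helper, not from this patch):

	static int sync_mesg_is_v1(const struct ip_vs_sync_mesg *m)
	{
		/* v1: reserved (the old nr_conns slot) is 0 and version matches */
		return m->reserved == 0 && m->version == SYNC_PROTO_VER;
	}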
265 | struct ip_vs_sync_buff { | 265 | struct ip_vs_sync_buff { |
266 | struct list_head list; | 266 | struct list_head list; |
267 | unsigned long firstuse; | 267 | unsigned long firstuse; |
268 | 268 | ||
269 | /* pointers for the message data */ | 269 | /* pointers for the message data */ |
270 | struct ip_vs_sync_mesg *mesg; | 270 | struct ip_vs_sync_mesg *mesg; |
271 | unsigned char *head; | 271 | unsigned char *head; |
272 | unsigned char *end; | 272 | unsigned char *end; |
273 | }; | 273 | }; |
274 | 274 | ||
275 | /* | 275 | /* |
276 | * Copy of struct ip_vs_seq | 276 | * Copy of struct ip_vs_seq |
277 | * From unaligned network order to aligned host order | 277 | * From unaligned network order to aligned host order |
278 | */ | 278 | */ |
279 | static void ntoh_seq(struct ip_vs_seq *no, struct ip_vs_seq *ho) | 279 | static void ntoh_seq(struct ip_vs_seq *no, struct ip_vs_seq *ho) |
280 | { | 280 | { |
281 | ho->init_seq = get_unaligned_be32(&no->init_seq); | 281 | ho->init_seq = get_unaligned_be32(&no->init_seq); |
282 | ho->delta = get_unaligned_be32(&no->delta); | 282 | ho->delta = get_unaligned_be32(&no->delta); |
283 | ho->previous_delta = get_unaligned_be32(&no->previous_delta); | 283 | ho->previous_delta = get_unaligned_be32(&no->previous_delta); |
284 | } | 284 | } |
285 | 285 | ||
286 | /* | 286 | /* |
287 | * Copy of struct ip_vs_seq | 287 | * Copy of struct ip_vs_seq |
288 | * From aligned host order to unaligned network order | 288 | * From aligned host order to unaligned network order |
289 | */ | 289 | */ |
290 | static void hton_seq(struct ip_vs_seq *ho, struct ip_vs_seq *no) | 290 | static void hton_seq(struct ip_vs_seq *ho, struct ip_vs_seq *no) |
291 | { | 291 | { |
292 | put_unaligned_be32(ho->init_seq, &no->init_seq); | 292 | put_unaligned_be32(ho->init_seq, &no->init_seq); |
293 | put_unaligned_be32(ho->delta, &no->delta); | 293 | put_unaligned_be32(ho->delta, &no->delta); |
294 | put_unaligned_be32(ho->previous_delta, &no->previous_delta); | 294 | put_unaligned_be32(ho->previous_delta, &no->previous_delta); |
295 | } | 295 | } |
296 | 296 | ||
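The unaligned accessors are not decoration: in a Version 1 entry the sequence data starts right after a one-byte option type and a one-byte length, i.e. at an offset of 2 mod 4, so a plain 32-bit load could fault on strict-alignment CPUs. A small round-trip sketch under that assumption (buffer and values are made up):

	struct ip_vs_seq host = { .init_seq = 1000, .delta = 5, .previous_delta = 0 };
	unsigned char wire[2 + sizeof(struct ip_vs_seq)];	/* type+len header skews alignment */

	hton_seq(&host, (struct ip_vs_seq *)(wire + 2));	/* unaligned-safe store */
	ntoh_seq((struct ip_vs_seq *)(wire + 2), &host);	/* unaligned-safe load */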
297 | static inline struct ip_vs_sync_buff * | 297 | static inline struct ip_vs_sync_buff * |
298 | sb_dequeue(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms) | 298 | sb_dequeue(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms) |
299 | { | 299 | { |
300 | struct ip_vs_sync_buff *sb; | 300 | struct ip_vs_sync_buff *sb; |
301 | 301 | ||
302 | spin_lock_bh(&ipvs->sync_lock); | 302 | spin_lock_bh(&ipvs->sync_lock); |
303 | if (list_empty(&ms->sync_queue)) { | 303 | if (list_empty(&ms->sync_queue)) { |
304 | sb = NULL; | 304 | sb = NULL; |
305 | __set_current_state(TASK_INTERRUPTIBLE); | 305 | __set_current_state(TASK_INTERRUPTIBLE); |
306 | } else { | 306 | } else { |
307 | sb = list_entry(ms->sync_queue.next, struct ip_vs_sync_buff, | 307 | sb = list_entry(ms->sync_queue.next, struct ip_vs_sync_buff, |
308 | list); | 308 | list); |
309 | list_del(&sb->list); | 309 | list_del(&sb->list); |
310 | ms->sync_queue_len--; | 310 | ms->sync_queue_len--; |
311 | if (!ms->sync_queue_len) | 311 | if (!ms->sync_queue_len) |
312 | ms->sync_queue_delay = 0; | 312 | ms->sync_queue_delay = 0; |
313 | } | 313 | } |
314 | spin_unlock_bh(&ipvs->sync_lock); | 314 | spin_unlock_bh(&ipvs->sync_lock); |
315 | 315 | ||
316 | return sb; | 316 | return sb; |
317 | } | 317 | } |
318 | 318 | ||
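Note that __set_current_state(TASK_INTERRUPTIBLE) above runs while sync_lock is still held, so a wakeup from sb_queue_tail() between the empty check and the later schedule cannot be lost. A sketch of the consuming side that relies on this (illustrative skeleton only; the real master thread also batches and paces its sends):

	for (;;) {
		struct ip_vs_sync_buff *sb = sb_dequeue(ipvs, ms);

		if (!sb) {
			if (kthread_should_stop())
				break;
			schedule_timeout(HZ);	/* sleeps; woken by sb_queue_tail() */
			continue;
		}
		/* ... transmit sb->mesg, then ip_vs_sync_buff_release(sb) ... */
	}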
319 | /* | 319 | /* |
320 | * Create a new sync buffer for Version 1 proto. | 320 | * Create a new sync buffer for Version 1 proto. |
321 | */ | 321 | */ |
322 | static inline struct ip_vs_sync_buff * | 322 | static inline struct ip_vs_sync_buff * |
323 | ip_vs_sync_buff_create(struct netns_ipvs *ipvs) | 323 | ip_vs_sync_buff_create(struct netns_ipvs *ipvs) |
324 | { | 324 | { |
325 | struct ip_vs_sync_buff *sb; | 325 | struct ip_vs_sync_buff *sb; |
326 | 326 | ||
327 | if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC))) | 327 | if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC))) |
328 | return NULL; | 328 | return NULL; |
329 | 329 | ||
330 | sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC); | 330 | sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC); |
331 | if (!sb->mesg) { | 331 | if (!sb->mesg) { |
332 | kfree(sb); | 332 | kfree(sb); |
333 | return NULL; | 333 | return NULL; |
334 | } | 334 | } |
335 | sb->mesg->reserved = 0; /* old nr_conns i.e. must be zero now */ | 335 | sb->mesg->reserved = 0; /* old nr_conns i.e. must be zero now */ |
336 | sb->mesg->version = SYNC_PROTO_VER; | 336 | sb->mesg->version = SYNC_PROTO_VER; |
337 | sb->mesg->syncid = ipvs->master_syncid; | 337 | sb->mesg->syncid = ipvs->master_syncid; |
338 | sb->mesg->size = htons(sizeof(struct ip_vs_sync_mesg)); | 338 | sb->mesg->size = htons(sizeof(struct ip_vs_sync_mesg)); |
339 | sb->mesg->nr_conns = 0; | 339 | sb->mesg->nr_conns = 0; |
340 | sb->mesg->spare = 0; | 340 | sb->mesg->spare = 0; |
341 | sb->head = (unsigned char *)sb->mesg + sizeof(struct ip_vs_sync_mesg); | 341 | sb->head = (unsigned char *)sb->mesg + sizeof(struct ip_vs_sync_mesg); |
342 | sb->end = (unsigned char *)sb->mesg + ipvs->send_mesg_maxlen; | 342 | sb->end = (unsigned char *)sb->mesg + ipvs->send_mesg_maxlen; |
343 | 343 | ||
344 | sb->firstuse = jiffies; | 344 | sb->firstuse = jiffies; |
345 | return sb; | 345 | return sb; |
346 | } | 346 | } |
347 | 347 | ||
348 | static inline void ip_vs_sync_buff_release(struct ip_vs_sync_buff *sb) | 348 | static inline void ip_vs_sync_buff_release(struct ip_vs_sync_buff *sb) |
349 | { | 349 | { |
350 | kfree(sb->mesg); | 350 | kfree(sb->mesg); |
351 | kfree(sb); | 351 | kfree(sb); |
352 | } | 352 | } |
353 | 353 | ||
354 | static inline void sb_queue_tail(struct netns_ipvs *ipvs, | 354 | static inline void sb_queue_tail(struct netns_ipvs *ipvs, |
355 | struct ipvs_master_sync_state *ms) | 355 | struct ipvs_master_sync_state *ms) |
356 | { | 356 | { |
357 | struct ip_vs_sync_buff *sb = ms->sync_buff; | 357 | struct ip_vs_sync_buff *sb = ms->sync_buff; |
358 | 358 | ||
359 | spin_lock(&ipvs->sync_lock); | 359 | spin_lock(&ipvs->sync_lock); |
360 | if (ipvs->sync_state & IP_VS_STATE_MASTER && | 360 | if (ipvs->sync_state & IP_VS_STATE_MASTER && |
361 | ms->sync_queue_len < sysctl_sync_qlen_max(ipvs)) { | 361 | ms->sync_queue_len < sysctl_sync_qlen_max(ipvs)) { |
362 | if (!ms->sync_queue_len) | 362 | if (!ms->sync_queue_len) |
363 | schedule_delayed_work(&ms->master_wakeup_work, | 363 | schedule_delayed_work(&ms->master_wakeup_work, |
364 | max(IPVS_SYNC_SEND_DELAY, 1)); | 364 | max(IPVS_SYNC_SEND_DELAY, 1)); |
365 | ms->sync_queue_len++; | 365 | ms->sync_queue_len++; |
366 | list_add_tail(&sb->list, &ms->sync_queue); | 366 | list_add_tail(&sb->list, &ms->sync_queue); |
367 | if ((++ms->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE) | 367 | if ((++ms->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE) |
368 | wake_up_process(ms->master_thread); | 368 | wake_up_process(ms->master_thread); |
369 | } else | 369 | } else |
370 | ip_vs_sync_buff_release(sb); | 370 | ip_vs_sync_buff_release(sb); |
371 | spin_unlock(&ipvs->sync_lock); | 371 | spin_unlock(&ipvs->sync_lock); |
372 | } | 372 | } |
373 | 373 | ||
374 | /* | 374 | /* |
375 | * Get the current sync buffer if it has been created for more | 375 | * Get the current sync buffer if it has been created for more |
376 | * than the specified time or the specified time is zero. | 376 | * than the specified time or the specified time is zero. |
377 | */ | 377 | */ |
378 | static inline struct ip_vs_sync_buff * | 378 | static inline struct ip_vs_sync_buff * |
379 | get_curr_sync_buff(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms, | 379 | get_curr_sync_buff(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms, |
380 | unsigned long time) | 380 | unsigned long time) |
381 | { | 381 | { |
382 | struct ip_vs_sync_buff *sb; | 382 | struct ip_vs_sync_buff *sb; |
383 | 383 | ||
384 | spin_lock_bh(&ipvs->sync_buff_lock); | 384 | spin_lock_bh(&ipvs->sync_buff_lock); |
385 | sb = ms->sync_buff; | 385 | sb = ms->sync_buff; |
386 | if (sb && time_after_eq(jiffies - sb->firstuse, time)) { | 386 | if (sb && time_after_eq(jiffies - sb->firstuse, time)) { |
387 | ms->sync_buff = NULL; | 387 | ms->sync_buff = NULL; |
388 | __set_current_state(TASK_RUNNING); | 388 | __set_current_state(TASK_RUNNING); |
389 | } else | 389 | } else |
390 | sb = NULL; | 390 | sb = NULL; |
391 | spin_unlock_bh(&ipvs->sync_buff_lock); | 391 | spin_unlock_bh(&ipvs->sync_buff_lock); |
392 | return sb; | 392 | return sb; |
393 | } | 393 | } |
394 | 394 | ||
395 | static inline int | 395 | static inline int |
396 | select_master_thread_id(struct netns_ipvs *ipvs, struct ip_vs_conn *cp) | 396 | select_master_thread_id(struct netns_ipvs *ipvs, struct ip_vs_conn *cp) |
397 | { | 397 | { |
398 | return ((long) cp >> (1 + ilog2(sizeof(*cp)))) & ipvs->threads_mask; | 398 | return ((long) cp >> (1 + ilog2(sizeof(*cp)))) & ipvs->threads_mask; |
399 | } | 399 | } |
400 | 400 | ||
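select_master_thread_id() hashes the connection pointer itself: shifting by 1 + ilog2(sizeof(*cp)) discards the low address bits that are identical for every object carved from the same slab. A worked example with made-up numbers (the struct size, address and mask are all assumptions):

	/* if sizeof(*cp) were 320 bytes: ilog2(320) == 8, so shift == 9 */
	unsigned long cp_addr = 0xffff880012345a00UL;	/* hypothetical connection address */
	unsigned long threads_mask = 3;			/* four master sync threads */
	int id = (cp_addr >> 9) & threads_mask;		/* 0x...1a2d & 3 == 1 */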
401 | /* | 401 | /* |
402 | * Create a new sync buffer for Version 0 proto. | 402 | * Create a new sync buffer for Version 0 proto. |
403 | */ | 403 | */ |
404 | static inline struct ip_vs_sync_buff * | 404 | static inline struct ip_vs_sync_buff * |
405 | ip_vs_sync_buff_create_v0(struct netns_ipvs *ipvs) | 405 | ip_vs_sync_buff_create_v0(struct netns_ipvs *ipvs) |
406 | { | 406 | { |
407 | struct ip_vs_sync_buff *sb; | 407 | struct ip_vs_sync_buff *sb; |
408 | struct ip_vs_sync_mesg_v0 *mesg; | 408 | struct ip_vs_sync_mesg_v0 *mesg; |
409 | 409 | ||
410 | if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC))) | 410 | if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC))) |
411 | return NULL; | 411 | return NULL; |
412 | 412 | ||
413 | sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC); | 413 | sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC); |
414 | if (!sb->mesg) { | 414 | if (!sb->mesg) { |
415 | kfree(sb); | 415 | kfree(sb); |
416 | return NULL; | 416 | return NULL; |
417 | } | 417 | } |
418 | mesg = (struct ip_vs_sync_mesg_v0 *)sb->mesg; | 418 | mesg = (struct ip_vs_sync_mesg_v0 *)sb->mesg; |
419 | mesg->nr_conns = 0; | 419 | mesg->nr_conns = 0; |
420 | mesg->syncid = ipvs->master_syncid; | 420 | mesg->syncid = ipvs->master_syncid; |
421 | mesg->size = htons(sizeof(struct ip_vs_sync_mesg_v0)); | 421 | mesg->size = htons(sizeof(struct ip_vs_sync_mesg_v0)); |
422 | sb->head = (unsigned char *)mesg + sizeof(struct ip_vs_sync_mesg_v0); | 422 | sb->head = (unsigned char *)mesg + sizeof(struct ip_vs_sync_mesg_v0); |
423 | sb->end = (unsigned char *)mesg + ipvs->send_mesg_maxlen; | 423 | sb->end = (unsigned char *)mesg + ipvs->send_mesg_maxlen; |
424 | sb->firstuse = jiffies; | 424 | sb->firstuse = jiffies; |
425 | return sb; | 425 | return sb; |
426 | } | 426 | } |
427 | 427 | ||
428 | /* Check if connection is controlled by persistence */ | 428 | /* Check if connection is controlled by persistence */ |
429 | static inline bool in_persistence(struct ip_vs_conn *cp) | 429 | static inline bool in_persistence(struct ip_vs_conn *cp) |
430 | { | 430 | { |
431 | for (cp = cp->control; cp; cp = cp->control) { | 431 | for (cp = cp->control; cp; cp = cp->control) { |
432 | if (cp->flags & IP_VS_CONN_F_TEMPLATE) | 432 | if (cp->flags & IP_VS_CONN_F_TEMPLATE) |
433 | return true; | 433 | return true; |
434 | } | 434 | } |
435 | return false; | 435 | return false; |
436 | } | 436 | } |
437 | 437 | ||
438 | /* Check if conn should be synced. | 438 | /* Check if conn should be synced. |
439 | * pkts: conn packets, use sysctl_sync_threshold to avoid packet check | 439 | * pkts: conn packets, use sysctl_sync_threshold to avoid packet check |
440 | * - (1) sync_refresh_period: reduce sync rate. Additionally, retry | 440 | * - (1) sync_refresh_period: reduce sync rate. Additionally, retry |
441 | * sync_retries times with period of sync_refresh_period/8 | 441 | * sync_retries times with period of sync_refresh_period/8 |
442 | * - (2) if both sync_refresh_period and sync_period are 0 send sync only | 442 | * - (2) if both sync_refresh_period and sync_period are 0 send sync only |
443 | * for state changes or only once when pkts matches sync_threshold | 443 | * for state changes or only once when pkts matches sync_threshold |
444 | * - (3) templates: rate can be reduced only with sync_refresh_period or | 444 | * - (3) templates: rate can be reduced only with sync_refresh_period or |
445 | * with (2) | 445 | * with (2) |
446 | */ | 446 | */ |
447 | static int ip_vs_sync_conn_needed(struct netns_ipvs *ipvs, | 447 | static int ip_vs_sync_conn_needed(struct netns_ipvs *ipvs, |
448 | struct ip_vs_conn *cp, int pkts) | 448 | struct ip_vs_conn *cp, int pkts) |
449 | { | 449 | { |
450 | unsigned long orig = ACCESS_ONCE(cp->sync_endtime); | 450 | unsigned long orig = ACCESS_ONCE(cp->sync_endtime); |
451 | unsigned long now = jiffies; | 451 | unsigned long now = jiffies; |
452 | unsigned long n = (now + cp->timeout) & ~3UL; | 452 | unsigned long n = (now + cp->timeout) & ~3UL; |
453 | unsigned int sync_refresh_period; | 453 | unsigned int sync_refresh_period; |
454 | int sync_period; | 454 | int sync_period; |
455 | int force; | 455 | int force; |
456 | 456 | ||
457 | /* Check if we sync in current state */ | 457 | /* Check if we sync in current state */ |
458 | if (unlikely(cp->flags & IP_VS_CONN_F_TEMPLATE)) | 458 | if (unlikely(cp->flags & IP_VS_CONN_F_TEMPLATE)) |
459 | force = 0; | 459 | force = 0; |
460 | else if (unlikely(sysctl_sync_persist_mode(ipvs) && in_persistence(cp))) | 460 | else if (unlikely(sysctl_sync_persist_mode(ipvs) && in_persistence(cp))) |
461 | return 0; | 461 | return 0; |
462 | else if (likely(cp->protocol == IPPROTO_TCP)) { | 462 | else if (likely(cp->protocol == IPPROTO_TCP)) { |
463 | if (!((1 << cp->state) & | 463 | if (!((1 << cp->state) & |
464 | ((1 << IP_VS_TCP_S_ESTABLISHED) | | 464 | ((1 << IP_VS_TCP_S_ESTABLISHED) | |
465 | (1 << IP_VS_TCP_S_FIN_WAIT) | | 465 | (1 << IP_VS_TCP_S_FIN_WAIT) | |
466 | (1 << IP_VS_TCP_S_CLOSE) | | 466 | (1 << IP_VS_TCP_S_CLOSE) | |
467 | (1 << IP_VS_TCP_S_CLOSE_WAIT) | | 467 | (1 << IP_VS_TCP_S_CLOSE_WAIT) | |
468 | (1 << IP_VS_TCP_S_TIME_WAIT)))) | 468 | (1 << IP_VS_TCP_S_TIME_WAIT)))) |
469 | return 0; | 469 | return 0; |
470 | force = cp->state != cp->old_state; | 470 | force = cp->state != cp->old_state; |
471 | if (force && cp->state != IP_VS_TCP_S_ESTABLISHED) | 471 | if (force && cp->state != IP_VS_TCP_S_ESTABLISHED) |
472 | goto set; | 472 | goto set; |
473 | } else if (unlikely(cp->protocol == IPPROTO_SCTP)) { | 473 | } else if (unlikely(cp->protocol == IPPROTO_SCTP)) { |
474 | if (!((1 << cp->state) & | 474 | if (!((1 << cp->state) & |
475 | ((1 << IP_VS_SCTP_S_ESTABLISHED) | | 475 | ((1 << IP_VS_SCTP_S_ESTABLISHED) | |
476 | (1 << IP_VS_SCTP_S_SHUTDOWN_SENT) | | 476 | (1 << IP_VS_SCTP_S_SHUTDOWN_SENT) | |
477 | (1 << IP_VS_SCTP_S_SHUTDOWN_RECEIVED) | | 477 | (1 << IP_VS_SCTP_S_SHUTDOWN_RECEIVED) | |
478 | (1 << IP_VS_SCTP_S_SHUTDOWN_ACK_SENT) | | 478 | (1 << IP_VS_SCTP_S_SHUTDOWN_ACK_SENT) | |
479 | (1 << IP_VS_SCTP_S_CLOSED)))) | 479 | (1 << IP_VS_SCTP_S_CLOSED)))) |
480 | return 0; | 480 | return 0; |
481 | force = cp->state != cp->old_state; | 481 | force = cp->state != cp->old_state; |
482 | if (force && cp->state != IP_VS_SCTP_S_ESTABLISHED) | 482 | if (force && cp->state != IP_VS_SCTP_S_ESTABLISHED) |
483 | goto set; | 483 | goto set; |
484 | } else { | 484 | } else { |
485 | /* UDP or another protocol with single state */ | 485 | /* UDP or another protocol with single state */ |
486 | force = 0; | 486 | force = 0; |
487 | } | 487 | } |
488 | 488 | ||
489 | sync_refresh_period = sysctl_sync_refresh_period(ipvs); | 489 | sync_refresh_period = sysctl_sync_refresh_period(ipvs); |
490 | if (sync_refresh_period > 0) { | 490 | if (sync_refresh_period > 0) { |
491 | long diff = n - orig; | 491 | long diff = n - orig; |
492 | long min_diff = max(cp->timeout >> 1, 10UL * HZ); | 492 | long min_diff = max(cp->timeout >> 1, 10UL * HZ); |
493 | 493 | ||
494 | /* Avoid sync if difference is below sync_refresh_period | 494 | /* Avoid sync if difference is below sync_refresh_period |
495 | * and below the half timeout. | 495 | * and below the half timeout. |
496 | */ | 496 | */ |
497 | if (abs(diff) < min_t(long, sync_refresh_period, min_diff)) { | 497 | if (abs(diff) < min_t(long, sync_refresh_period, min_diff)) { |
498 | int retries = orig & 3; | 498 | int retries = orig & 3; |
499 | 499 | ||
500 | if (retries >= sysctl_sync_retries(ipvs)) | 500 | if (retries >= sysctl_sync_retries(ipvs)) |
501 | return 0; | 501 | return 0; |
502 | if (time_before(now, orig - cp->timeout + | 502 | if (time_before(now, orig - cp->timeout + |
503 | (sync_refresh_period >> 3))) | 503 | (sync_refresh_period >> 3))) |
504 | return 0; | 504 | return 0; |
505 | n |= retries + 1; | 505 | n |= retries + 1; |
506 | } | 506 | } |
507 | } | 507 | } |
508 | sync_period = sysctl_sync_period(ipvs); | 508 | sync_period = sysctl_sync_period(ipvs); |
509 | if (sync_period > 0) { | 509 | if (sync_period > 0) { |
510 | if (!(cp->flags & IP_VS_CONN_F_TEMPLATE) && | 510 | if (!(cp->flags & IP_VS_CONN_F_TEMPLATE) && |
511 | pkts % sync_period != sysctl_sync_threshold(ipvs)) | 511 | pkts % sync_period != sysctl_sync_threshold(ipvs)) |
512 | return 0; | 512 | return 0; |
513 | } else if (sync_refresh_period <= 0 && | 513 | } else if (sync_refresh_period <= 0 && |
514 | pkts != sysctl_sync_threshold(ipvs)) | 514 | pkts != sysctl_sync_threshold(ipvs)) |
515 | return 0; | 515 | return 0; |
516 | 516 | ||
517 | set: | 517 | set: |
518 | cp->old_state = cp->state; | 518 | cp->old_state = cp->state; |
519 | n = cmpxchg(&cp->sync_endtime, orig, n); | 519 | n = cmpxchg(&cp->sync_endtime, orig, n); |
520 | return n == orig || force; | 520 | return n == orig || force; |
521 | } | 521 | } |
522 | 522 | ||
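ip_vs_sync_conn_needed() smuggles a retry counter into cp->sync_endtime: the deadline is rounded down to a multiple of 4 jiffies (& ~3UL), a precision loss of at most 3 ticks, and the two freed bits count refresh retries. A simplified sketch with illustrative values (the timeout and the retry limit of 3 are assumptions about typical settings):

	unsigned long timeout = 300 * HZ;	/* hypothetical cp->timeout */
	int sync_retries = 3;			/* hypothetical sysctl_sync_retries() */
	unsigned long orig = 0x10000001UL;	/* old deadline, 1 retry recorded */
	int retries = orig & 3;			/* == 1 */
	unsigned long n = (jiffies + timeout) & ~3UL;	/* new deadline, bits 0-1 clear */
	if (retries < sync_retries)		/* otherwise the sync is skipped above */
		n |= retries + 1;		/* counter becomes 2 */

cmpxchg() then publishes n only if no other CPU updated sync_endtime in the meantime.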
523 | /* | 523 | /* |
523 | * Version 0, can be selected via sysctl. | 523 | * Version 0, can be selected via sysctl. |
525 | * Add an ip_vs_conn information into the current sync_buff. | 525 | * Add an ip_vs_conn information into the current sync_buff. |
526 | */ | 526 | */ |
527 | static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp, | 527 | static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp, |
528 | int pkts) | 528 | int pkts) |
529 | { | 529 | { |
530 | struct netns_ipvs *ipvs = net_ipvs(net); | 530 | struct netns_ipvs *ipvs = net_ipvs(net); |
531 | struct ip_vs_sync_mesg_v0 *m; | 531 | struct ip_vs_sync_mesg_v0 *m; |
532 | struct ip_vs_sync_conn_v0 *s; | 532 | struct ip_vs_sync_conn_v0 *s; |
533 | struct ip_vs_sync_buff *buff; | 533 | struct ip_vs_sync_buff *buff; |
534 | struct ipvs_master_sync_state *ms; | 534 | struct ipvs_master_sync_state *ms; |
535 | int id; | 535 | int id; |
536 | int len; | 536 | int len; |
537 | 537 | ||
538 | if (unlikely(cp->af != AF_INET)) | 538 | if (unlikely(cp->af != AF_INET)) |
539 | return; | 539 | return; |
540 | /* Do not sync ONE PACKET */ | 540 | /* Do not sync ONE PACKET */ |
541 | if (cp->flags & IP_VS_CONN_F_ONE_PACKET) | 541 | if (cp->flags & IP_VS_CONN_F_ONE_PACKET) |
542 | return; | 542 | return; |
543 | 543 | ||
544 | if (!ip_vs_sync_conn_needed(ipvs, cp, pkts)) | 544 | if (!ip_vs_sync_conn_needed(ipvs, cp, pkts)) |
545 | return; | 545 | return; |
546 | 546 | ||
547 | spin_lock_bh(&ipvs->sync_buff_lock); | 547 | spin_lock_bh(&ipvs->sync_buff_lock); |
548 | if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { | 548 | if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { |
549 | spin_unlock_bh(&ipvs->sync_buff_lock); | 549 | spin_unlock_bh(&ipvs->sync_buff_lock); |
550 | return; | 550 | return; |
551 | } | 551 | } |
552 | 552 | ||
553 | id = select_master_thread_id(ipvs, cp); | 553 | id = select_master_thread_id(ipvs, cp); |
554 | ms = &ipvs->ms[id]; | 554 | ms = &ipvs->ms[id]; |
555 | buff = ms->sync_buff; | 555 | buff = ms->sync_buff; |
556 | if (buff) { | 556 | if (buff) { |
557 | m = (struct ip_vs_sync_mesg_v0 *) buff->mesg; | 557 | m = (struct ip_vs_sync_mesg_v0 *) buff->mesg; |
558 | /* Send buffer if it is for v1 */ | 558 | /* Send buffer if it is for v1 */ |
559 | if (!m->nr_conns) { | 559 | if (!m->nr_conns) { |
560 | sb_queue_tail(ipvs, ms); | 560 | sb_queue_tail(ipvs, ms); |
561 | ms->sync_buff = NULL; | 561 | ms->sync_buff = NULL; |
562 | buff = NULL; | 562 | buff = NULL; |
563 | } | 563 | } |
564 | } | 564 | } |
565 | if (!buff) { | 565 | if (!buff) { |
566 | buff = ip_vs_sync_buff_create_v0(ipvs); | 566 | buff = ip_vs_sync_buff_create_v0(ipvs); |
567 | if (!buff) { | 567 | if (!buff) { |
568 | spin_unlock_bh(&ipvs->sync_buff_lock); | 568 | spin_unlock_bh(&ipvs->sync_buff_lock); |
569 | pr_err("ip_vs_sync_buff_create failed.\n"); | 569 | pr_err("ip_vs_sync_buff_create failed.\n"); |
570 | return; | 570 | return; |
571 | } | 571 | } |
572 | ms->sync_buff = buff; | 572 | ms->sync_buff = buff; |
573 | } | 573 | } |
574 | 574 | ||
575 | len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ? FULL_CONN_SIZE : | 575 | len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ? FULL_CONN_SIZE : |
576 | SIMPLE_CONN_SIZE; | 576 | SIMPLE_CONN_SIZE; |
577 | m = (struct ip_vs_sync_mesg_v0 *) buff->mesg; | 577 | m = (struct ip_vs_sync_mesg_v0 *) buff->mesg; |
578 | s = (struct ip_vs_sync_conn_v0 *) buff->head; | 578 | s = (struct ip_vs_sync_conn_v0 *) buff->head; |
579 | 579 | ||
580 | /* copy members */ | 580 | /* copy members */ |
581 | s->reserved = 0; | 581 | s->reserved = 0; |
582 | s->protocol = cp->protocol; | 582 | s->protocol = cp->protocol; |
583 | s->cport = cp->cport; | 583 | s->cport = cp->cport; |
584 | s->vport = cp->vport; | 584 | s->vport = cp->vport; |
585 | s->dport = cp->dport; | 585 | s->dport = cp->dport; |
586 | s->caddr = cp->caddr.ip; | 586 | s->caddr = cp->caddr.ip; |
587 | s->vaddr = cp->vaddr.ip; | 587 | s->vaddr = cp->vaddr.ip; |
588 | s->daddr = cp->daddr.ip; | 588 | s->daddr = cp->daddr.ip; |
589 | s->flags = htons(cp->flags & ~IP_VS_CONN_F_HASHED); | 589 | s->flags = htons(cp->flags & ~IP_VS_CONN_F_HASHED); |
590 | s->state = htons(cp->state); | 590 | s->state = htons(cp->state); |
591 | if (cp->flags & IP_VS_CONN_F_SEQ_MASK) { | 591 | if (cp->flags & IP_VS_CONN_F_SEQ_MASK) { |
592 | struct ip_vs_sync_conn_options *opt = | 592 | struct ip_vs_sync_conn_options *opt = |
593 | (struct ip_vs_sync_conn_options *)&s[1]; | 593 | (struct ip_vs_sync_conn_options *)&s[1]; |
594 | memcpy(opt, &cp->in_seq, sizeof(*opt)); | 594 | memcpy(opt, &cp->in_seq, sizeof(*opt)); |
595 | } | 595 | } |
596 | 596 | ||
597 | m->nr_conns++; | 597 | m->nr_conns++; |
598 | m->size = htons(ntohs(m->size) + len); | 598 | m->size = htons(ntohs(m->size) + len); |
599 | buff->head += len; | 599 | buff->head += len; |
600 | 600 | ||
601 | /* check if there is space for the next one */ | 601 | /* check if there is space for the next one */ |
602 | if (buff->head + FULL_CONN_SIZE > buff->end) { | 602 | if (buff->head + FULL_CONN_SIZE > buff->end) { |
603 | sb_queue_tail(ipvs, ms); | 603 | sb_queue_tail(ipvs, ms); |
604 | ms->sync_buff = NULL; | 604 | ms->sync_buff = NULL; |
605 | } | 605 | } |
606 | spin_unlock_bh(&ipvs->sync_buff_lock); | 606 | spin_unlock_bh(&ipvs->sync_buff_lock); |
607 | 607 | ||
608 | /* synchronize its controller if it has one */ | 608 | /* synchronize its controller if it has one */ |
609 | cp = cp->control; | 609 | cp = cp->control; |
610 | if (cp) { | 610 | if (cp) { |
611 | if (cp->flags & IP_VS_CONN_F_TEMPLATE) | 611 | if (cp->flags & IP_VS_CONN_F_TEMPLATE) |
612 | pkts = atomic_add_return(1, &cp->in_pkts); | 612 | pkts = atomic_add_return(1, &cp->in_pkts); |
613 | else | 613 | else |
614 | pkts = sysctl_sync_threshold(ipvs); | 614 | pkts = sysctl_sync_threshold(ipvs); |
615 | ip_vs_sync_conn(net, cp->control, pkts); | 615 | ip_vs_sync_conn(net, cp->control, pkts); |
616 | } | 616 | } |
617 | } | 617 | } |
618 | 618 | ||
619 | /* | 619 | /* |
620 | * Add an ip_vs_conn information into the current sync_buff. | 620 | * Add an ip_vs_conn information into the current sync_buff. |
621 | * Called by ip_vs_in. | 621 | * Called by ip_vs_in. |
622 | * Sending Version 1 messages | 622 | * Sending Version 1 messages |
623 | */ | 623 | */ |
624 | void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts) | 624 | void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts) |
625 | { | 625 | { |
626 | struct netns_ipvs *ipvs = net_ipvs(net); | 626 | struct netns_ipvs *ipvs = net_ipvs(net); |
627 | struct ip_vs_sync_mesg *m; | 627 | struct ip_vs_sync_mesg *m; |
628 | union ip_vs_sync_conn *s; | 628 | union ip_vs_sync_conn *s; |
629 | struct ip_vs_sync_buff *buff; | 629 | struct ip_vs_sync_buff *buff; |
630 | struct ipvs_master_sync_state *ms; | 630 | struct ipvs_master_sync_state *ms; |
631 | int id; | 631 | int id; |
632 | __u8 *p; | 632 | __u8 *p; |
633 | unsigned int len, pe_name_len, pad; | 633 | unsigned int len, pe_name_len, pad; |
634 | 634 | ||
635 | /* Handle old version of the protocol */ | 635 | /* Handle old version of the protocol */ |
636 | if (sysctl_sync_ver(ipvs) == 0) { | 636 | if (sysctl_sync_ver(ipvs) == 0) { |
637 | ip_vs_sync_conn_v0(net, cp, pkts); | 637 | ip_vs_sync_conn_v0(net, cp, pkts); |
638 | return; | 638 | return; |
639 | } | 639 | } |
640 | /* Do not sync ONE PACKET */ | 640 | /* Do not sync ONE PACKET */ |
641 | if (cp->flags & IP_VS_CONN_F_ONE_PACKET) | 641 | if (cp->flags & IP_VS_CONN_F_ONE_PACKET) |
642 | goto control; | 642 | goto control; |
643 | sloop: | 643 | sloop: |
644 | if (!ip_vs_sync_conn_needed(ipvs, cp, pkts)) | 644 | if (!ip_vs_sync_conn_needed(ipvs, cp, pkts)) |
645 | goto control; | 645 | goto control; |
646 | 646 | ||
647 | /* Sanity checks */ | 647 | /* Sanity checks */ |
648 | pe_name_len = 0; | 648 | pe_name_len = 0; |
649 | if (cp->pe_data_len) { | 649 | if (cp->pe_data_len) { |
650 | if (!cp->pe_data || !cp->dest) { | 650 | if (!cp->pe_data || !cp->dest) { |
651 | IP_VS_ERR_RL("SYNC, connection pe_data invalid\n"); | 651 | IP_VS_ERR_RL("SYNC, connection pe_data invalid\n"); |
652 | return; | 652 | return; |
653 | } | 653 | } |
654 | pe_name_len = strnlen(cp->pe->name, IP_VS_PENAME_MAXLEN); | 654 | pe_name_len = strnlen(cp->pe->name, IP_VS_PENAME_MAXLEN); |
655 | } | 655 | } |
656 | 656 | ||
657 | spin_lock_bh(&ipvs->sync_buff_lock); | 657 | spin_lock_bh(&ipvs->sync_buff_lock); |
658 | if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { | 658 | if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { |
659 | spin_unlock_bh(&ipvs->sync_buff_lock); | 659 | spin_unlock_bh(&ipvs->sync_buff_lock); |
660 | return; | 660 | return; |
661 | } | 661 | } |
662 | 662 | ||
663 | id = select_master_thread_id(ipvs, cp); | 663 | id = select_master_thread_id(ipvs, cp); |
664 | ms = &ipvs->ms[id]; | 664 | ms = &ipvs->ms[id]; |
665 | 665 | ||
666 | #ifdef CONFIG_IP_VS_IPV6 | 666 | #ifdef CONFIG_IP_VS_IPV6 |
667 | if (cp->af == AF_INET6) | 667 | if (cp->af == AF_INET6) |
668 | len = sizeof(struct ip_vs_sync_v6); | 668 | len = sizeof(struct ip_vs_sync_v6); |
669 | else | 669 | else |
670 | #endif | 670 | #endif |
671 | len = sizeof(struct ip_vs_sync_v4); | 671 | len = sizeof(struct ip_vs_sync_v4); |
672 | 672 | ||
673 | if (cp->flags & IP_VS_CONN_F_SEQ_MASK) | 673 | if (cp->flags & IP_VS_CONN_F_SEQ_MASK) |
674 | len += sizeof(struct ip_vs_sync_conn_options) + 2; | 674 | len += sizeof(struct ip_vs_sync_conn_options) + 2; |
675 | 675 | ||
676 | if (cp->pe_data_len) | 676 | if (cp->pe_data_len) |
677 | len += cp->pe_data_len + 2; /* + Param hdr field */ | 677 | len += cp->pe_data_len + 2; /* + Param hdr field */ |
678 | if (pe_name_len) | 678 | if (pe_name_len) |
679 | len += pe_name_len + 2; | 679 | len += pe_name_len + 2; |
680 | 680 | ||
681 | /* check if there is space for this one */ | 681 | /* check if there is space for this one */ |
682 | pad = 0; | 682 | pad = 0; |
683 | buff = ms->sync_buff; | 683 | buff = ms->sync_buff; |
684 | if (buff) { | 684 | if (buff) { |
685 | m = buff->mesg; | 685 | m = buff->mesg; |
686 | pad = (4 - (size_t) buff->head) & 3; | 686 | pad = (4 - (size_t) buff->head) & 3; |
687 | /* Send buffer if it is for v0 */ | 687 | /* Send buffer if it is for v0 */ |
688 | if (buff->head + len + pad > buff->end || m->reserved) { | 688 | if (buff->head + len + pad > buff->end || m->reserved) { |
689 | sb_queue_tail(ipvs, ms); | 689 | sb_queue_tail(ipvs, ms); |
690 | ms->sync_buff = NULL; | 690 | ms->sync_buff = NULL; |
691 | buff = NULL; | 691 | buff = NULL; |
692 | pad = 0; | 692 | pad = 0; |
693 | } | 693 | } |
694 | } | 694 | } |
695 | 695 | ||
696 | if (!buff) { | 696 | if (!buff) { |
697 | buff = ip_vs_sync_buff_create(ipvs); | 697 | buff = ip_vs_sync_buff_create(ipvs); |
698 | if (!buff) { | 698 | if (!buff) { |
699 | spin_unlock_bh(&ipvs->sync_buff_lock); | 699 | spin_unlock_bh(&ipvs->sync_buff_lock); |
700 | pr_err("ip_vs_sync_buff_create failed.\n"); | 700 | pr_err("ip_vs_sync_buff_create failed.\n"); |
701 | return; | 701 | return; |
702 | } | 702 | } |
703 | ms->sync_buff = buff; | 703 | ms->sync_buff = buff; |
704 | m = buff->mesg; | 704 | m = buff->mesg; |
705 | } | 705 | } |
706 | 706 | ||
707 | p = buff->head; | 707 | p = buff->head; |
708 | buff->head += pad + len; | 708 | buff->head += pad + len; |
709 | m->size = htons(ntohs(m->size) + pad + len); | 709 | m->size = htons(ntohs(m->size) + pad + len); |
710 | /* Add any padding left by the prev. sync_conn */ | 710 | /* Add any padding left by the prev. sync_conn */ |
711 | while (pad--) | 711 | while (pad--) |
712 | *(p++) = 0; | 712 | *(p++) = 0; |
713 | 713 | ||
714 | s = (union ip_vs_sync_conn *)p; | 714 | s = (union ip_vs_sync_conn *)p; |
715 | 715 | ||
716 | /* Set message type & copy members */ | 716 | /* Set message type & copy members */ |
717 | s->v4.type = (cp->af == AF_INET6 ? STYPE_F_INET6 : 0); | 717 | s->v4.type = (cp->af == AF_INET6 ? STYPE_F_INET6 : 0); |
718 | s->v4.ver_size = htons(len & SVER_MASK); /* Version 0 */ | 718 | s->v4.ver_size = htons(len & SVER_MASK); /* Version 0 */ |
719 | s->v4.flags = htonl(cp->flags & ~IP_VS_CONN_F_HASHED); | 719 | s->v4.flags = htonl(cp->flags & ~IP_VS_CONN_F_HASHED); |
720 | s->v4.state = htons(cp->state); | 720 | s->v4.state = htons(cp->state); |
721 | s->v4.protocol = cp->protocol; | 721 | s->v4.protocol = cp->protocol; |
722 | s->v4.cport = cp->cport; | 722 | s->v4.cport = cp->cport; |
723 | s->v4.vport = cp->vport; | 723 | s->v4.vport = cp->vport; |
724 | s->v4.dport = cp->dport; | 724 | s->v4.dport = cp->dport; |
725 | s->v4.fwmark = htonl(cp->fwmark); | 725 | s->v4.fwmark = htonl(cp->fwmark); |
726 | s->v4.timeout = htonl(cp->timeout / HZ); | 726 | s->v4.timeout = htonl(cp->timeout / HZ); |
727 | m->nr_conns++; | 727 | m->nr_conns++; |
728 | 728 | ||
729 | #ifdef CONFIG_IP_VS_IPV6 | 729 | #ifdef CONFIG_IP_VS_IPV6 |
730 | if (cp->af == AF_INET6) { | 730 | if (cp->af == AF_INET6) { |
731 | p += sizeof(struct ip_vs_sync_v6); | 731 | p += sizeof(struct ip_vs_sync_v6); |
732 | s->v6.caddr = cp->caddr.in6; | 732 | s->v6.caddr = cp->caddr.in6; |
733 | s->v6.vaddr = cp->vaddr.in6; | 733 | s->v6.vaddr = cp->vaddr.in6; |
734 | s->v6.daddr = cp->daddr.in6; | 734 | s->v6.daddr = cp->daddr.in6; |
735 | } else | 735 | } else |
736 | #endif | 736 | #endif |
737 | { | 737 | { |
738 | p += sizeof(struct ip_vs_sync_v4); /* options ptr */ | 738 | p += sizeof(struct ip_vs_sync_v4); /* options ptr */ |
739 | s->v4.caddr = cp->caddr.ip; | 739 | s->v4.caddr = cp->caddr.ip; |
740 | s->v4.vaddr = cp->vaddr.ip; | 740 | s->v4.vaddr = cp->vaddr.ip; |
741 | s->v4.daddr = cp->daddr.ip; | 741 | s->v4.daddr = cp->daddr.ip; |
742 | } | 742 | } |
743 | if (cp->flags & IP_VS_CONN_F_SEQ_MASK) { | 743 | if (cp->flags & IP_VS_CONN_F_SEQ_MASK) { |
744 | *(p++) = IPVS_OPT_SEQ_DATA; | 744 | *(p++) = IPVS_OPT_SEQ_DATA; |
745 | *(p++) = sizeof(struct ip_vs_sync_conn_options); | 745 | *(p++) = sizeof(struct ip_vs_sync_conn_options); |
746 | hton_seq((struct ip_vs_seq *)p, &cp->in_seq); | 746 | hton_seq((struct ip_vs_seq *)p, &cp->in_seq); |
747 | p += sizeof(struct ip_vs_seq); | 747 | p += sizeof(struct ip_vs_seq); |
748 | hton_seq((struct ip_vs_seq *)p, &cp->out_seq); | 748 | hton_seq((struct ip_vs_seq *)p, &cp->out_seq); |
749 | p += sizeof(struct ip_vs_seq); | 749 | p += sizeof(struct ip_vs_seq); |
750 | } | 750 | } |
751 | /* Handle pe data */ | 751 | /* Handle pe data */ |
752 | if (cp->pe_data_len && cp->pe_data) { | 752 | if (cp->pe_data_len && cp->pe_data) { |
753 | *(p++) = IPVS_OPT_PE_DATA; | 753 | *(p++) = IPVS_OPT_PE_DATA; |
754 | *(p++) = cp->pe_data_len; | 754 | *(p++) = cp->pe_data_len; |
755 | memcpy(p, cp->pe_data, cp->pe_data_len); | 755 | memcpy(p, cp->pe_data, cp->pe_data_len); |
756 | p += cp->pe_data_len; | 756 | p += cp->pe_data_len; |
757 | if (pe_name_len) { | 757 | if (pe_name_len) { |
758 | /* Add PE_NAME */ | 758 | /* Add PE_NAME */ |
759 | *(p++) = IPVS_OPT_PE_NAME; | 759 | *(p++) = IPVS_OPT_PE_NAME; |
760 | *(p++) = pe_name_len; | 760 | *(p++) = pe_name_len; |
761 | memcpy(p, cp->pe->name, pe_name_len); | 761 | memcpy(p, cp->pe->name, pe_name_len); |
762 | p += pe_name_len; | 762 | p += pe_name_len; |
763 | } | 763 | } |
764 | } | 764 | } |
765 | 765 | ||
766 | spin_unlock_bh(&ipvs->sync_buff_lock); | 766 | spin_unlock_bh(&ipvs->sync_buff_lock); |
767 | 767 | ||
768 | control: | 768 | control: |
769 | /* synchronize its controller if it has one */ | 769 | /* synchronize its controller if it has one */ |
770 | cp = cp->control; | 770 | cp = cp->control; |
771 | if (!cp) | 771 | if (!cp) |
772 | return; | 772 | return; |
773 | if (cp->flags & IP_VS_CONN_F_TEMPLATE) | 773 | if (cp->flags & IP_VS_CONN_F_TEMPLATE) |
774 | pkts = atomic_add_return(1, &cp->in_pkts); | 774 | pkts = atomic_add_return(1, &cp->in_pkts); |
775 | else | 775 | else |
776 | pkts = sysctl_sync_threshold(ipvs); | 776 | pkts = sysctl_sync_threshold(ipvs); |
777 | goto sloop; | 777 | goto sloop; |
778 | } | 778 | } |
779 | 779 | ||
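The pad = (4 - (size_t) buff->head) & 3 computation in ip_vs_sync_conn() above is the standard align-up idiom: it yields the number of bytes needed to bring the write pointer to the next 4-byte boundary, and 0 when it is already aligned. A quick check with an assumed pointer value:

	size_t head = 0x1006;		/* hypothetical buff->head; 0x1006 % 4 == 2 */
	size_t pad  = (4 - head) & 3;	/* == 2 zero bytes, as written above */
	/* head % 4 == 0 -> pad 0;  == 1 -> 3;  == 2 -> 2;  == 3 -> 1 */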
780 | /* | 780 | /* |
781 | * fill_param used by version 1 | 781 | * fill_param used by version 1 |
782 | */ | 782 | */ |
783 | static inline int | 783 | static inline int |
784 | ip_vs_conn_fill_param_sync(struct net *net, int af, union ip_vs_sync_conn *sc, | 784 | ip_vs_conn_fill_param_sync(struct net *net, int af, union ip_vs_sync_conn *sc, |
785 | struct ip_vs_conn_param *p, | 785 | struct ip_vs_conn_param *p, |
786 | __u8 *pe_data, unsigned int pe_data_len, | 786 | __u8 *pe_data, unsigned int pe_data_len, |
787 | __u8 *pe_name, unsigned int pe_name_len) | 787 | __u8 *pe_name, unsigned int pe_name_len) |
788 | { | 788 | { |
789 | #ifdef CONFIG_IP_VS_IPV6 | 789 | #ifdef CONFIG_IP_VS_IPV6 |
790 | if (af == AF_INET6) | 790 | if (af == AF_INET6) |
791 | ip_vs_conn_fill_param(net, af, sc->v6.protocol, | 791 | ip_vs_conn_fill_param(net, af, sc->v6.protocol, |
792 | (const union nf_inet_addr *)&sc->v6.caddr, | 792 | (const union nf_inet_addr *)&sc->v6.caddr, |
793 | sc->v6.cport, | 793 | sc->v6.cport, |
794 | (const union nf_inet_addr *)&sc->v6.vaddr, | 794 | (const union nf_inet_addr *)&sc->v6.vaddr, |
795 | sc->v6.vport, p); | 795 | sc->v6.vport, p); |
796 | else | 796 | else |
797 | #endif | 797 | #endif |
798 | ip_vs_conn_fill_param(net, af, sc->v4.protocol, | 798 | ip_vs_conn_fill_param(net, af, sc->v4.protocol, |
799 | (const union nf_inet_addr *)&sc->v4.caddr, | 799 | (const union nf_inet_addr *)&sc->v4.caddr, |
800 | sc->v4.cport, | 800 | sc->v4.cport, |
801 | (const union nf_inet_addr *)&sc->v4.vaddr, | 801 | (const union nf_inet_addr *)&sc->v4.vaddr, |
802 | sc->v4.vport, p); | 802 | sc->v4.vport, p); |
803 | /* Handle pe data */ | 803 | /* Handle pe data */ |
804 | if (pe_data_len) { | 804 | if (pe_data_len) { |
805 | if (pe_name_len) { | 805 | if (pe_name_len) { |
806 | char buff[IP_VS_PENAME_MAXLEN+1]; | 806 | char buff[IP_VS_PENAME_MAXLEN+1]; |
807 | 807 | ||
808 | memcpy(buff, pe_name, pe_name_len); | 808 | memcpy(buff, pe_name, pe_name_len); |
809 | buff[pe_name_len]=0; | 809 | buff[pe_name_len]=0; |
810 | p->pe = __ip_vs_pe_getbyname(buff); | 810 | p->pe = __ip_vs_pe_getbyname(buff); |
811 | if (!p->pe) { | 811 | if (!p->pe) { |
812 | IP_VS_DBG(3, "BACKUP, no %s engine found/loaded\n", | 812 | IP_VS_DBG(3, "BACKUP, no %s engine found/loaded\n", |
813 | buff); | 813 | buff); |
814 | return 1; | 814 | return 1; |
815 | } | 815 | } |
816 | } else { | 816 | } else { |
817 | IP_VS_ERR_RL("BACKUP, Invalid PE parameters\n"); | 817 | IP_VS_ERR_RL("BACKUP, Invalid PE parameters\n"); |
818 | return 1; | 818 | return 1; |
819 | } | 819 | } |
820 | 820 | ||
821 | p->pe_data = kmemdup(pe_data, pe_data_len, GFP_ATOMIC); | 821 | p->pe_data = kmemdup(pe_data, pe_data_len, GFP_ATOMIC); |
822 | if (!p->pe_data) { | 822 | if (!p->pe_data) { |
823 | if (p->pe->module) | 823 | if (p->pe->module) |
824 | module_put(p->pe->module); | 824 | module_put(p->pe->module); |
825 | return -ENOMEM; | 825 | return -ENOMEM; |
826 | } | 826 | } |
827 | p->pe_data_len = pe_data_len; | 827 | p->pe_data_len = pe_data_len; |
828 | } | 828 | } |
829 | return 0; | 829 | return 0; |
830 | } | 830 | } |
831 | 831 | ||
832 | /* | 832 | /* |
833 | * Connection Add / Update. | 833 | * Connection Add / Update. |
834 | * Common for version 0 and 1 reception of backup sync_conns. | 834 | * Common for version 0 and 1 reception of backup sync_conns. |
835 | * Param: ... | 835 | * Param: ... |
836 | * timeout is in sec. | 836 | * timeout is in sec. |
837 | */ | 837 | */ |
838 | static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param, | 838 | static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param, |
839 | unsigned int flags, unsigned int state, | 839 | unsigned int flags, unsigned int state, |
840 | unsigned int protocol, unsigned int type, | 840 | unsigned int protocol, unsigned int type, |
841 | const union nf_inet_addr *daddr, __be16 dport, | 841 | const union nf_inet_addr *daddr, __be16 dport, |
842 | unsigned long timeout, __u32 fwmark, | 842 | unsigned long timeout, __u32 fwmark, |
843 | struct ip_vs_sync_conn_options *opt) | 843 | struct ip_vs_sync_conn_options *opt) |
844 | { | 844 | { |
845 | struct ip_vs_dest *dest; | 845 | struct ip_vs_dest *dest; |
846 | struct ip_vs_conn *cp; | 846 | struct ip_vs_conn *cp; |
847 | struct netns_ipvs *ipvs = net_ipvs(net); | 847 | struct netns_ipvs *ipvs = net_ipvs(net); |
848 | 848 | ||
849 | if (!(flags & IP_VS_CONN_F_TEMPLATE)) | 849 | if (!(flags & IP_VS_CONN_F_TEMPLATE)) |
850 | cp = ip_vs_conn_in_get(param); | 850 | cp = ip_vs_conn_in_get(param); |
851 | else | 851 | else |
852 | cp = ip_vs_ct_in_get(param); | 852 | cp = ip_vs_ct_in_get(param); |
853 | 853 | ||
854 | if (cp) { | 854 | if (cp) { |
855 | /* Free pe_data */ | 855 | /* Free pe_data */ |
856 | kfree(param->pe_data); | 856 | kfree(param->pe_data); |
857 | 857 | ||
858 | dest = cp->dest; | 858 | dest = cp->dest; |
859 | spin_lock_bh(&cp->lock); | 859 | spin_lock_bh(&cp->lock); |
860 | if ((cp->flags ^ flags) & IP_VS_CONN_F_INACTIVE && | 860 | if ((cp->flags ^ flags) & IP_VS_CONN_F_INACTIVE && |
861 | !(flags & IP_VS_CONN_F_TEMPLATE) && dest) { | 861 | !(flags & IP_VS_CONN_F_TEMPLATE) && dest) { |
862 | if (flags & IP_VS_CONN_F_INACTIVE) { | 862 | if (flags & IP_VS_CONN_F_INACTIVE) { |
863 | atomic_dec(&dest->activeconns); | 863 | atomic_dec(&dest->activeconns); |
864 | atomic_inc(&dest->inactconns); | 864 | atomic_inc(&dest->inactconns); |
865 | } else { | 865 | } else { |
866 | atomic_inc(&dest->activeconns); | 866 | atomic_inc(&dest->activeconns); |
867 | atomic_dec(&dest->inactconns); | 867 | atomic_dec(&dest->inactconns); |
868 | } | 868 | } |
869 | } | 869 | } |
870 | flags &= IP_VS_CONN_F_BACKUP_UPD_MASK; | 870 | flags &= IP_VS_CONN_F_BACKUP_UPD_MASK; |
871 | flags |= cp->flags & ~IP_VS_CONN_F_BACKUP_UPD_MASK; | 871 | flags |= cp->flags & ~IP_VS_CONN_F_BACKUP_UPD_MASK; |
872 | cp->flags = flags; | 872 | cp->flags = flags; |
873 | spin_unlock_bh(&cp->lock); | 873 | spin_unlock_bh(&cp->lock); |
874 | if (!dest) | 874 | if (!dest) |
875 | ip_vs_try_bind_dest(cp); | 875 | ip_vs_try_bind_dest(cp); |
876 | } else { | 876 | } else { |
877 | /* | 877 | /* |
878 | * Find the appropriate destination for the connection. | 878 | * Find the appropriate destination for the connection. |
879 | * If it is not found the connection will remain unbound | 879 | * If it is not found the connection will remain unbound |
880 | * but still handled. | 880 | * but still handled. |
881 | */ | 881 | */ |
882 | rcu_read_lock(); | 882 | rcu_read_lock(); |
883 | dest = ip_vs_find_dest(net, type, daddr, dport, param->vaddr, | 883 | dest = ip_vs_find_dest(net, type, daddr, dport, param->vaddr, |
884 | param->vport, protocol, fwmark, flags); | 884 | param->vport, protocol, fwmark, flags); |
885 | 885 | ||
886 | cp = ip_vs_conn_new(param, daddr, dport, flags, dest, fwmark); | 886 | cp = ip_vs_conn_new(param, daddr, dport, flags, dest, fwmark); |
887 | rcu_read_unlock(); | 887 | rcu_read_unlock(); |
888 | if (!cp) { | 888 | if (!cp) { |
889 | if (param->pe_data) | 889 | if (param->pe_data) |
890 | kfree(param->pe_data); | 890 | kfree(param->pe_data); |
891 | IP_VS_DBG(2, "BACKUP, add new conn. failed\n"); | 891 | IP_VS_DBG(2, "BACKUP, add new conn. failed\n"); |
892 | return; | 892 | return; |
893 | } | 893 | } |
894 | } | 894 | } |
895 | 895 | ||
896 | if (opt) | 896 | if (opt) |
897 | memcpy(&cp->in_seq, opt, sizeof(*opt)); | 897 | memcpy(&cp->in_seq, opt, sizeof(*opt)); |
898 | atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs)); | 898 | atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs)); |
899 | cp->state = state; | 899 | cp->state = state; |
900 | cp->old_state = cp->state; | 900 | cp->old_state = cp->state; |
901 | /* | 901 | /* |
902 | * For Version 0 message style: | 902 | * For Version 0 message style: |
903 | * - not possible to recover the right timeout for templates | 903 | * - not possible to recover the right timeout for templates |
904 | * - cannot find the right fwmark | 904 | * - cannot find the right fwmark |
905 | * virtual service. If needed, we can do it for | 905 | * virtual service. If needed, we can do it for |
906 | * non-fwmark persistent services. | 906 | * non-fwmark persistent services. |
907 | * For Version 1 message style: | 907 | * For Version 1 message style: |
908 | * - no problem. | 908 | * - no problem. |
909 | */ | 909 | */ |
910 | if (timeout) { | 910 | if (timeout) { |
911 | if (timeout > MAX_SCHEDULE_TIMEOUT / HZ) | 911 | if (timeout > MAX_SCHEDULE_TIMEOUT / HZ) |
912 | timeout = MAX_SCHEDULE_TIMEOUT / HZ; | 912 | timeout = MAX_SCHEDULE_TIMEOUT / HZ; |
913 | cp->timeout = timeout*HZ; | 913 | cp->timeout = timeout*HZ; |
914 | } else { | 914 | } else { |
915 | struct ip_vs_proto_data *pd; | 915 | struct ip_vs_proto_data *pd; |
916 | 916 | ||
917 | pd = ip_vs_proto_data_get(net, protocol); | 917 | pd = ip_vs_proto_data_get(net, protocol); |
918 | if (!(flags & IP_VS_CONN_F_TEMPLATE) && pd && pd->timeout_table) | 918 | if (!(flags & IP_VS_CONN_F_TEMPLATE) && pd && pd->timeout_table) |
919 | cp->timeout = pd->timeout_table[state]; | 919 | cp->timeout = pd->timeout_table[state]; |
920 | else | 920 | else |
921 | cp->timeout = (3*60*HZ); | 921 | cp->timeout = (3*60*HZ); |
922 | } | 922 | } |
923 | ip_vs_conn_put(cp); | 923 | ip_vs_conn_put(cp); |
924 | } | 924 | } |
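
To make the timeout selection at the end of ip_vs_proc_conn() concrete, here is a worked sketch of the three cases; the numeric value is illustrative, only the branches come from the code above.

    /* Timeout selection above, sketched with illustrative values:
     *
     *   v1 message carrying timeout = 900 (seconds)
     *       -> cp->timeout = 900 * HZ, clamped to MAX_SCHEDULE_TIMEOUT / HZ
     *   v0 message (timeout sent as zero), protocol data available
     *       -> cp->timeout = pd->timeout_table[state]
     *   template entry, or no protocol timeout table
     *       -> cp->timeout = 3 * 60 * HZ (three-minute fallback)
     */
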
925 | 925 | ||
926 | /* | 926 | /* |
927 | * Process received multicast message for Version 0 | 927 | * Process received multicast message for Version 0 |
928 | */ | 928 | */ |
929 | static void ip_vs_process_message_v0(struct net *net, const char *buffer, | 929 | static void ip_vs_process_message_v0(struct net *net, const char *buffer, |
930 | const size_t buflen) | 930 | const size_t buflen) |
931 | { | 931 | { |
932 | struct ip_vs_sync_mesg_v0 *m = (struct ip_vs_sync_mesg_v0 *)buffer; | 932 | struct ip_vs_sync_mesg_v0 *m = (struct ip_vs_sync_mesg_v0 *)buffer; |
933 | struct ip_vs_sync_conn_v0 *s; | 933 | struct ip_vs_sync_conn_v0 *s; |
934 | struct ip_vs_sync_conn_options *opt; | 934 | struct ip_vs_sync_conn_options *opt; |
935 | struct ip_vs_protocol *pp; | 935 | struct ip_vs_protocol *pp; |
936 | struct ip_vs_conn_param param; | 936 | struct ip_vs_conn_param param; |
937 | char *p; | 937 | char *p; |
938 | int i; | 938 | int i; |
939 | 939 | ||
940 | p = (char *)buffer + sizeof(struct ip_vs_sync_mesg_v0); | 940 | p = (char *)buffer + sizeof(struct ip_vs_sync_mesg_v0); |
941 | for (i=0; i<m->nr_conns; i++) { | 941 | for (i=0; i<m->nr_conns; i++) { |
942 | unsigned int flags, state; | 942 | unsigned int flags, state; |
943 | 943 | ||
944 | if (p + SIMPLE_CONN_SIZE > buffer+buflen) { | 944 | if (p + SIMPLE_CONN_SIZE > buffer+buflen) { |
945 | IP_VS_ERR_RL("BACKUP v0, bogus conn\n"); | 945 | IP_VS_ERR_RL("BACKUP v0, bogus conn\n"); |
946 | return; | 946 | return; |
947 | } | 947 | } |
948 | s = (struct ip_vs_sync_conn_v0 *) p; | 948 | s = (struct ip_vs_sync_conn_v0 *) p; |
949 | flags = ntohs(s->flags) | IP_VS_CONN_F_SYNC; | 949 | flags = ntohs(s->flags) | IP_VS_CONN_F_SYNC; |
950 | flags &= ~IP_VS_CONN_F_HASHED; | 950 | flags &= ~IP_VS_CONN_F_HASHED; |
951 | if (flags & IP_VS_CONN_F_SEQ_MASK) { | 951 | if (flags & IP_VS_CONN_F_SEQ_MASK) { |
952 | opt = (struct ip_vs_sync_conn_options *)&s[1]; | 952 | opt = (struct ip_vs_sync_conn_options *)&s[1]; |
953 | p += FULL_CONN_SIZE; | 953 | p += FULL_CONN_SIZE; |
954 | if (p > buffer+buflen) { | 954 | if (p > buffer+buflen) { |
955 | IP_VS_ERR_RL("BACKUP v0, Dropping buffer, bogus conn options\n"); | 955 | IP_VS_ERR_RL("BACKUP v0, Dropping buffer, bogus conn options\n"); |
956 | return; | 956 | return; |
957 | } | 957 | } |
958 | } else { | 958 | } else { |
959 | opt = NULL; | 959 | opt = NULL; |
960 | p += SIMPLE_CONN_SIZE; | 960 | p += SIMPLE_CONN_SIZE; |
961 | } | 961 | } |
962 | 962 | ||
963 | state = ntohs(s->state); | 963 | state = ntohs(s->state); |
964 | if (!(flags & IP_VS_CONN_F_TEMPLATE)) { | 964 | if (!(flags & IP_VS_CONN_F_TEMPLATE)) { |
965 | pp = ip_vs_proto_get(s->protocol); | 965 | pp = ip_vs_proto_get(s->protocol); |
966 | if (!pp) { | 966 | if (!pp) { |
967 | IP_VS_DBG(2, "BACKUP v0, Unsupported protocol %u\n", | 967 | IP_VS_DBG(2, "BACKUP v0, Unsupported protocol %u\n", |
968 | s->protocol); | 968 | s->protocol); |
969 | continue; | 969 | continue; |
970 | } | 970 | } |
971 | if (state >= pp->num_states) { | 971 | if (state >= pp->num_states) { |
972 | IP_VS_DBG(2, "BACKUP v0, Invalid %s state %u\n", | 972 | IP_VS_DBG(2, "BACKUP v0, Invalid %s state %u\n", |
973 | pp->name, state); | 973 | pp->name, state); |
974 | continue; | 974 | continue; |
975 | } | 975 | } |
976 | } else { | 976 | } else { |
977 | /* protocol in templates is not used for state/timeout */ | 977 | /* protocol in templates is not used for state/timeout */ |
978 | if (state > 0) { | 978 | if (state > 0) { |
979 | IP_VS_DBG(2, "BACKUP v0, Invalid template state %u\n", | 979 | IP_VS_DBG(2, "BACKUP v0, Invalid template state %u\n", |
980 | state); | 980 | state); |
981 | state = 0; | 981 | state = 0; |
982 | } | 982 | } |
983 | } | 983 | } |
984 | 984 | ||
985 | ip_vs_conn_fill_param(net, AF_INET, s->protocol, | 985 | ip_vs_conn_fill_param(net, AF_INET, s->protocol, |
986 | (const union nf_inet_addr *)&s->caddr, | 986 | (const union nf_inet_addr *)&s->caddr, |
987 | s->cport, | 987 | s->cport, |
988 | (const union nf_inet_addr *)&s->vaddr, | 988 | (const union nf_inet_addr *)&s->vaddr, |
989 | s->vport, ¶m); | 989 | s->vport, ¶m); |
990 | 990 | ||
991 | /* Send timeout as Zero */ | 991 | /* Send timeout as Zero */ |
992 | ip_vs_proc_conn(net, ¶m, flags, state, s->protocol, AF_INET, | 992 | ip_vs_proc_conn(net, ¶m, flags, state, s->protocol, AF_INET, |
993 | (union nf_inet_addr *)&s->daddr, s->dport, | 993 | (union nf_inet_addr *)&s->daddr, s->dport, |
994 | 0, 0, opt); | 994 | 0, 0, opt); |
995 | } | 995 | } |
996 | } | 996 | } |
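
For orientation, the byte layout the v0 parser above walks; this only restates the SIMPLE_CONN_SIZE/FULL_CONN_SIZE bounds checks and defines nothing new.

    /* v0 sync buffer layout, as walked above:
     *
     *   struct ip_vs_sync_mesg_v0           header; m->nr_conns records follow
     *   per record:
     *     struct ip_vs_sync_conn_v0         SIMPLE_CONN_SIZE bytes, plus
     *     struct ip_vs_sync_conn_options    only when IP_VS_CONN_F_SEQ_MASK
     *                                       is set (record is FULL_CONN_SIZE)
     */
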
997 | 997 | ||
998 | /* | 998 | /* |
999 | * Handle options | 999 | * Handle options |
1000 | */ | 1000 | */ |
1001 | static inline int ip_vs_proc_seqopt(__u8 *p, unsigned int plen, | 1001 | static inline int ip_vs_proc_seqopt(__u8 *p, unsigned int plen, |
1002 | __u32 *opt_flags, | 1002 | __u32 *opt_flags, |
1003 | struct ip_vs_sync_conn_options *opt) | 1003 | struct ip_vs_sync_conn_options *opt) |
1004 | { | 1004 | { |
1005 | struct ip_vs_sync_conn_options *topt; | 1005 | struct ip_vs_sync_conn_options *topt; |
1006 | 1006 | ||
1007 | topt = (struct ip_vs_sync_conn_options *)p; | 1007 | topt = (struct ip_vs_sync_conn_options *)p; |
1008 | 1008 | ||
1009 | if (plen != sizeof(struct ip_vs_sync_conn_options)) { | 1009 | if (plen != sizeof(struct ip_vs_sync_conn_options)) { |
1010 | IP_VS_DBG(2, "BACKUP, bogus conn options length\n"); | 1010 | IP_VS_DBG(2, "BACKUP, bogus conn options length\n"); |
1011 | return -EINVAL; | 1011 | return -EINVAL; |
1012 | } | 1012 | } |
1013 | if (*opt_flags & IPVS_OPT_F_SEQ_DATA) { | 1013 | if (*opt_flags & IPVS_OPT_F_SEQ_DATA) { |
1014 | IP_VS_DBG(2, "BACKUP, conn options found twice\n"); | 1014 | IP_VS_DBG(2, "BACKUP, conn options found twice\n"); |
1015 | return -EINVAL; | 1015 | return -EINVAL; |
1016 | } | 1016 | } |
1017 | ntoh_seq(&topt->in_seq, &opt->in_seq); | 1017 | ntoh_seq(&topt->in_seq, &opt->in_seq); |
1018 | ntoh_seq(&topt->out_seq, &opt->out_seq); | 1018 | ntoh_seq(&topt->out_seq, &opt->out_seq); |
1019 | *opt_flags |= IPVS_OPT_F_SEQ_DATA; | 1019 | *opt_flags |= IPVS_OPT_F_SEQ_DATA; |
1020 | return 0; | 1020 | return 0; |
1021 | } | 1021 | } |
1022 | 1022 | ||
1023 | static int ip_vs_proc_str(__u8 *p, unsigned int plen, unsigned int *data_len, | 1023 | static int ip_vs_proc_str(__u8 *p, unsigned int plen, unsigned int *data_len, |
1024 | __u8 **data, unsigned int maxlen, | 1024 | __u8 **data, unsigned int maxlen, |
1025 | __u32 *opt_flags, __u32 flag) | 1025 | __u32 *opt_flags, __u32 flag) |
1026 | { | 1026 | { |
1027 | if (plen > maxlen) { | 1027 | if (plen > maxlen) { |
1028 | IP_VS_DBG(2, "BACKUP, bogus par.data len > %d\n", maxlen); | 1028 | IP_VS_DBG(2, "BACKUP, bogus par.data len > %d\n", maxlen); |
1029 | return -EINVAL; | 1029 | return -EINVAL; |
1030 | } | 1030 | } |
1031 | if (*opt_flags & flag) { | 1031 | if (*opt_flags & flag) { |
1032 | IP_VS_DBG(2, "BACKUP, Par.data found twice 0x%x\n", flag); | 1032 | IP_VS_DBG(2, "BACKUP, Par.data found twice 0x%x\n", flag); |
1033 | return -EINVAL; | 1033 | return -EINVAL; |
1034 | } | 1034 | } |
1035 | *data_len = plen; | 1035 | *data_len = plen; |
1036 | *data = p; | 1036 | *data = p; |
1037 | *opt_flags |= flag; | 1037 | *opt_flags |= flag; |
1038 | return 0; | 1038 | return 0; |
1039 | } | 1039 | } |
1040 | /* | 1040 | /* |
1041 | * Process a Version 1 sync. connection | 1041 | * Process a Version 1 sync. connection |
1042 | */ | 1042 | */ |
1043 | static inline int ip_vs_proc_sync_conn(struct net *net, __u8 *p, __u8 *msg_end) | 1043 | static inline int ip_vs_proc_sync_conn(struct net *net, __u8 *p, __u8 *msg_end) |
1044 | { | 1044 | { |
1045 | struct ip_vs_sync_conn_options opt; | 1045 | struct ip_vs_sync_conn_options opt; |
1046 | union ip_vs_sync_conn *s; | 1046 | union ip_vs_sync_conn *s; |
1047 | struct ip_vs_protocol *pp; | 1047 | struct ip_vs_protocol *pp; |
1048 | struct ip_vs_conn_param param; | 1048 | struct ip_vs_conn_param param; |
1049 | __u32 flags; | 1049 | __u32 flags; |
1050 | unsigned int af, state, pe_data_len=0, pe_name_len=0; | 1050 | unsigned int af, state, pe_data_len=0, pe_name_len=0; |
1051 | __u8 *pe_data=NULL, *pe_name=NULL; | 1051 | __u8 *pe_data=NULL, *pe_name=NULL; |
1052 | __u32 opt_flags=0; | 1052 | __u32 opt_flags=0; |
1053 | int retc=0; | 1053 | int retc=0; |
1054 | 1054 | ||
1055 | s = (union ip_vs_sync_conn *) p; | 1055 | s = (union ip_vs_sync_conn *) p; |
1056 | 1056 | ||
1057 | if (s->v6.type & STYPE_F_INET6) { | 1057 | if (s->v6.type & STYPE_F_INET6) { |
1058 | #ifdef CONFIG_IP_VS_IPV6 | 1058 | #ifdef CONFIG_IP_VS_IPV6 |
1059 | af = AF_INET6; | 1059 | af = AF_INET6; |
1060 | p += sizeof(struct ip_vs_sync_v6); | 1060 | p += sizeof(struct ip_vs_sync_v6); |
1061 | #else | 1061 | #else |
1062 | IP_VS_DBG(3, "BACKUP, IPv6 msg received, but IPVS is not compiled for IPv6\n"); | 1062 | IP_VS_DBG(3, "BACKUP, IPv6 msg received, but IPVS is not compiled for IPv6\n"); |
1063 | retc = 10; | 1063 | retc = 10; |
1064 | goto out; | 1064 | goto out; |
1065 | #endif | 1065 | #endif |
1066 | } else if (!s->v4.type) { | 1066 | } else if (!s->v4.type) { |
1067 | af = AF_INET; | 1067 | af = AF_INET; |
1068 | p += sizeof(struct ip_vs_sync_v4); | 1068 | p += sizeof(struct ip_vs_sync_v4); |
1069 | } else { | 1069 | } else { |
1070 | return -10; | 1070 | return -10; |
1071 | } | 1071 | } |
1072 | if (p > msg_end) | 1072 | if (p > msg_end) |
1073 | return -20; | 1073 | return -20; |
1074 | 1074 | ||
1075 | /* Process optional params; check Type & Len. */ | 1075 | /* Process optional params; check Type & Len. */ |
1076 | while (p < msg_end) { | 1076 | while (p < msg_end) { |
1077 | int ptype; | 1077 | int ptype; |
1078 | int plen; | 1078 | int plen; |
1079 | 1079 | ||
1080 | if (p+2 > msg_end) | 1080 | if (p+2 > msg_end) |
1081 | return -30; | 1081 | return -30; |
1082 | ptype = *(p++); | 1082 | ptype = *(p++); |
1083 | plen = *(p++); | 1083 | plen = *(p++); |
1084 | 1084 | ||
1085 | if (!plen || ((p + plen) > msg_end)) | 1085 | if (!plen || ((p + plen) > msg_end)) |
1086 | return -40; | 1086 | return -40; |
1087 | /* Handle seq option; p points at param data */ | 1087 | /* Handle seq option; p points at param data */ |
1088 | switch (ptype & ~IPVS_OPT_F_PARAM) { | 1088 | switch (ptype & ~IPVS_OPT_F_PARAM) { |
1089 | case IPVS_OPT_SEQ_DATA: | 1089 | case IPVS_OPT_SEQ_DATA: |
1090 | if (ip_vs_proc_seqopt(p, plen, &opt_flags, &opt)) | 1090 | if (ip_vs_proc_seqopt(p, plen, &opt_flags, &opt)) |
1091 | return -50; | 1091 | return -50; |
1092 | break; | 1092 | break; |
1093 | 1093 | ||
1094 | case IPVS_OPT_PE_DATA: | 1094 | case IPVS_OPT_PE_DATA: |
1095 | if (ip_vs_proc_str(p, plen, &pe_data_len, &pe_data, | 1095 | if (ip_vs_proc_str(p, plen, &pe_data_len, &pe_data, |
1096 | IP_VS_PEDATA_MAXLEN, &opt_flags, | 1096 | IP_VS_PEDATA_MAXLEN, &opt_flags, |
1097 | IPVS_OPT_F_PE_DATA)) | 1097 | IPVS_OPT_F_PE_DATA)) |
1098 | return -60; | 1098 | return -60; |
1099 | break; | 1099 | break; |
1100 | 1100 | ||
1101 | case IPVS_OPT_PE_NAME: | 1101 | case IPVS_OPT_PE_NAME: |
1102 | if (ip_vs_proc_str(p, plen, &pe_name_len, &pe_name, | 1102 | if (ip_vs_proc_str(p, plen, &pe_name_len, &pe_name, |
1103 | IP_VS_PENAME_MAXLEN, &opt_flags, | 1103 | IP_VS_PENAME_MAXLEN, &opt_flags, |
1104 | IPVS_OPT_F_PE_NAME)) | 1104 | IPVS_OPT_F_PE_NAME)) |
1105 | return -70; | 1105 | return -70; |
1106 | break; | 1106 | break; |
1107 | 1107 | ||
1108 | default: | 1108 | default: |
1109 | /* Param data mandatory? */ | 1109 | /* Param data mandatory? */ |
1110 | if (!(ptype & IPVS_OPT_F_PARAM)) { | 1110 | if (!(ptype & IPVS_OPT_F_PARAM)) { |
1111 | IP_VS_DBG(3, "BACKUP, Unknown mandatory param %d found\n", | 1111 | IP_VS_DBG(3, "BACKUP, Unknown mandatory param %d found\n", |
1112 | ptype & ~IPVS_OPT_F_PARAM); | 1112 | ptype & ~IPVS_OPT_F_PARAM); |
1113 | retc = 20; | 1113 | retc = 20; |
1114 | goto out; | 1114 | goto out; |
1115 | } | 1115 | } |
1116 | } | 1116 | } |
1117 | p += plen; /* Next option */ | 1117 | p += plen; /* Next option */ |
1118 | } | 1118 | } |
1119 | 1119 | ||
1120 | /* Get flags and mask off unsupported ones */ | 1120 | /* Get flags and mask off unsupported ones */ |
1121 | flags = ntohl(s->v4.flags) & IP_VS_CONN_F_BACKUP_MASK; | 1121 | flags = ntohl(s->v4.flags) & IP_VS_CONN_F_BACKUP_MASK; |
1122 | flags |= IP_VS_CONN_F_SYNC; | 1122 | flags |= IP_VS_CONN_F_SYNC; |
1123 | state = ntohs(s->v4.state); | 1123 | state = ntohs(s->v4.state); |
1124 | 1124 | ||
1125 | if (!(flags & IP_VS_CONN_F_TEMPLATE)) { | 1125 | if (!(flags & IP_VS_CONN_F_TEMPLATE)) { |
1126 | pp = ip_vs_proto_get(s->v4.protocol); | 1126 | pp = ip_vs_proto_get(s->v4.protocol); |
1127 | if (!pp) { | 1127 | if (!pp) { |
1128 | IP_VS_DBG(3, "BACKUP, Unsupported protocol %u\n", | 1128 | IP_VS_DBG(3, "BACKUP, Unsupported protocol %u\n", |
1129 | s->v4.protocol); | 1129 | s->v4.protocol); |
1130 | retc = 30; | 1130 | retc = 30; |
1131 | goto out; | 1131 | goto out; |
1132 | } | 1132 | } |
1133 | if (state >= pp->num_states) { | 1133 | if (state >= pp->num_states) { |
1134 | IP_VS_DBG(3, "BACKUP, Invalid %s state %u\n", | 1134 | IP_VS_DBG(3, "BACKUP, Invalid %s state %u\n", |
1135 | pp->name, state); | 1135 | pp->name, state); |
1136 | retc = 40; | 1136 | retc = 40; |
1137 | goto out; | 1137 | goto out; |
1138 | } | 1138 | } |
1139 | } else { | 1139 | } else { |
1140 | /* protocol in templates is not used for state/timeout */ | 1140 | /* protocol in templates is not used for state/timeout */ |
1141 | if (state > 0) { | 1141 | if (state > 0) { |
1142 | IP_VS_DBG(3, "BACKUP, Invalid template state %u\n", | 1142 | IP_VS_DBG(3, "BACKUP, Invalid template state %u\n", |
1143 | state); | 1143 | state); |
1144 | state = 0; | 1144 | state = 0; |
1145 | } | 1145 | } |
1146 | } | 1146 | } |
1147 | if (ip_vs_conn_fill_param_sync(net, af, s, ¶m, pe_data, | 1147 | if (ip_vs_conn_fill_param_sync(net, af, s, ¶m, pe_data, |
1148 | pe_data_len, pe_name, pe_name_len)) { | 1148 | pe_data_len, pe_name, pe_name_len)) { |
1149 | retc = 50; | 1149 | retc = 50; |
1150 | goto out; | 1150 | goto out; |
1151 | } | 1151 | } |
1152 | /* If only IPv4 is compiled in, just silently skip IPv6 */ | 1152 | /* If only IPv4 is compiled in, just silently skip IPv6 */ |
1153 | if (af == AF_INET) | 1153 | if (af == AF_INET) |
1154 | ip_vs_proc_conn(net, ¶m, flags, state, s->v4.protocol, af, | 1154 | ip_vs_proc_conn(net, ¶m, flags, state, s->v4.protocol, af, |
1155 | (union nf_inet_addr *)&s->v4.daddr, s->v4.dport, | 1155 | (union nf_inet_addr *)&s->v4.daddr, s->v4.dport, |
1156 | ntohl(s->v4.timeout), ntohl(s->v4.fwmark), | 1156 | ntohl(s->v4.timeout), ntohl(s->v4.fwmark), |
1157 | (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL) | 1157 | (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL) |
1158 | ); | 1158 | ); |
1159 | #ifdef CONFIG_IP_VS_IPV6 | 1159 | #ifdef CONFIG_IP_VS_IPV6 |
1160 | else | 1160 | else |
1161 | ip_vs_proc_conn(net, ¶m, flags, state, s->v6.protocol, af, | 1161 | ip_vs_proc_conn(net, ¶m, flags, state, s->v6.protocol, af, |
1162 | (union nf_inet_addr *)&s->v6.daddr, s->v6.dport, | 1162 | (union nf_inet_addr *)&s->v6.daddr, s->v6.dport, |
1163 | ntohl(s->v6.timeout), ntohl(s->v6.fwmark), | 1163 | ntohl(s->v6.timeout), ntohl(s->v6.fwmark), |
1164 | (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL) | 1164 | (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL) |
1165 | ); | 1165 | ); |
1166 | #endif | 1166 | #endif |
1167 | return 0; | 1167 | return 0; |
1168 | /* Error exit */ | 1168 | /* Error exit */ |
1169 | out: | 1169 | out: |
1170 | IP_VS_DBG(2, "BACKUP, Single msg dropped err:%d\n", retc); | 1170 | IP_VS_DBG(2, "BACKUP, Single msg dropped err:%d\n", retc); |
1171 | return retc; | 1171 | return retc; |
1172 | 1172 | ||
1173 | } | 1173 | } |
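
The sign of the return value matters to the caller; summarizing the convention as it can be read from this function and from ip_vs_process_message() below:

    /* ip_vs_proc_sync_conn() return convention:
     *   0   - entry processed
     *   > 0 - this single entry dropped (logged above), parsing continues
     *   < 0 - malformed stream, the caller drops the whole buffer
     */
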
1174 | /* | 1174 | /* |
1175 | * Process received multicast message and create the corresponding | 1175 | * Process received multicast message and create the corresponding |
1176 | * ip_vs_conn entries. | 1176 | * ip_vs_conn entries. |
1177 | * Handles Version 0 & 1 | 1177 | * Handles Version 0 & 1 |
1178 | */ | 1178 | */ |
1179 | static void ip_vs_process_message(struct net *net, __u8 *buffer, | 1179 | static void ip_vs_process_message(struct net *net, __u8 *buffer, |
1180 | const size_t buflen) | 1180 | const size_t buflen) |
1181 | { | 1181 | { |
1182 | struct netns_ipvs *ipvs = net_ipvs(net); | 1182 | struct netns_ipvs *ipvs = net_ipvs(net); |
1183 | struct ip_vs_sync_mesg *m2 = (struct ip_vs_sync_mesg *)buffer; | 1183 | struct ip_vs_sync_mesg *m2 = (struct ip_vs_sync_mesg *)buffer; |
1184 | __u8 *p, *msg_end; | 1184 | __u8 *p, *msg_end; |
1185 | int i, nr_conns; | 1185 | int i, nr_conns; |
1186 | 1186 | ||
1187 | if (buflen < sizeof(struct ip_vs_sync_mesg_v0)) { | 1187 | if (buflen < sizeof(struct ip_vs_sync_mesg_v0)) { |
1188 | IP_VS_DBG(2, "BACKUP, message header too short\n"); | 1188 | IP_VS_DBG(2, "BACKUP, message header too short\n"); |
1189 | return; | 1189 | return; |
1190 | } | 1190 | } |
1191 | 1191 | ||
1192 | if (buflen != ntohs(m2->size)) { | 1192 | if (buflen != ntohs(m2->size)) { |
1193 | IP_VS_DBG(2, "BACKUP, bogus message size\n"); | 1193 | IP_VS_DBG(2, "BACKUP, bogus message size\n"); |
1194 | return; | 1194 | return; |
1195 | } | 1195 | } |
1196 | /* SyncID sanity check */ | 1196 | /* SyncID sanity check */ |
1197 | if (ipvs->backup_syncid != 0 && m2->syncid != ipvs->backup_syncid) { | 1197 | if (ipvs->backup_syncid != 0 && m2->syncid != ipvs->backup_syncid) { |
1198 | IP_VS_DBG(7, "BACKUP, Ignoring syncid = %d\n", m2->syncid); | 1198 | IP_VS_DBG(7, "BACKUP, Ignoring syncid = %d\n", m2->syncid); |
1199 | return; | 1199 | return; |
1200 | } | 1200 | } |
1201 | /* Handle version 1 message */ | 1201 | /* Handle version 1 message */ |
1202 | if ((m2->version == SYNC_PROTO_VER) && (m2->reserved == 0) | 1202 | if ((m2->version == SYNC_PROTO_VER) && (m2->reserved == 0) |
1203 | && (m2->spare == 0)) { | 1203 | && (m2->spare == 0)) { |
1204 | 1204 | ||
1205 | msg_end = buffer + sizeof(struct ip_vs_sync_mesg); | 1205 | msg_end = buffer + sizeof(struct ip_vs_sync_mesg); |
1206 | nr_conns = m2->nr_conns; | 1206 | nr_conns = m2->nr_conns; |
1207 | 1207 | ||
1208 | for (i=0; i<nr_conns; i++) { | 1208 | for (i=0; i<nr_conns; i++) { |
1209 | union ip_vs_sync_conn *s; | 1209 | union ip_vs_sync_conn *s; |
1210 | unsigned int size; | 1210 | unsigned int size; |
1211 | int retc; | 1211 | int retc; |
1212 | 1212 | ||
1213 | p = msg_end; | 1213 | p = msg_end; |
1214 | if (p + sizeof(s->v4) > buffer+buflen) { | 1214 | if (p + sizeof(s->v4) > buffer+buflen) { |
1215 | IP_VS_ERR_RL("BACKUP, Dropping buffer, too small\n"); | 1215 | IP_VS_ERR_RL("BACKUP, Dropping buffer, too small\n"); |
1216 | return; | 1216 | return; |
1217 | } | 1217 | } |
1218 | s = (union ip_vs_sync_conn *)p; | 1218 | s = (union ip_vs_sync_conn *)p; |
1219 | size = ntohs(s->v4.ver_size) & SVER_MASK; | 1219 | size = ntohs(s->v4.ver_size) & SVER_MASK; |
1220 | msg_end = p + size; | 1220 | msg_end = p + size; |
1221 | /* Basic sanity checks */ | 1221 | /* Basic sanity checks */ |
1222 | if (msg_end > buffer+buflen) { | 1222 | if (msg_end > buffer+buflen) { |
1223 | IP_VS_ERR_RL("BACKUP, Dropping buffer, msg > buffer\n"); | 1223 | IP_VS_ERR_RL("BACKUP, Dropping buffer, msg > buffer\n"); |
1224 | return; | 1224 | return; |
1225 | } | 1225 | } |
1226 | if (ntohs(s->v4.ver_size) >> SVER_SHIFT) { | 1226 | if (ntohs(s->v4.ver_size) >> SVER_SHIFT) { |
1227 | IP_VS_ERR_RL("BACKUP, Dropping buffer, Unknown version %d\n", | 1227 | IP_VS_ERR_RL("BACKUP, Dropping buffer, Unknown version %d\n", |
1228 | ntohs(s->v4.ver_size) >> SVER_SHIFT); | 1228 | ntohs(s->v4.ver_size) >> SVER_SHIFT); |
1229 | return; | 1229 | return; |
1230 | } | 1230 | } |
1231 | /* Process a single sync_conn */ | 1231 | /* Process a single sync_conn */ |
1232 | retc = ip_vs_proc_sync_conn(net, p, msg_end); | 1232 | retc = ip_vs_proc_sync_conn(net, p, msg_end); |
1233 | if (retc < 0) { | 1233 | if (retc < 0) { |
1234 | IP_VS_ERR_RL("BACKUP, Dropping buffer, Err: %d in decoding\n", | 1234 | IP_VS_ERR_RL("BACKUP, Dropping buffer, Err: %d in decoding\n", |
1235 | retc); | 1235 | retc); |
1236 | return; | 1236 | return; |
1237 | } | 1237 | } |
1238 | /* Make sure we have 32-bit alignment */ | 1238 | /* Make sure we have 32-bit alignment */ |
1239 | msg_end = p + ((size + 3) & ~3); | 1239 | msg_end = p + ((size + 3) & ~3); |
1240 | } | 1240 | } |
1241 | } else { | 1241 | } else { |
1242 | /* Old type of message */ | 1242 | /* Old type of message */ |
1243 | ip_vs_process_message_v0(net, buffer, buflen); | 1243 | ip_vs_process_message_v0(net, buffer, buflen); |
1244 | return; | 1244 | return; |
1245 | } | 1245 | } |
1246 | } | 1246 | } |
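
The (size + 3) & ~3 step above is the usual round-up-and-mask idiom for 32-bit alignment. A minimal standalone sketch, with a helper name made up for illustration:

    /* Round len up to the next multiple of 4: 5 -> 8, 8 -> 8, 9 -> 12. */
    static inline unsigned int align4(unsigned int len)
    {
        return (len + 3) & ~3u;
    }
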
1247 | 1247 | ||
1248 | 1248 | ||
1249 | /* | 1249 | /* |
1250 | * Setup sndbuf (mode=1) or rcvbuf (mode=0) | 1250 | * Setup sndbuf (mode=1) or rcvbuf (mode=0) |
1251 | */ | 1251 | */ |
1252 | static void set_sock_size(struct sock *sk, int mode, int val) | 1252 | static void set_sock_size(struct sock *sk, int mode, int val) |
1253 | { | 1253 | { |
1254 | /* setsockopt(sock, SOL_SOCKET, SO_SNDBUF, &val, sizeof(val)); */ | 1254 | /* setsockopt(sock, SOL_SOCKET, SO_SNDBUF, &val, sizeof(val)); */ |
1255 | /* setsockopt(sock, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val)); */ | 1255 | /* setsockopt(sock, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val)); */ |
1256 | lock_sock(sk); | 1256 | lock_sock(sk); |
1257 | if (mode) { | 1257 | if (mode) { |
1258 | val = clamp_t(int, val, (SOCK_MIN_SNDBUF + 1) / 2, | 1258 | val = clamp_t(int, val, (SOCK_MIN_SNDBUF + 1) / 2, |
1259 | sysctl_wmem_max); | 1259 | sysctl_wmem_max); |
1260 | sk->sk_sndbuf = val * 2; | 1260 | sk->sk_sndbuf = val * 2; |
1261 | sk->sk_userlocks |= SOCK_SNDBUF_LOCK; | 1261 | sk->sk_userlocks |= SOCK_SNDBUF_LOCK; |
1262 | } else { | 1262 | } else { |
1263 | val = clamp_t(int, val, (SOCK_MIN_RCVBUF + 1) / 2, | 1263 | val = clamp_t(int, val, (SOCK_MIN_RCVBUF + 1) / 2, |
1264 | sysctl_rmem_max); | 1264 | sysctl_rmem_max); |
1265 | sk->sk_rcvbuf = val * 2; | 1265 | sk->sk_rcvbuf = val * 2; |
1266 | sk->sk_userlocks |= SOCK_RCVBUF_LOCK; | 1266 | sk->sk_userlocks |= SOCK_RCVBUF_LOCK; |
1267 | } | 1267 | } |
1268 | release_sock(sk); | 1268 | release_sock(sk); |
1269 | } | 1269 | } |
1270 | 1270 | ||
1271 | /* | 1271 | /* |
1272 | * Setup loopback of outgoing multicasts on a sending socket | 1272 | * Setup loopback of outgoing multicasts on a sending socket |
1273 | */ | 1273 | */ |
1274 | static void set_mcast_loop(struct sock *sk, u_char loop) | 1274 | static void set_mcast_loop(struct sock *sk, u_char loop) |
1275 | { | 1275 | { |
1276 | struct inet_sock *inet = inet_sk(sk); | 1276 | struct inet_sock *inet = inet_sk(sk); |
1277 | 1277 | ||
1278 | /* setsockopt(sock, SOL_IP, IP_MULTICAST_LOOP, &loop, sizeof(loop)); */ | 1278 | /* setsockopt(sock, SOL_IP, IP_MULTICAST_LOOP, &loop, sizeof(loop)); */ |
1279 | lock_sock(sk); | 1279 | lock_sock(sk); |
1280 | inet->mc_loop = loop ? 1 : 0; | 1280 | inet->mc_loop = loop ? 1 : 0; |
1281 | release_sock(sk); | 1281 | release_sock(sk); |
1282 | } | 1282 | } |
1283 | 1283 | ||
1284 | /* | 1284 | /* |
1285 | * Specify TTL for outgoing multicasts on a sending socket | 1285 | * Specify TTL for outgoing multicasts on a sending socket |
1286 | */ | 1286 | */ |
1287 | static void set_mcast_ttl(struct sock *sk, u_char ttl) | 1287 | static void set_mcast_ttl(struct sock *sk, u_char ttl) |
1288 | { | 1288 | { |
1289 | struct inet_sock *inet = inet_sk(sk); | 1289 | struct inet_sock *inet = inet_sk(sk); |
1290 | 1290 | ||
1291 | /* setsockopt(sock, SOL_IP, IP_MULTICAST_TTL, &ttl, sizeof(ttl)); */ | 1291 | /* setsockopt(sock, SOL_IP, IP_MULTICAST_TTL, &ttl, sizeof(ttl)); */ |
1292 | lock_sock(sk); | 1292 | lock_sock(sk); |
1293 | inet->mc_ttl = ttl; | 1293 | inet->mc_ttl = ttl; |
1294 | release_sock(sk); | 1294 | release_sock(sk); |
1295 | } | 1295 | } |
1296 | 1296 | ||
1297 | /* | 1297 | /* |
1298 | * Specify default interface for outgoing multicasts | 1298 | * Specify default interface for outgoing multicasts |
1299 | */ | 1299 | */ |
1300 | static int set_mcast_if(struct sock *sk, char *ifname) | 1300 | static int set_mcast_if(struct sock *sk, char *ifname) |
1301 | { | 1301 | { |
1302 | struct net_device *dev; | 1302 | struct net_device *dev; |
1303 | struct inet_sock *inet = inet_sk(sk); | 1303 | struct inet_sock *inet = inet_sk(sk); |
1304 | struct net *net = sock_net(sk); | 1304 | struct net *net = sock_net(sk); |
1305 | 1305 | ||
1306 | dev = __dev_get_by_name(net, ifname); | 1306 | dev = __dev_get_by_name(net, ifname); |
1307 | if (!dev) | 1307 | if (!dev) |
1308 | return -ENODEV; | 1308 | return -ENODEV; |
1309 | 1309 | ||
1310 | if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if) | 1310 | if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if) |
1311 | return -EINVAL; | 1311 | return -EINVAL; |
1312 | 1312 | ||
1313 | lock_sock(sk); | 1313 | lock_sock(sk); |
1314 | inet->mc_index = dev->ifindex; | 1314 | inet->mc_index = dev->ifindex; |
1315 | /* inet->mc_addr = 0; */ | 1315 | /* inet->mc_addr = 0; */ |
1316 | release_sock(sk); | 1316 | release_sock(sk); |
1317 | 1317 | ||
1318 | return 0; | 1318 | return 0; |
1319 | } | 1319 | } |
1320 | 1320 | ||
1321 | 1321 | ||
1322 | /* | 1322 | /* |
1323 | * Set the maximum length of sync message according to the | 1323 | * Set the maximum length of sync message according to the |
1324 | * specified interface's MTU. | 1324 | * specified interface's MTU. |
1325 | */ | 1325 | */ |
1326 | static int set_sync_mesg_maxlen(struct net *net, int sync_state) | 1326 | static int set_sync_mesg_maxlen(struct net *net, int sync_state) |
1327 | { | 1327 | { |
1328 | struct netns_ipvs *ipvs = net_ipvs(net); | 1328 | struct netns_ipvs *ipvs = net_ipvs(net); |
1329 | struct net_device *dev; | 1329 | struct net_device *dev; |
1330 | int num; | 1330 | int num; |
1331 | 1331 | ||
1332 | if (sync_state == IP_VS_STATE_MASTER) { | 1332 | if (sync_state == IP_VS_STATE_MASTER) { |
1333 | dev = __dev_get_by_name(net, ipvs->master_mcast_ifn); | 1333 | dev = __dev_get_by_name(net, ipvs->master_mcast_ifn); |
1334 | if (!dev) | 1334 | if (!dev) |
1335 | return -ENODEV; | 1335 | return -ENODEV; |
1336 | 1336 | ||
1337 | num = (dev->mtu - sizeof(struct iphdr) - | 1337 | num = (dev->mtu - sizeof(struct iphdr) - |
1338 | sizeof(struct udphdr) - | 1338 | sizeof(struct udphdr) - |
1339 | SYNC_MESG_HEADER_LEN - 20) / SIMPLE_CONN_SIZE; | 1339 | SYNC_MESG_HEADER_LEN - 20) / SIMPLE_CONN_SIZE; |
1340 | ipvs->send_mesg_maxlen = SYNC_MESG_HEADER_LEN + | 1340 | ipvs->send_mesg_maxlen = SYNC_MESG_HEADER_LEN + |
1341 | SIMPLE_CONN_SIZE * min(num, MAX_CONNS_PER_SYNCBUFF); | 1341 | SIMPLE_CONN_SIZE * min(num, MAX_CONNS_PER_SYNCBUFF); |
1342 | IP_VS_DBG(7, "setting the maximum length of sync sending " | 1342 | IP_VS_DBG(7, "setting the maximum length of sync sending " |
1343 | "message %d.\n", ipvs->send_mesg_maxlen); | 1343 | "message %d.\n", ipvs->send_mesg_maxlen); |
1344 | } else if (sync_state == IP_VS_STATE_BACKUP) { | 1344 | } else if (sync_state == IP_VS_STATE_BACKUP) { |
1345 | dev = __dev_get_by_name(net, ipvs->backup_mcast_ifn); | 1345 | dev = __dev_get_by_name(net, ipvs->backup_mcast_ifn); |
1346 | if (!dev) | 1346 | if (!dev) |
1347 | return -ENODEV; | 1347 | return -ENODEV; |
1348 | 1348 | ||
1349 | ipvs->recv_mesg_maxlen = dev->mtu - | 1349 | ipvs->recv_mesg_maxlen = dev->mtu - |
1350 | sizeof(struct iphdr) - sizeof(struct udphdr); | 1350 | sizeof(struct iphdr) - sizeof(struct udphdr); |
1351 | IP_VS_DBG(7, "setting the maximum length of sync receiving " | 1351 | IP_VS_DBG(7, "setting the maximum length of sync receiving " |
1352 | "message %d.\n", ipvs->recv_mesg_maxlen); | 1352 | "message %d.\n", ipvs->recv_mesg_maxlen); |
1353 | } | 1353 | } |
1354 | 1354 | ||
1355 | return 0; | 1355 | return 0; |
1356 | } | 1356 | } |
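
A worked example of the arithmetic above, assuming a 1500-byte MTU and the constants this file usually defines (SYNC_MESG_HEADER_LEN == 4, SIMPLE_CONN_SIZE == 24); treat the exact figures as illustrative:

    /* Master side, MTU 1500 (20 = iphdr, 8 = udphdr, 20 = extra headroom):
     *   num = (1500 - 20 - 8 - 4 - 20) / 24 = 60 connections per message
     *   send_mesg_maxlen = 4 + 24 * min(60, MAX_CONNS_PER_SYNCBUFF) = 1444
     * Backup side simply accepts one full datagram payload:
     *   recv_mesg_maxlen = 1500 - 20 - 8 = 1472
     */
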
1357 | 1357 | ||
1358 | 1358 | ||
1359 | /* | 1359 | /* |
1360 | * Join a multicast group. | 1360 | * Join a multicast group. |
1361 | * The group is specified by a class D multicast address (224.0.0.0/4) | 1361 | * The group is specified by a class D multicast address (224.0.0.0/4) |
1362 | * in the in_addr structure passed in as a parameter. | 1362 | * in the in_addr structure passed in as a parameter. |
1363 | */ | 1363 | */ |
1364 | static int | 1364 | static int |
1365 | join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname) | 1365 | join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname) |
1366 | { | 1366 | { |
1367 | struct net *net = sock_net(sk); | 1367 | struct net *net = sock_net(sk); |
1368 | struct ip_mreqn mreq; | 1368 | struct ip_mreqn mreq; |
1369 | struct net_device *dev; | 1369 | struct net_device *dev; |
1370 | int ret; | 1370 | int ret; |
1371 | 1371 | ||
1372 | memset(&mreq, 0, sizeof(mreq)); | 1372 | memset(&mreq, 0, sizeof(mreq)); |
1373 | memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr)); | 1373 | memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr)); |
1374 | 1374 | ||
1375 | dev = __dev_get_by_name(net, ifname); | 1375 | dev = __dev_get_by_name(net, ifname); |
1376 | if (!dev) | 1376 | if (!dev) |
1377 | return -ENODEV; | 1377 | return -ENODEV; |
1378 | if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if) | 1378 | if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if) |
1379 | return -EINVAL; | 1379 | return -EINVAL; |
1380 | 1380 | ||
1381 | mreq.imr_ifindex = dev->ifindex; | 1381 | mreq.imr_ifindex = dev->ifindex; |
1382 | 1382 | ||
1383 | lock_sock(sk); | 1383 | lock_sock(sk); |
1384 | ret = ip_mc_join_group(sk, &mreq); | 1384 | ret = ip_mc_join_group(sk, &mreq); |
1385 | release_sock(sk); | 1385 | release_sock(sk); |
1386 | 1386 | ||
1387 | return ret; | 1387 | return ret; |
1388 | } | 1388 | } |
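
As with the setsockopt() equivalences noted in the helpers above, join_mcast_group() corresponds to a plain IP_ADD_MEMBERSHIP from user space. A hedged sketch: the function name is invented, and 224.0.0.81 is the IP_VS_SYNC_GROUP address.

    #include <netinet/in.h>
    #include <arpa/inet.h>
    #include <net/if.h>
    #include <sys/socket.h>

    /* Illustrative user-space analogue: join the IPVS sync group on ifname. */
    static int join_sync_group(int fd, const char *ifname)
    {
        struct ip_mreqn mreq = { 0 };

        mreq.imr_multiaddr.s_addr = inet_addr("224.0.0.81");
        mreq.imr_ifindex = if_nametoindex(ifname);
        return setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
                          &mreq, sizeof(mreq));
    }
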
1389 | 1389 | ||
1390 | 1390 | ||
1391 | static int bind_mcastif_addr(struct socket *sock, char *ifname) | 1391 | static int bind_mcastif_addr(struct socket *sock, char *ifname) |
1392 | { | 1392 | { |
1393 | struct net *net = sock_net(sock->sk); | 1393 | struct net *net = sock_net(sock->sk); |
1394 | struct net_device *dev; | 1394 | struct net_device *dev; |
1395 | __be32 addr; | 1395 | __be32 addr; |
1396 | struct sockaddr_in sin; | 1396 | struct sockaddr_in sin; |
1397 | 1397 | ||
1398 | dev = __dev_get_by_name(net, ifname); | 1398 | dev = __dev_get_by_name(net, ifname); |
1399 | if (!dev) | 1399 | if (!dev) |
1400 | return -ENODEV; | 1400 | return -ENODEV; |
1401 | 1401 | ||
1402 | addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE); | 1402 | addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE); |
1403 | if (!addr) | 1403 | if (!addr) |
1404 | pr_err("You probably need to specify an IP address on " | 1404 | pr_err("You probably need to specify an IP address on " |
1405 | "the multicast interface.\n"); | 1405 | "the multicast interface.\n"); |
1406 | 1406 | ||
1407 | IP_VS_DBG(7, "binding socket with (%s) %pI4\n", | 1407 | IP_VS_DBG(7, "binding socket with (%s) %pI4\n", |
1408 | ifname, &addr); | 1408 | ifname, &addr); |
1409 | 1409 | ||
1410 | /* Now bind the socket with the address of multicast interface */ | 1410 | /* Now bind the socket with the address of multicast interface */ |
1411 | sin.sin_family = AF_INET; | 1411 | sin.sin_family = AF_INET; |
1412 | sin.sin_addr.s_addr = addr; | 1412 | sin.sin_addr.s_addr = addr; |
1413 | sin.sin_port = 0; | 1413 | sin.sin_port = 0; |
1414 | 1414 | ||
1415 | return sock->ops->bind(sock, (struct sockaddr*)&sin, sizeof(sin)); | 1415 | return sock->ops->bind(sock, (struct sockaddr*)&sin, sizeof(sin)); |
1416 | } | 1416 | } |
1417 | 1417 | ||
1418 | /* | 1418 | /* |
1419 | * Set up sending multicast socket over UDP | 1419 | * Set up sending multicast socket over UDP |
1420 | */ | 1420 | */ |
1421 | static struct socket *make_send_sock(struct net *net, int id) | 1421 | static struct socket *make_send_sock(struct net *net, int id) |
1422 | { | 1422 | { |
1423 | struct netns_ipvs *ipvs = net_ipvs(net); | 1423 | struct netns_ipvs *ipvs = net_ipvs(net); |
1424 | /* multicast addr */ | 1424 | /* multicast addr */ |
1425 | struct sockaddr_in mcast_addr = { | 1425 | struct sockaddr_in mcast_addr = { |
1426 | .sin_family = AF_INET, | 1426 | .sin_family = AF_INET, |
1427 | .sin_port = cpu_to_be16(IP_VS_SYNC_PORT + id), | 1427 | .sin_port = cpu_to_be16(IP_VS_SYNC_PORT + id), |
1428 | .sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP), | 1428 | .sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP), |
1429 | }; | 1429 | }; |
1430 | struct socket *sock; | 1430 | struct socket *sock; |
1431 | int result; | 1431 | int result; |
1432 | 1432 | ||
1433 | /* First create a socket, move it to the right namespace later */ | 1433 | /* First create a socket, move it to the right namespace later */ |
1434 | result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); | 1434 | result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); |
1435 | if (result < 0) { | 1435 | if (result < 0) { |
1436 | pr_err("Error during creation of socket; terminating\n"); | 1436 | pr_err("Error during creation of socket; terminating\n"); |
1437 | return ERR_PTR(result); | 1437 | return ERR_PTR(result); |
1438 | } | 1438 | } |
1439 | /* | 1439 | /* |
1440 | * Kernel sockets that are part of a namespace should not | 1440 | * Kernel sockets that are part of a namespace should not |
1441 | * hold a reference to the namespace, so that the namespace can be stopped. | 1441 | * hold a reference to the namespace, so that the namespace can be stopped. |
1442 | * After sk_change_net() the socket should be released via sk_release_kernel(). | 1442 | * After sk_change_net() the socket should be released via sk_release_kernel(). |
1443 | */ | 1443 | */ |
1444 | sk_change_net(sock->sk, net); | 1444 | sk_change_net(sock->sk, net); |
1445 | result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn); | 1445 | result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn); |
1446 | if (result < 0) { | 1446 | if (result < 0) { |
1447 | pr_err("Error setting outbound mcast interface\n"); | 1447 | pr_err("Error setting outbound mcast interface\n"); |
1448 | goto error; | 1448 | goto error; |
1449 | } | 1449 | } |
1450 | 1450 | ||
1451 | set_mcast_loop(sock->sk, 0); | 1451 | set_mcast_loop(sock->sk, 0); |
1452 | set_mcast_ttl(sock->sk, 1); | 1452 | set_mcast_ttl(sock->sk, 1); |
1453 | result = sysctl_sync_sock_size(ipvs); | 1453 | result = sysctl_sync_sock_size(ipvs); |
1454 | if (result > 0) | 1454 | if (result > 0) |
1455 | set_sock_size(sock->sk, 1, result); | 1455 | set_sock_size(sock->sk, 1, result); |
1456 | 1456 | ||
1457 | result = bind_mcastif_addr(sock, ipvs->master_mcast_ifn); | 1457 | result = bind_mcastif_addr(sock, ipvs->master_mcast_ifn); |
1458 | if (result < 0) { | 1458 | if (result < 0) { |
1459 | pr_err("Error binding address of the mcast interface\n"); | 1459 | pr_err("Error binding address of the mcast interface\n"); |
1460 | goto error; | 1460 | goto error; |
1461 | } | 1461 | } |
1462 | 1462 | ||
1463 | result = sock->ops->connect(sock, (struct sockaddr *) &mcast_addr, | 1463 | result = sock->ops->connect(sock, (struct sockaddr *) &mcast_addr, |
1464 | sizeof(struct sockaddr), 0); | 1464 | sizeof(struct sockaddr), 0); |
1465 | if (result < 0) { | 1465 | if (result < 0) { |
1466 | pr_err("Error connecting to the multicast addr\n"); | 1466 | pr_err("Error connecting to the multicast addr\n"); |
1467 | goto error; | 1467 | goto error; |
1468 | } | 1468 | } |
1469 | 1469 | ||
1470 | return sock; | 1470 | return sock; |
1471 | 1471 | ||
1472 | error: | 1472 | error: |
1473 | sk_release_kernel(sock->sk); | 1473 | sk_release_kernel(sock->sk); |
1474 | return ERR_PTR(result); | 1474 | return ERR_PTR(result); |
1475 | } | 1475 | } |
1476 | 1476 | ||
1477 | 1477 | ||
1478 | /* | 1478 | /* |
1479 | * Set up receiving multicast socket over UDP | 1479 | * Set up receiving multicast socket over UDP |
1480 | */ | 1480 | */ |
1481 | static struct socket *make_receive_sock(struct net *net, int id) | 1481 | static struct socket *make_receive_sock(struct net *net, int id) |
1482 | { | 1482 | { |
1483 | struct netns_ipvs *ipvs = net_ipvs(net); | 1483 | struct netns_ipvs *ipvs = net_ipvs(net); |
1484 | /* multicast addr */ | 1484 | /* multicast addr */ |
1485 | struct sockaddr_in mcast_addr = { | 1485 | struct sockaddr_in mcast_addr = { |
1486 | .sin_family = AF_INET, | 1486 | .sin_family = AF_INET, |
1487 | .sin_port = cpu_to_be16(IP_VS_SYNC_PORT + id), | 1487 | .sin_port = cpu_to_be16(IP_VS_SYNC_PORT + id), |
1488 | .sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP), | 1488 | .sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP), |
1489 | }; | 1489 | }; |
1490 | struct socket *sock; | 1490 | struct socket *sock; |
1491 | int result; | 1491 | int result; |
1492 | 1492 | ||
1493 | /* First create a socket */ | 1493 | /* First create a socket */ |
1494 | result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); | 1494 | result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); |
1495 | if (result < 0) { | 1495 | if (result < 0) { |
1496 | pr_err("Error during creation of socket; terminating\n"); | 1496 | pr_err("Error during creation of socket; terminating\n"); |
1497 | return ERR_PTR(result); | 1497 | return ERR_PTR(result); |
1498 | } | 1498 | } |
1499 | /* | 1499 | /* |
1500 | * Kernel sockets that are part of a namespace should not | 1500 | * Kernel sockets that are part of a namespace should not |
1501 | * hold a reference to the namespace, so that the namespace can be stopped. | 1501 | * hold a reference to the namespace, so that the namespace can be stopped. |
1502 | * After sk_change_net() the socket should be released via sk_release_kernel(). | 1502 | * After sk_change_net() the socket should be released via sk_release_kernel(). |
1503 | */ | 1503 | */ |
1504 | sk_change_net(sock->sk, net); | 1504 | sk_change_net(sock->sk, net); |
1505 | /* This is equivalent to the SO_REUSEADDR option in user space */ | 1505 | /* This is equivalent to the SO_REUSEADDR option in user space */ |
1506 | sock->sk->sk_reuse = SK_CAN_REUSE; | 1506 | sock->sk->sk_reuse = SK_CAN_REUSE; |
1507 | result = sysctl_sync_sock_size(ipvs); | 1507 | result = sysctl_sync_sock_size(ipvs); |
1508 | if (result > 0) | 1508 | if (result > 0) |
1509 | set_sock_size(sock->sk, 0, result); | 1509 | set_sock_size(sock->sk, 0, result); |
1510 | 1510 | ||
1511 | result = sock->ops->bind(sock, (struct sockaddr *) &mcast_addr, | 1511 | result = sock->ops->bind(sock, (struct sockaddr *) &mcast_addr, |
1512 | sizeof(struct sockaddr)); | 1512 | sizeof(struct sockaddr)); |
1513 | if (result < 0) { | 1513 | if (result < 0) { |
1514 | pr_err("Error binding to the multicast addr\n"); | 1514 | pr_err("Error binding to the multicast addr\n"); |
1515 | goto error; | 1515 | goto error; |
1516 | } | 1516 | } |
1517 | 1517 | ||
1518 | /* join the multicast group */ | 1518 | /* join the multicast group */ |
1519 | result = join_mcast_group(sock->sk, | 1519 | result = join_mcast_group(sock->sk, |
1520 | (struct in_addr *) &mcast_addr.sin_addr, | 1520 | (struct in_addr *) &mcast_addr.sin_addr, |
1521 | ipvs->backup_mcast_ifn); | 1521 | ipvs->backup_mcast_ifn); |
1522 | if (result < 0) { | 1522 | if (result < 0) { |
1523 | pr_err("Error joining to the multicast group\n"); | 1523 | pr_err("Error joining to the multicast group\n"); |
1524 | goto error; | 1524 | goto error; |
1525 | } | 1525 | } |
1526 | 1526 | ||
1527 | return sock; | 1527 | return sock; |
1528 | 1528 | ||
1529 | error: | 1529 | error: |
1530 | sk_release_kernel(sock->sk); | 1530 | sk_release_kernel(sock->sk); |
1531 | return ERR_PTR(result); | 1531 | return ERR_PTR(result); |
1532 | } | 1532 | } |
1533 | 1533 | ||
1534 | 1534 | ||
1535 | static int | 1535 | static int |
1536 | ip_vs_send_async(struct socket *sock, const char *buffer, const size_t length) | 1536 | ip_vs_send_async(struct socket *sock, const char *buffer, const size_t length) |
1537 | { | 1537 | { |
1538 | struct msghdr msg = {.msg_flags = MSG_DONTWAIT|MSG_NOSIGNAL}; | 1538 | struct msghdr msg = {.msg_flags = MSG_DONTWAIT|MSG_NOSIGNAL}; |
1539 | struct kvec iov; | 1539 | struct kvec iov; |
1540 | int len; | 1540 | int len; |
1541 | 1541 | ||
1542 | EnterFunction(7); | 1542 | EnterFunction(7); |
1543 | iov.iov_base = (void *)buffer; | 1543 | iov.iov_base = (void *)buffer; |
1544 | iov.iov_len = length; | 1544 | iov.iov_len = length; |
1545 | 1545 | ||
1546 | len = kernel_sendmsg(sock, &msg, &iov, 1, (size_t)(length)); | 1546 | len = kernel_sendmsg(sock, &msg, &iov, 1, (size_t)(length)); |
1547 | 1547 | ||
1548 | LeaveFunction(7); | 1548 | LeaveFunction(7); |
1549 | return len; | 1549 | return len; |
1550 | } | 1550 | } |
1551 | 1551 | ||
1552 | static int | 1552 | static int |
1553 | ip_vs_send_sync_msg(struct socket *sock, struct ip_vs_sync_mesg *msg) | 1553 | ip_vs_send_sync_msg(struct socket *sock, struct ip_vs_sync_mesg *msg) |
1554 | { | 1554 | { |
1555 | int msize; | 1555 | int msize; |
1556 | int ret; | 1556 | int ret; |
1557 | 1557 | ||
1558 | msize = ntohs(msg->size); | 1558 | msize = ntohs(msg->size); |
1559 | 1559 | ||
1560 | ret = ip_vs_send_async(sock, (char *)msg, msize); | 1560 | ret = ip_vs_send_async(sock, (char *)msg, msize); |
1561 | if (ret >= 0 || ret == -EAGAIN) | 1561 | if (ret >= 0 || ret == -EAGAIN) |
1562 | return ret; | 1562 | return ret; |
1563 | pr_err("ip_vs_send_async error %d\n", ret); | 1563 | pr_err("ip_vs_send_async error %d\n", ret); |
1564 | return 0; | 1564 | return 0; |
1565 | } | 1565 | } |
1566 | 1566 | ||
1567 | static int | 1567 | static int |
1568 | ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen) | 1568 | ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen) |
1569 | { | 1569 | { |
1570 | struct msghdr msg = {NULL,}; | 1570 | struct msghdr msg = {NULL,}; |
1571 | struct kvec iov; | 1571 | struct kvec iov; |
1572 | int len; | 1572 | int len; |
1573 | 1573 | ||
1574 | EnterFunction(7); | 1574 | EnterFunction(7); |
1575 | 1575 | ||
1576 | /* Receive a packet */ | 1576 | /* Receive a packet */ |
1577 | iov.iov_base = buffer; | 1577 | iov.iov_base = buffer; |
1578 | iov.iov_len = (size_t)buflen; | 1578 | iov.iov_len = (size_t)buflen; |
1579 | 1579 | ||
1580 | len = kernel_recvmsg(sock, &msg, &iov, 1, buflen, MSG_DONTWAIT); | 1580 | len = kernel_recvmsg(sock, &msg, &iov, 1, buflen, MSG_DONTWAIT); |
1581 | 1581 | ||
1582 | if (len < 0) | 1582 | if (len < 0) |
1583 | return len; | 1583 | return len; |
1584 | 1584 | ||
1585 | LeaveFunction(7); | 1585 | LeaveFunction(7); |
1586 | return len; | 1586 | return len; |
1587 | } | 1587 | } |
1588 | 1588 | ||
1589 | /* Wake up the master thread for sending */ | 1589 | /* Wake up the master thread for sending */ |
1590 | static void master_wakeup_work_handler(struct work_struct *work) | 1590 | static void master_wakeup_work_handler(struct work_struct *work) |
1591 | { | 1591 | { |
1592 | struct ipvs_master_sync_state *ms = | 1592 | struct ipvs_master_sync_state *ms = |
1593 | container_of(work, struct ipvs_master_sync_state, | 1593 | container_of(work, struct ipvs_master_sync_state, |
1594 | master_wakeup_work.work); | 1594 | master_wakeup_work.work); |
1595 | struct netns_ipvs *ipvs = ms->ipvs; | 1595 | struct netns_ipvs *ipvs = ms->ipvs; |
1596 | 1596 | ||
1597 | spin_lock_bh(&ipvs->sync_lock); | 1597 | spin_lock_bh(&ipvs->sync_lock); |
1598 | if (ms->sync_queue_len && | 1598 | if (ms->sync_queue_len && |
1599 | ms->sync_queue_delay < IPVS_SYNC_WAKEUP_RATE) { | 1599 | ms->sync_queue_delay < IPVS_SYNC_WAKEUP_RATE) { |
1600 | ms->sync_queue_delay = IPVS_SYNC_WAKEUP_RATE; | 1600 | ms->sync_queue_delay = IPVS_SYNC_WAKEUP_RATE; |
1601 | wake_up_process(ms->master_thread); | 1601 | wake_up_process(ms->master_thread); |
1602 | } | 1602 | } |
1603 | spin_unlock_bh(&ipvs->sync_lock); | 1603 | spin_unlock_bh(&ipvs->sync_lock); |
1604 | } | 1604 | } |
1605 | 1605 | ||
1606 | /* Get next buffer to send */ | 1606 | /* Get next buffer to send */ |
1607 | static inline struct ip_vs_sync_buff * | 1607 | static inline struct ip_vs_sync_buff * |
1608 | next_sync_buff(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms) | 1608 | next_sync_buff(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms) |
1609 | { | 1609 | { |
1610 | struct ip_vs_sync_buff *sb; | 1610 | struct ip_vs_sync_buff *sb; |
1611 | 1611 | ||
1612 | sb = sb_dequeue(ipvs, ms); | 1612 | sb = sb_dequeue(ipvs, ms); |
1613 | if (sb) | 1613 | if (sb) |
1614 | return sb; | 1614 | return sb; |
1615 | /* Do not delay entries in buffer for more than 2 seconds */ | 1615 | /* Do not delay entries in buffer for more than 2 seconds */ |
1616 | return get_curr_sync_buff(ipvs, ms, IPVS_SYNC_FLUSH_TIME); | 1616 | return get_curr_sync_buff(ipvs, ms, IPVS_SYNC_FLUSH_TIME); |
1617 | } | 1617 | } |
1618 | 1618 | ||
1619 | static int sync_thread_master(void *data) | 1619 | static int sync_thread_master(void *data) |
1620 | { | 1620 | { |
1621 | struct ip_vs_sync_thread_data *tinfo = data; | 1621 | struct ip_vs_sync_thread_data *tinfo = data; |
1622 | struct netns_ipvs *ipvs = net_ipvs(tinfo->net); | 1622 | struct netns_ipvs *ipvs = net_ipvs(tinfo->net); |
1623 | struct ipvs_master_sync_state *ms = &ipvs->ms[tinfo->id]; | 1623 | struct ipvs_master_sync_state *ms = &ipvs->ms[tinfo->id]; |
1624 | struct sock *sk = tinfo->sock->sk; | 1624 | struct sock *sk = tinfo->sock->sk; |
1625 | struct ip_vs_sync_buff *sb; | 1625 | struct ip_vs_sync_buff *sb; |
1626 | 1626 | ||
1627 | pr_info("sync thread started: state = MASTER, mcast_ifn = %s, " | 1627 | pr_info("sync thread started: state = MASTER, mcast_ifn = %s, " |
1628 | "syncid = %d, id = %d\n", | 1628 | "syncid = %d, id = %d\n", |
1629 | ipvs->master_mcast_ifn, ipvs->master_syncid, tinfo->id); | 1629 | ipvs->master_mcast_ifn, ipvs->master_syncid, tinfo->id); |
1630 | 1630 | ||
1631 | for (;;) { | 1631 | for (;;) { |
1632 | sb = next_sync_buff(ipvs, ms); | 1632 | sb = next_sync_buff(ipvs, ms); |
1633 | if (unlikely(kthread_should_stop())) | 1633 | if (unlikely(kthread_should_stop())) |
1634 | break; | 1634 | break; |
1635 | if (!sb) { | 1635 | if (!sb) { |
1636 | schedule_timeout(IPVS_SYNC_CHECK_PERIOD); | 1636 | schedule_timeout(IPVS_SYNC_CHECK_PERIOD); |
1637 | continue; | 1637 | continue; |
1638 | } | 1638 | } |
1639 | while (ip_vs_send_sync_msg(tinfo->sock, sb->mesg) < 0) { | 1639 | while (ip_vs_send_sync_msg(tinfo->sock, sb->mesg) < 0) { |
1640 | int ret = 0; | 1640 | int ret = __wait_event_interruptible(*sk_sleep(sk), |
1641 | |||
1642 | __wait_event_interruptible(*sk_sleep(sk), | ||
1643 | sock_writeable(sk) || | 1641 | sock_writeable(sk) || |
1644 | kthread_should_stop(), | 1642 | kthread_should_stop()); |
1645 | ret); | ||
1646 | if (unlikely(kthread_should_stop())) | 1643 | if (unlikely(kthread_should_stop())) |
1647 | goto done; | 1644 | goto done; |
1648 | } | 1645 | } |
1649 | ip_vs_sync_buff_release(sb); | 1646 | ip_vs_sync_buff_release(sb); |
1650 | } | 1647 | } |
1651 | 1648 | ||
1652 | done: | 1649 | done: |
1653 | __set_current_state(TASK_RUNNING); | 1650 | __set_current_state(TASK_RUNNING); |
1654 | if (sb) | 1651 | if (sb) |
1655 | ip_vs_sync_buff_release(sb); | 1652 | ip_vs_sync_buff_release(sb); |
1656 | 1653 | ||
1657 | /* clean up the sync_buff queue */ | 1654 | /* clean up the sync_buff queue */ |
1658 | while ((sb = sb_dequeue(ipvs, ms))) | 1655 | while ((sb = sb_dequeue(ipvs, ms))) |
1659 | ip_vs_sync_buff_release(sb); | 1656 | ip_vs_sync_buff_release(sb); |
1660 | __set_current_state(TASK_RUNNING); | 1657 | __set_current_state(TASK_RUNNING); |
1661 | 1658 | ||
1662 | /* clean up the current sync_buff */ | 1659 | /* clean up the current sync_buff */ |
1663 | sb = get_curr_sync_buff(ipvs, ms, 0); | 1660 | sb = get_curr_sync_buff(ipvs, ms, 0); |
1664 | if (sb) | 1661 | if (sb) |
1665 | ip_vs_sync_buff_release(sb); | 1662 | ip_vs_sync_buff_release(sb); |
1666 | 1663 | ||
1667 | /* release the sending multicast socket */ | 1664 | /* release the sending multicast socket */ |
1668 | sk_release_kernel(tinfo->sock->sk); | 1665 | sk_release_kernel(tinfo->sock->sk); |
1669 | kfree(tinfo); | 1666 | kfree(tinfo); |
1670 | 1667 | ||
1671 | return 0; | 1668 | return 0; |
1672 | } | 1669 | } |
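
sync_thread_master() is one of the __wait_event_interruptible() call sites this commit converts; the hunk above drops the awkward 'ret' out-parameter. The two calling conventions, side by side (a sketch restating the diff, not compilable as one unit):

    /* Old interface: the result came back through an explicit lvalue. */
    int ret = 0;
    __wait_event_interruptible(*sk_sleep(sk),
                               sock_writeable(sk) || kthread_should_stop(),
                               ret);

    /* New interface: the macro evaluates to the result, matching the
     * wait_event_interruptible() signature. */
    int ret = __wait_event_interruptible(*sk_sleep(sk),
                                         sock_writeable(sk) ||
                                         kthread_should_stop());
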
1673 | 1670 | ||
1674 | 1671 | ||
1675 | static int sync_thread_backup(void *data) | 1672 | static int sync_thread_backup(void *data) |
1676 | { | 1673 | { |
1677 | struct ip_vs_sync_thread_data *tinfo = data; | 1674 | struct ip_vs_sync_thread_data *tinfo = data; |
1678 | struct netns_ipvs *ipvs = net_ipvs(tinfo->net); | 1675 | struct netns_ipvs *ipvs = net_ipvs(tinfo->net); |
1679 | int len; | 1676 | int len; |
1680 | 1677 | ||
1681 | pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, " | 1678 | pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, " |
1682 | "syncid = %d, id = %d\n", | 1679 | "syncid = %d, id = %d\n", |
1683 | ipvs->backup_mcast_ifn, ipvs->backup_syncid, tinfo->id); | 1680 | ipvs->backup_mcast_ifn, ipvs->backup_syncid, tinfo->id); |
1684 | 1681 | ||
1685 | while (!kthread_should_stop()) { | 1682 | while (!kthread_should_stop()) { |
1686 | wait_event_interruptible(*sk_sleep(tinfo->sock->sk), | 1683 | wait_event_interruptible(*sk_sleep(tinfo->sock->sk), |
1687 | !skb_queue_empty(&tinfo->sock->sk->sk_receive_queue) | 1684 | !skb_queue_empty(&tinfo->sock->sk->sk_receive_queue) |
1688 | || kthread_should_stop()); | 1685 | || kthread_should_stop()); |
1689 | 1686 | ||
1690 | /* do we have data now? */ | 1687 | /* do we have data now? */ |
1691 | while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) { | 1688 | while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) { |
1692 | len = ip_vs_receive(tinfo->sock, tinfo->buf, | 1689 | len = ip_vs_receive(tinfo->sock, tinfo->buf, |
1693 | ipvs->recv_mesg_maxlen); | 1690 | ipvs->recv_mesg_maxlen); |
1694 | if (len <= 0) { | 1691 | if (len <= 0) { |
1695 | if (len != -EAGAIN) | 1692 | if (len != -EAGAIN) |
1696 | pr_err("receiving message error\n"); | 1693 | pr_err("receiving message error\n"); |
1697 | break; | 1694 | break; |
1698 | } | 1695 | } |
1699 | 1696 | ||
1700 | ip_vs_process_message(tinfo->net, tinfo->buf, len); | 1697 | ip_vs_process_message(tinfo->net, tinfo->buf, len); |
1701 | } | 1698 | } |
1702 | } | 1699 | } |
1703 | 1700 | ||
1704 | /* release the receiving multicast socket */ | 1701 | /* release the receiving multicast socket */ |
1705 | sk_release_kernel(tinfo->sock->sk); | 1702 | sk_release_kernel(tinfo->sock->sk); |
1706 | kfree(tinfo->buf); | 1703 | kfree(tinfo->buf); |
1707 | kfree(tinfo); | 1704 | kfree(tinfo); |
1708 | 1705 | ||
1709 | return 0; | 1706 | return 0; |
1710 | } | 1707 | } |
1711 | 1708 | ||
1712 | 1709 | ||
1713 | int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid) | 1710 | int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid) |
1714 | { | 1711 | { |
1715 | struct ip_vs_sync_thread_data *tinfo; | 1712 | struct ip_vs_sync_thread_data *tinfo; |
1716 | struct task_struct **array = NULL, *task; | 1713 | struct task_struct **array = NULL, *task; |
1717 | struct socket *sock; | 1714 | struct socket *sock; |
1718 | struct netns_ipvs *ipvs = net_ipvs(net); | 1715 | struct netns_ipvs *ipvs = net_ipvs(net); |
1719 | char *name; | 1716 | char *name; |
1720 | int (*threadfn)(void *data); | 1717 | int (*threadfn)(void *data); |
1721 | int id, count; | 1718 | int id, count; |
1722 | int result = -ENOMEM; | 1719 | int result = -ENOMEM; |
1723 | 1720 | ||
1724 | IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); | 1721 | IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); |
1725 | IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n", | 1722 | IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n", |
1726 | sizeof(struct ip_vs_sync_conn_v0)); | 1723 | sizeof(struct ip_vs_sync_conn_v0)); |
1727 | 1724 | ||
1728 | if (!ipvs->sync_state) { | 1725 | if (!ipvs->sync_state) { |
1729 | count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX); | 1726 | count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX); |
1730 | ipvs->threads_mask = count - 1; | 1727 | ipvs->threads_mask = count - 1; |
1731 | } else | 1728 | } else |
1732 | count = ipvs->threads_mask + 1; | 1729 | count = ipvs->threads_mask + 1; |
1733 | 1730 | ||
1734 | if (state == IP_VS_STATE_MASTER) { | 1731 | if (state == IP_VS_STATE_MASTER) { |
1735 | if (ipvs->ms) | 1732 | if (ipvs->ms) |
1736 | return -EEXIST; | 1733 | return -EEXIST; |
1737 | 1734 | ||
1738 | strlcpy(ipvs->master_mcast_ifn, mcast_ifn, | 1735 | strlcpy(ipvs->master_mcast_ifn, mcast_ifn, |
1739 | sizeof(ipvs->master_mcast_ifn)); | 1736 | sizeof(ipvs->master_mcast_ifn)); |
1740 | ipvs->master_syncid = syncid; | 1737 | ipvs->master_syncid = syncid; |
1741 | name = "ipvs-m:%d:%d"; | 1738 | name = "ipvs-m:%d:%d"; |
1742 | threadfn = sync_thread_master; | 1739 | threadfn = sync_thread_master; |
1743 | } else if (state == IP_VS_STATE_BACKUP) { | 1740 | } else if (state == IP_VS_STATE_BACKUP) { |
1744 | if (ipvs->backup_threads) | 1741 | if (ipvs->backup_threads) |
1745 | return -EEXIST; | 1742 | return -EEXIST; |
1746 | 1743 | ||
1747 | strlcpy(ipvs->backup_mcast_ifn, mcast_ifn, | 1744 | strlcpy(ipvs->backup_mcast_ifn, mcast_ifn, |
1748 | sizeof(ipvs->backup_mcast_ifn)); | 1745 | sizeof(ipvs->backup_mcast_ifn)); |
1749 | ipvs->backup_syncid = syncid; | 1746 | ipvs->backup_syncid = syncid; |
1750 | name = "ipvs-b:%d:%d"; | 1747 | name = "ipvs-b:%d:%d"; |
1751 | threadfn = sync_thread_backup; | 1748 | threadfn = sync_thread_backup; |
1752 | } else { | 1749 | } else { |
1753 | return -EINVAL; | 1750 | return -EINVAL; |
1754 | } | 1751 | } |
1755 | 1752 | ||
1756 | if (state == IP_VS_STATE_MASTER) { | 1753 | if (state == IP_VS_STATE_MASTER) { |
1757 | struct ipvs_master_sync_state *ms; | 1754 | struct ipvs_master_sync_state *ms; |
1758 | 1755 | ||
1759 | ipvs->ms = kzalloc(count * sizeof(ipvs->ms[0]), GFP_KERNEL); | 1756 | ipvs->ms = kzalloc(count * sizeof(ipvs->ms[0]), GFP_KERNEL); |
1760 | if (!ipvs->ms) | 1757 | if (!ipvs->ms) |
1761 | goto out; | 1758 | goto out; |
1762 | ms = ipvs->ms; | 1759 | ms = ipvs->ms; |
1763 | for (id = 0; id < count; id++, ms++) { | 1760 | for (id = 0; id < count; id++, ms++) { |
1764 | INIT_LIST_HEAD(&ms->sync_queue); | 1761 | INIT_LIST_HEAD(&ms->sync_queue); |
1765 | ms->sync_queue_len = 0; | 1762 | ms->sync_queue_len = 0; |
1766 | ms->sync_queue_delay = 0; | 1763 | ms->sync_queue_delay = 0; |
1767 | INIT_DELAYED_WORK(&ms->master_wakeup_work, | 1764 | INIT_DELAYED_WORK(&ms->master_wakeup_work, |
1768 | master_wakeup_work_handler); | 1765 | master_wakeup_work_handler); |
1769 | ms->ipvs = ipvs; | 1766 | ms->ipvs = ipvs; |
1770 | } | 1767 | } |
1771 | } else { | 1768 | } else { |
1772 | array = kzalloc(count * sizeof(struct task_struct *), | 1769 | array = kzalloc(count * sizeof(struct task_struct *), |
1773 | GFP_KERNEL); | 1770 | GFP_KERNEL); |
1774 | if (!array) | 1771 | if (!array) |
1775 | goto out; | 1772 | goto out; |
1776 | } | 1773 | } |
1777 | set_sync_mesg_maxlen(net, state); | 1774 | set_sync_mesg_maxlen(net, state); |
1778 | 1775 | ||
1779 | tinfo = NULL; | 1776 | tinfo = NULL; |
1780 | for (id = 0; id < count; id++) { | 1777 | for (id = 0; id < count; id++) { |
1781 | if (state == IP_VS_STATE_MASTER) | 1778 | if (state == IP_VS_STATE_MASTER) |
1782 | sock = make_send_sock(net, id); | 1779 | sock = make_send_sock(net, id); |
1783 | else | 1780 | else |
1784 | sock = make_receive_sock(net, id); | 1781 | sock = make_receive_sock(net, id); |
1785 | if (IS_ERR(sock)) { | 1782 | if (IS_ERR(sock)) { |
1786 | result = PTR_ERR(sock); | 1783 | result = PTR_ERR(sock); |
1787 | goto outtinfo; | 1784 | goto outtinfo; |
1788 | } | 1785 | } |
1789 | tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL); | 1786 | tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL); |
1790 | if (!tinfo) | 1787 | if (!tinfo) |
1791 | goto outsocket; | 1788 | goto outsocket; |
1792 | tinfo->net = net; | 1789 | tinfo->net = net; |
1793 | tinfo->sock = sock; | 1790 | tinfo->sock = sock; |
1794 | if (state == IP_VS_STATE_BACKUP) { | 1791 | if (state == IP_VS_STATE_BACKUP) { |
1795 | tinfo->buf = kmalloc(ipvs->recv_mesg_maxlen, | 1792 | tinfo->buf = kmalloc(ipvs->recv_mesg_maxlen, |
1796 | GFP_KERNEL); | 1793 | GFP_KERNEL); |
1797 | if (!tinfo->buf) | 1794 | if (!tinfo->buf) |
1798 | goto outtinfo; | 1795 | goto outtinfo; |
1799 | } else { | 1796 | } else { |
1800 | tinfo->buf = NULL; | 1797 | tinfo->buf = NULL; |
1801 | } | 1798 | } |
1802 | tinfo->id = id; | 1799 | tinfo->id = id; |
1803 | 1800 | ||
1804 | task = kthread_run(threadfn, tinfo, name, ipvs->gen, id); | 1801 | task = kthread_run(threadfn, tinfo, name, ipvs->gen, id); |
1805 | if (IS_ERR(task)) { | 1802 | if (IS_ERR(task)) { |
1806 | result = PTR_ERR(task); | 1803 | result = PTR_ERR(task); |
1807 | goto outtinfo; | 1804 | goto outtinfo; |
1808 | } | 1805 | } |
1809 | tinfo = NULL; | 1806 | tinfo = NULL; |
1810 | if (state == IP_VS_STATE_MASTER) | 1807 | if (state == IP_VS_STATE_MASTER) |
1811 | ipvs->ms[id].master_thread = task; | 1808 | ipvs->ms[id].master_thread = task; |
1812 | else | 1809 | else |
1813 | array[id] = task; | 1810 | array[id] = task; |
1814 | } | 1811 | } |
1815 | 1812 | ||
1816 | /* mark as active */ | 1813 | /* mark as active */ |
1817 | 1814 | ||
1818 | if (state == IP_VS_STATE_BACKUP) | 1815 | if (state == IP_VS_STATE_BACKUP) |
1819 | ipvs->backup_threads = array; | 1816 | ipvs->backup_threads = array; |
1820 | spin_lock_bh(&ipvs->sync_buff_lock); | 1817 | spin_lock_bh(&ipvs->sync_buff_lock); |
1821 | ipvs->sync_state |= state; | 1818 | ipvs->sync_state |= state; |
1822 | spin_unlock_bh(&ipvs->sync_buff_lock); | 1819 | spin_unlock_bh(&ipvs->sync_buff_lock); |
1823 | 1820 | ||
1824 | /* increase the module use count */ | 1821 | /* increase the module use count */ |
1825 | ip_vs_use_count_inc(); | 1822 | ip_vs_use_count_inc(); |
1826 | 1823 | ||
1827 | return 0; | 1824 | return 0; |
1828 | 1825 | ||
1829 | outsocket: | 1826 | outsocket: |
1830 | sk_release_kernel(sock->sk); | 1827 | sk_release_kernel(sock->sk); |
1831 | 1828 | ||
1832 | outtinfo: | 1829 | outtinfo: |
1833 | if (tinfo) { | 1830 | if (tinfo) { |
1834 | sk_release_kernel(tinfo->sock->sk); | 1831 | sk_release_kernel(tinfo->sock->sk); |
1835 | kfree(tinfo->buf); | 1832 | kfree(tinfo->buf); |
1836 | kfree(tinfo); | 1833 | kfree(tinfo); |
1837 | } | 1834 | } |
1838 | count = id; | 1835 | count = id; |
1839 | while (count-- > 0) { | 1836 | while (count-- > 0) { |
1840 | if (state == IP_VS_STATE_MASTER) | 1837 | if (state == IP_VS_STATE_MASTER) |
1841 | kthread_stop(ipvs->ms[count].master_thread); | 1838 | kthread_stop(ipvs->ms[count].master_thread); |
1842 | else | 1839 | else |
1843 | kthread_stop(array[count]); | 1840 | kthread_stop(array[count]); |
1844 | } | 1841 | } |
1845 | kfree(array); | 1842 | kfree(array); |
1846 | 1843 | ||
1847 | out: | 1844 | out: |
1848 | if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { | 1845 | if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { |
1849 | kfree(ipvs->ms); | 1846 | kfree(ipvs->ms); |
1850 | ipvs->ms = NULL; | 1847 | ipvs->ms = NULL; |
1851 | } | 1848 | } |
1852 | return result; | 1849 | return result; |
1853 | } | 1850 | } |
1854 | 1851 | ||
1855 | 1852 | ||
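
The cleanup labels above unwind in reverse order: outsocket releases the socket whose thread never started, outtinfo frees the half-built tinfo, the while (count-- > 0) loop stops every thread that did start, and out drops the shared allocations. A minimal sketch of that unwind shape, assuming a hypothetical worker_fn() and a caller-supplied task array (nothing here is taken from this commit):

#include <linux/err.h>
#include <linux/kthread.h>

static int worker_fn(void *data);	/* hypothetical worker body */

/* Sketch only: start count kthreads, unwinding on partial failure. */
static int start_all(struct task_struct **tasks, int count)
{
	int id, err;

	for (id = 0; id < count; id++) {
		tasks[id] = kthread_run(worker_fn, NULL, "worker/%d", id);
		if (IS_ERR(tasks[id])) {
			err = PTR_ERR(tasks[id]);
			goto unwind;
		}
	}
	return 0;

unwind:
	/* Stop only the threads that actually started, newest first. */
	while (id-- > 0)
		kthread_stop(tasks[id]);
	return err;
}
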
1856 | int stop_sync_thread(struct net *net, int state) | 1853 | int stop_sync_thread(struct net *net, int state) |
1857 | { | 1854 | { |
1858 | struct netns_ipvs *ipvs = net_ipvs(net); | 1855 | struct netns_ipvs *ipvs = net_ipvs(net); |
1859 | struct task_struct **array; | 1856 | struct task_struct **array; |
1860 | int id; | 1857 | int id; |
1861 | int retc = -EINVAL; | 1858 | int retc = -EINVAL; |
1862 | 1859 | ||
1863 | IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); | 1860 | IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); |
1864 | 1861 | ||
1865 | if (state == IP_VS_STATE_MASTER) { | 1862 | if (state == IP_VS_STATE_MASTER) { |
1866 | if (!ipvs->ms) | 1863 | if (!ipvs->ms) |
1867 | return -ESRCH; | 1864 | return -ESRCH; |
1868 | 1865 | ||
1869 | /* | 1866 | /* |
1870 | * The lock synchronizes with sb_queue_tail(), so that we don't | 1867 | * The lock synchronizes with sb_queue_tail(), so that we don't |
1871 | * add sync buffers to the queue while we are already in the | 1868 | * add sync buffers to the queue while we are already in the |
1872 | * process of stopping the master sync daemon. | 1869 | * process of stopping the master sync daemon. |
1873 | */ | 1870 | */ |
1874 | 1871 | ||
1875 | spin_lock_bh(&ipvs->sync_buff_lock); | 1872 | spin_lock_bh(&ipvs->sync_buff_lock); |
1876 | spin_lock(&ipvs->sync_lock); | 1873 | spin_lock(&ipvs->sync_lock); |
1877 | ipvs->sync_state &= ~IP_VS_STATE_MASTER; | 1874 | ipvs->sync_state &= ~IP_VS_STATE_MASTER; |
1878 | spin_unlock(&ipvs->sync_lock); | 1875 | spin_unlock(&ipvs->sync_lock); |
1879 | spin_unlock_bh(&ipvs->sync_buff_lock); | 1876 | spin_unlock_bh(&ipvs->sync_buff_lock); |
1880 | 1877 | ||
1881 | retc = 0; | 1878 | retc = 0; |
1882 | for (id = ipvs->threads_mask; id >= 0; id--) { | 1879 | for (id = ipvs->threads_mask; id >= 0; id--) { |
1883 | struct ipvs_master_sync_state *ms = &ipvs->ms[id]; | 1880 | struct ipvs_master_sync_state *ms = &ipvs->ms[id]; |
1884 | int ret; | 1881 | int ret; |
1885 | 1882 | ||
1886 | pr_info("stopping master sync thread %d ...\n", | 1883 | pr_info("stopping master sync thread %d ...\n", |
1887 | task_pid_nr(ms->master_thread)); | 1884 | task_pid_nr(ms->master_thread)); |
1888 | cancel_delayed_work_sync(&ms->master_wakeup_work); | 1885 | cancel_delayed_work_sync(&ms->master_wakeup_work); |
1889 | ret = kthread_stop(ms->master_thread); | 1886 | ret = kthread_stop(ms->master_thread); |
1890 | if (retc >= 0) | 1887 | if (retc >= 0) |
1891 | retc = ret; | 1888 | retc = ret; |
1892 | } | 1889 | } |
1893 | kfree(ipvs->ms); | 1890 | kfree(ipvs->ms); |
1894 | ipvs->ms = NULL; | 1891 | ipvs->ms = NULL; |
1895 | } else if (state == IP_VS_STATE_BACKUP) { | 1892 | } else if (state == IP_VS_STATE_BACKUP) { |
1896 | if (!ipvs->backup_threads) | 1893 | if (!ipvs->backup_threads) |
1897 | return -ESRCH; | 1894 | return -ESRCH; |
1898 | 1895 | ||
1899 | ipvs->sync_state &= ~IP_VS_STATE_BACKUP; | 1896 | ipvs->sync_state &= ~IP_VS_STATE_BACKUP; |
1900 | array = ipvs->backup_threads; | 1897 | array = ipvs->backup_threads; |
1901 | retc = 0; | 1898 | retc = 0; |
1902 | for (id = ipvs->threads_mask; id >= 0; id--) { | 1899 | for (id = ipvs->threads_mask; id >= 0; id--) { |
1903 | int ret; | 1900 | int ret; |
1904 | 1901 | ||
1905 | pr_info("stopping backup sync thread %d ...\n", | 1902 | pr_info("stopping backup sync thread %d ...\n", |
1906 | task_pid_nr(array[id])); | 1903 | task_pid_nr(array[id])); |
1907 | ret = kthread_stop(array[id]); | 1904 | ret = kthread_stop(array[id]); |
1908 | if (retc >= 0) | 1905 | if (retc >= 0) |
1909 | retc = ret; | 1906 | retc = ret; |
1910 | } | 1907 | } |
1911 | kfree(array); | 1908 | kfree(array); |
1912 | ipvs->backup_threads = NULL; | 1909 | ipvs->backup_threads = NULL; |
1913 | } | 1910 | } |
1914 | 1911 | ||
1915 | /* decrease the module use count */ | 1912 | /* decrease the module use count */ |
1916 | ip_vs_use_count_dec(); | 1913 | ip_vs_use_count_dec(); |
1917 | 1914 | ||
1918 | return retc; | 1915 | return retc; |
1919 | } | 1916 | } |
1920 | 1917 | ||
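
Note that kthread_stop() is what breaks these threads out of their sleeps: it marks the kthread as stopping and wakes the task, so a sleep built on the wait_event_interruptible()/__wait_event_interruptible() interfaces this commit reworks returns, and the loop can observe kthread_should_stop(). A sketch of that canonical loop shape, using hypothetical sketch_* names rather than the actual IPVS thread bodies:

#include <linux/kthread.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(sketch_wq);
static bool sketch_work_ready;

/* Sketch only: the stop-aware wait loop a kthread typically runs. */
static int sketch_thread(void *data)
{
	while (!kthread_should_stop()) {
		/* kthread_stop() wakes us and makes the condition true. */
		wait_event_interruptible(sketch_wq,
					 sketch_work_ready ||
					 kthread_should_stop());
		if (sketch_work_ready) {
			sketch_work_ready = false;
			/* ... drain one unit of work ... */
		}
	}
	return 0;	/* becomes the return value of kthread_stop() */
}
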
1921 | /* | 1918 | /* |
1922 | * Initialize data struct for each netns | 1919 | * Initialize data struct for each netns |
1923 | */ | 1920 | */ |
1924 | int __net_init ip_vs_sync_net_init(struct net *net) | 1921 | int __net_init ip_vs_sync_net_init(struct net *net) |
1925 | { | 1922 | { |
1926 | struct netns_ipvs *ipvs = net_ipvs(net); | 1923 | struct netns_ipvs *ipvs = net_ipvs(net); |
1927 | 1924 | ||
1928 | __mutex_init(&ipvs->sync_mutex, "ipvs->sync_mutex", &__ipvs_sync_key); | 1925 | __mutex_init(&ipvs->sync_mutex, "ipvs->sync_mutex", &__ipvs_sync_key); |
1929 | spin_lock_init(&ipvs->sync_lock); | 1926 | spin_lock_init(&ipvs->sync_lock); |
1930 | spin_lock_init(&ipvs->sync_buff_lock); | 1927 | spin_lock_init(&ipvs->sync_buff_lock); |
1931 | return 0; | 1928 | return 0; |
1932 | } | 1929 | } |
1933 | 1930 | ||
1934 | void ip_vs_sync_net_cleanup(struct net *net) | 1931 | void ip_vs_sync_net_cleanup(struct net *net) |
1935 | { | 1932 | { |
1936 | int retc; | 1933 | int retc; |
1937 | struct netns_ipvs *ipvs = net_ipvs(net); | 1934 | struct netns_ipvs *ipvs = net_ipvs(net); |
1938 | 1935 | ||
1939 | mutex_lock(&ipvs->sync_mutex); | 1936 | mutex_lock(&ipvs->sync_mutex); |
1940 | retc = stop_sync_thread(net, IP_VS_STATE_MASTER); | 1937 | retc = stop_sync_thread(net, IP_VS_STATE_MASTER); |
1941 | if (retc && retc != -ESRCH) | 1938 | if (retc && retc != -ESRCH) |
1942 | pr_err("Failed to stop Master Daemon\n"); | 1939 | pr_err("Failed to stop Master Daemon\n"); |
1943 | 1940 | ||
1944 | retc = stop_sync_thread(net, IP_VS_STATE_BACKUP); | 1941 | retc = stop_sync_thread(net, IP_VS_STATE_BACKUP); |
1945 | if (retc && retc != -ESRCH) | 1942 | if (retc && retc != -ESRCH) |
1946 | pr_err("Failed to stop Backup Daemon\n"); | 1943 | pr_err("Failed to stop Backup Daemon\n"); |
1947 | mutex_unlock(&ipvs->sync_mutex); | 1944 | mutex_unlock(&ipvs->sync_mutex); |
1948 | } | 1945 | } |
1949 | 1946 | ||
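
ip_vs_sync_net_init() and ip_vs_sync_net_cleanup() are the per-network-namespace setup and teardown halves, reached through the IPVS core's own pernet init/exit paths. For illustration only, a sketch of how such a pair is commonly wired up through register_pernet_subsys(), with hypothetical sketch_* names (this is not how IPVS itself registers these functions):

#include <linux/module.h>
#include <net/net_namespace.h>

static int __net_init sketch_net_init(struct net *net)
{
	/* allocate and initialize per-namespace state here */
	return 0;
}

static void __net_exit sketch_net_exit(struct net *net)
{
	/* stop threads and free per-namespace state here */
}

static struct pernet_operations sketch_net_ops = {
	.init = sketch_net_init,
	.exit = sketch_net_exit,
};

static int __init sketch_module_init(void)
{
	return register_pernet_subsys(&sketch_net_ops);
}

static void __exit sketch_module_exit(void)
{
	unregister_pernet_subsys(&sketch_net_ops);
}

module_init(sketch_module_init);
module_exit(sketch_module_exit);
MODULE_LICENSE("GPL");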