Commit 922f9cfa79b52c85b6002d96cb0eefd13437c58c
Committed by: Linus Torvalds
Parent: b55ab616fa
Exists in master and in 41 other branches
fs/char_dev.c: chrdev_open marked static and removed from fs.h
An outdated comment in serial_core.c is also fixed.

Signed-off-by: Denis Cheng <crquan@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
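The change this commit describes follows a common C refactoring: when a function's only callers live in the same translation unit, its prototype can be dropped from the shared header and the definition marked static, shrinking the public interface and letting the compiler catch any stray external use. Below is a minimal, self-contained sketch of that pattern with generic names; it is an illustration only, not the actual fs.h/char_dev.c hunks from this commit.

```c
#include <stdio.h>

/* Before: helper() was declared in a shared header (say, helpers.h) so any
 * file could call it.  After the cleanup, the declaration is removed from
 * the header and the definition below gains 'static', making the symbol
 * local to this file -- the same treatment the commit applies to
 * chrdev_open() in fs/char_dev.c. */
static int helper(int value)
{
	return value * 2;
}

/* The remaining in-file user keeps working unchanged. */
int main(void)
{
	printf("%d\n", helper(21));
	return 0;
}
```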
Showing 3 changed files with 3 additions and 4 deletions (inline diff)
drivers/serial/serial_core.c
1 | /* | 1 | /* |
2 | * linux/drivers/char/core.c | 2 | * linux/drivers/char/core.c |
3 | * | 3 | * |
4 | * Driver core for serial ports | 4 | * Driver core for serial ports |
5 | * | 5 | * |
6 | * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. | 6 | * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. |
7 | * | 7 | * |
8 | * Copyright 1999 ARM Limited | 8 | * Copyright 1999 ARM Limited |
9 | * Copyright (C) 2000-2001 Deep Blue Solutions Ltd. | 9 | * Copyright (C) 2000-2001 Deep Blue Solutions Ltd. |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
12 | * it under the terms of the GNU General Public License as published by | 12 | * it under the terms of the GNU General Public License as published by |
13 | * the Free Software Foundation; either version 2 of the License, or | 13 | * the Free Software Foundation; either version 2 of the License, or |
14 | * (at your option) any later version. | 14 | * (at your option) any later version. |
15 | * | 15 | * |
16 | * This program is distributed in the hope that it will be useful, | 16 | * This program is distributed in the hope that it will be useful, |
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
19 | * GNU General Public License for more details. | 19 | * GNU General Public License for more details. |
20 | * | 20 | * |
21 | * You should have received a copy of the GNU General Public License | 21 | * You should have received a copy of the GNU General Public License |
22 | * along with this program; if not, write to the Free Software | 22 | * along with this program; if not, write to the Free Software |
23 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 23 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
24 | */ | 24 | */ |
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/tty.h> | 26 | #include <linux/tty.h> |
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include <linux/init.h> | 28 | #include <linux/init.h> |
29 | #include <linux/console.h> | 29 | #include <linux/console.h> |
30 | #include <linux/serial_core.h> | 30 | #include <linux/serial_core.h> |
31 | #include <linux/smp_lock.h> | 31 | #include <linux/smp_lock.h> |
32 | #include <linux/device.h> | 32 | #include <linux/device.h> |
33 | #include <linux/serial.h> /* for serial_state and serial_icounter_struct */ | 33 | #include <linux/serial.h> /* for serial_state and serial_icounter_struct */ |
34 | #include <linux/delay.h> | 34 | #include <linux/delay.h> |
35 | #include <linux/mutex.h> | 35 | #include <linux/mutex.h> |
36 | 36 | ||
37 | #include <asm/irq.h> | 37 | #include <asm/irq.h> |
38 | #include <asm/uaccess.h> | 38 | #include <asm/uaccess.h> |
39 | 39 | ||
40 | /* | 40 | /* |
41 | * This is used to lock changes in serial line configuration. | 41 | * This is used to lock changes in serial line configuration. |
42 | */ | 42 | */ |
43 | static DEFINE_MUTEX(port_mutex); | 43 | static DEFINE_MUTEX(port_mutex); |
44 | 44 | ||
45 | /* | 45 | /* |
46 | * lockdep: port->lock is initialized in two places, but we | 46 | * lockdep: port->lock is initialized in two places, but we |
47 | * want only one lock-class: | 47 | * want only one lock-class: |
48 | */ | 48 | */ |
49 | static struct lock_class_key port_lock_key; | 49 | static struct lock_class_key port_lock_key; |
50 | 50 | ||
51 | #define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8) | 51 | #define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8) |
52 | 52 | ||
53 | #define uart_users(state) ((state)->count + ((state)->info ? (state)->info->blocked_open : 0)) | 53 | #define uart_users(state) ((state)->count + ((state)->info ? (state)->info->blocked_open : 0)) |
54 | 54 | ||
55 | #ifdef CONFIG_SERIAL_CORE_CONSOLE | 55 | #ifdef CONFIG_SERIAL_CORE_CONSOLE |
56 | #define uart_console(port) ((port)->cons && (port)->cons->index == (port)->line) | 56 | #define uart_console(port) ((port)->cons && (port)->cons->index == (port)->line) |
57 | #else | 57 | #else |
58 | #define uart_console(port) (0) | 58 | #define uart_console(port) (0) |
59 | #endif | 59 | #endif |
60 | 60 | ||
61 | static void uart_change_speed(struct uart_state *state, | 61 | static void uart_change_speed(struct uart_state *state, |
62 | struct ktermios *old_termios); | 62 | struct ktermios *old_termios); |
63 | static void uart_wait_until_sent(struct tty_struct *tty, int timeout); | 63 | static void uart_wait_until_sent(struct tty_struct *tty, int timeout); |
64 | static void uart_change_pm(struct uart_state *state, int pm_state); | 64 | static void uart_change_pm(struct uart_state *state, int pm_state); |
65 | 65 | ||
66 | /* | 66 | /* |
67 | * This routine is used by the interrupt handler to schedule processing in | 67 | * This routine is used by the interrupt handler to schedule processing in |
68 | * the software interrupt portion of the driver. | 68 | * the software interrupt portion of the driver. |
69 | */ | 69 | */ |
70 | void uart_write_wakeup(struct uart_port *port) | 70 | void uart_write_wakeup(struct uart_port *port) |
71 | { | 71 | { |
72 | struct uart_info *info = port->info; | 72 | struct uart_info *info = port->info; |
73 | /* | 73 | /* |
74 | * This means you called this function _after_ the port was | 74 | * This means you called this function _after_ the port was |
75 | * closed. No cookie for you. | 75 | * closed. No cookie for you. |
76 | */ | 76 | */ |
77 | BUG_ON(!info); | 77 | BUG_ON(!info); |
78 | tasklet_schedule(&info->tlet); | 78 | tasklet_schedule(&info->tlet); |
79 | } | 79 | } |
80 | 80 | ||
81 | static void uart_stop(struct tty_struct *tty) | 81 | static void uart_stop(struct tty_struct *tty) |
82 | { | 82 | { |
83 | struct uart_state *state = tty->driver_data; | 83 | struct uart_state *state = tty->driver_data; |
84 | struct uart_port *port = state->port; | 84 | struct uart_port *port = state->port; |
85 | unsigned long flags; | 85 | unsigned long flags; |
86 | 86 | ||
87 | spin_lock_irqsave(&port->lock, flags); | 87 | spin_lock_irqsave(&port->lock, flags); |
88 | port->ops->stop_tx(port); | 88 | port->ops->stop_tx(port); |
89 | spin_unlock_irqrestore(&port->lock, flags); | 89 | spin_unlock_irqrestore(&port->lock, flags); |
90 | } | 90 | } |
91 | 91 | ||
92 | static void __uart_start(struct tty_struct *tty) | 92 | static void __uart_start(struct tty_struct *tty) |
93 | { | 93 | { |
94 | struct uart_state *state = tty->driver_data; | 94 | struct uart_state *state = tty->driver_data; |
95 | struct uart_port *port = state->port; | 95 | struct uart_port *port = state->port; |
96 | 96 | ||
97 | if (!uart_circ_empty(&state->info->xmit) && state->info->xmit.buf && | 97 | if (!uart_circ_empty(&state->info->xmit) && state->info->xmit.buf && |
98 | !tty->stopped && !tty->hw_stopped) | 98 | !tty->stopped && !tty->hw_stopped) |
99 | port->ops->start_tx(port); | 99 | port->ops->start_tx(port); |
100 | } | 100 | } |
101 | 101 | ||
102 | static void uart_start(struct tty_struct *tty) | 102 | static void uart_start(struct tty_struct *tty) |
103 | { | 103 | { |
104 | struct uart_state *state = tty->driver_data; | 104 | struct uart_state *state = tty->driver_data; |
105 | struct uart_port *port = state->port; | 105 | struct uart_port *port = state->port; |
106 | unsigned long flags; | 106 | unsigned long flags; |
107 | 107 | ||
108 | spin_lock_irqsave(&port->lock, flags); | 108 | spin_lock_irqsave(&port->lock, flags); |
109 | __uart_start(tty); | 109 | __uart_start(tty); |
110 | spin_unlock_irqrestore(&port->lock, flags); | 110 | spin_unlock_irqrestore(&port->lock, flags); |
111 | } | 111 | } |
112 | 112 | ||
113 | static void uart_tasklet_action(unsigned long data) | 113 | static void uart_tasklet_action(unsigned long data) |
114 | { | 114 | { |
115 | struct uart_state *state = (struct uart_state *)data; | 115 | struct uart_state *state = (struct uart_state *)data; |
116 | tty_wakeup(state->info->tty); | 116 | tty_wakeup(state->info->tty); |
117 | } | 117 | } |
118 | 118 | ||
119 | static inline void | 119 | static inline void |
120 | uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear) | 120 | uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear) |
121 | { | 121 | { |
122 | unsigned long flags; | 122 | unsigned long flags; |
123 | unsigned int old; | 123 | unsigned int old; |
124 | 124 | ||
125 | spin_lock_irqsave(&port->lock, flags); | 125 | spin_lock_irqsave(&port->lock, flags); |
126 | old = port->mctrl; | 126 | old = port->mctrl; |
127 | port->mctrl = (old & ~clear) | set; | 127 | port->mctrl = (old & ~clear) | set; |
128 | if (old != port->mctrl) | 128 | if (old != port->mctrl) |
129 | port->ops->set_mctrl(port, port->mctrl); | 129 | port->ops->set_mctrl(port, port->mctrl); |
130 | spin_unlock_irqrestore(&port->lock, flags); | 130 | spin_unlock_irqrestore(&port->lock, flags); |
131 | } | 131 | } |
132 | 132 | ||
133 | #define uart_set_mctrl(port, set) uart_update_mctrl(port, set, 0) | 133 | #define uart_set_mctrl(port, set) uart_update_mctrl(port, set, 0) |
134 | #define uart_clear_mctrl(port, clear) uart_update_mctrl(port, 0, clear) | 134 | #define uart_clear_mctrl(port, clear) uart_update_mctrl(port, 0, clear) |
135 | 135 | ||
136 | /* | 136 | /* |
137 | * Startup the port. This will be called once per open. All calls | 137 | * Startup the port. This will be called once per open. All calls |
138 | * will be serialised by the per-port semaphore. | 138 | * will be serialised by the per-port semaphore. |
139 | */ | 139 | */ |
140 | static int uart_startup(struct uart_state *state, int init_hw) | 140 | static int uart_startup(struct uart_state *state, int init_hw) |
141 | { | 141 | { |
142 | struct uart_info *info = state->info; | 142 | struct uart_info *info = state->info; |
143 | struct uart_port *port = state->port; | 143 | struct uart_port *port = state->port; |
144 | unsigned long page; | 144 | unsigned long page; |
145 | int retval = 0; | 145 | int retval = 0; |
146 | 146 | ||
147 | if (info->flags & UIF_INITIALIZED) | 147 | if (info->flags & UIF_INITIALIZED) |
148 | return 0; | 148 | return 0; |
149 | 149 | ||
150 | /* | 150 | /* |
151 | * Set the TTY IO error marker - we will only clear this | 151 | * Set the TTY IO error marker - we will only clear this |
152 | * once we have successfully opened the port. Also set | 152 | * once we have successfully opened the port. Also set |
153 | * up the tty->alt_speed kludge | 153 | * up the tty->alt_speed kludge |
154 | */ | 154 | */ |
155 | set_bit(TTY_IO_ERROR, &info->tty->flags); | 155 | set_bit(TTY_IO_ERROR, &info->tty->flags); |
156 | 156 | ||
157 | if (port->type == PORT_UNKNOWN) | 157 | if (port->type == PORT_UNKNOWN) |
158 | return 0; | 158 | return 0; |
159 | 159 | ||
160 | /* | 160 | /* |
161 | * Initialise and allocate the transmit and temporary | 161 | * Initialise and allocate the transmit and temporary |
162 | * buffer. | 162 | * buffer. |
163 | */ | 163 | */ |
164 | if (!info->xmit.buf) { | 164 | if (!info->xmit.buf) { |
165 | page = get_zeroed_page(GFP_KERNEL); | 165 | page = get_zeroed_page(GFP_KERNEL); |
166 | if (!page) | 166 | if (!page) |
167 | return -ENOMEM; | 167 | return -ENOMEM; |
168 | 168 | ||
169 | info->xmit.buf = (unsigned char *) page; | 169 | info->xmit.buf = (unsigned char *) page; |
170 | uart_circ_clear(&info->xmit); | 170 | uart_circ_clear(&info->xmit); |
171 | } | 171 | } |
172 | 172 | ||
173 | retval = port->ops->startup(port); | 173 | retval = port->ops->startup(port); |
174 | if (retval == 0) { | 174 | if (retval == 0) { |
175 | if (init_hw) { | 175 | if (init_hw) { |
176 | /* | 176 | /* |
177 | * Initialise the hardware port settings. | 177 | * Initialise the hardware port settings. |
178 | */ | 178 | */ |
179 | uart_change_speed(state, NULL); | 179 | uart_change_speed(state, NULL); |
180 | 180 | ||
181 | /* | 181 | /* |
182 | * Setup the RTS and DTR signals once the | 182 | * Setup the RTS and DTR signals once the |
183 | * port is open and ready to respond. | 183 | * port is open and ready to respond. |
184 | */ | 184 | */ |
185 | if (info->tty->termios->c_cflag & CBAUD) | 185 | if (info->tty->termios->c_cflag & CBAUD) |
186 | uart_set_mctrl(port, TIOCM_RTS | TIOCM_DTR); | 186 | uart_set_mctrl(port, TIOCM_RTS | TIOCM_DTR); |
187 | } | 187 | } |
188 | 188 | ||
189 | if (info->flags & UIF_CTS_FLOW) { | 189 | if (info->flags & UIF_CTS_FLOW) { |
190 | spin_lock_irq(&port->lock); | 190 | spin_lock_irq(&port->lock); |
191 | if (!(port->ops->get_mctrl(port) & TIOCM_CTS)) | 191 | if (!(port->ops->get_mctrl(port) & TIOCM_CTS)) |
192 | info->tty->hw_stopped = 1; | 192 | info->tty->hw_stopped = 1; |
193 | spin_unlock_irq(&port->lock); | 193 | spin_unlock_irq(&port->lock); |
194 | } | 194 | } |
195 | 195 | ||
196 | info->flags |= UIF_INITIALIZED; | 196 | info->flags |= UIF_INITIALIZED; |
197 | 197 | ||
198 | clear_bit(TTY_IO_ERROR, &info->tty->flags); | 198 | clear_bit(TTY_IO_ERROR, &info->tty->flags); |
199 | } | 199 | } |
200 | 200 | ||
201 | if (retval && capable(CAP_SYS_ADMIN)) | 201 | if (retval && capable(CAP_SYS_ADMIN)) |
202 | retval = 0; | 202 | retval = 0; |
203 | 203 | ||
204 | return retval; | 204 | return retval; |
205 | } | 205 | } |
206 | 206 | ||
207 | /* | 207 | /* |
208 | * This routine will shutdown a serial port; interrupts are disabled, and | 208 | * This routine will shutdown a serial port; interrupts are disabled, and |
209 | * DTR is dropped if the hangup on close termio flag is on. Calls to | 209 | * DTR is dropped if the hangup on close termio flag is on. Calls to |
210 | * uart_shutdown are serialised by the per-port semaphore. | 210 | * uart_shutdown are serialised by the per-port semaphore. |
211 | */ | 211 | */ |
212 | static void uart_shutdown(struct uart_state *state) | 212 | static void uart_shutdown(struct uart_state *state) |
213 | { | 213 | { |
214 | struct uart_info *info = state->info; | 214 | struct uart_info *info = state->info; |
215 | struct uart_port *port = state->port; | 215 | struct uart_port *port = state->port; |
216 | 216 | ||
217 | /* | 217 | /* |
218 | * Set the TTY IO error marker | 218 | * Set the TTY IO error marker |
219 | */ | 219 | */ |
220 | if (info->tty) | 220 | if (info->tty) |
221 | set_bit(TTY_IO_ERROR, &info->tty->flags); | 221 | set_bit(TTY_IO_ERROR, &info->tty->flags); |
222 | 222 | ||
223 | if (info->flags & UIF_INITIALIZED) { | 223 | if (info->flags & UIF_INITIALIZED) { |
224 | info->flags &= ~UIF_INITIALIZED; | 224 | info->flags &= ~UIF_INITIALIZED; |
225 | 225 | ||
226 | /* | 226 | /* |
227 | * Turn off DTR and RTS early. | 227 | * Turn off DTR and RTS early. |
228 | */ | 228 | */ |
229 | if (!info->tty || (info->tty->termios->c_cflag & HUPCL)) | 229 | if (!info->tty || (info->tty->termios->c_cflag & HUPCL)) |
230 | uart_clear_mctrl(port, TIOCM_DTR | TIOCM_RTS); | 230 | uart_clear_mctrl(port, TIOCM_DTR | TIOCM_RTS); |
231 | 231 | ||
232 | /* | 232 | /* |
233 | * clear delta_msr_wait queue to avoid mem leaks: we may free | 233 | * clear delta_msr_wait queue to avoid mem leaks: we may free |
234 | * the irq here so the queue might never be woken up. Note | 234 | * the irq here so the queue might never be woken up. Note |
235 | * that we won't end up waiting on delta_msr_wait again since | 235 | * that we won't end up waiting on delta_msr_wait again since |
236 | * any outstanding file descriptors should be pointing at | 236 | * any outstanding file descriptors should be pointing at |
237 | * hung_up_tty_fops now. | 237 | * hung_up_tty_fops now. |
238 | */ | 238 | */ |
239 | wake_up_interruptible(&info->delta_msr_wait); | 239 | wake_up_interruptible(&info->delta_msr_wait); |
240 | 240 | ||
241 | /* | 241 | /* |
242 | * Free the IRQ and disable the port. | 242 | * Free the IRQ and disable the port. |
243 | */ | 243 | */ |
244 | port->ops->shutdown(port); | 244 | port->ops->shutdown(port); |
245 | 245 | ||
246 | /* | 246 | /* |
247 | * Ensure that the IRQ handler isn't running on another CPU. | 247 | * Ensure that the IRQ handler isn't running on another CPU. |
248 | */ | 248 | */ |
249 | synchronize_irq(port->irq); | 249 | synchronize_irq(port->irq); |
250 | } | 250 | } |
251 | 251 | ||
252 | /* | 252 | /* |
253 | * kill off our tasklet | 253 | * kill off our tasklet |
254 | */ | 254 | */ |
255 | tasklet_kill(&info->tlet); | 255 | tasklet_kill(&info->tlet); |
256 | 256 | ||
257 | /* | 257 | /* |
258 | * Free the transmit buffer page. | 258 | * Free the transmit buffer page. |
259 | */ | 259 | */ |
260 | if (info->xmit.buf) { | 260 | if (info->xmit.buf) { |
261 | free_page((unsigned long)info->xmit.buf); | 261 | free_page((unsigned long)info->xmit.buf); |
262 | info->xmit.buf = NULL; | 262 | info->xmit.buf = NULL; |
263 | } | 263 | } |
264 | } | 264 | } |
265 | 265 | ||
266 | /** | 266 | /** |
267 | * uart_update_timeout - update per-port FIFO timeout. | 267 | * uart_update_timeout - update per-port FIFO timeout. |
268 | * @port: uart_port structure describing the port | 268 | * @port: uart_port structure describing the port |
269 | * @cflag: termios cflag value | 269 | * @cflag: termios cflag value |
270 | * @baud: speed of the port | 270 | * @baud: speed of the port |
271 | * | 271 | * |
272 | * Set the port FIFO timeout value. The @cflag value should | 272 | * Set the port FIFO timeout value. The @cflag value should |
273 | * reflect the actual hardware settings. | 273 | * reflect the actual hardware settings. |
274 | */ | 274 | */ |
275 | void | 275 | void |
276 | uart_update_timeout(struct uart_port *port, unsigned int cflag, | 276 | uart_update_timeout(struct uart_port *port, unsigned int cflag, |
277 | unsigned int baud) | 277 | unsigned int baud) |
278 | { | 278 | { |
279 | unsigned int bits; | 279 | unsigned int bits; |
280 | 280 | ||
281 | /* byte size and parity */ | 281 | /* byte size and parity */ |
282 | switch (cflag & CSIZE) { | 282 | switch (cflag & CSIZE) { |
283 | case CS5: | 283 | case CS5: |
284 | bits = 7; | 284 | bits = 7; |
285 | break; | 285 | break; |
286 | case CS6: | 286 | case CS6: |
287 | bits = 8; | 287 | bits = 8; |
288 | break; | 288 | break; |
289 | case CS7: | 289 | case CS7: |
290 | bits = 9; | 290 | bits = 9; |
291 | break; | 291 | break; |
292 | default: | 292 | default: |
293 | bits = 10; | 293 | bits = 10; |
294 | break; /* CS8 */ | 294 | break; /* CS8 */ |
295 | } | 295 | } |
296 | 296 | ||
297 | if (cflag & CSTOPB) | 297 | if (cflag & CSTOPB) |
298 | bits++; | 298 | bits++; |
299 | if (cflag & PARENB) | 299 | if (cflag & PARENB) |
300 | bits++; | 300 | bits++; |
301 | 301 | ||
302 | /* | 302 | /* |
303 | * The total number of bits to be transmitted in the fifo. | 303 | * The total number of bits to be transmitted in the fifo. |
304 | */ | 304 | */ |
305 | bits = bits * port->fifosize; | 305 | bits = bits * port->fifosize; |
306 | 306 | ||
307 | /* | 307 | /* |
308 | * Figure the timeout to send the above number of bits. | 308 | * Figure the timeout to send the above number of bits. |
309 | * Add .02 seconds of slop | 309 | * Add .02 seconds of slop |
310 | */ | 310 | */ |
311 | port->timeout = (HZ * bits) / baud + HZ/50; | 311 | port->timeout = (HZ * bits) / baud + HZ/50; |
312 | } | 312 | } |
313 | 313 | ||
314 | EXPORT_SYMBOL(uart_update_timeout); | 314 | EXPORT_SYMBOL(uart_update_timeout); |
315 | 315 | ||
316 | /** | 316 | /** |
317 | * uart_get_baud_rate - return baud rate for a particular port | 317 | * uart_get_baud_rate - return baud rate for a particular port |
318 | * @port: uart_port structure describing the port in question. | 318 | * @port: uart_port structure describing the port in question. |
319 | * @termios: desired termios settings. | 319 | * @termios: desired termios settings. |
320 | * @old: old termios (or NULL) | 320 | * @old: old termios (or NULL) |
321 | * @min: minimum acceptable baud rate | 321 | * @min: minimum acceptable baud rate |
322 | * @max: maximum acceptable baud rate | 322 | * @max: maximum acceptable baud rate |
323 | * | 323 | * |
324 | * Decode the termios structure into a numeric baud rate, | 324 | * Decode the termios structure into a numeric baud rate, |
325 | * taking account of the magic 38400 baud rate (with spd_* | 325 | * taking account of the magic 38400 baud rate (with spd_* |
326 | * flags), and mapping the %B0 rate to 9600 baud. | 326 | * flags), and mapping the %B0 rate to 9600 baud. |
327 | * | 327 | * |
328 | * If the new baud rate is invalid, try the old termios setting. | 328 | * If the new baud rate is invalid, try the old termios setting. |
329 | * If it's still invalid, we try 9600 baud. | 329 | * If it's still invalid, we try 9600 baud. |
330 | * | 330 | * |
331 | * Update the @termios structure to reflect the baud rate | 331 | * Update the @termios structure to reflect the baud rate |
332 | * we're actually going to be using. | 332 | * we're actually going to be using. |
333 | */ | 333 | */ |
334 | unsigned int | 334 | unsigned int |
335 | uart_get_baud_rate(struct uart_port *port, struct ktermios *termios, | 335 | uart_get_baud_rate(struct uart_port *port, struct ktermios *termios, |
336 | struct ktermios *old, unsigned int min, unsigned int max) | 336 | struct ktermios *old, unsigned int min, unsigned int max) |
337 | { | 337 | { |
338 | unsigned int try, baud, altbaud = 38400; | 338 | unsigned int try, baud, altbaud = 38400; |
339 | upf_t flags = port->flags & UPF_SPD_MASK; | 339 | upf_t flags = port->flags & UPF_SPD_MASK; |
340 | 340 | ||
341 | if (flags == UPF_SPD_HI) | 341 | if (flags == UPF_SPD_HI) |
342 | altbaud = 57600; | 342 | altbaud = 57600; |
343 | if (flags == UPF_SPD_VHI) | 343 | if (flags == UPF_SPD_VHI) |
344 | altbaud = 115200; | 344 | altbaud = 115200; |
345 | if (flags == UPF_SPD_SHI) | 345 | if (flags == UPF_SPD_SHI) |
346 | altbaud = 230400; | 346 | altbaud = 230400; |
347 | if (flags == UPF_SPD_WARP) | 347 | if (flags == UPF_SPD_WARP) |
348 | altbaud = 460800; | 348 | altbaud = 460800; |
349 | 349 | ||
350 | for (try = 0; try < 2; try++) { | 350 | for (try = 0; try < 2; try++) { |
351 | baud = tty_termios_baud_rate(termios); | 351 | baud = tty_termios_baud_rate(termios); |
352 | 352 | ||
353 | /* | 353 | /* |
354 | * The spd_hi, spd_vhi, spd_shi, spd_warp kludge... | 354 | * The spd_hi, spd_vhi, spd_shi, spd_warp kludge... |
355 | * Die! Die! Die! | 355 | * Die! Die! Die! |
356 | */ | 356 | */ |
357 | if (baud == 38400) | 357 | if (baud == 38400) |
358 | baud = altbaud; | 358 | baud = altbaud; |
359 | 359 | ||
360 | /* | 360 | /* |
361 | * Special case: B0 rate. | 361 | * Special case: B0 rate. |
362 | */ | 362 | */ |
363 | if (baud == 0) | 363 | if (baud == 0) |
364 | baud = 9600; | 364 | baud = 9600; |
365 | 365 | ||
366 | if (baud >= min && baud <= max) | 366 | if (baud >= min && baud <= max) |
367 | return baud; | 367 | return baud; |
368 | 368 | ||
369 | /* | 369 | /* |
370 | * Oops, the quotient was zero. Try again with | 370 | * Oops, the quotient was zero. Try again with |
371 | * the old baud rate if possible. | 371 | * the old baud rate if possible. |
372 | */ | 372 | */ |
373 | termios->c_cflag &= ~CBAUD; | 373 | termios->c_cflag &= ~CBAUD; |
374 | if (old) { | 374 | if (old) { |
375 | baud = tty_termios_baud_rate(old); | 375 | baud = tty_termios_baud_rate(old); |
376 | tty_termios_encode_baud_rate(termios, baud, baud); | 376 | tty_termios_encode_baud_rate(termios, baud, baud); |
377 | old = NULL; | 377 | old = NULL; |
378 | continue; | 378 | continue; |
379 | } | 379 | } |
380 | 380 | ||
381 | /* | 381 | /* |
382 | * As a last resort, if the quotient is zero, | 382 | * As a last resort, if the quotient is zero, |
383 | * default to 9600 bps | 383 | * default to 9600 bps |
384 | */ | 384 | */ |
385 | tty_termios_encode_baud_rate(termios, 9600, 9600); | 385 | tty_termios_encode_baud_rate(termios, 9600, 9600); |
386 | } | 386 | } |
387 | 387 | ||
388 | return 0; | 388 | return 0; |
389 | } | 389 | } |
390 | 390 | ||
391 | EXPORT_SYMBOL(uart_get_baud_rate); | 391 | EXPORT_SYMBOL(uart_get_baud_rate); |
392 | 392 | ||
393 | /** | 393 | /** |
394 | * uart_get_divisor - return uart clock divisor | 394 | * uart_get_divisor - return uart clock divisor |
395 | * @port: uart_port structure describing the port. | 395 | * @port: uart_port structure describing the port. |
396 | * @baud: desired baud rate | 396 | * @baud: desired baud rate |
397 | * | 397 | * |
398 | * Calculate the uart clock divisor for the port. | 398 | * Calculate the uart clock divisor for the port. |
399 | */ | 399 | */ |
400 | unsigned int | 400 | unsigned int |
401 | uart_get_divisor(struct uart_port *port, unsigned int baud) | 401 | uart_get_divisor(struct uart_port *port, unsigned int baud) |
402 | { | 402 | { |
403 | unsigned int quot; | 403 | unsigned int quot; |
404 | 404 | ||
405 | /* | 405 | /* |
406 | * Old custom speed handling. | 406 | * Old custom speed handling. |
407 | */ | 407 | */ |
408 | if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST) | 408 | if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST) |
409 | quot = port->custom_divisor; | 409 | quot = port->custom_divisor; |
410 | else | 410 | else |
411 | quot = (port->uartclk + (8 * baud)) / (16 * baud); | 411 | quot = (port->uartclk + (8 * baud)) / (16 * baud); |
412 | 412 | ||
413 | return quot; | 413 | return quot; |
414 | } | 414 | } |
415 | 415 | ||
416 | EXPORT_SYMBOL(uart_get_divisor); | 416 | EXPORT_SYMBOL(uart_get_divisor); |
417 | 417 | ||
418 | static void | 418 | static void |
419 | uart_change_speed(struct uart_state *state, struct ktermios *old_termios) | 419 | uart_change_speed(struct uart_state *state, struct ktermios *old_termios) |
420 | { | 420 | { |
421 | struct tty_struct *tty = state->info->tty; | 421 | struct tty_struct *tty = state->info->tty; |
422 | struct uart_port *port = state->port; | 422 | struct uart_port *port = state->port; |
423 | struct ktermios *termios; | 423 | struct ktermios *termios; |
424 | 424 | ||
425 | /* | 425 | /* |
426 | * If we have no tty, termios, or the port does not exist, | 426 | * If we have no tty, termios, or the port does not exist, |
427 | * then we can't set the parameters for this port. | 427 | * then we can't set the parameters for this port. |
428 | */ | 428 | */ |
429 | if (!tty || !tty->termios || port->type == PORT_UNKNOWN) | 429 | if (!tty || !tty->termios || port->type == PORT_UNKNOWN) |
430 | return; | 430 | return; |
431 | 431 | ||
432 | termios = tty->termios; | 432 | termios = tty->termios; |
433 | 433 | ||
434 | /* | 434 | /* |
435 | * Set flags based on termios cflag | 435 | * Set flags based on termios cflag |
436 | */ | 436 | */ |
437 | if (termios->c_cflag & CRTSCTS) | 437 | if (termios->c_cflag & CRTSCTS) |
438 | state->info->flags |= UIF_CTS_FLOW; | 438 | state->info->flags |= UIF_CTS_FLOW; |
439 | else | 439 | else |
440 | state->info->flags &= ~UIF_CTS_FLOW; | 440 | state->info->flags &= ~UIF_CTS_FLOW; |
441 | 441 | ||
442 | if (termios->c_cflag & CLOCAL) | 442 | if (termios->c_cflag & CLOCAL) |
443 | state->info->flags &= ~UIF_CHECK_CD; | 443 | state->info->flags &= ~UIF_CHECK_CD; |
444 | else | 444 | else |
445 | state->info->flags |= UIF_CHECK_CD; | 445 | state->info->flags |= UIF_CHECK_CD; |
446 | 446 | ||
447 | port->ops->set_termios(port, termios, old_termios); | 447 | port->ops->set_termios(port, termios, old_termios); |
448 | } | 448 | } |
449 | 449 | ||
450 | static inline void | 450 | static inline void |
451 | __uart_put_char(struct uart_port *port, struct circ_buf *circ, unsigned char c) | 451 | __uart_put_char(struct uart_port *port, struct circ_buf *circ, unsigned char c) |
452 | { | 452 | { |
453 | unsigned long flags; | 453 | unsigned long flags; |
454 | 454 | ||
455 | if (!circ->buf) | 455 | if (!circ->buf) |
456 | return; | 456 | return; |
457 | 457 | ||
458 | spin_lock_irqsave(&port->lock, flags); | 458 | spin_lock_irqsave(&port->lock, flags); |
459 | if (uart_circ_chars_free(circ) != 0) { | 459 | if (uart_circ_chars_free(circ) != 0) { |
460 | circ->buf[circ->head] = c; | 460 | circ->buf[circ->head] = c; |
461 | circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1); | 461 | circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1); |
462 | } | 462 | } |
463 | spin_unlock_irqrestore(&port->lock, flags); | 463 | spin_unlock_irqrestore(&port->lock, flags); |
464 | } | 464 | } |
465 | 465 | ||
466 | static void uart_put_char(struct tty_struct *tty, unsigned char ch) | 466 | static void uart_put_char(struct tty_struct *tty, unsigned char ch) |
467 | { | 467 | { |
468 | struct uart_state *state = tty->driver_data; | 468 | struct uart_state *state = tty->driver_data; |
469 | 469 | ||
470 | __uart_put_char(state->port, &state->info->xmit, ch); | 470 | __uart_put_char(state->port, &state->info->xmit, ch); |
471 | } | 471 | } |
472 | 472 | ||
473 | static void uart_flush_chars(struct tty_struct *tty) | 473 | static void uart_flush_chars(struct tty_struct *tty) |
474 | { | 474 | { |
475 | uart_start(tty); | 475 | uart_start(tty); |
476 | } | 476 | } |
477 | 477 | ||
478 | static int | 478 | static int |
479 | uart_write(struct tty_struct *tty, const unsigned char *buf, int count) | 479 | uart_write(struct tty_struct *tty, const unsigned char *buf, int count) |
480 | { | 480 | { |
481 | struct uart_state *state = tty->driver_data; | 481 | struct uart_state *state = tty->driver_data; |
482 | struct uart_port *port; | 482 | struct uart_port *port; |
483 | struct circ_buf *circ; | 483 | struct circ_buf *circ; |
484 | unsigned long flags; | 484 | unsigned long flags; |
485 | int c, ret = 0; | 485 | int c, ret = 0; |
486 | 486 | ||
487 | /* | 487 | /* |
488 | * This means you called this function _after_ the port was | 488 | * This means you called this function _after_ the port was |
489 | * closed. No cookie for you. | 489 | * closed. No cookie for you. |
490 | */ | 490 | */ |
491 | if (!state || !state->info) { | 491 | if (!state || !state->info) { |
492 | WARN_ON(1); | 492 | WARN_ON(1); |
493 | return -EL3HLT; | 493 | return -EL3HLT; |
494 | } | 494 | } |
495 | 495 | ||
496 | port = state->port; | 496 | port = state->port; |
497 | circ = &state->info->xmit; | 497 | circ = &state->info->xmit; |
498 | 498 | ||
499 | if (!circ->buf) | 499 | if (!circ->buf) |
500 | return 0; | 500 | return 0; |
501 | 501 | ||
502 | spin_lock_irqsave(&port->lock, flags); | 502 | spin_lock_irqsave(&port->lock, flags); |
503 | while (1) { | 503 | while (1) { |
504 | c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE); | 504 | c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE); |
505 | if (count < c) | 505 | if (count < c) |
506 | c = count; | 506 | c = count; |
507 | if (c <= 0) | 507 | if (c <= 0) |
508 | break; | 508 | break; |
509 | memcpy(circ->buf + circ->head, buf, c); | 509 | memcpy(circ->buf + circ->head, buf, c); |
510 | circ->head = (circ->head + c) & (UART_XMIT_SIZE - 1); | 510 | circ->head = (circ->head + c) & (UART_XMIT_SIZE - 1); |
511 | buf += c; | 511 | buf += c; |
512 | count -= c; | 512 | count -= c; |
513 | ret += c; | 513 | ret += c; |
514 | } | 514 | } |
515 | spin_unlock_irqrestore(&port->lock, flags); | 515 | spin_unlock_irqrestore(&port->lock, flags); |
516 | 516 | ||
517 | uart_start(tty); | 517 | uart_start(tty); |
518 | return ret; | 518 | return ret; |
519 | } | 519 | } |
520 | 520 | ||
521 | static int uart_write_room(struct tty_struct *tty) | 521 | static int uart_write_room(struct tty_struct *tty) |
522 | { | 522 | { |
523 | struct uart_state *state = tty->driver_data; | 523 | struct uart_state *state = tty->driver_data; |
524 | 524 | ||
525 | return uart_circ_chars_free(&state->info->xmit); | 525 | return uart_circ_chars_free(&state->info->xmit); |
526 | } | 526 | } |
527 | 527 | ||
528 | static int uart_chars_in_buffer(struct tty_struct *tty) | 528 | static int uart_chars_in_buffer(struct tty_struct *tty) |
529 | { | 529 | { |
530 | struct uart_state *state = tty->driver_data; | 530 | struct uart_state *state = tty->driver_data; |
531 | 531 | ||
532 | return uart_circ_chars_pending(&state->info->xmit); | 532 | return uart_circ_chars_pending(&state->info->xmit); |
533 | } | 533 | } |
534 | 534 | ||
535 | static void uart_flush_buffer(struct tty_struct *tty) | 535 | static void uart_flush_buffer(struct tty_struct *tty) |
536 | { | 536 | { |
537 | struct uart_state *state = tty->driver_data; | 537 | struct uart_state *state = tty->driver_data; |
538 | struct uart_port *port = state->port; | 538 | struct uart_port *port = state->port; |
539 | unsigned long flags; | 539 | unsigned long flags; |
540 | 540 | ||
541 | /* | 541 | /* |
542 | * This means you called this function _after_ the port was | 542 | * This means you called this function _after_ the port was |
543 | * closed. No cookie for you. | 543 | * closed. No cookie for you. |
544 | */ | 544 | */ |
545 | if (!state || !state->info) { | 545 | if (!state || !state->info) { |
546 | WARN_ON(1); | 546 | WARN_ON(1); |
547 | return; | 547 | return; |
548 | } | 548 | } |
549 | 549 | ||
550 | pr_debug("uart_flush_buffer(%d) called\n", tty->index); | 550 | pr_debug("uart_flush_buffer(%d) called\n", tty->index); |
551 | 551 | ||
552 | spin_lock_irqsave(&port->lock, flags); | 552 | spin_lock_irqsave(&port->lock, flags); |
553 | uart_circ_clear(&state->info->xmit); | 553 | uart_circ_clear(&state->info->xmit); |
554 | spin_unlock_irqrestore(&port->lock, flags); | 554 | spin_unlock_irqrestore(&port->lock, flags); |
555 | tty_wakeup(tty); | 555 | tty_wakeup(tty); |
556 | } | 556 | } |
557 | 557 | ||
558 | /* | 558 | /* |
559 | * This function is used to send a high-priority XON/XOFF character to | 559 | * This function is used to send a high-priority XON/XOFF character to |
560 | * the device | 560 | * the device |
561 | */ | 561 | */ |
562 | static void uart_send_xchar(struct tty_struct *tty, char ch) | 562 | static void uart_send_xchar(struct tty_struct *tty, char ch) |
563 | { | 563 | { |
564 | struct uart_state *state = tty->driver_data; | 564 | struct uart_state *state = tty->driver_data; |
565 | struct uart_port *port = state->port; | 565 | struct uart_port *port = state->port; |
566 | unsigned long flags; | 566 | unsigned long flags; |
567 | 567 | ||
568 | if (port->ops->send_xchar) | 568 | if (port->ops->send_xchar) |
569 | port->ops->send_xchar(port, ch); | 569 | port->ops->send_xchar(port, ch); |
570 | else { | 570 | else { |
571 | port->x_char = ch; | 571 | port->x_char = ch; |
572 | if (ch) { | 572 | if (ch) { |
573 | spin_lock_irqsave(&port->lock, flags); | 573 | spin_lock_irqsave(&port->lock, flags); |
574 | port->ops->start_tx(port); | 574 | port->ops->start_tx(port); |
575 | spin_unlock_irqrestore(&port->lock, flags); | 575 | spin_unlock_irqrestore(&port->lock, flags); |
576 | } | 576 | } |
577 | } | 577 | } |
578 | } | 578 | } |
579 | 579 | ||
580 | static void uart_throttle(struct tty_struct *tty) | 580 | static void uart_throttle(struct tty_struct *tty) |
581 | { | 581 | { |
582 | struct uart_state *state = tty->driver_data; | 582 | struct uart_state *state = tty->driver_data; |
583 | 583 | ||
584 | if (I_IXOFF(tty)) | 584 | if (I_IXOFF(tty)) |
585 | uart_send_xchar(tty, STOP_CHAR(tty)); | 585 | uart_send_xchar(tty, STOP_CHAR(tty)); |
586 | 586 | ||
587 | if (tty->termios->c_cflag & CRTSCTS) | 587 | if (tty->termios->c_cflag & CRTSCTS) |
588 | uart_clear_mctrl(state->port, TIOCM_RTS); | 588 | uart_clear_mctrl(state->port, TIOCM_RTS); |
589 | } | 589 | } |
590 | 590 | ||
591 | static void uart_unthrottle(struct tty_struct *tty) | 591 | static void uart_unthrottle(struct tty_struct *tty) |
592 | { | 592 | { |
593 | struct uart_state *state = tty->driver_data; | 593 | struct uart_state *state = tty->driver_data; |
594 | struct uart_port *port = state->port; | 594 | struct uart_port *port = state->port; |
595 | 595 | ||
596 | if (I_IXOFF(tty)) { | 596 | if (I_IXOFF(tty)) { |
597 | if (port->x_char) | 597 | if (port->x_char) |
598 | port->x_char = 0; | 598 | port->x_char = 0; |
599 | else | 599 | else |
600 | uart_send_xchar(tty, START_CHAR(tty)); | 600 | uart_send_xchar(tty, START_CHAR(tty)); |
601 | } | 601 | } |
602 | 602 | ||
603 | if (tty->termios->c_cflag & CRTSCTS) | 603 | if (tty->termios->c_cflag & CRTSCTS) |
604 | uart_set_mctrl(port, TIOCM_RTS); | 604 | uart_set_mctrl(port, TIOCM_RTS); |
605 | } | 605 | } |
606 | 606 | ||
607 | static int uart_get_info(struct uart_state *state, | 607 | static int uart_get_info(struct uart_state *state, |
608 | struct serial_struct __user *retinfo) | 608 | struct serial_struct __user *retinfo) |
609 | { | 609 | { |
610 | struct uart_port *port = state->port; | 610 | struct uart_port *port = state->port; |
611 | struct serial_struct tmp; | 611 | struct serial_struct tmp; |
612 | 612 | ||
613 | memset(&tmp, 0, sizeof(tmp)); | 613 | memset(&tmp, 0, sizeof(tmp)); |
614 | tmp.type = port->type; | 614 | tmp.type = port->type; |
615 | tmp.line = port->line; | 615 | tmp.line = port->line; |
616 | tmp.port = port->iobase; | 616 | tmp.port = port->iobase; |
617 | if (HIGH_BITS_OFFSET) | 617 | if (HIGH_BITS_OFFSET) |
618 | tmp.port_high = (long) port->iobase >> HIGH_BITS_OFFSET; | 618 | tmp.port_high = (long) port->iobase >> HIGH_BITS_OFFSET; |
619 | tmp.irq = port->irq; | 619 | tmp.irq = port->irq; |
620 | tmp.flags = port->flags; | 620 | tmp.flags = port->flags; |
621 | tmp.xmit_fifo_size = port->fifosize; | 621 | tmp.xmit_fifo_size = port->fifosize; |
622 | tmp.baud_base = port->uartclk / 16; | 622 | tmp.baud_base = port->uartclk / 16; |
623 | tmp.close_delay = state->close_delay / 10; | 623 | tmp.close_delay = state->close_delay / 10; |
624 | tmp.closing_wait = state->closing_wait == USF_CLOSING_WAIT_NONE ? | 624 | tmp.closing_wait = state->closing_wait == USF_CLOSING_WAIT_NONE ? |
625 | ASYNC_CLOSING_WAIT_NONE : | 625 | ASYNC_CLOSING_WAIT_NONE : |
626 | state->closing_wait / 10; | 626 | state->closing_wait / 10; |
627 | tmp.custom_divisor = port->custom_divisor; | 627 | tmp.custom_divisor = port->custom_divisor; |
628 | tmp.hub6 = port->hub6; | 628 | tmp.hub6 = port->hub6; |
629 | tmp.io_type = port->iotype; | 629 | tmp.io_type = port->iotype; |
630 | tmp.iomem_reg_shift = port->regshift; | 630 | tmp.iomem_reg_shift = port->regshift; |
631 | tmp.iomem_base = (void *)(unsigned long)port->mapbase; | 631 | tmp.iomem_base = (void *)(unsigned long)port->mapbase; |
632 | 632 | ||
633 | if (copy_to_user(retinfo, &tmp, sizeof(*retinfo))) | 633 | if (copy_to_user(retinfo, &tmp, sizeof(*retinfo))) |
634 | return -EFAULT; | 634 | return -EFAULT; |
635 | return 0; | 635 | return 0; |
636 | } | 636 | } |
637 | 637 | ||
638 | static int uart_set_info(struct uart_state *state, | 638 | static int uart_set_info(struct uart_state *state, |
639 | struct serial_struct __user *newinfo) | 639 | struct serial_struct __user *newinfo) |
640 | { | 640 | { |
641 | struct serial_struct new_serial; | 641 | struct serial_struct new_serial; |
642 | struct uart_port *port = state->port; | 642 | struct uart_port *port = state->port; |
643 | unsigned long new_port; | 643 | unsigned long new_port; |
644 | unsigned int change_irq, change_port, closing_wait; | 644 | unsigned int change_irq, change_port, closing_wait; |
645 | unsigned int old_custom_divisor, close_delay; | 645 | unsigned int old_custom_divisor, close_delay; |
646 | upf_t old_flags, new_flags; | 646 | upf_t old_flags, new_flags; |
647 | int retval = 0; | 647 | int retval = 0; |
648 | 648 | ||
649 | if (copy_from_user(&new_serial, newinfo, sizeof(new_serial))) | 649 | if (copy_from_user(&new_serial, newinfo, sizeof(new_serial))) |
650 | return -EFAULT; | 650 | return -EFAULT; |
651 | 651 | ||
652 | new_port = new_serial.port; | 652 | new_port = new_serial.port; |
653 | if (HIGH_BITS_OFFSET) | 653 | if (HIGH_BITS_OFFSET) |
654 | new_port += (unsigned long) new_serial.port_high << HIGH_BITS_OFFSET; | 654 | new_port += (unsigned long) new_serial.port_high << HIGH_BITS_OFFSET; |
655 | 655 | ||
656 | new_serial.irq = irq_canonicalize(new_serial.irq); | 656 | new_serial.irq = irq_canonicalize(new_serial.irq); |
657 | close_delay = new_serial.close_delay * 10; | 657 | close_delay = new_serial.close_delay * 10; |
658 | closing_wait = new_serial.closing_wait == ASYNC_CLOSING_WAIT_NONE ? | 658 | closing_wait = new_serial.closing_wait == ASYNC_CLOSING_WAIT_NONE ? |
659 | USF_CLOSING_WAIT_NONE : new_serial.closing_wait * 10; | 659 | USF_CLOSING_WAIT_NONE : new_serial.closing_wait * 10; |
660 | 660 | ||
661 | /* | 661 | /* |
662 | * This semaphore protects state->count. It is also | 662 | * This semaphore protects state->count. It is also |
663 | * very useful to prevent opens. Also, take the | 663 | * very useful to prevent opens. Also, take the |
664 | * port configuration semaphore to make sure that a | 664 | * port configuration semaphore to make sure that a |
665 | * module insertion/removal doesn't change anything | 665 | * module insertion/removal doesn't change anything |
666 | * under us. | 666 | * under us. |
667 | */ | 667 | */ |
668 | mutex_lock(&state->mutex); | 668 | mutex_lock(&state->mutex); |
669 | 669 | ||
670 | change_irq = !(port->flags & UPF_FIXED_PORT) | 670 | change_irq = !(port->flags & UPF_FIXED_PORT) |
671 | && new_serial.irq != port->irq; | 671 | && new_serial.irq != port->irq; |
672 | 672 | ||
673 | /* | 673 | /* |
674 | * Since changing the 'type' of the port changes its resource | 674 | * Since changing the 'type' of the port changes its resource |
675 | * allocations, we should treat type changes the same as | 675 | * allocations, we should treat type changes the same as |
676 | * IO port changes. | 676 | * IO port changes. |
677 | */ | 677 | */ |
678 | change_port = !(port->flags & UPF_FIXED_PORT) | 678 | change_port = !(port->flags & UPF_FIXED_PORT) |
679 | && (new_port != port->iobase || | 679 | && (new_port != port->iobase || |
680 | (unsigned long)new_serial.iomem_base != port->mapbase || | 680 | (unsigned long)new_serial.iomem_base != port->mapbase || |
681 | new_serial.hub6 != port->hub6 || | 681 | new_serial.hub6 != port->hub6 || |
682 | new_serial.io_type != port->iotype || | 682 | new_serial.io_type != port->iotype || |
683 | new_serial.iomem_reg_shift != port->regshift || | 683 | new_serial.iomem_reg_shift != port->regshift || |
684 | new_serial.type != port->type); | 684 | new_serial.type != port->type); |
685 | 685 | ||
686 | old_flags = port->flags; | 686 | old_flags = port->flags; |
687 | new_flags = new_serial.flags; | 687 | new_flags = new_serial.flags; |
688 | old_custom_divisor = port->custom_divisor; | 688 | old_custom_divisor = port->custom_divisor; |
689 | 689 | ||
690 | if (!capable(CAP_SYS_ADMIN)) { | 690 | if (!capable(CAP_SYS_ADMIN)) { |
691 | retval = -EPERM; | 691 | retval = -EPERM; |
692 | if (change_irq || change_port || | 692 | if (change_irq || change_port || |
693 | (new_serial.baud_base != port->uartclk / 16) || | 693 | (new_serial.baud_base != port->uartclk / 16) || |
694 | (close_delay != state->close_delay) || | 694 | (close_delay != state->close_delay) || |
695 | (closing_wait != state->closing_wait) || | 695 | (closing_wait != state->closing_wait) || |
696 | (new_serial.xmit_fifo_size && | 696 | (new_serial.xmit_fifo_size && |
697 | new_serial.xmit_fifo_size != port->fifosize) || | 697 | new_serial.xmit_fifo_size != port->fifosize) || |
698 | (((new_flags ^ old_flags) & ~UPF_USR_MASK) != 0)) | 698 | (((new_flags ^ old_flags) & ~UPF_USR_MASK) != 0)) |
699 | goto exit; | 699 | goto exit; |
700 | port->flags = ((port->flags & ~UPF_USR_MASK) | | 700 | port->flags = ((port->flags & ~UPF_USR_MASK) | |
701 | (new_flags & UPF_USR_MASK)); | 701 | (new_flags & UPF_USR_MASK)); |
702 | port->custom_divisor = new_serial.custom_divisor; | 702 | port->custom_divisor = new_serial.custom_divisor; |
703 | goto check_and_exit; | 703 | goto check_and_exit; |
704 | } | 704 | } |
705 | 705 | ||
706 | /* | 706 | /* |
707 | * Ask the low level driver to verify the settings. | 707 | * Ask the low level driver to verify the settings. |
708 | */ | 708 | */ |
709 | if (port->ops->verify_port) | 709 | if (port->ops->verify_port) |
710 | retval = port->ops->verify_port(port, &new_serial); | 710 | retval = port->ops->verify_port(port, &new_serial); |
711 | 711 | ||
712 | if ((new_serial.irq >= NR_IRQS) || (new_serial.irq < 0) || | 712 | if ((new_serial.irq >= NR_IRQS) || (new_serial.irq < 0) || |
713 | (new_serial.baud_base < 9600)) | 713 | (new_serial.baud_base < 9600)) |
714 | retval = -EINVAL; | 714 | retval = -EINVAL; |
715 | 715 | ||
716 | if (retval) | 716 | if (retval) |
717 | goto exit; | 717 | goto exit; |
718 | 718 | ||
719 | if (change_port || change_irq) { | 719 | if (change_port || change_irq) { |
720 | retval = -EBUSY; | 720 | retval = -EBUSY; |
721 | 721 | ||
722 | /* | 722 | /* |
723 | * Make sure that we are the sole user of this port. | 723 | * Make sure that we are the sole user of this port. |
724 | */ | 724 | */ |
725 | if (uart_users(state) > 1) | 725 | if (uart_users(state) > 1) |
726 | goto exit; | 726 | goto exit; |
727 | 727 | ||
728 | /* | 728 | /* |
729 | * We need to shutdown the serial port at the old | 729 | * We need to shutdown the serial port at the old |
730 | * port/type/irq combination. | 730 | * port/type/irq combination. |
731 | */ | 731 | */ |
732 | uart_shutdown(state); | 732 | uart_shutdown(state); |
733 | } | 733 | } |
734 | 734 | ||
735 | if (change_port) { | 735 | if (change_port) { |
736 | unsigned long old_iobase, old_mapbase; | 736 | unsigned long old_iobase, old_mapbase; |
737 | unsigned int old_type, old_iotype, old_hub6, old_shift; | 737 | unsigned int old_type, old_iotype, old_hub6, old_shift; |
738 | 738 | ||
739 | old_iobase = port->iobase; | 739 | old_iobase = port->iobase; |
740 | old_mapbase = port->mapbase; | 740 | old_mapbase = port->mapbase; |
741 | old_type = port->type; | 741 | old_type = port->type; |
742 | old_hub6 = port->hub6; | 742 | old_hub6 = port->hub6; |
743 | old_iotype = port->iotype; | 743 | old_iotype = port->iotype; |
744 | old_shift = port->regshift; | 744 | old_shift = port->regshift; |
745 | 745 | ||
746 | /* | 746 | /* |
747 | * Free and release old regions | 747 | * Free and release old regions |
748 | */ | 748 | */ |
749 | if (old_type != PORT_UNKNOWN) | 749 | if (old_type != PORT_UNKNOWN) |
750 | port->ops->release_port(port); | 750 | port->ops->release_port(port); |
751 | 751 | ||
752 | port->iobase = new_port; | 752 | port->iobase = new_port; |
753 | port->type = new_serial.type; | 753 | port->type = new_serial.type; |
754 | port->hub6 = new_serial.hub6; | 754 | port->hub6 = new_serial.hub6; |
755 | port->iotype = new_serial.io_type; | 755 | port->iotype = new_serial.io_type; |
756 | port->regshift = new_serial.iomem_reg_shift; | 756 | port->regshift = new_serial.iomem_reg_shift; |
757 | port->mapbase = (unsigned long)new_serial.iomem_base; | 757 | port->mapbase = (unsigned long)new_serial.iomem_base; |
758 | 758 | ||
759 | /* | 759 | /* |
760 | * Claim and map the new regions | 760 | * Claim and map the new regions |
761 | */ | 761 | */ |
762 | if (port->type != PORT_UNKNOWN) { | 762 | if (port->type != PORT_UNKNOWN) { |
763 | retval = port->ops->request_port(port); | 763 | retval = port->ops->request_port(port); |
764 | } else { | 764 | } else { |
765 | /* Always success - Jean II */ | 765 | /* Always success - Jean II */ |
766 | retval = 0; | 766 | retval = 0; |
767 | } | 767 | } |
768 | 768 | ||
769 | /* | 769 | /* |
770 | * If we fail to request resources for the | 770 | * If we fail to request resources for the |
771 | * new port, try to restore the old settings. | 771 | * new port, try to restore the old settings. |
772 | */ | 772 | */ |
773 | if (retval && old_type != PORT_UNKNOWN) { | 773 | if (retval && old_type != PORT_UNKNOWN) { |
774 | port->iobase = old_iobase; | 774 | port->iobase = old_iobase; |
775 | port->type = old_type; | 775 | port->type = old_type; |
776 | port->hub6 = old_hub6; | 776 | port->hub6 = old_hub6; |
777 | port->iotype = old_iotype; | 777 | port->iotype = old_iotype; |
778 | port->regshift = old_shift; | 778 | port->regshift = old_shift; |
779 | port->mapbase = old_mapbase; | 779 | port->mapbase = old_mapbase; |
780 | retval = port->ops->request_port(port); | 780 | retval = port->ops->request_port(port); |
781 | /* | 781 | /* |
782 | * If we failed to restore the old settings, | 782 | * If we failed to restore the old settings, |
783 | * we fail like this. | 783 | * we fail like this. |
784 | */ | 784 | */ |
785 | if (retval) | 785 | if (retval) |
786 | port->type = PORT_UNKNOWN; | 786 | port->type = PORT_UNKNOWN; |
787 | 787 | ||
788 | /* | 788 | /* |
789 | * We failed anyway. | 789 | * We failed anyway. |
790 | */ | 790 | */ |
791 | retval = -EBUSY; | 791 | retval = -EBUSY; |
792 | /* Added to return the correct error -Ram Gupta */ | 792 | /* Added to return the correct error -Ram Gupta */ |
793 | goto exit; | 793 | goto exit; |
794 | } | 794 | } |
795 | } | 795 | } |
796 | 796 | ||
797 | if (change_irq) | 797 | if (change_irq) |
798 | port->irq = new_serial.irq; | 798 | port->irq = new_serial.irq; |
799 | if (!(port->flags & UPF_FIXED_PORT)) | 799 | if (!(port->flags & UPF_FIXED_PORT)) |
800 | port->uartclk = new_serial.baud_base * 16; | 800 | port->uartclk = new_serial.baud_base * 16; |
801 | port->flags = (port->flags & ~UPF_CHANGE_MASK) | | 801 | port->flags = (port->flags & ~UPF_CHANGE_MASK) | |
802 | (new_flags & UPF_CHANGE_MASK); | 802 | (new_flags & UPF_CHANGE_MASK); |
803 | port->custom_divisor = new_serial.custom_divisor; | 803 | port->custom_divisor = new_serial.custom_divisor; |
804 | state->close_delay = close_delay; | 804 | state->close_delay = close_delay; |
805 | state->closing_wait = closing_wait; | 805 | state->closing_wait = closing_wait; |
806 | if (new_serial.xmit_fifo_size) | 806 | if (new_serial.xmit_fifo_size) |
807 | port->fifosize = new_serial.xmit_fifo_size; | 807 | port->fifosize = new_serial.xmit_fifo_size; |
808 | if (state->info->tty) | 808 | if (state->info->tty) |
809 | state->info->tty->low_latency = | 809 | state->info->tty->low_latency = |
810 | (port->flags & UPF_LOW_LATENCY) ? 1 : 0; | 810 | (port->flags & UPF_LOW_LATENCY) ? 1 : 0; |
811 | 811 | ||
812 | check_and_exit: | 812 | check_and_exit: |
813 | retval = 0; | 813 | retval = 0; |
814 | if (port->type == PORT_UNKNOWN) | 814 | if (port->type == PORT_UNKNOWN) |
815 | goto exit; | 815 | goto exit; |
816 | if (state->info->flags & UIF_INITIALIZED) { | 816 | if (state->info->flags & UIF_INITIALIZED) { |
817 | if (((old_flags ^ port->flags) & UPF_SPD_MASK) || | 817 | if (((old_flags ^ port->flags) & UPF_SPD_MASK) || |
818 | old_custom_divisor != port->custom_divisor) { | 818 | old_custom_divisor != port->custom_divisor) { |
819 | /* | 819 | /* |
820 | * If they're setting up a custom divisor or speed, | 820 | * If they're setting up a custom divisor or speed, |
821 | * instead of clearing it, then bitch about it. No | 821 | * instead of clearing it, then bitch about it. No |
822 | * need to rate-limit; it's CAP_SYS_ADMIN only. | 822 | * need to rate-limit; it's CAP_SYS_ADMIN only. |
823 | */ | 823 | */ |
824 | if (port->flags & UPF_SPD_MASK) { | 824 | if (port->flags & UPF_SPD_MASK) { |
825 | char buf[64]; | 825 | char buf[64]; |
826 | printk(KERN_NOTICE | 826 | printk(KERN_NOTICE |
827 | "%s sets custom speed on %s. This " | 827 | "%s sets custom speed on %s. This " |
828 | "is deprecated.\n", current->comm, | 828 | "is deprecated.\n", current->comm, |
829 | tty_name(state->info->tty, buf)); | 829 | tty_name(state->info->tty, buf)); |
830 | } | 830 | } |
831 | uart_change_speed(state, NULL); | 831 | uart_change_speed(state, NULL); |
832 | } | 832 | } |
833 | } else | 833 | } else |
834 | retval = uart_startup(state, 1); | 834 | retval = uart_startup(state, 1); |
835 | exit: | 835 | exit: |
836 | mutex_unlock(&state->mutex); | 836 | mutex_unlock(&state->mutex); |
837 | return retval; | 837 | return retval; |
838 | } | 838 | } |
839 | 839 | ||
840 | 840 | ||
841 | /* | 841 | /* |
842 | * uart_get_lsr_info - get line status register info. | 842 | * uart_get_lsr_info - get line status register info. |
843 | * Note: uart_ioctl protects us against hangups. | 843 | * Note: uart_ioctl protects us against hangups. |
844 | */ | 844 | */ |
845 | static int uart_get_lsr_info(struct uart_state *state, | 845 | static int uart_get_lsr_info(struct uart_state *state, |
846 | unsigned int __user *value) | 846 | unsigned int __user *value) |
847 | { | 847 | { |
848 | struct uart_port *port = state->port; | 848 | struct uart_port *port = state->port; |
849 | unsigned int result; | 849 | unsigned int result; |
850 | 850 | ||
851 | result = port->ops->tx_empty(port); | 851 | result = port->ops->tx_empty(port); |
852 | 852 | ||
853 | /* | 853 | /* |
854 | * If we're about to load something into the transmit | 854 | * If we're about to load something into the transmit |
855 | * register, we'll pretend the transmitter isn't empty to | 855 | * register, we'll pretend the transmitter isn't empty to |
856 | * avoid a race condition (depending on when the transmit | 856 | * avoid a race condition (depending on when the transmit |
857 | * interrupt happens). | 857 | * interrupt happens). |
858 | */ | 858 | */ |
859 | if (port->x_char || | 859 | if (port->x_char || |
860 | ((uart_circ_chars_pending(&state->info->xmit) > 0) && | 860 | ((uart_circ_chars_pending(&state->info->xmit) > 0) && |
861 | !state->info->tty->stopped && !state->info->tty->hw_stopped)) | 861 | !state->info->tty->stopped && !state->info->tty->hw_stopped)) |
862 | result &= ~TIOCSER_TEMT; | 862 | result &= ~TIOCSER_TEMT; |
863 | 863 | ||
864 | return put_user(result, value); | 864 | return put_user(result, value); |
865 | } | 865 | } |
866 | 866 | ||
867 | static int uart_tiocmget(struct tty_struct *tty, struct file *file) | 867 | static int uart_tiocmget(struct tty_struct *tty, struct file *file) |
868 | { | 868 | { |
869 | struct uart_state *state = tty->driver_data; | 869 | struct uart_state *state = tty->driver_data; |
870 | struct uart_port *port = state->port; | 870 | struct uart_port *port = state->port; |
871 | int result = -EIO; | 871 | int result = -EIO; |
872 | 872 | ||
873 | mutex_lock(&state->mutex); | 873 | mutex_lock(&state->mutex); |
874 | if ((!file || !tty_hung_up_p(file)) && | 874 | if ((!file || !tty_hung_up_p(file)) && |
875 | !(tty->flags & (1 << TTY_IO_ERROR))) { | 875 | !(tty->flags & (1 << TTY_IO_ERROR))) { |
876 | result = port->mctrl; | 876 | result = port->mctrl; |
877 | 877 | ||
878 | spin_lock_irq(&port->lock); | 878 | spin_lock_irq(&port->lock); |
879 | result |= port->ops->get_mctrl(port); | 879 | result |= port->ops->get_mctrl(port); |
880 | spin_unlock_irq(&port->lock); | 880 | spin_unlock_irq(&port->lock); |
881 | } | 881 | } |
882 | mutex_unlock(&state->mutex); | 882 | mutex_unlock(&state->mutex); |
883 | 883 | ||
884 | return result; | 884 | return result; |
885 | } | 885 | } |
886 | 886 | ||
887 | static int | 887 | static int |
888 | uart_tiocmset(struct tty_struct *tty, struct file *file, | 888 | uart_tiocmset(struct tty_struct *tty, struct file *file, |
889 | unsigned int set, unsigned int clear) | 889 | unsigned int set, unsigned int clear) |
890 | { | 890 | { |
891 | struct uart_state *state = tty->driver_data; | 891 | struct uart_state *state = tty->driver_data; |
892 | struct uart_port *port = state->port; | 892 | struct uart_port *port = state->port; |
893 | int ret = -EIO; | 893 | int ret = -EIO; |
894 | 894 | ||
895 | mutex_lock(&state->mutex); | 895 | mutex_lock(&state->mutex); |
896 | if ((!file || !tty_hung_up_p(file)) && | 896 | if ((!file || !tty_hung_up_p(file)) && |
897 | !(tty->flags & (1 << TTY_IO_ERROR))) { | 897 | !(tty->flags & (1 << TTY_IO_ERROR))) { |
898 | uart_update_mctrl(port, set, clear); | 898 | uart_update_mctrl(port, set, clear); |
899 | ret = 0; | 899 | ret = 0; |
900 | } | 900 | } |
901 | mutex_unlock(&state->mutex); | 901 | mutex_unlock(&state->mutex); |
902 | return ret; | 902 | return ret; |
903 | } | 903 | } |
904 | 904 | ||
905 | static void uart_break_ctl(struct tty_struct *tty, int break_state) | 905 | static void uart_break_ctl(struct tty_struct *tty, int break_state) |
906 | { | 906 | { |
907 | struct uart_state *state = tty->driver_data; | 907 | struct uart_state *state = tty->driver_data; |
908 | struct uart_port *port = state->port; | 908 | struct uart_port *port = state->port; |
909 | 909 | ||
910 | BUG_ON(!kernel_locked()); | 910 | BUG_ON(!kernel_locked()); |
911 | 911 | ||
912 | mutex_lock(&state->mutex); | 912 | mutex_lock(&state->mutex); |
913 | 913 | ||
914 | if (port->type != PORT_UNKNOWN) | 914 | if (port->type != PORT_UNKNOWN) |
915 | port->ops->break_ctl(port, break_state); | 915 | port->ops->break_ctl(port, break_state); |
916 | 916 | ||
917 | mutex_unlock(&state->mutex); | 917 | mutex_unlock(&state->mutex); |
918 | } | 918 | } |
919 | 919 | ||
920 | static int uart_do_autoconfig(struct uart_state *state) | 920 | static int uart_do_autoconfig(struct uart_state *state) |
921 | { | 921 | { |
922 | struct uart_port *port = state->port; | 922 | struct uart_port *port = state->port; |
923 | int flags, ret; | 923 | int flags, ret; |
924 | 924 | ||
925 | if (!capable(CAP_SYS_ADMIN)) | 925 | if (!capable(CAP_SYS_ADMIN)) |
926 | return -EPERM; | 926 | return -EPERM; |
927 | 927 | ||
928 | /* | 928 | /* |
929 | * Take the per-port semaphore. This prevents count from | 929 | * Take the per-port semaphore. This prevents count from |
930 | * changing, and hence any extra opens of the port while | 930 | * changing, and hence any extra opens of the port while |
931 | * we're auto-configuring. | 931 | * we're auto-configuring. |
932 | */ | 932 | */ |
933 | if (mutex_lock_interruptible(&state->mutex)) | 933 | if (mutex_lock_interruptible(&state->mutex)) |
934 | return -ERESTARTSYS; | 934 | return -ERESTARTSYS; |
935 | 935 | ||
936 | ret = -EBUSY; | 936 | ret = -EBUSY; |
937 | if (uart_users(state) == 1) { | 937 | if (uart_users(state) == 1) { |
938 | uart_shutdown(state); | 938 | uart_shutdown(state); |
939 | 939 | ||
940 | /* | 940 | /* |
941 | * If we already have a port type configured, | 941 | * If we already have a port type configured, |
942 | * we must release its resources. | 942 | * we must release its resources. |
943 | */ | 943 | */ |
944 | if (port->type != PORT_UNKNOWN) | 944 | if (port->type != PORT_UNKNOWN) |
945 | port->ops->release_port(port); | 945 | port->ops->release_port(port); |
946 | 946 | ||
947 | flags = UART_CONFIG_TYPE; | 947 | flags = UART_CONFIG_TYPE; |
948 | if (port->flags & UPF_AUTO_IRQ) | 948 | if (port->flags & UPF_AUTO_IRQ) |
949 | flags |= UART_CONFIG_IRQ; | 949 | flags |= UART_CONFIG_IRQ; |
950 | 950 | ||
951 | /* | 951 | /* |
952 | * This will claim the port's resources if | 952 | * This will claim the port's resources if |
953 | * a port is found. | 953 | * a port is found. |
954 | */ | 954 | */ |
955 | port->ops->config_port(port, flags); | 955 | port->ops->config_port(port, flags); |
956 | 956 | ||
957 | ret = uart_startup(state, 1); | 957 | ret = uart_startup(state, 1); |
958 | } | 958 | } |
959 | mutex_unlock(&state->mutex); | 959 | mutex_unlock(&state->mutex); |
960 | return ret; | 960 | return ret; |
961 | } | 961 | } |
962 | 962 | ||
963 | /* | 963 | /* |
964 | * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change | 964 | * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change |
965 | * - mask passed in arg for lines of interest | 965 | * - mask passed in arg for lines of interest |
966 | * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking) | 966 | * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking) |
967 | * Caller should use TIOCGICOUNT to see which one it was | 967 | * Caller should use TIOCGICOUNT to see which one it was |
968 | */ | 968 | */ |
969 | static int | 969 | static int |
970 | uart_wait_modem_status(struct uart_state *state, unsigned long arg) | 970 | uart_wait_modem_status(struct uart_state *state, unsigned long arg) |
971 | { | 971 | { |
972 | struct uart_port *port = state->port; | 972 | struct uart_port *port = state->port; |
973 | DECLARE_WAITQUEUE(wait, current); | 973 | DECLARE_WAITQUEUE(wait, current); |
974 | struct uart_icount cprev, cnow; | 974 | struct uart_icount cprev, cnow; |
975 | int ret; | 975 | int ret; |
976 | 976 | ||
977 | /* | 977 | /* |
978 | * note the counters on entry | 978 | * note the counters on entry |
979 | */ | 979 | */ |
980 | spin_lock_irq(&port->lock); | 980 | spin_lock_irq(&port->lock); |
981 | memcpy(&cprev, &port->icount, sizeof(struct uart_icount)); | 981 | memcpy(&cprev, &port->icount, sizeof(struct uart_icount)); |
982 | 982 | ||
983 | /* | 983 | /* |
984 | * Force modem status interrupts on | 984 | * Force modem status interrupts on |
985 | */ | 985 | */ |
986 | port->ops->enable_ms(port); | 986 | port->ops->enable_ms(port); |
987 | spin_unlock_irq(&port->lock); | 987 | spin_unlock_irq(&port->lock); |
988 | 988 | ||
989 | add_wait_queue(&state->info->delta_msr_wait, &wait); | 989 | add_wait_queue(&state->info->delta_msr_wait, &wait); |
990 | for (;;) { | 990 | for (;;) { |
991 | spin_lock_irq(&port->lock); | 991 | spin_lock_irq(&port->lock); |
992 | memcpy(&cnow, &port->icount, sizeof(struct uart_icount)); | 992 | memcpy(&cnow, &port->icount, sizeof(struct uart_icount)); |
993 | spin_unlock_irq(&port->lock); | 993 | spin_unlock_irq(&port->lock); |
994 | 994 | ||
995 | set_current_state(TASK_INTERRUPTIBLE); | 995 | set_current_state(TASK_INTERRUPTIBLE); |
996 | 996 | ||
997 | if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) || | 997 | if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) || |
998 | ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) || | 998 | ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) || |
999 | ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) || | 999 | ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) || |
1000 | ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) { | 1000 | ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) { |
1001 | ret = 0; | 1001 | ret = 0; |
1002 | break; | 1002 | break; |
1003 | } | 1003 | } |
1004 | 1004 | ||
1005 | schedule(); | 1005 | schedule(); |
1006 | 1006 | ||
1007 | /* see if a signal did it */ | 1007 | /* see if a signal did it */ |
1008 | if (signal_pending(current)) { | 1008 | if (signal_pending(current)) { |
1009 | ret = -ERESTARTSYS; | 1009 | ret = -ERESTARTSYS; |
1010 | break; | 1010 | break; |
1011 | } | 1011 | } |
1012 | 1012 | ||
1013 | cprev = cnow; | 1013 | cprev = cnow; |
1014 | } | 1014 | } |
1015 | 1015 | ||
1016 | current->state = TASK_RUNNING; | 1016 | current->state = TASK_RUNNING; |
1017 | remove_wait_queue(&state->info->delta_msr_wait, &wait); | 1017 | remove_wait_queue(&state->info->delta_msr_wait, &wait); |
1018 | 1018 | ||
1019 | return ret; | 1019 | return ret; |
1020 | } | 1020 | } |
1021 | 1021 | ||
1022 | /* | 1022 | /* |
1023 | * Get counter of input serial line interrupts (DCD,RI,DSR,CTS) | 1023 | * Get counter of input serial line interrupts (DCD,RI,DSR,CTS) |
1024 | * Return: write counters to the user passed counter struct | 1024 | * Return: write counters to the user passed counter struct |
1025 | * NB: both 1->0 and 0->1 transitions are counted except for | 1025 | * NB: both 1->0 and 0->1 transitions are counted except for |
1026 | * RI where only 0->1 is counted. | 1026 | * RI where only 0->1 is counted. |
1027 | */ | 1027 | */ |
1028 | static int uart_get_count(struct uart_state *state, | 1028 | static int uart_get_count(struct uart_state *state, |
1029 | struct serial_icounter_struct __user *icnt) | 1029 | struct serial_icounter_struct __user *icnt) |
1030 | { | 1030 | { |
1031 | struct serial_icounter_struct icount; | 1031 | struct serial_icounter_struct icount; |
1032 | struct uart_icount cnow; | 1032 | struct uart_icount cnow; |
1033 | struct uart_port *port = state->port; | 1033 | struct uart_port *port = state->port; |
1034 | 1034 | ||
1035 | spin_lock_irq(&port->lock); | 1035 | spin_lock_irq(&port->lock); |
1036 | memcpy(&cnow, &port->icount, sizeof(struct uart_icount)); | 1036 | memcpy(&cnow, &port->icount, sizeof(struct uart_icount)); |
1037 | spin_unlock_irq(&port->lock); | 1037 | spin_unlock_irq(&port->lock); |
1038 | 1038 | ||
1039 | icount.cts = cnow.cts; | 1039 | icount.cts = cnow.cts; |
1040 | icount.dsr = cnow.dsr; | 1040 | icount.dsr = cnow.dsr; |
1041 | icount.rng = cnow.rng; | 1041 | icount.rng = cnow.rng; |
1042 | icount.dcd = cnow.dcd; | 1042 | icount.dcd = cnow.dcd; |
1043 | icount.rx = cnow.rx; | 1043 | icount.rx = cnow.rx; |
1044 | icount.tx = cnow.tx; | 1044 | icount.tx = cnow.tx; |
1045 | icount.frame = cnow.frame; | 1045 | icount.frame = cnow.frame; |
1046 | icount.overrun = cnow.overrun; | 1046 | icount.overrun = cnow.overrun; |
1047 | icount.parity = cnow.parity; | 1047 | icount.parity = cnow.parity; |
1048 | icount.brk = cnow.brk; | 1048 | icount.brk = cnow.brk; |
1049 | icount.buf_overrun = cnow.buf_overrun; | 1049 | icount.buf_overrun = cnow.buf_overrun; |
1050 | 1050 | ||
1051 | return copy_to_user(icnt, &icount, sizeof(icount)) ? -EFAULT : 0; | 1051 | return copy_to_user(icnt, &icount, sizeof(icount)) ? -EFAULT : 0; |
1052 | } | 1052 | } |
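The two ioctls implemented above are normally used as a pair from userspace: TIOCMIWAIT blocks in uart_wait_modem_status() until one of the masked modem lines changes, and TIOCGICOUNT then reports which counters moved. A minimal userspace sketch, not part of this driver; the device path and the chosen mask are illustrative assumptions:

    /* Illustrative sketch: wait for DCD or CTS to change, then read the
     * per-line interrupt counters.  "/dev/ttyS0" is an example only. */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/serial.h>        /* struct serial_icounter_struct */

    int main(void)
    {
            struct serial_icounter_struct ic;
            int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);

            if (fd < 0)
                    return 1;

            /* Sleeps in uart_wait_modem_status() until DCD or CTS toggles. */
            if (ioctl(fd, TIOCMIWAIT, TIOCM_CD | TIOCM_CTS) == 0 &&
                ioctl(fd, TIOCGICOUNT, &ic) == 0)
                    printf("dcd=%d cts=%d rx=%d tx=%d\n",
                           ic.dcd, ic.cts, ic.rx, ic.tx);

            close(fd);
            return 0;
    }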
1053 | 1053 | ||
1054 | /* | 1054 | /* |
1055 | * Called via sys_ioctl under the BKL. We can use spin_lock_irq() here. | 1055 | * Called via sys_ioctl under the BKL. We can use spin_lock_irq() here. |
1056 | */ | 1056 | */ |
1057 | static int | 1057 | static int |
1058 | uart_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd, | 1058 | uart_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd, |
1059 | unsigned long arg) | 1059 | unsigned long arg) |
1060 | { | 1060 | { |
1061 | struct uart_state *state = tty->driver_data; | 1061 | struct uart_state *state = tty->driver_data; |
1062 | void __user *uarg = (void __user *)arg; | 1062 | void __user *uarg = (void __user *)arg; |
1063 | int ret = -ENOIOCTLCMD; | 1063 | int ret = -ENOIOCTLCMD; |
1064 | 1064 | ||
1065 | BUG_ON(!kernel_locked()); | 1065 | BUG_ON(!kernel_locked()); |
1066 | 1066 | ||
1067 | /* | 1067 | /* |
1068 | * These ioctls don't rely on the hardware to be present. | 1068 | * These ioctls don't rely on the hardware to be present. |
1069 | */ | 1069 | */ |
1070 | switch (cmd) { | 1070 | switch (cmd) { |
1071 | case TIOCGSERIAL: | 1071 | case TIOCGSERIAL: |
1072 | ret = uart_get_info(state, uarg); | 1072 | ret = uart_get_info(state, uarg); |
1073 | break; | 1073 | break; |
1074 | 1074 | ||
1075 | case TIOCSSERIAL: | 1075 | case TIOCSSERIAL: |
1076 | ret = uart_set_info(state, uarg); | 1076 | ret = uart_set_info(state, uarg); |
1077 | break; | 1077 | break; |
1078 | 1078 | ||
1079 | case TIOCSERCONFIG: | 1079 | case TIOCSERCONFIG: |
1080 | ret = uart_do_autoconfig(state); | 1080 | ret = uart_do_autoconfig(state); |
1081 | break; | 1081 | break; |
1082 | 1082 | ||
1083 | case TIOCSERGWILD: /* obsolete */ | 1083 | case TIOCSERGWILD: /* obsolete */ |
1084 | case TIOCSERSWILD: /* obsolete */ | 1084 | case TIOCSERSWILD: /* obsolete */ |
1085 | ret = 0; | 1085 | ret = 0; |
1086 | break; | 1086 | break; |
1087 | } | 1087 | } |
1088 | 1088 | ||
1089 | if (ret != -ENOIOCTLCMD) | 1089 | if (ret != -ENOIOCTLCMD) |
1090 | goto out; | 1090 | goto out; |
1091 | 1091 | ||
1092 | if (tty->flags & (1 << TTY_IO_ERROR)) { | 1092 | if (tty->flags & (1 << TTY_IO_ERROR)) { |
1093 | ret = -EIO; | 1093 | ret = -EIO; |
1094 | goto out; | 1094 | goto out; |
1095 | } | 1095 | } |
1096 | 1096 | ||
1097 | /* | 1097 | /* |
1098 | * The following should only be used when hardware is present. | 1098 | * The following should only be used when hardware is present. |
1099 | */ | 1099 | */ |
1100 | switch (cmd) { | 1100 | switch (cmd) { |
1101 | case TIOCMIWAIT: | 1101 | case TIOCMIWAIT: |
1102 | ret = uart_wait_modem_status(state, arg); | 1102 | ret = uart_wait_modem_status(state, arg); |
1103 | break; | 1103 | break; |
1104 | 1104 | ||
1105 | case TIOCGICOUNT: | 1105 | case TIOCGICOUNT: |
1106 | ret = uart_get_count(state, uarg); | 1106 | ret = uart_get_count(state, uarg); |
1107 | break; | 1107 | break; |
1108 | } | 1108 | } |
1109 | 1109 | ||
1110 | if (ret != -ENOIOCTLCMD) | 1110 | if (ret != -ENOIOCTLCMD) |
1111 | goto out; | 1111 | goto out; |
1112 | 1112 | ||
1113 | mutex_lock(&state->mutex); | 1113 | mutex_lock(&state->mutex); |
1114 | 1114 | ||
1115 | if (tty_hung_up_p(filp)) { | 1115 | if (tty_hung_up_p(filp)) { |
1116 | ret = -EIO; | 1116 | ret = -EIO; |
1117 | goto out_up; | 1117 | goto out_up; |
1118 | } | 1118 | } |
1119 | 1119 | ||
1120 | /* | 1120 | /* |
1121 | * All these rely on hardware being present and need to be | 1121 | * All these rely on hardware being present and need to be |
1122 | * protected against the tty being hung up. | 1122 | * protected against the tty being hung up. |
1123 | */ | 1123 | */ |
1124 | switch (cmd) { | 1124 | switch (cmd) { |
1125 | case TIOCSERGETLSR: /* Get line status register */ | 1125 | case TIOCSERGETLSR: /* Get line status register */ |
1126 | ret = uart_get_lsr_info(state, uarg); | 1126 | ret = uart_get_lsr_info(state, uarg); |
1127 | break; | 1127 | break; |
1128 | 1128 | ||
1129 | default: { | 1129 | default: { |
1130 | struct uart_port *port = state->port; | 1130 | struct uart_port *port = state->port; |
1131 | if (port->ops->ioctl) | 1131 | if (port->ops->ioctl) |
1132 | ret = port->ops->ioctl(port, cmd, arg); | 1132 | ret = port->ops->ioctl(port, cmd, arg); |
1133 | break; | 1133 | break; |
1134 | } | 1134 | } |
1135 | } | 1135 | } |
1136 | out_up: | 1136 | out_up: |
1137 | mutex_unlock(&state->mutex); | 1137 | mutex_unlock(&state->mutex); |
1138 | out: | 1138 | out: |
1139 | return ret; | 1139 | return ret; |
1140 | } | 1140 | } |
1141 | 1141 | ||
1142 | static void uart_set_termios(struct tty_struct *tty, | 1142 | static void uart_set_termios(struct tty_struct *tty, |
1143 | struct ktermios *old_termios) | 1143 | struct ktermios *old_termios) |
1144 | { | 1144 | { |
1145 | struct uart_state *state = tty->driver_data; | 1145 | struct uart_state *state = tty->driver_data; |
1146 | unsigned long flags; | 1146 | unsigned long flags; |
1147 | unsigned int cflag = tty->termios->c_cflag; | 1147 | unsigned int cflag = tty->termios->c_cflag; |
1148 | 1148 | ||
1149 | BUG_ON(!kernel_locked()); | 1149 | BUG_ON(!kernel_locked()); |
1150 | 1150 | ||
1151 | /* | 1151 | /* |
1152 | * These are the bits that are used to setup various | 1152 | * These are the bits that are used to setup various |
1153 | * flags in the low level driver. We can ignore the Bfoo | 1153 | * flags in the low level driver. We can ignore the Bfoo |
1154 | * bits in c_cflag; c_[io]speed will always be set | 1154 | * bits in c_cflag; c_[io]speed will always be set |
1155 | * appropriately by set_termios() in tty_ioctl.c | 1155 | * appropriately by set_termios() in tty_ioctl.c |
1156 | */ | 1156 | */ |
1157 | #define RELEVANT_IFLAG(iflag) ((iflag) & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) | 1157 | #define RELEVANT_IFLAG(iflag) ((iflag) & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) |
1158 | if ((cflag ^ old_termios->c_cflag) == 0 && | 1158 | if ((cflag ^ old_termios->c_cflag) == 0 && |
1159 | tty->termios->c_ospeed == old_termios->c_ospeed && | 1159 | tty->termios->c_ospeed == old_termios->c_ospeed && |
1160 | tty->termios->c_ispeed == old_termios->c_ispeed && | 1160 | tty->termios->c_ispeed == old_termios->c_ispeed && |
1161 | RELEVANT_IFLAG(tty->termios->c_iflag ^ old_termios->c_iflag) == 0) | 1161 | RELEVANT_IFLAG(tty->termios->c_iflag ^ old_termios->c_iflag) == 0) |
1162 | return; | 1162 | return; |
1163 | 1163 | ||
1164 | uart_change_speed(state, old_termios); | 1164 | uart_change_speed(state, old_termios); |
1165 | 1165 | ||
1166 | /* Handle transition to B0 status */ | 1166 | /* Handle transition to B0 status */ |
1167 | if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD)) | 1167 | if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD)) |
1168 | uart_clear_mctrl(state->port, TIOCM_RTS | TIOCM_DTR); | 1168 | uart_clear_mctrl(state->port, TIOCM_RTS | TIOCM_DTR); |
1169 | 1169 | ||
1170 | /* Handle transition away from B0 status */ | 1170 | /* Handle transition away from B0 status */ |
1171 | if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) { | 1171 | if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) { |
1172 | unsigned int mask = TIOCM_DTR; | 1172 | unsigned int mask = TIOCM_DTR; |
1173 | if (!(cflag & CRTSCTS) || | 1173 | if (!(cflag & CRTSCTS) || |
1174 | !test_bit(TTY_THROTTLED, &tty->flags)) | 1174 | !test_bit(TTY_THROTTLED, &tty->flags)) |
1175 | mask |= TIOCM_RTS; | 1175 | mask |= TIOCM_RTS; |
1176 | uart_set_mctrl(state->port, mask); | 1176 | uart_set_mctrl(state->port, mask); |
1177 | } | 1177 | } |
1178 | 1178 | ||
1179 | /* Handle turning off CRTSCTS */ | 1179 | /* Handle turning off CRTSCTS */ |
1180 | if ((old_termios->c_cflag & CRTSCTS) && !(cflag & CRTSCTS)) { | 1180 | if ((old_termios->c_cflag & CRTSCTS) && !(cflag & CRTSCTS)) { |
1181 | spin_lock_irqsave(&state->port->lock, flags); | 1181 | spin_lock_irqsave(&state->port->lock, flags); |
1182 | tty->hw_stopped = 0; | 1182 | tty->hw_stopped = 0; |
1183 | __uart_start(tty); | 1183 | __uart_start(tty); |
1184 | spin_unlock_irqrestore(&state->port->lock, flags); | 1184 | spin_unlock_irqrestore(&state->port->lock, flags); |
1185 | } | 1185 | } |
1186 | 1186 | ||
1187 | /* Handle turning on CRTSCTS */ | 1187 | /* Handle turning on CRTSCTS */ |
1188 | if (!(old_termios->c_cflag & CRTSCTS) && (cflag & CRTSCTS)) { | 1188 | if (!(old_termios->c_cflag & CRTSCTS) && (cflag & CRTSCTS)) { |
1189 | spin_lock_irqsave(&state->port->lock, flags); | 1189 | spin_lock_irqsave(&state->port->lock, flags); |
1190 | if (!(state->port->ops->get_mctrl(state->port) & TIOCM_CTS)) { | 1190 | if (!(state->port->ops->get_mctrl(state->port) & TIOCM_CTS)) { |
1191 | tty->hw_stopped = 1; | 1191 | tty->hw_stopped = 1; |
1192 | state->port->ops->stop_tx(state->port); | 1192 | state->port->ops->stop_tx(state->port); |
1193 | } | 1193 | } |
1194 | spin_unlock_irqrestore(&state->port->lock, flags); | 1194 | spin_unlock_irqrestore(&state->port->lock, flags); |
1195 | } | 1195 | } |
1196 | 1196 | ||
1197 | #if 0 | 1197 | #if 0 |
1198 | /* | 1198 | /* |
1199 | * No need to wake up processes in open wait, since they | 1199 | * No need to wake up processes in open wait, since they |
1200 | * sample the CLOCAL flag once, and don't recheck it. | 1200 | * sample the CLOCAL flag once, and don't recheck it. |
1201 | * XXX It's not clear whether the current behavior is correct | 1201 | * XXX It's not clear whether the current behavior is correct |
1202 | * or not. Hence, this may change..... | 1202 | * or not. Hence, this may change..... |
1203 | */ | 1203 | */ |
1204 | if (!(old_termios->c_cflag & CLOCAL) && | 1204 | if (!(old_termios->c_cflag & CLOCAL) && |
1205 | (tty->termios->c_cflag & CLOCAL)) | 1205 | (tty->termios->c_cflag & CLOCAL)) |
1206 | wake_up_interruptible(&state->info->open_wait); | 1206 | wake_up_interruptible(&state->info->open_wait); |
1207 | #endif | 1207 | #endif |
1208 | } | 1208 | } |
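The B0 and CRTSCTS transitions handled above are triggered by ordinary termios calls. A minimal userspace sketch of what drives them (the device path is assumed purely for illustration; CRTSCTS is the usual glibc/BSD extension):

    /* Illustrative sketch: switching the speed to B0 makes
     * uart_set_termios() drop DTR and RTS; restoring a non-zero rate
     * raises DTR again, and toggling CRTSCTS switches hardware flow
     * control on or off.  "/dev/ttyS0" is an example only. */
    #include <fcntl.h>
    #include <unistd.h>
    #include <termios.h>

    int main(void)
    {
            struct termios tio;
            int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);

            if (fd < 0 || tcgetattr(fd, &tio) < 0)
                    return 1;

            cfsetospeed(&tio, B0);          /* "hang up": DTR/RTS cleared */
            cfsetispeed(&tio, B0);
            tcsetattr(fd, TCSANOW, &tio);

            cfsetospeed(&tio, B115200);     /* back up: DTR raised again */
            cfsetispeed(&tio, B115200);
            tio.c_cflag |= CRTSCTS;         /* enable RTS/CTS flow control */
            tcsetattr(fd, TCSANOW, &tio);

            close(fd);
            return 0;
    }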
1209 | 1209 | ||
1210 | /* | 1210 | /* |
1211 | * In 2.4.5, calls to this will be serialized via the BKL in | 1211 | * In 2.4.5, calls to this will be serialized via the BKL in |
1212 | * linux/drivers/char/tty_io.c:tty_release() | 1212 | * linux/drivers/char/tty_io.c:tty_release() |
1213 | * linux/drivers/char/tty_io.c:do_tty_hangup() | 1213 | * linux/drivers/char/tty_io.c:do_tty_hangup() |
1214 | */ | 1214 | */ |
1215 | static void uart_close(struct tty_struct *tty, struct file *filp) | 1215 | static void uart_close(struct tty_struct *tty, struct file *filp) |
1216 | { | 1216 | { |
1217 | struct uart_state *state = tty->driver_data; | 1217 | struct uart_state *state = tty->driver_data; |
1218 | struct uart_port *port; | 1218 | struct uart_port *port; |
1219 | 1219 | ||
1220 | BUG_ON(!kernel_locked()); | 1220 | BUG_ON(!kernel_locked()); |
1221 | 1221 | ||
1222 | if (!state || !state->port) | 1222 | if (!state || !state->port) |
1223 | return; | 1223 | return; |
1224 | 1224 | ||
1225 | port = state->port; | 1225 | port = state->port; |
1226 | 1226 | ||
1227 | pr_debug("uart_close(%d) called\n", port->line); | 1227 | pr_debug("uart_close(%d) called\n", port->line); |
1228 | 1228 | ||
1229 | mutex_lock(&state->mutex); | 1229 | mutex_lock(&state->mutex); |
1230 | 1230 | ||
1231 | if (tty_hung_up_p(filp)) | 1231 | if (tty_hung_up_p(filp)) |
1232 | goto done; | 1232 | goto done; |
1233 | 1233 | ||
1234 | if ((tty->count == 1) && (state->count != 1)) { | 1234 | if ((tty->count == 1) && (state->count != 1)) { |
1235 | /* | 1235 | /* |
1236 | * Uh, oh. tty->count is 1, which means that the tty | 1236 | * Uh, oh. tty->count is 1, which means that the tty |
1237 | * structure will be freed. state->count should always | 1237 | * structure will be freed. state->count should always |
1238 | * be one in these conditions. If it's greater than | 1238 | * be one in these conditions. If it's greater than |
1239 | * one, we've got real problems, since it means the | 1239 | * one, we've got real problems, since it means the |
1240 | * serial port won't be shutdown. | 1240 | * serial port won't be shutdown. |
1241 | */ | 1241 | */ |
1242 | printk(KERN_ERR "uart_close: bad serial port count; tty->count is 1, " | 1242 | printk(KERN_ERR "uart_close: bad serial port count; tty->count is 1, " |
1243 | "state->count is %d\n", state->count); | 1243 | "state->count is %d\n", state->count); |
1244 | state->count = 1; | 1244 | state->count = 1; |
1245 | } | 1245 | } |
1246 | if (--state->count < 0) { | 1246 | if (--state->count < 0) { |
1247 | printk(KERN_ERR "uart_close: bad serial port count for %s: %d\n", | 1247 | printk(KERN_ERR "uart_close: bad serial port count for %s: %d\n", |
1248 | tty->name, state->count); | 1248 | tty->name, state->count); |
1249 | state->count = 0; | 1249 | state->count = 0; |
1250 | } | 1250 | } |
1251 | if (state->count) | 1251 | if (state->count) |
1252 | goto done; | 1252 | goto done; |
1253 | 1253 | ||
1254 | /* | 1254 | /* |
1255 | * Now we wait for the transmit buffer to clear; and we notify | 1255 | * Now we wait for the transmit buffer to clear; and we notify |
1256 | * the line discipline to only process XON/XOFF characters by | 1256 | * the line discipline to only process XON/XOFF characters by |
1257 | * setting tty->closing. | 1257 | * setting tty->closing. |
1258 | */ | 1258 | */ |
1259 | tty->closing = 1; | 1259 | tty->closing = 1; |
1260 | 1260 | ||
1261 | if (state->closing_wait != USF_CLOSING_WAIT_NONE) | 1261 | if (state->closing_wait != USF_CLOSING_WAIT_NONE) |
1262 | tty_wait_until_sent(tty, msecs_to_jiffies(state->closing_wait)); | 1262 | tty_wait_until_sent(tty, msecs_to_jiffies(state->closing_wait)); |
1263 | 1263 | ||
1264 | /* | 1264 | /* |
1265 | * At this point, we stop accepting input. To do this, we | 1265 | * At this point, we stop accepting input. To do this, we |
1266 | * disable the receive line status interrupts. | 1266 | * disable the receive line status interrupts. |
1267 | */ | 1267 | */ |
1268 | if (state->info->flags & UIF_INITIALIZED) { | 1268 | if (state->info->flags & UIF_INITIALIZED) { |
1269 | unsigned long flags; | 1269 | unsigned long flags; |
1270 | spin_lock_irqsave(&port->lock, flags); | 1270 | spin_lock_irqsave(&port->lock, flags); |
1271 | port->ops->stop_rx(port); | 1271 | port->ops->stop_rx(port); |
1272 | spin_unlock_irqrestore(&port->lock, flags); | 1272 | spin_unlock_irqrestore(&port->lock, flags); |
1273 | /* | 1273 | /* |
1274 | * Before we drop DTR, make sure the UART transmitter | 1274 | * Before we drop DTR, make sure the UART transmitter |
1275 | * has completely drained; this is especially | 1275 | * has completely drained; this is especially |
1276 | * important if there is a transmit FIFO! | 1276 | * important if there is a transmit FIFO! |
1277 | */ | 1277 | */ |
1278 | uart_wait_until_sent(tty, port->timeout); | 1278 | uart_wait_until_sent(tty, port->timeout); |
1279 | } | 1279 | } |
1280 | 1280 | ||
1281 | uart_shutdown(state); | 1281 | uart_shutdown(state); |
1282 | uart_flush_buffer(tty); | 1282 | uart_flush_buffer(tty); |
1283 | 1283 | ||
1284 | tty_ldisc_flush(tty); | 1284 | tty_ldisc_flush(tty); |
1285 | 1285 | ||
1286 | tty->closing = 0; | 1286 | tty->closing = 0; |
1287 | state->info->tty = NULL; | 1287 | state->info->tty = NULL; |
1288 | 1288 | ||
1289 | if (state->info->blocked_open) { | 1289 | if (state->info->blocked_open) { |
1290 | if (state->close_delay) | 1290 | if (state->close_delay) |
1291 | msleep_interruptible(state->close_delay); | 1291 | msleep_interruptible(state->close_delay); |
1292 | } else if (!uart_console(port)) { | 1292 | } else if (!uart_console(port)) { |
1293 | uart_change_pm(state, 3); | 1293 | uart_change_pm(state, 3); |
1294 | } | 1294 | } |
1295 | 1295 | ||
1296 | /* | 1296 | /* |
1297 | * Wake up anyone trying to open this port. | 1297 | * Wake up anyone trying to open this port. |
1298 | */ | 1298 | */ |
1299 | state->info->flags &= ~UIF_NORMAL_ACTIVE; | 1299 | state->info->flags &= ~UIF_NORMAL_ACTIVE; |
1300 | wake_up_interruptible(&state->info->open_wait); | 1300 | wake_up_interruptible(&state->info->open_wait); |
1301 | 1301 | ||
1302 | done: | 1302 | done: |
1303 | mutex_unlock(&state->mutex); | 1303 | mutex_unlock(&state->mutex); |
1304 | } | 1304 | } |
1305 | 1305 | ||
1306 | static void uart_wait_until_sent(struct tty_struct *tty, int timeout) | 1306 | static void uart_wait_until_sent(struct tty_struct *tty, int timeout) |
1307 | { | 1307 | { |
1308 | struct uart_state *state = tty->driver_data; | 1308 | struct uart_state *state = tty->driver_data; |
1309 | struct uart_port *port = state->port; | 1309 | struct uart_port *port = state->port; |
1310 | unsigned long char_time, expire; | 1310 | unsigned long char_time, expire; |
1311 | 1311 | ||
1312 | BUG_ON(!kernel_locked()); | 1312 | BUG_ON(!kernel_locked()); |
1313 | 1313 | ||
1314 | if (port->type == PORT_UNKNOWN || port->fifosize == 0) | 1314 | if (port->type == PORT_UNKNOWN || port->fifosize == 0) |
1315 | return; | 1315 | return; |
1316 | 1316 | ||
1317 | /* | 1317 | /* |
1318 | * Set the check interval to be 1/5 of the estimated time to | 1318 | * Set the check interval to be 1/5 of the estimated time to |
1319 | * send a single character, and make it at least 1. The check | 1319 | * send a single character, and make it at least 1. The check |
1320 | * interval should also be less than the timeout. | 1320 | * interval should also be less than the timeout. |
1321 | * | 1321 | * |
1322 | * Note: we have to use pretty tight timings here to satisfy | 1322 | * Note: we have to use pretty tight timings here to satisfy |
1323 | * the NIST-PCTS. | 1323 | * the NIST-PCTS. |
1324 | */ | 1324 | */ |
1325 | char_time = (port->timeout - HZ/50) / port->fifosize; | 1325 | char_time = (port->timeout - HZ/50) / port->fifosize; |
1326 | char_time = char_time / 5; | 1326 | char_time = char_time / 5; |
1327 | if (char_time == 0) | 1327 | if (char_time == 0) |
1328 | char_time = 1; | 1328 | char_time = 1; |
1329 | if (timeout && timeout < char_time) | 1329 | if (timeout && timeout < char_time) |
1330 | char_time = timeout; | 1330 | char_time = timeout; |
1331 | 1331 | ||
1332 | /* | 1332 | /* |
1333 | * If the transmitter hasn't cleared in twice the approximate | 1333 | * If the transmitter hasn't cleared in twice the approximate |
1334 | * amount of time to send the entire FIFO, it probably won't | 1334 | * amount of time to send the entire FIFO, it probably won't |
1335 | * ever clear. This assumes the UART isn't doing flow | 1335 | * ever clear. This assumes the UART isn't doing flow |
1336 | * control, which is currently the case. Hence, if it ever | 1336 | * control, which is currently the case. Hence, if it ever |
1337 | * takes longer than port->timeout, this is probably due to a | 1337 | * takes longer than port->timeout, this is probably due to a |
1338 | * UART bug of some kind. So, we clamp the timeout parameter at | 1338 | * UART bug of some kind. So, we clamp the timeout parameter at |
1339 | * 2*port->timeout. | 1339 | * 2*port->timeout. |
1340 | */ | 1340 | */ |
1341 | if (timeout == 0 || timeout > 2 * port->timeout) | 1341 | if (timeout == 0 || timeout > 2 * port->timeout) |
1342 | timeout = 2 * port->timeout; | 1342 | timeout = 2 * port->timeout; |
1343 | 1343 | ||
1344 | expire = jiffies + timeout; | 1344 | expire = jiffies + timeout; |
1345 | 1345 | ||
1346 | pr_debug("uart_wait_until_sent(%d), jiffies=%lu, expire=%lu...\n", | 1346 | pr_debug("uart_wait_until_sent(%d), jiffies=%lu, expire=%lu...\n", |
1347 | port->line, jiffies, expire); | 1347 | port->line, jiffies, expire); |
1348 | 1348 | ||
1349 | /* | 1349 | /* |
1350 | * Check whether the transmitter is empty every 'char_time'. | 1350 | * Check whether the transmitter is empty every 'char_time'. |
1351 | * 'timeout' / 'expire' give us the maximum amount of time | 1351 | * 'timeout' / 'expire' give us the maximum amount of time |
1352 | * we wait. | 1352 | * we wait. |
1353 | */ | 1353 | */ |
1354 | while (!port->ops->tx_empty(port)) { | 1354 | while (!port->ops->tx_empty(port)) { |
1355 | msleep_interruptible(jiffies_to_msecs(char_time)); | 1355 | msleep_interruptible(jiffies_to_msecs(char_time)); |
1356 | if (signal_pending(current)) | 1356 | if (signal_pending(current)) |
1357 | break; | 1357 | break; |
1358 | if (time_after(jiffies, expire)) | 1358 | if (time_after(jiffies, expire)) |
1359 | break; | 1359 | break; |
1360 | } | 1360 | } |
1361 | set_current_state(TASK_RUNNING); /* might not be needed */ | 1361 | set_current_state(TASK_RUNNING); /* might not be needed */ |
1362 | } | 1362 | } |
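To make the timing comments above concrete with invented numbers: with HZ=100, a 16-byte FIFO and port->timeout of, say, 30 jiffies, char_time becomes ((30 - HZ/50) / 16) / 5 = ((30 - 2) / 16) / 5 = 0 and is clamped up to 1 jiffy, and a timeout argument of 0 (or anything larger than 60) is clamped to 2 * 30 = 60 jiffies. The loop therefore polls tx_empty() roughly once per jiffy for at most 60 jiffies before giving up.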
1363 | 1363 | ||
1364 | /* | 1364 | /* |
1365 | * This is called with the BKL held in | 1365 | * This is called with the BKL held in |
1366 | * linux/drivers/char/tty_io.c:do_tty_hangup() | 1366 | * linux/drivers/char/tty_io.c:do_tty_hangup() |
1367 | * We're called from the eventd thread, so we can sleep for | 1367 | * We're called from the eventd thread, so we can sleep for |
1368 | * a _short_ time only. | 1368 | * a _short_ time only. |
1369 | */ | 1369 | */ |
1370 | static void uart_hangup(struct tty_struct *tty) | 1370 | static void uart_hangup(struct tty_struct *tty) |
1371 | { | 1371 | { |
1372 | struct uart_state *state = tty->driver_data; | 1372 | struct uart_state *state = tty->driver_data; |
1373 | 1373 | ||
1374 | BUG_ON(!kernel_locked()); | 1374 | BUG_ON(!kernel_locked()); |
1375 | pr_debug("uart_hangup(%d)\n", state->port->line); | 1375 | pr_debug("uart_hangup(%d)\n", state->port->line); |
1376 | 1376 | ||
1377 | mutex_lock(&state->mutex); | 1377 | mutex_lock(&state->mutex); |
1378 | if (state->info && state->info->flags & UIF_NORMAL_ACTIVE) { | 1378 | if (state->info && state->info->flags & UIF_NORMAL_ACTIVE) { |
1379 | uart_flush_buffer(tty); | 1379 | uart_flush_buffer(tty); |
1380 | uart_shutdown(state); | 1380 | uart_shutdown(state); |
1381 | state->count = 0; | 1381 | state->count = 0; |
1382 | state->info->flags &= ~UIF_NORMAL_ACTIVE; | 1382 | state->info->flags &= ~UIF_NORMAL_ACTIVE; |
1383 | state->info->tty = NULL; | 1383 | state->info->tty = NULL; |
1384 | wake_up_interruptible(&state->info->open_wait); | 1384 | wake_up_interruptible(&state->info->open_wait); |
1385 | wake_up_interruptible(&state->info->delta_msr_wait); | 1385 | wake_up_interruptible(&state->info->delta_msr_wait); |
1386 | } | 1386 | } |
1387 | mutex_unlock(&state->mutex); | 1387 | mutex_unlock(&state->mutex); |
1388 | } | 1388 | } |
1389 | 1389 | ||
1390 | /* | 1390 | /* |
1391 | * Copy across the serial console cflag setting into the termios settings | 1391 | * Copy across the serial console cflag setting into the termios settings |
1392 | * for the initial open of the port. This allows continuity between the | 1392 | * for the initial open of the port. This allows continuity between the |
1393 | * kernel settings, and the settings init adopts when it opens the port | 1393 | * kernel settings, and the settings init adopts when it opens the port |
1394 | * for the first time. | 1394 | * for the first time. |
1395 | */ | 1395 | */ |
1396 | static void uart_update_termios(struct uart_state *state) | 1396 | static void uart_update_termios(struct uart_state *state) |
1397 | { | 1397 | { |
1398 | struct tty_struct *tty = state->info->tty; | 1398 | struct tty_struct *tty = state->info->tty; |
1399 | struct uart_port *port = state->port; | 1399 | struct uart_port *port = state->port; |
1400 | 1400 | ||
1401 | if (uart_console(port) && port->cons->cflag) { | 1401 | if (uart_console(port) && port->cons->cflag) { |
1402 | tty->termios->c_cflag = port->cons->cflag; | 1402 | tty->termios->c_cflag = port->cons->cflag; |
1403 | port->cons->cflag = 0; | 1403 | port->cons->cflag = 0; |
1404 | } | 1404 | } |
1405 | 1405 | ||
1406 | /* | 1406 | /* |
1407 | * If the device failed to grab its irq resources, | 1407 | * If the device failed to grab its irq resources, |
1408 | * or some other error occurred, don't try to talk | 1408 | * or some other error occurred, don't try to talk |
1409 | * to the port hardware. | 1409 | * to the port hardware. |
1410 | */ | 1410 | */ |
1411 | if (!(tty->flags & (1 << TTY_IO_ERROR))) { | 1411 | if (!(tty->flags & (1 << TTY_IO_ERROR))) { |
1412 | /* | 1412 | /* |
1413 | * Make termios settings take effect. | 1413 | * Make termios settings take effect. |
1414 | */ | 1414 | */ |
1415 | uart_change_speed(state, NULL); | 1415 | uart_change_speed(state, NULL); |
1416 | 1416 | ||
1417 | /* | 1417 | /* |
1418 | * And finally enable the RTS and DTR signals. | 1418 | * And finally enable the RTS and DTR signals. |
1419 | */ | 1419 | */ |
1420 | if (tty->termios->c_cflag & CBAUD) | 1420 | if (tty->termios->c_cflag & CBAUD) |
1421 | uart_set_mctrl(port, TIOCM_DTR | TIOCM_RTS); | 1421 | uart_set_mctrl(port, TIOCM_DTR | TIOCM_RTS); |
1422 | } | 1422 | } |
1423 | } | 1423 | } |
1424 | 1424 | ||
1425 | /* | 1425 | /* |
1426 | * Block the open until the port is ready. We must be called with | 1426 | * Block the open until the port is ready. We must be called with |
1427 | * the per-port semaphore held. | 1427 | * the per-port semaphore held. |
1428 | */ | 1428 | */ |
1429 | static int | 1429 | static int |
1430 | uart_block_til_ready(struct file *filp, struct uart_state *state) | 1430 | uart_block_til_ready(struct file *filp, struct uart_state *state) |
1431 | { | 1431 | { |
1432 | DECLARE_WAITQUEUE(wait, current); | 1432 | DECLARE_WAITQUEUE(wait, current); |
1433 | struct uart_info *info = state->info; | 1433 | struct uart_info *info = state->info; |
1434 | struct uart_port *port = state->port; | 1434 | struct uart_port *port = state->port; |
1435 | unsigned int mctrl; | 1435 | unsigned int mctrl; |
1436 | 1436 | ||
1437 | info->blocked_open++; | 1437 | info->blocked_open++; |
1438 | state->count--; | 1438 | state->count--; |
1439 | 1439 | ||
1440 | add_wait_queue(&info->open_wait, &wait); | 1440 | add_wait_queue(&info->open_wait, &wait); |
1441 | while (1) { | 1441 | while (1) { |
1442 | set_current_state(TASK_INTERRUPTIBLE); | 1442 | set_current_state(TASK_INTERRUPTIBLE); |
1443 | 1443 | ||
1444 | /* | 1444 | /* |
1445 | * If we have been hung up, tell userspace/restart open. | 1445 | * If we have been hung up, tell userspace/restart open. |
1446 | */ | 1446 | */ |
1447 | if (tty_hung_up_p(filp) || info->tty == NULL) | 1447 | if (tty_hung_up_p(filp) || info->tty == NULL) |
1448 | break; | 1448 | break; |
1449 | 1449 | ||
1450 | /* | 1450 | /* |
1451 | * If the port has been closed, tell userspace/restart open. | 1451 | * If the port has been closed, tell userspace/restart open. |
1452 | */ | 1452 | */ |
1453 | if (!(info->flags & UIF_INITIALIZED)) | 1453 | if (!(info->flags & UIF_INITIALIZED)) |
1454 | break; | 1454 | break; |
1455 | 1455 | ||
1456 | /* | 1456 | /* |
1457 | * If non-blocking mode is set, or CLOCAL mode is set, | 1457 | * If non-blocking mode is set, or CLOCAL mode is set, |
1458 | * we don't want to wait for the modem status lines to | 1458 | * we don't want to wait for the modem status lines to |
1459 | * indicate that the port is ready. | 1459 | * indicate that the port is ready. |
1460 | * | 1460 | * |
1461 | * Also, if the port is not enabled/configured, we want | 1461 | * Also, if the port is not enabled/configured, we want |
1462 | * to allow the open to succeed here. Note that we will | 1462 | * to allow the open to succeed here. Note that we will |
1463 | * have set TTY_IO_ERROR for a non-existent port. | 1463 | * have set TTY_IO_ERROR for a non-existent port. |
1464 | */ | 1464 | */ |
1465 | if ((filp->f_flags & O_NONBLOCK) || | 1465 | if ((filp->f_flags & O_NONBLOCK) || |
1466 | (info->tty->termios->c_cflag & CLOCAL) || | 1466 | (info->tty->termios->c_cflag & CLOCAL) || |
1467 | (info->tty->flags & (1 << TTY_IO_ERROR))) | 1467 | (info->tty->flags & (1 << TTY_IO_ERROR))) |
1468 | break; | 1468 | break; |
1469 | 1469 | ||
1470 | /* | 1470 | /* |
1471 | * Set DTR to allow modem to know we're waiting. Do | 1471 | * Set DTR to allow modem to know we're waiting. Do |
1472 | * not set RTS here - we want to make sure we catch | 1472 | * not set RTS here - we want to make sure we catch |
1473 | * the data from the modem. | 1473 | * the data from the modem. |
1474 | */ | 1474 | */ |
1475 | if (info->tty->termios->c_cflag & CBAUD) | 1475 | if (info->tty->termios->c_cflag & CBAUD) |
1476 | uart_set_mctrl(port, TIOCM_DTR); | 1476 | uart_set_mctrl(port, TIOCM_DTR); |
1477 | 1477 | ||
1478 | /* | 1478 | /* |
1479 | * and wait for the carrier to indicate that the | 1479 | * and wait for the carrier to indicate that the |
1480 | * modem is ready for us. | 1480 | * modem is ready for us. |
1481 | */ | 1481 | */ |
1482 | spin_lock_irq(&port->lock); | 1482 | spin_lock_irq(&port->lock); |
1483 | port->ops->enable_ms(port); | 1483 | port->ops->enable_ms(port); |
1484 | mctrl = port->ops->get_mctrl(port); | 1484 | mctrl = port->ops->get_mctrl(port); |
1485 | spin_unlock_irq(&port->lock); | 1485 | spin_unlock_irq(&port->lock); |
1486 | if (mctrl & TIOCM_CAR) | 1486 | if (mctrl & TIOCM_CAR) |
1487 | break; | 1487 | break; |
1488 | 1488 | ||
1489 | mutex_unlock(&state->mutex); | 1489 | mutex_unlock(&state->mutex); |
1490 | schedule(); | 1490 | schedule(); |
1491 | mutex_lock(&state->mutex); | 1491 | mutex_lock(&state->mutex); |
1492 | 1492 | ||
1493 | if (signal_pending(current)) | 1493 | if (signal_pending(current)) |
1494 | break; | 1494 | break; |
1495 | } | 1495 | } |
1496 | set_current_state(TASK_RUNNING); | 1496 | set_current_state(TASK_RUNNING); |
1497 | remove_wait_queue(&info->open_wait, &wait); | 1497 | remove_wait_queue(&info->open_wait, &wait); |
1498 | 1498 | ||
1499 | state->count++; | 1499 | state->count++; |
1500 | info->blocked_open--; | 1500 | info->blocked_open--; |
1501 | 1501 | ||
1502 | if (signal_pending(current)) | 1502 | if (signal_pending(current)) |
1503 | return -ERESTARTSYS; | 1503 | return -ERESTARTSYS; |
1504 | 1504 | ||
1505 | if (!info->tty || tty_hung_up_p(filp)) | 1505 | if (!info->tty || tty_hung_up_p(filp)) |
1506 | return -EAGAIN; | 1506 | return -EAGAIN; |
1507 | 1507 | ||
1508 | return 0; | 1508 | return 0; |
1509 | } | 1509 | } |
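For reference, this routine is what distinguishes a blocking open from an O_NONBLOCK open on a modem-controlled line. A minimal userspace sketch (device path assumed for illustration):

    /* Illustrative sketch: without O_NONBLOCK (and with CLOCAL clear),
     * open() sleeps in uart_block_til_ready() until carrier (DCD) is
     * asserted or a signal arrives; with O_NONBLOCK it returns at once.
     * "/dev/ttyS0" is an example only. */
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            /* Returns immediately, even with no carrier present. */
            int fd_nb = open("/dev/ttyS0", O_RDWR | O_NOCTTY | O_NONBLOCK);

            /* May sleep here until DCD is raised. */
            int fd_bl = open("/dev/ttyS0", O_RDWR | O_NOCTTY);

            if (fd_nb >= 0)
                    close(fd_nb);
            if (fd_bl >= 0)
                    close(fd_bl);
            return 0;
    }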
1510 | 1510 | ||
1511 | static struct uart_state *uart_get(struct uart_driver *drv, int line) | 1511 | static struct uart_state *uart_get(struct uart_driver *drv, int line) |
1512 | { | 1512 | { |
1513 | struct uart_state *state; | 1513 | struct uart_state *state; |
1514 | int ret = 0; | 1514 | int ret = 0; |
1515 | 1515 | ||
1516 | state = drv->state + line; | 1516 | state = drv->state + line; |
1517 | if (mutex_lock_interruptible(&state->mutex)) { | 1517 | if (mutex_lock_interruptible(&state->mutex)) { |
1518 | ret = -ERESTARTSYS; | 1518 | ret = -ERESTARTSYS; |
1519 | goto err; | 1519 | goto err; |
1520 | } | 1520 | } |
1521 | 1521 | ||
1522 | state->count++; | 1522 | state->count++; |
1523 | if (!state->port || state->port->flags & UPF_DEAD) { | 1523 | if (!state->port || state->port->flags & UPF_DEAD) { |
1524 | ret = -ENXIO; | 1524 | ret = -ENXIO; |
1525 | goto err_unlock; | 1525 | goto err_unlock; |
1526 | } | 1526 | } |
1527 | 1527 | ||
1528 | if (!state->info) { | 1528 | if (!state->info) { |
1529 | state->info = kzalloc(sizeof(struct uart_info), GFP_KERNEL); | 1529 | state->info = kzalloc(sizeof(struct uart_info), GFP_KERNEL); |
1530 | if (state->info) { | 1530 | if (state->info) { |
1531 | init_waitqueue_head(&state->info->open_wait); | 1531 | init_waitqueue_head(&state->info->open_wait); |
1532 | init_waitqueue_head(&state->info->delta_msr_wait); | 1532 | init_waitqueue_head(&state->info->delta_msr_wait); |
1533 | 1533 | ||
1534 | /* | 1534 | /* |
1535 | * Link the info into the other structures. | 1535 | * Link the info into the other structures. |
1536 | */ | 1536 | */ |
1537 | state->port->info = state->info; | 1537 | state->port->info = state->info; |
1538 | 1538 | ||
1539 | tasklet_init(&state->info->tlet, uart_tasklet_action, | 1539 | tasklet_init(&state->info->tlet, uart_tasklet_action, |
1540 | (unsigned long)state); | 1540 | (unsigned long)state); |
1541 | } else { | 1541 | } else { |
1542 | ret = -ENOMEM; | 1542 | ret = -ENOMEM; |
1543 | goto err_unlock; | 1543 | goto err_unlock; |
1544 | } | 1544 | } |
1545 | } | 1545 | } |
1546 | return state; | 1546 | return state; |
1547 | 1547 | ||
1548 | err_unlock: | 1548 | err_unlock: |
1549 | state->count--; | 1549 | state->count--; |
1550 | mutex_unlock(&state->mutex); | 1550 | mutex_unlock(&state->mutex); |
1551 | err: | 1551 | err: |
1552 | return ERR_PTR(ret); | 1552 | return ERR_PTR(ret); |
1553 | } | 1553 | } |
1554 | 1554 | ||
1555 | /* | 1555 | /* |
1556 | * In 2.4.5, calls to uart_open are serialised by the BKL in | 1556 | * calls to uart_open are serialised by the BKL in |
1557 | * linux/fs/devices.c:chrdev_open() | 1557 | * fs/char_dev.c:chrdev_open() |
1558 | * Note that if this fails, then uart_close() _will_ be called. | 1558 | * Note that if this fails, then uart_close() _will_ be called. |
1559 | * | 1559 | * |
1560 | * In time, we want to scrap the "opening nonpresent ports" | 1560 | * In time, we want to scrap the "opening nonpresent ports" |
1561 | * behaviour and implement an alternative way for setserial | 1561 | * behaviour and implement an alternative way for setserial |
1562 | * to set base addresses/ports/types. This will allow us to | 1562 | * to set base addresses/ports/types. This will allow us to |
1563 | * get rid of a certain amount of extra tests. | 1563 | * get rid of a certain amount of extra tests. |
1564 | */ | 1564 | */ |
1565 | static int uart_open(struct tty_struct *tty, struct file *filp) | 1565 | static int uart_open(struct tty_struct *tty, struct file *filp) |
1566 | { | 1566 | { |
1567 | struct uart_driver *drv = (struct uart_driver *)tty->driver->driver_state; | 1567 | struct uart_driver *drv = (struct uart_driver *)tty->driver->driver_state; |
1568 | struct uart_state *state; | 1568 | struct uart_state *state; |
1569 | int retval, line = tty->index; | 1569 | int retval, line = tty->index; |
1570 | 1570 | ||
1571 | BUG_ON(!kernel_locked()); | 1571 | BUG_ON(!kernel_locked()); |
1572 | pr_debug("uart_open(%d) called\n", line); | 1572 | pr_debug("uart_open(%d) called\n", line); |
1573 | 1573 | ||
1574 | /* | 1574 | /* |
1575 | * tty->driver->num won't change, so we won't fail here with | 1575 | * tty->driver->num won't change, so we won't fail here with |
1576 | * tty->driver_data set to something non-NULL (and therefore | 1576 | * tty->driver_data set to something non-NULL (and therefore |
1577 | * we won't get caught by uart_close()). | 1577 | * we won't get caught by uart_close()). |
1578 | */ | 1578 | */ |
1579 | retval = -ENODEV; | 1579 | retval = -ENODEV; |
1580 | if (line >= tty->driver->num) | 1580 | if (line >= tty->driver->num) |
1581 | goto fail; | 1581 | goto fail; |
1582 | 1582 | ||
1583 | /* | 1583 | /* |
1584 | * We take the semaphore inside uart_get to guarantee that we won't | 1584 | * We take the semaphore inside uart_get to guarantee that we won't |
1585 | * be re-entered while allocating the info structure, or while we | 1585 | * be re-entered while allocating the info structure, or while we |
1586 | * request any IRQs that the driver may need. This also has the nice | 1586 | * request any IRQs that the driver may need. This also has the nice |
1587 | * side-effect that it delays the action of uart_hangup, so we can | 1587 | * side-effect that it delays the action of uart_hangup, so we can |
1588 | * guarantee that info->tty will always contain something reasonable. | 1588 | * guarantee that info->tty will always contain something reasonable. |
1589 | */ | 1589 | */ |
1590 | state = uart_get(drv, line); | 1590 | state = uart_get(drv, line); |
1591 | if (IS_ERR(state)) { | 1591 | if (IS_ERR(state)) { |
1592 | retval = PTR_ERR(state); | 1592 | retval = PTR_ERR(state); |
1593 | goto fail; | 1593 | goto fail; |
1594 | } | 1594 | } |
1595 | 1595 | ||
1596 | /* | 1596 | /* |
1597 | * Once we set tty->driver_data here, we are guaranteed that | 1597 | * Once we set tty->driver_data here, we are guaranteed that |
1598 | * uart_close() will decrement the driver module use count. | 1598 | * uart_close() will decrement the driver module use count. |
1599 | * Any failures from here onwards should not touch the count. | 1599 | * Any failures from here onwards should not touch the count. |
1600 | */ | 1600 | */ |
1601 | tty->driver_data = state; | 1601 | tty->driver_data = state; |
1602 | tty->low_latency = (state->port->flags & UPF_LOW_LATENCY) ? 1 : 0; | 1602 | tty->low_latency = (state->port->flags & UPF_LOW_LATENCY) ? 1 : 0; |
1603 | tty->alt_speed = 0; | 1603 | tty->alt_speed = 0; |
1604 | state->info->tty = tty; | 1604 | state->info->tty = tty; |
1605 | 1605 | ||
1606 | /* | 1606 | /* |
1607 | * If the port is in the middle of closing, bail out now. | 1607 | * If the port is in the middle of closing, bail out now. |
1608 | */ | 1608 | */ |
1609 | if (tty_hung_up_p(filp)) { | 1609 | if (tty_hung_up_p(filp)) { |
1610 | retval = -EAGAIN; | 1610 | retval = -EAGAIN; |
1611 | state->count--; | 1611 | state->count--; |
1612 | mutex_unlock(&state->mutex); | 1612 | mutex_unlock(&state->mutex); |
1613 | goto fail; | 1613 | goto fail; |
1614 | } | 1614 | } |
1615 | 1615 | ||
1616 | /* | 1616 | /* |
1617 | * Make sure the device is in D0 state. | 1617 | * Make sure the device is in D0 state. |
1618 | */ | 1618 | */ |
1619 | if (state->count == 1) | 1619 | if (state->count == 1) |
1620 | uart_change_pm(state, 0); | 1620 | uart_change_pm(state, 0); |
1621 | 1621 | ||
1622 | /* | 1622 | /* |
1623 | * Start up the serial port. | 1623 | * Start up the serial port. |
1624 | */ | 1624 | */ |
1625 | retval = uart_startup(state, 0); | 1625 | retval = uart_startup(state, 0); |
1626 | 1626 | ||
1627 | /* | 1627 | /* |
1628 | * If we succeeded, wait until the port is ready. | 1628 | * If we succeeded, wait until the port is ready. |
1629 | */ | 1629 | */ |
1630 | if (retval == 0) | 1630 | if (retval == 0) |
1631 | retval = uart_block_til_ready(filp, state); | 1631 | retval = uart_block_til_ready(filp, state); |
1632 | mutex_unlock(&state->mutex); | 1632 | mutex_unlock(&state->mutex); |
1633 | 1633 | ||
1634 | /* | 1634 | /* |
1635 | * If this is the first open to succeed, adjust things to suit. | 1635 | * If this is the first open to succeed, adjust things to suit. |
1636 | */ | 1636 | */ |
1637 | if (retval == 0 && !(state->info->flags & UIF_NORMAL_ACTIVE)) { | 1637 | if (retval == 0 && !(state->info->flags & UIF_NORMAL_ACTIVE)) { |
1638 | state->info->flags |= UIF_NORMAL_ACTIVE; | 1638 | state->info->flags |= UIF_NORMAL_ACTIVE; |
1639 | 1639 | ||
1640 | uart_update_termios(state); | 1640 | uart_update_termios(state); |
1641 | } | 1641 | } |
1642 | 1642 | ||
1643 | fail: | 1643 | fail: |
1644 | return retval; | 1644 | return retval; |
1645 | } | 1645 | } |
1646 | 1646 | ||
1647 | static const char *uart_type(struct uart_port *port) | 1647 | static const char *uart_type(struct uart_port *port) |
1648 | { | 1648 | { |
1649 | const char *str = NULL; | 1649 | const char *str = NULL; |
1650 | 1650 | ||
1651 | if (port->ops->type) | 1651 | if (port->ops->type) |
1652 | str = port->ops->type(port); | 1652 | str = port->ops->type(port); |
1653 | 1653 | ||
1654 | if (!str) | 1654 | if (!str) |
1655 | str = "unknown"; | 1655 | str = "unknown"; |
1656 | 1656 | ||
1657 | return str; | 1657 | return str; |
1658 | } | 1658 | } |
1659 | 1659 | ||
1660 | #ifdef CONFIG_PROC_FS | 1660 | #ifdef CONFIG_PROC_FS |
1661 | 1661 | ||
1662 | static int uart_line_info(char *buf, struct uart_driver *drv, int i) | 1662 | static int uart_line_info(char *buf, struct uart_driver *drv, int i) |
1663 | { | 1663 | { |
1664 | struct uart_state *state = drv->state + i; | 1664 | struct uart_state *state = drv->state + i; |
1665 | int pm_state; | 1665 | int pm_state; |
1666 | struct uart_port *port = state->port; | 1666 | struct uart_port *port = state->port; |
1667 | char stat_buf[32]; | 1667 | char stat_buf[32]; |
1668 | unsigned int status; | 1668 | unsigned int status; |
1669 | int mmio, ret; | 1669 | int mmio, ret; |
1670 | 1670 | ||
1671 | if (!port) | 1671 | if (!port) |
1672 | return 0; | 1672 | return 0; |
1673 | 1673 | ||
1674 | mmio = port->iotype >= UPIO_MEM; | 1674 | mmio = port->iotype >= UPIO_MEM; |
1675 | ret = sprintf(buf, "%d: uart:%s %s%08llX irq:%d", | 1675 | ret = sprintf(buf, "%d: uart:%s %s%08llX irq:%d", |
1676 | port->line, uart_type(port), | 1676 | port->line, uart_type(port), |
1677 | mmio ? "mmio:0x" : "port:", | 1677 | mmio ? "mmio:0x" : "port:", |
1678 | mmio ? (unsigned long long)port->mapbase | 1678 | mmio ? (unsigned long long)port->mapbase |
1679 | : (unsigned long long) port->iobase, | 1679 | : (unsigned long long) port->iobase, |
1680 | port->irq); | 1680 | port->irq); |
1681 | 1681 | ||
1682 | if (port->type == PORT_UNKNOWN) { | 1682 | if (port->type == PORT_UNKNOWN) { |
1683 | strcat(buf, "\n"); | 1683 | strcat(buf, "\n"); |
1684 | return ret + 1; | 1684 | return ret + 1; |
1685 | } | 1685 | } |
1686 | 1686 | ||
1687 | if (capable(CAP_SYS_ADMIN)) { | 1687 | if (capable(CAP_SYS_ADMIN)) { |
1688 | mutex_lock(&state->mutex); | 1688 | mutex_lock(&state->mutex); |
1689 | pm_state = state->pm_state; | 1689 | pm_state = state->pm_state; |
1690 | if (pm_state) | 1690 | if (pm_state) |
1691 | uart_change_pm(state, 0); | 1691 | uart_change_pm(state, 0); |
1692 | spin_lock_irq(&port->lock); | 1692 | spin_lock_irq(&port->lock); |
1693 | status = port->ops->get_mctrl(port); | 1693 | status = port->ops->get_mctrl(port); |
1694 | spin_unlock_irq(&port->lock); | 1694 | spin_unlock_irq(&port->lock); |
1695 | if (pm_state) | 1695 | if (pm_state) |
1696 | uart_change_pm(state, pm_state); | 1696 | uart_change_pm(state, pm_state); |
1697 | mutex_unlock(&state->mutex); | 1697 | mutex_unlock(&state->mutex); |
1698 | 1698 | ||
1699 | ret += sprintf(buf + ret, " tx:%d rx:%d", | 1699 | ret += sprintf(buf + ret, " tx:%d rx:%d", |
1700 | port->icount.tx, port->icount.rx); | 1700 | port->icount.tx, port->icount.rx); |
1701 | if (port->icount.frame) | 1701 | if (port->icount.frame) |
1702 | ret += sprintf(buf + ret, " fe:%d", | 1702 | ret += sprintf(buf + ret, " fe:%d", |
1703 | port->icount.frame); | 1703 | port->icount.frame); |
1704 | if (port->icount.parity) | 1704 | if (port->icount.parity) |
1705 | ret += sprintf(buf + ret, " pe:%d", | 1705 | ret += sprintf(buf + ret, " pe:%d", |
1706 | port->icount.parity); | 1706 | port->icount.parity); |
1707 | if (port->icount.brk) | 1707 | if (port->icount.brk) |
1708 | ret += sprintf(buf + ret, " brk:%d", | 1708 | ret += sprintf(buf + ret, " brk:%d", |
1709 | port->icount.brk); | 1709 | port->icount.brk); |
1710 | if (port->icount.overrun) | 1710 | if (port->icount.overrun) |
1711 | ret += sprintf(buf + ret, " oe:%d", | 1711 | ret += sprintf(buf + ret, " oe:%d", |
1712 | port->icount.overrun); | 1712 | port->icount.overrun); |
1713 | 1713 | ||
1714 | #define INFOBIT(bit, str) \ | 1714 | #define INFOBIT(bit, str) \ |
1715 | if (port->mctrl & (bit)) \ | 1715 | if (port->mctrl & (bit)) \ |
1716 | strncat(stat_buf, (str), sizeof(stat_buf) - \ | 1716 | strncat(stat_buf, (str), sizeof(stat_buf) - \ |
1717 | strlen(stat_buf) - 2) | 1717 | strlen(stat_buf) - 2) |
1718 | #define STATBIT(bit, str) \ | 1718 | #define STATBIT(bit, str) \ |
1719 | if (status & (bit)) \ | 1719 | if (status & (bit)) \ |
1720 | strncat(stat_buf, (str), sizeof(stat_buf) - \ | 1720 | strncat(stat_buf, (str), sizeof(stat_buf) - \ |
1721 | strlen(stat_buf) - 2) | 1721 | strlen(stat_buf) - 2) |
1722 | 1722 | ||
1723 | stat_buf[0] = '\0'; | 1723 | stat_buf[0] = '\0'; |
1724 | stat_buf[1] = '\0'; | 1724 | stat_buf[1] = '\0'; |
1725 | INFOBIT(TIOCM_RTS, "|RTS"); | 1725 | INFOBIT(TIOCM_RTS, "|RTS"); |
1726 | STATBIT(TIOCM_CTS, "|CTS"); | 1726 | STATBIT(TIOCM_CTS, "|CTS"); |
1727 | INFOBIT(TIOCM_DTR, "|DTR"); | 1727 | INFOBIT(TIOCM_DTR, "|DTR"); |
1728 | STATBIT(TIOCM_DSR, "|DSR"); | 1728 | STATBIT(TIOCM_DSR, "|DSR"); |
1729 | STATBIT(TIOCM_CAR, "|CD"); | 1729 | STATBIT(TIOCM_CAR, "|CD"); |
1730 | STATBIT(TIOCM_RNG, "|RI"); | 1730 | STATBIT(TIOCM_RNG, "|RI"); |
1731 | if (stat_buf[0]) | 1731 | if (stat_buf[0]) |
1732 | stat_buf[0] = ' '; | 1732 | stat_buf[0] = ' '; |
1733 | strcat(stat_buf, "\n"); | 1733 | strcat(stat_buf, "\n"); |
1734 | 1734 | ||
1735 | ret += sprintf(buf + ret, stat_buf); | 1735 | ret += sprintf(buf + ret, stat_buf); |
1736 | } else { | 1736 | } else { |
1737 | strcat(buf, "\n"); | 1737 | strcat(buf, "\n"); |
1738 | ret++; | 1738 | ret++; |
1739 | } | 1739 | } |
1740 | #undef STATBIT | 1740 | #undef STATBIT |
1741 | #undef INFOBIT | 1741 | #undef INFOBIT |
1742 | return ret; | 1742 | return ret; |
1743 | } | 1743 | } |
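Taken together, a line emitted by uart_line_info() for a privileged reader looks roughly like the following (all values invented for illustration); an unprivileged reader only gets the part up to "irq:":

    0: uart:16550A port:000003F8 irq:4 tx:1209 rx:853 RTS|DTR|DSR|CD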
1744 | 1744 | ||
1745 | static int uart_read_proc(char *page, char **start, off_t off, | 1745 | static int uart_read_proc(char *page, char **start, off_t off, |
1746 | int count, int *eof, void *data) | 1746 | int count, int *eof, void *data) |
1747 | { | 1747 | { |
1748 | struct tty_driver *ttydrv = data; | 1748 | struct tty_driver *ttydrv = data; |
1749 | struct uart_driver *drv = ttydrv->driver_state; | 1749 | struct uart_driver *drv = ttydrv->driver_state; |
1750 | int i, len = 0, l; | 1750 | int i, len = 0, l; |
1751 | off_t begin = 0; | 1751 | off_t begin = 0; |
1752 | 1752 | ||
1753 | len += sprintf(page, "serinfo:1.0 driver%s%s revision:%s\n", | 1753 | len += sprintf(page, "serinfo:1.0 driver%s%s revision:%s\n", |
1754 | "", "", ""); | 1754 | "", "", ""); |
1755 | for (i = 0; i < drv->nr && len < PAGE_SIZE - 96; i++) { | 1755 | for (i = 0; i < drv->nr && len < PAGE_SIZE - 96; i++) { |
1756 | l = uart_line_info(page + len, drv, i); | 1756 | l = uart_line_info(page + len, drv, i); |
1757 | len += l; | 1757 | len += l; |
1758 | if (len + begin > off + count) | 1758 | if (len + begin > off + count) |
1759 | goto done; | 1759 | goto done; |
1760 | if (len + begin < off) { | 1760 | if (len + begin < off) { |
1761 | begin += len; | 1761 | begin += len; |
1762 | len = 0; | 1762 | len = 0; |
1763 | } | 1763 | } |
1764 | } | 1764 | } |
1765 | *eof = 1; | 1765 | *eof = 1; |
1766 | done: | 1766 | done: |
1767 | if (off >= len + begin) | 1767 | if (off >= len + begin) |
1768 | return 0; | 1768 | return 0; |
1769 | *start = page + (off - begin); | 1769 | *start = page + (off - begin); |
1770 | return (count < begin + len - off) ? count : (begin + len - off); | 1770 | return (count < begin + len - off) ? count : (begin + len - off); |
1771 | } | 1771 | } |
1772 | #endif | 1772 | #endif |
1773 | 1773 | ||
1774 | #ifdef CONFIG_SERIAL_CORE_CONSOLE | 1774 | #ifdef CONFIG_SERIAL_CORE_CONSOLE |
1775 | /* | 1775 | /* |
1776 | * uart_console_write - write a console message to a serial port | 1776 | * uart_console_write - write a console message to a serial port |
1777 | * @port: the port to write the message | 1777 | * @port: the port to write the message |
1778 | * @s: array of characters | 1778 | * @s: array of characters |
1779 | * @count: number of characters in string to write | 1779 | * @count: number of characters in string to write |
1780 | * @write: function to write character to port | 1780 | * @write: function to write character to port |
1781 | */ | 1781 | */ |
1782 | void uart_console_write(struct uart_port *port, const char *s, | 1782 | void uart_console_write(struct uart_port *port, const char *s, |
1783 | unsigned int count, | 1783 | unsigned int count, |
1784 | void (*putchar)(struct uart_port *, int)) | 1784 | void (*putchar)(struct uart_port *, int)) |
1785 | { | 1785 | { |
1786 | unsigned int i; | 1786 | unsigned int i; |
1787 | 1787 | ||
1788 | for (i = 0; i < count; i++, s++) { | 1788 | for (i = 0; i < count; i++, s++) { |
1789 | if (*s == '\n') | 1789 | if (*s == '\n') |
1790 | putchar(port, '\r'); | 1790 | putchar(port, '\r'); |
1791 | putchar(port, *s); | 1791 | putchar(port, *s); |
1792 | } | 1792 | } |
1793 | } | 1793 | } |
1794 | EXPORT_SYMBOL_GPL(uart_console_write); | 1794 | EXPORT_SYMBOL_GPL(uart_console_write); |
1795 | 1795 | ||
1796 | /* | 1796 | /* |
1797 | * Check whether an invalid uart number has been specified, and | 1797 | * Check whether an invalid uart number has been specified, and |
1798 | * if so, search for the first available port that does have | 1798 | * if so, search for the first available port that does have |
1799 | * console support. | 1799 | * console support. |
1800 | */ | 1800 | */ |
1801 | struct uart_port * __init | 1801 | struct uart_port * __init |
1802 | uart_get_console(struct uart_port *ports, int nr, struct console *co) | 1802 | uart_get_console(struct uart_port *ports, int nr, struct console *co) |
1803 | { | 1803 | { |
1804 | int idx = co->index; | 1804 | int idx = co->index; |
1805 | 1805 | ||
1806 | if (idx < 0 || idx >= nr || (ports[idx].iobase == 0 && | 1806 | if (idx < 0 || idx >= nr || (ports[idx].iobase == 0 && |
1807 | ports[idx].membase == NULL)) | 1807 | ports[idx].membase == NULL)) |
1808 | for (idx = 0; idx < nr; idx++) | 1808 | for (idx = 0; idx < nr; idx++) |
1809 | if (ports[idx].iobase != 0 || | 1809 | if (ports[idx].iobase != 0 || |
1810 | ports[idx].membase != NULL) | 1810 | ports[idx].membase != NULL) |
1811 | break; | 1811 | break; |
1812 | 1812 | ||
1813 | co->index = idx; | 1813 | co->index = idx; |
1814 | 1814 | ||
1815 | return ports + idx; | 1815 | return ports + idx; |
1816 | } | 1816 | } |
1817 | 1817 | ||
1818 | /** | 1818 | /** |
1819 | * uart_parse_options - Parse serial port baud/parity/bits/flow control. | 1819 | * uart_parse_options - Parse serial port baud/parity/bits/flow control. |
1820 | * @options: pointer to option string | 1820 | * @options: pointer to option string |
1821 | * @baud: pointer to an 'int' variable for the baud rate. | 1821 | * @baud: pointer to an 'int' variable for the baud rate. |
1822 | * @parity: pointer to an 'int' variable for the parity. | 1822 | * @parity: pointer to an 'int' variable for the parity. |
1823 | * @bits: pointer to an 'int' variable for the number of data bits. | 1823 | * @bits: pointer to an 'int' variable for the number of data bits. |
1824 | * @flow: pointer to an 'int' variable for the flow control character. | 1824 | * @flow: pointer to an 'int' variable for the flow control character. |
1825 | * | 1825 | * |
1826 | * uart_parse_options decodes a string containing the serial console | 1826 | * uart_parse_options decodes a string containing the serial console |
1827 | * options. The format of the string is <baud><parity><bits><flow>, | 1827 | * options. The format of the string is <baud><parity><bits><flow>, |
1828 | * eg: 115200n8r | 1828 | * eg: 115200n8r |
1829 | */ | 1829 | */ |
1830 | void __init | 1830 | void __init |
1831 | uart_parse_options(char *options, int *baud, int *parity, int *bits, int *flow) | 1831 | uart_parse_options(char *options, int *baud, int *parity, int *bits, int *flow) |
1832 | { | 1832 | { |
1833 | char *s = options; | 1833 | char *s = options; |
1834 | 1834 | ||
1835 | *baud = simple_strtoul(s, NULL, 10); | 1835 | *baud = simple_strtoul(s, NULL, 10); |
1836 | while (*s >= '0' && *s <= '9') | 1836 | while (*s >= '0' && *s <= '9') |
1837 | s++; | 1837 | s++; |
1838 | if (*s) | 1838 | if (*s) |
1839 | *parity = *s++; | 1839 | *parity = *s++; |
1840 | if (*s) | 1840 | if (*s) |
1841 | *bits = *s++ - '0'; | 1841 | *bits = *s++ - '0'; |
1842 | if (*s) | 1842 | if (*s) |
1843 | *flow = *s; | 1843 | *flow = *s; |
1844 | } | 1844 | } |
1845 | 1845 | ||
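As a quick illustration (not from this patch): feeding the string "115200n8r" through uart_parse_options() yields baud = 115200, parity = 'n', bits = 8 and flow = 'r'. Callers normally seed the variables with defaults first, since a short option string leaves the trailing values untouched:

	int baud = 9600, parity = 'n', bits = 8, flow = 'n';

	if (options)	/* e.g. the tail of console=ttyMY0,115200n8r */
		uart_parse_options(options, &baud, &parity, &bits, &flow);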
1846 | struct baud_rates { | 1846 | struct baud_rates { |
1847 | unsigned int rate; | 1847 | unsigned int rate; |
1848 | unsigned int cflag; | 1848 | unsigned int cflag; |
1849 | }; | 1849 | }; |
1850 | 1850 | ||
1851 | static const struct baud_rates baud_rates[] = { | 1851 | static const struct baud_rates baud_rates[] = { |
1852 | { 921600, B921600 }, | 1852 | { 921600, B921600 }, |
1853 | { 460800, B460800 }, | 1853 | { 460800, B460800 }, |
1854 | { 230400, B230400 }, | 1854 | { 230400, B230400 }, |
1855 | { 115200, B115200 }, | 1855 | { 115200, B115200 }, |
1856 | { 57600, B57600 }, | 1856 | { 57600, B57600 }, |
1857 | { 38400, B38400 }, | 1857 | { 38400, B38400 }, |
1858 | { 19200, B19200 }, | 1858 | { 19200, B19200 }, |
1859 | { 9600, B9600 }, | 1859 | { 9600, B9600 }, |
1860 | { 4800, B4800 }, | 1860 | { 4800, B4800 }, |
1861 | { 2400, B2400 }, | 1861 | { 2400, B2400 }, |
1862 | { 1200, B1200 }, | 1862 | { 1200, B1200 }, |
1863 | { 0, B38400 } | 1863 | { 0, B38400 } |
1864 | }; | 1864 | }; |
1865 | 1865 | ||
1866 | /** | 1866 | /** |
1867 | * uart_set_options - setup the serial console parameters | 1867 | * uart_set_options - setup the serial console parameters |
1868 | * @port: pointer to the serial ports uart_port structure | 1868 | * @port: pointer to the serial ports uart_port structure |
1869 | * @co: console pointer | 1869 | * @co: console pointer |
1870 | * @baud: baud rate | 1870 | * @baud: baud rate |
1871 | * @parity: parity character - 'n' (none), 'o' (odd), 'e' (even) | 1871 | * @parity: parity character - 'n' (none), 'o' (odd), 'e' (even) |
1872 | * @bits: number of data bits | 1872 | * @bits: number of data bits |
1873 | * @flow: flow control character - 'r' (rts) | 1873 | * @flow: flow control character - 'r' (rts) |
1874 | */ | 1874 | */ |
1875 | int __init | 1875 | int __init |
1876 | uart_set_options(struct uart_port *port, struct console *co, | 1876 | uart_set_options(struct uart_port *port, struct console *co, |
1877 | int baud, int parity, int bits, int flow) | 1877 | int baud, int parity, int bits, int flow) |
1878 | { | 1878 | { |
1879 | struct ktermios termios; | 1879 | struct ktermios termios; |
1880 | static struct ktermios dummy; | 1880 | static struct ktermios dummy; |
1881 | int i; | 1881 | int i; |
1882 | 1882 | ||
1883 | /* | 1883 | /* |
1884 | * Ensure that the serial console lock is initialised | 1884 | * Ensure that the serial console lock is initialised |
1885 | * early. | 1885 | * early. |
1886 | */ | 1886 | */ |
1887 | spin_lock_init(&port->lock); | 1887 | spin_lock_init(&port->lock); |
1888 | lockdep_set_class(&port->lock, &port_lock_key); | 1888 | lockdep_set_class(&port->lock, &port_lock_key); |
1889 | 1889 | ||
1890 | memset(&termios, 0, sizeof(struct ktermios)); | 1890 | memset(&termios, 0, sizeof(struct ktermios)); |
1891 | 1891 | ||
1892 | termios.c_cflag = CREAD | HUPCL | CLOCAL; | 1892 | termios.c_cflag = CREAD | HUPCL | CLOCAL; |
1893 | 1893 | ||
1894 | /* | 1894 | /* |
1895 | * Construct a cflag setting. | 1895 | * Construct a cflag setting. |
1896 | */ | 1896 | */ |
1897 | for (i = 0; baud_rates[i].rate; i++) | 1897 | for (i = 0; baud_rates[i].rate; i++) |
1898 | if (baud_rates[i].rate <= baud) | 1898 | if (baud_rates[i].rate <= baud) |
1899 | break; | 1899 | break; |
1900 | 1900 | ||
1901 | termios.c_cflag |= baud_rates[i].cflag; | 1901 | termios.c_cflag |= baud_rates[i].cflag; |
1902 | 1902 | ||
1903 | if (bits == 7) | 1903 | if (bits == 7) |
1904 | termios.c_cflag |= CS7; | 1904 | termios.c_cflag |= CS7; |
1905 | else | 1905 | else |
1906 | termios.c_cflag |= CS8; | 1906 | termios.c_cflag |= CS8; |
1907 | 1907 | ||
1908 | switch (parity) { | 1908 | switch (parity) { |
1909 | case 'o': case 'O': | 1909 | case 'o': case 'O': |
1910 | termios.c_cflag |= PARODD; | 1910 | termios.c_cflag |= PARODD; |
1911 | /*fall through*/ | 1911 | /*fall through*/ |
1912 | case 'e': case 'E': | 1912 | case 'e': case 'E': |
1913 | termios.c_cflag |= PARENB; | 1913 | termios.c_cflag |= PARENB; |
1914 | break; | 1914 | break; |
1915 | } | 1915 | } |
1916 | 1916 | ||
1917 | if (flow == 'r') | 1917 | if (flow == 'r') |
1918 | termios.c_cflag |= CRTSCTS; | 1918 | termios.c_cflag |= CRTSCTS; |
1919 | 1919 | ||
1920 | /* | 1920 | /* |
1921 | * Some UARTs on the other end can't cope without flow control, | 1921 | * Some UARTs on the other end can't cope without flow control, |
1922 | * so assert DTR on the host UART to keep them happy. | 1922 | * so assert DTR on the host UART to keep them happy. |
1923 | */ | 1923 | */ |
1924 | port->mctrl |= TIOCM_DTR; | 1924 | port->mctrl |= TIOCM_DTR; |
1925 | 1925 | ||
1926 | port->ops->set_termios(port, &termios, &dummy); | 1926 | port->ops->set_termios(port, &termios, &dummy); |
1927 | co->cflag = termios.c_cflag; | 1927 | co->cflag = termios.c_cflag; |
1928 | 1928 | ||
1929 | return 0; | 1929 | return 0; |
1930 | } | 1930 | } |
1931 | #endif /* CONFIG_SERIAL_CORE_CONSOLE */ | 1931 | #endif /* CONFIG_SERIAL_CORE_CONSOLE */ |
1932 | 1932 | ||
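Taken together, a driver's console ->setup() hook usually chains uart_get_console(), uart_parse_options() and uart_set_options(). A minimal sketch, assuming a hypothetical myuart_ports array and MYUART_NR_PORTS count:

static int __init myuart_console_setup(struct console *co, char *options)
{
	struct uart_port *port;
	int baud = 115200, parity = 'n', bits = 8, flow = 'n';

	/* Fall back to the first usable port if co->index is out of range. */
	port = uart_get_console(myuart_ports, MYUART_NR_PORTS, co);

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	return uart_set_options(port, co, baud, parity, bits, flow);
}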
1933 | static void uart_change_pm(struct uart_state *state, int pm_state) | 1933 | static void uart_change_pm(struct uart_state *state, int pm_state) |
1934 | { | 1934 | { |
1935 | struct uart_port *port = state->port; | 1935 | struct uart_port *port = state->port; |
1936 | 1936 | ||
1937 | if (state->pm_state != pm_state) { | 1937 | if (state->pm_state != pm_state) { |
1938 | if (port->ops->pm) | 1938 | if (port->ops->pm) |
1939 | port->ops->pm(port, pm_state, state->pm_state); | 1939 | port->ops->pm(port, pm_state, state->pm_state); |
1940 | state->pm_state = pm_state; | 1940 | state->pm_state = pm_state; |
1941 | } | 1941 | } |
1942 | } | 1942 | } |
1943 | 1943 | ||
1944 | struct uart_match { | 1944 | struct uart_match { |
1945 | struct uart_port *port; | 1945 | struct uart_port *port; |
1946 | struct uart_driver *driver; | 1946 | struct uart_driver *driver; |
1947 | }; | 1947 | }; |
1948 | 1948 | ||
1949 | static int serial_match_port(struct device *dev, void *data) | 1949 | static int serial_match_port(struct device *dev, void *data) |
1950 | { | 1950 | { |
1951 | struct uart_match *match = data; | 1951 | struct uart_match *match = data; |
1952 | dev_t devt = MKDEV(match->driver->major, match->driver->minor) + match->port->line; | 1952 | dev_t devt = MKDEV(match->driver->major, match->driver->minor) + match->port->line; |
1953 | 1953 | ||
1954 | return dev->devt == devt; /* Actually, only one tty per port */ | 1954 | return dev->devt == devt; /* Actually, only one tty per port */ |
1955 | } | 1955 | } |
1956 | 1956 | ||
1957 | int uart_suspend_port(struct uart_driver *drv, struct uart_port *port) | 1957 | int uart_suspend_port(struct uart_driver *drv, struct uart_port *port) |
1958 | { | 1958 | { |
1959 | struct uart_state *state = drv->state + port->line; | 1959 | struct uart_state *state = drv->state + port->line; |
1960 | struct device *tty_dev; | 1960 | struct device *tty_dev; |
1961 | struct uart_match match = {port, drv}; | 1961 | struct uart_match match = {port, drv}; |
1962 | 1962 | ||
1963 | mutex_lock(&state->mutex); | 1963 | mutex_lock(&state->mutex); |
1964 | 1964 | ||
1965 | if (!console_suspend_enabled && uart_console(port)) { | 1965 | if (!console_suspend_enabled && uart_console(port)) { |
1966 | /* we're going to avoid suspending serial console */ | 1966 | /* we're going to avoid suspending serial console */ |
1967 | mutex_unlock(&state->mutex); | 1967 | mutex_unlock(&state->mutex); |
1968 | return 0; | 1968 | return 0; |
1969 | } | 1969 | } |
1970 | 1970 | ||
1971 | tty_dev = device_find_child(port->dev, &match, serial_match_port); | 1971 | tty_dev = device_find_child(port->dev, &match, serial_match_port); |
1972 | if (device_may_wakeup(tty_dev)) { | 1972 | if (device_may_wakeup(tty_dev)) { |
1973 | enable_irq_wake(port->irq); | 1973 | enable_irq_wake(port->irq); |
1974 | put_device(tty_dev); | 1974 | put_device(tty_dev); |
1975 | mutex_unlock(&state->mutex); | 1975 | mutex_unlock(&state->mutex); |
1976 | return 0; | 1976 | return 0; |
1977 | } | 1977 | } |
1978 | port->suspended = 1; | 1978 | port->suspended = 1; |
1979 | 1979 | ||
1980 | if (state->info && state->info->flags & UIF_INITIALIZED) { | 1980 | if (state->info && state->info->flags & UIF_INITIALIZED) { |
1981 | const struct uart_ops *ops = port->ops; | 1981 | const struct uart_ops *ops = port->ops; |
1982 | int tries; | 1982 | int tries; |
1983 | 1983 | ||
1984 | state->info->flags = (state->info->flags & ~UIF_INITIALIZED) | 1984 | state->info->flags = (state->info->flags & ~UIF_INITIALIZED) |
1985 | | UIF_SUSPENDED; | 1985 | | UIF_SUSPENDED; |
1986 | 1986 | ||
1987 | spin_lock_irq(&port->lock); | 1987 | spin_lock_irq(&port->lock); |
1988 | ops->stop_tx(port); | 1988 | ops->stop_tx(port); |
1989 | ops->set_mctrl(port, 0); | 1989 | ops->set_mctrl(port, 0); |
1990 | ops->stop_rx(port); | 1990 | ops->stop_rx(port); |
1991 | spin_unlock_irq(&port->lock); | 1991 | spin_unlock_irq(&port->lock); |
1992 | 1992 | ||
1993 | /* | 1993 | /* |
1994 | * Wait for the transmitter to empty. | 1994 | * Wait for the transmitter to empty. |
1995 | */ | 1995 | */ |
1996 | for (tries = 3; !ops->tx_empty(port) && tries; tries--) | 1996 | for (tries = 3; !ops->tx_empty(port) && tries; tries--) |
1997 | msleep(10); | 1997 | msleep(10); |
1998 | if (!tries) | 1998 | if (!tries) |
1999 | printk(KERN_ERR "%s%s%s%d: Unable to drain " | 1999 | printk(KERN_ERR "%s%s%s%d: Unable to drain " |
2000 | "transmitter\n", | 2000 | "transmitter\n", |
2001 | port->dev ? port->dev->bus_id : "", | 2001 | port->dev ? port->dev->bus_id : "", |
2002 | port->dev ? ": " : "", | 2002 | port->dev ? ": " : "", |
2003 | drv->dev_name, port->line); | 2003 | drv->dev_name, port->line); |
2004 | 2004 | ||
2005 | ops->shutdown(port); | 2005 | ops->shutdown(port); |
2006 | } | 2006 | } |
2007 | 2007 | ||
2008 | /* | 2008 | /* |
2009 | * Disable the console device before suspending. | 2009 | * Disable the console device before suspending. |
2010 | */ | 2010 | */ |
2011 | if (uart_console(port)) | 2011 | if (uart_console(port)) |
2012 | console_stop(port->cons); | 2012 | console_stop(port->cons); |
2013 | 2013 | ||
2014 | uart_change_pm(state, 3); | 2014 | uart_change_pm(state, 3); |
2015 | 2015 | ||
2016 | mutex_unlock(&state->mutex); | 2016 | mutex_unlock(&state->mutex); |
2017 | 2017 | ||
2018 | return 0; | 2018 | return 0; |
2019 | } | 2019 | } |
2020 | 2020 | ||
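A sketch of how a platform driver would normally forward its suspend callback here; myuart_driver and the drvdata convention are assumptions, not part of this commit:

static int myuart_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct uart_port *port = platform_get_drvdata(pdev);

	return uart_suspend_port(&myuart_driver, port);
}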
2021 | int uart_resume_port(struct uart_driver *drv, struct uart_port *port) | 2021 | int uart_resume_port(struct uart_driver *drv, struct uart_port *port) |
2022 | { | 2022 | { |
2023 | struct uart_state *state = drv->state + port->line; | 2023 | struct uart_state *state = drv->state + port->line; |
2024 | 2024 | ||
2025 | mutex_lock(&state->mutex); | 2025 | mutex_lock(&state->mutex); |
2026 | 2026 | ||
2027 | if (!console_suspend_enabled && uart_console(port)) { | 2027 | if (!console_suspend_enabled && uart_console(port)) { |
2028 | /* no need to resume serial console, it wasn't suspended */ | 2028 | /* no need to resume serial console, it wasn't suspended */ |
2029 | mutex_unlock(&state->mutex); | 2029 | mutex_unlock(&state->mutex); |
2030 | return 0; | 2030 | return 0; |
2031 | } | 2031 | } |
2032 | 2032 | ||
2033 | if (!port->suspended) { | 2033 | if (!port->suspended) { |
2034 | disable_irq_wake(port->irq); | 2034 | disable_irq_wake(port->irq); |
2035 | mutex_unlock(&state->mutex); | 2035 | mutex_unlock(&state->mutex); |
2036 | return 0; | 2036 | return 0; |
2037 | } | 2037 | } |
2038 | port->suspended = 0; | 2038 | port->suspended = 0; |
2039 | 2039 | ||
2040 | /* | 2040 | /* |
2041 | * Re-enable the console device after suspending. | 2041 | * Re-enable the console device after suspending. |
2042 | */ | 2042 | */ |
2043 | if (uart_console(port)) { | 2043 | if (uart_console(port)) { |
2044 | struct ktermios termios; | 2044 | struct ktermios termios; |
2045 | 2045 | ||
2046 | /* | 2046 | /* |
2047 | * First try to use the console cflag setting. | 2047 | * First try to use the console cflag setting. |
2048 | */ | 2048 | */ |
2049 | memset(&termios, 0, sizeof(struct ktermios)); | 2049 | memset(&termios, 0, sizeof(struct ktermios)); |
2050 | termios.c_cflag = port->cons->cflag; | 2050 | termios.c_cflag = port->cons->cflag; |
2051 | 2051 | ||
2052 | /* | 2052 | /* |
2053 | * If that's unset, use the tty termios setting. | 2053 | * If that's unset, use the tty termios setting. |
2054 | */ | 2054 | */ |
2055 | if (state->info && state->info->tty && termios.c_cflag == 0) | 2055 | if (state->info && state->info->tty && termios.c_cflag == 0) |
2056 | termios = *state->info->tty->termios; | 2056 | termios = *state->info->tty->termios; |
2057 | 2057 | ||
2058 | uart_change_pm(state, 0); | 2058 | uart_change_pm(state, 0); |
2059 | port->ops->set_termios(port, &termios, NULL); | 2059 | port->ops->set_termios(port, &termios, NULL); |
2060 | console_start(port->cons); | 2060 | console_start(port->cons); |
2061 | } | 2061 | } |
2062 | 2062 | ||
2063 | if (state->info && state->info->flags & UIF_SUSPENDED) { | 2063 | if (state->info && state->info->flags & UIF_SUSPENDED) { |
2064 | const struct uart_ops *ops = port->ops; | 2064 | const struct uart_ops *ops = port->ops; |
2065 | int ret; | 2065 | int ret; |
2066 | 2066 | ||
2067 | uart_change_pm(state, 0); | 2067 | uart_change_pm(state, 0); |
2068 | ops->set_mctrl(port, 0); | 2068 | ops->set_mctrl(port, 0); |
2069 | ret = ops->startup(port); | 2069 | ret = ops->startup(port); |
2070 | if (ret == 0) { | 2070 | if (ret == 0) { |
2071 | uart_change_speed(state, NULL); | 2071 | uart_change_speed(state, NULL); |
2072 | spin_lock_irq(&port->lock); | 2072 | spin_lock_irq(&port->lock); |
2073 | ops->set_mctrl(port, port->mctrl); | 2073 | ops->set_mctrl(port, port->mctrl); |
2074 | ops->start_tx(port); | 2074 | ops->start_tx(port); |
2075 | spin_unlock_irq(&port->lock); | 2075 | spin_unlock_irq(&port->lock); |
2076 | state->info->flags |= UIF_INITIALIZED; | 2076 | state->info->flags |= UIF_INITIALIZED; |
2077 | } else { | 2077 | } else { |
2078 | /* | 2078 | /* |
2079 | * Failed to resume - maybe hardware went away? | 2079 | * Failed to resume - maybe hardware went away? |
2080 | * Clear the "initialized" flag so we won't try | 2080 | * Clear the "initialized" flag so we won't try |
2081 | * to call the low level driver's shutdown method. | 2081 | * to call the low level driver's shutdown method. |
2082 | */ | 2082 | */ |
2083 | uart_shutdown(state); | 2083 | uart_shutdown(state); |
2084 | } | 2084 | } |
2085 | 2085 | ||
2086 | state->info->flags &= ~UIF_SUSPENDED; | 2086 | state->info->flags &= ~UIF_SUSPENDED; |
2087 | } | 2087 | } |
2088 | 2088 | ||
2089 | mutex_unlock(&state->mutex); | 2089 | mutex_unlock(&state->mutex); |
2090 | 2090 | ||
2091 | return 0; | 2091 | return 0; |
2092 | } | 2092 | } |
2093 | 2093 | ||
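The matching resume hook is the mirror image (same assumptions as the suspend sketch above):

static int myuart_resume(struct platform_device *pdev)
{
	struct uart_port *port = platform_get_drvdata(pdev);

	return uart_resume_port(&myuart_driver, port);
}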
2094 | static inline void | 2094 | static inline void |
2095 | uart_report_port(struct uart_driver *drv, struct uart_port *port) | 2095 | uart_report_port(struct uart_driver *drv, struct uart_port *port) |
2096 | { | 2096 | { |
2097 | char address[64]; | 2097 | char address[64]; |
2098 | 2098 | ||
2099 | switch (port->iotype) { | 2099 | switch (port->iotype) { |
2100 | case UPIO_PORT: | 2100 | case UPIO_PORT: |
2101 | snprintf(address, sizeof(address), | 2101 | snprintf(address, sizeof(address), |
2102 | "I/O 0x%x", port->iobase); | 2102 | "I/O 0x%x", port->iobase); |
2103 | break; | 2103 | break; |
2104 | case UPIO_HUB6: | 2104 | case UPIO_HUB6: |
2105 | snprintf(address, sizeof(address), | 2105 | snprintf(address, sizeof(address), |
2106 | "I/O 0x%x offset 0x%x", port->iobase, port->hub6); | 2106 | "I/O 0x%x offset 0x%x", port->iobase, port->hub6); |
2107 | break; | 2107 | break; |
2108 | case UPIO_MEM: | 2108 | case UPIO_MEM: |
2109 | case UPIO_MEM32: | 2109 | case UPIO_MEM32: |
2110 | case UPIO_AU: | 2110 | case UPIO_AU: |
2111 | case UPIO_TSI: | 2111 | case UPIO_TSI: |
2112 | case UPIO_DWAPB: | 2112 | case UPIO_DWAPB: |
2113 | snprintf(address, sizeof(address), | 2113 | snprintf(address, sizeof(address), |
2114 | "MMIO 0x%llx", (unsigned long long)port->mapbase); | 2114 | "MMIO 0x%llx", (unsigned long long)port->mapbase); |
2115 | break; | 2115 | break; |
2116 | default: | 2116 | default: |
2117 | strlcpy(address, "*unknown*", sizeof(address)); | 2117 | strlcpy(address, "*unknown*", sizeof(address)); |
2118 | break; | 2118 | break; |
2119 | } | 2119 | } |
2120 | 2120 | ||
2121 | printk(KERN_INFO "%s%s%s%d at %s (irq = %d) is a %s\n", | 2121 | printk(KERN_INFO "%s%s%s%d at %s (irq = %d) is a %s\n", |
2122 | port->dev ? port->dev->bus_id : "", | 2122 | port->dev ? port->dev->bus_id : "", |
2123 | port->dev ? ": " : "", | 2123 | port->dev ? ": " : "", |
2124 | drv->dev_name, port->line, address, port->irq, uart_type(port)); | 2124 | drv->dev_name, port->line, address, port->irq, uart_type(port)); |
2125 | } | 2125 | } |
2126 | 2126 | ||
2127 | static void | 2127 | static void |
2128 | uart_configure_port(struct uart_driver *drv, struct uart_state *state, | 2128 | uart_configure_port(struct uart_driver *drv, struct uart_state *state, |
2129 | struct uart_port *port) | 2129 | struct uart_port *port) |
2130 | { | 2130 | { |
2131 | unsigned int flags; | 2131 | unsigned int flags; |
2132 | 2132 | ||
2133 | /* | 2133 | /* |
2134 | * If there isn't a port here, don't do anything further. | 2134 | * If there isn't a port here, don't do anything further. |
2135 | */ | 2135 | */ |
2136 | if (!port->iobase && !port->mapbase && !port->membase) | 2136 | if (!port->iobase && !port->mapbase && !port->membase) |
2137 | return; | 2137 | return; |
2138 | 2138 | ||
2139 | /* | 2139 | /* |
2140 | * Now do the auto configuration stuff. Note that config_port | 2140 | * Now do the auto configuration stuff. Note that config_port |
2141 | * is expected to claim the resources and map the port for us. | 2141 | * is expected to claim the resources and map the port for us. |
2142 | */ | 2142 | */ |
2143 | flags = UART_CONFIG_TYPE; | 2143 | flags = UART_CONFIG_TYPE; |
2144 | if (port->flags & UPF_AUTO_IRQ) | 2144 | if (port->flags & UPF_AUTO_IRQ) |
2145 | flags |= UART_CONFIG_IRQ; | 2145 | flags |= UART_CONFIG_IRQ; |
2146 | if (port->flags & UPF_BOOT_AUTOCONF) { | 2146 | if (port->flags & UPF_BOOT_AUTOCONF) { |
2147 | port->type = PORT_UNKNOWN; | 2147 | port->type = PORT_UNKNOWN; |
2148 | port->ops->config_port(port, flags); | 2148 | port->ops->config_port(port, flags); |
2149 | } | 2149 | } |
2150 | 2150 | ||
2151 | if (port->type != PORT_UNKNOWN) { | 2151 | if (port->type != PORT_UNKNOWN) { |
2152 | unsigned long flags; | 2152 | unsigned long flags; |
2153 | 2153 | ||
2154 | uart_report_port(drv, port); | 2154 | uart_report_port(drv, port); |
2155 | 2155 | ||
2156 | /* Power up port for set_mctrl() */ | 2156 | /* Power up port for set_mctrl() */ |
2157 | uart_change_pm(state, 0); | 2157 | uart_change_pm(state, 0); |
2158 | 2158 | ||
2159 | /* | 2159 | /* |
2160 | * Ensure that the modem control lines are de-activated, but | 2160 | * Ensure that the modem control lines are de-activated, but |
2161 | * keep the DTR setting made in uart_set_options(). | 2161 | * keep the DTR setting made in uart_set_options(). |
2162 | * We probably don't need a spinlock around this, but be safe. | 2162 | * We probably don't need a spinlock around this, but be safe. |
2163 | */ | 2163 | */ |
2164 | spin_lock_irqsave(&port->lock, flags); | 2164 | spin_lock_irqsave(&port->lock, flags); |
2165 | port->ops->set_mctrl(port, port->mctrl & TIOCM_DTR); | 2165 | port->ops->set_mctrl(port, port->mctrl & TIOCM_DTR); |
2166 | spin_unlock_irqrestore(&port->lock, flags); | 2166 | spin_unlock_irqrestore(&port->lock, flags); |
2167 | 2167 | ||
2168 | /* | 2168 | /* |
2169 | * If this driver supports console, and it hasn't been | 2169 | * If this driver supports console, and it hasn't been |
2170 | * successfully registered yet, try to re-register it. | 2170 | * successfully registered yet, try to re-register it. |
2171 | * It may be that the port was not available. | 2171 | * It may be that the port was not available. |
2172 | */ | 2172 | */ |
2173 | if (port->cons && !(port->cons->flags & CON_ENABLED)) | 2173 | if (port->cons && !(port->cons->flags & CON_ENABLED)) |
2174 | register_console(port->cons); | 2174 | register_console(port->cons); |
2175 | 2175 | ||
2176 | /* | 2176 | /* |
2177 | * Power down all ports by default, except the | 2177 | * Power down all ports by default, except the |
2178 | * console if we have one. | 2178 | * console if we have one. |
2179 | */ | 2179 | */ |
2180 | if (!uart_console(port)) | 2180 | if (!uart_console(port)) |
2181 | uart_change_pm(state, 3); | 2181 | uart_change_pm(state, 3); |
2182 | } | 2182 | } |
2183 | } | 2183 | } |
2184 | 2184 | ||
2185 | static const struct tty_operations uart_ops = { | 2185 | static const struct tty_operations uart_ops = { |
2186 | .open = uart_open, | 2186 | .open = uart_open, |
2187 | .close = uart_close, | 2187 | .close = uart_close, |
2188 | .write = uart_write, | 2188 | .write = uart_write, |
2189 | .put_char = uart_put_char, | 2189 | .put_char = uart_put_char, |
2190 | .flush_chars = uart_flush_chars, | 2190 | .flush_chars = uart_flush_chars, |
2191 | .write_room = uart_write_room, | 2191 | .write_room = uart_write_room, |
2192 | .chars_in_buffer= uart_chars_in_buffer, | 2192 | .chars_in_buffer= uart_chars_in_buffer, |
2193 | .flush_buffer = uart_flush_buffer, | 2193 | .flush_buffer = uart_flush_buffer, |
2194 | .ioctl = uart_ioctl, | 2194 | .ioctl = uart_ioctl, |
2195 | .throttle = uart_throttle, | 2195 | .throttle = uart_throttle, |
2196 | .unthrottle = uart_unthrottle, | 2196 | .unthrottle = uart_unthrottle, |
2197 | .send_xchar = uart_send_xchar, | 2197 | .send_xchar = uart_send_xchar, |
2198 | .set_termios = uart_set_termios, | 2198 | .set_termios = uart_set_termios, |
2199 | .stop = uart_stop, | 2199 | .stop = uart_stop, |
2200 | .start = uart_start, | 2200 | .start = uart_start, |
2201 | .hangup = uart_hangup, | 2201 | .hangup = uart_hangup, |
2202 | .break_ctl = uart_break_ctl, | 2202 | .break_ctl = uart_break_ctl, |
2203 | .wait_until_sent= uart_wait_until_sent, | 2203 | .wait_until_sent= uart_wait_until_sent, |
2204 | #ifdef CONFIG_PROC_FS | 2204 | #ifdef CONFIG_PROC_FS |
2205 | .read_proc = uart_read_proc, | 2205 | .read_proc = uart_read_proc, |
2206 | #endif | 2206 | #endif |
2207 | .tiocmget = uart_tiocmget, | 2207 | .tiocmget = uart_tiocmget, |
2208 | .tiocmset = uart_tiocmset, | 2208 | .tiocmset = uart_tiocmset, |
2209 | }; | 2209 | }; |
2210 | 2210 | ||
2211 | /** | 2211 | /** |
2212 | * uart_register_driver - register a driver with the uart core layer | 2212 | * uart_register_driver - register a driver with the uart core layer |
2213 | * @drv: low level driver structure | 2213 | * @drv: low level driver structure |
2214 | * | 2214 | * |
2215 | * Register a uart driver with the core driver. We in turn register | 2215 | * Register a uart driver with the core driver. We in turn register |
2216 | * with the tty layer, and initialise the core driver per-port state. | 2216 | * with the tty layer, and initialise the core driver per-port state. |
2217 | * | 2217 | * |
2218 | * We have a proc file in /proc/tty/driver which is named after the | 2218 | * We have a proc file in /proc/tty/driver which is named after the |
2219 | * normal driver. | 2219 | * normal driver. |
2220 | * | 2220 | * |
2221 | * drv->port should be NULL, and the per-port structures should be | 2221 | * drv->port should be NULL, and the per-port structures should be |
2222 | * registered using uart_add_one_port after this call has succeeded. | 2222 | * registered using uart_add_one_port after this call has succeeded. |
2223 | */ | 2223 | */ |
2224 | int uart_register_driver(struct uart_driver *drv) | 2224 | int uart_register_driver(struct uart_driver *drv) |
2225 | { | 2225 | { |
2226 | struct tty_driver *normal = NULL; | 2226 | struct tty_driver *normal = NULL; |
2227 | int i, retval; | 2227 | int i, retval; |
2228 | 2228 | ||
2229 | BUG_ON(drv->state); | 2229 | BUG_ON(drv->state); |
2230 | 2230 | ||
2231 | /* | 2231 | /* |
2232 | * Maybe we should be using a slab cache for this, especially if | 2232 | * Maybe we should be using a slab cache for this, especially if |
2233 | * we have a large number of ports to handle. | 2233 | * we have a large number of ports to handle. |
2234 | */ | 2234 | */ |
2235 | drv->state = kzalloc(sizeof(struct uart_state) * drv->nr, GFP_KERNEL); | 2235 | drv->state = kzalloc(sizeof(struct uart_state) * drv->nr, GFP_KERNEL); |
2236 | retval = -ENOMEM; | 2236 | retval = -ENOMEM; |
2237 | if (!drv->state) | 2237 | if (!drv->state) |
2238 | goto out; | 2238 | goto out; |
2239 | 2239 | ||
2240 | normal = alloc_tty_driver(drv->nr); | 2240 | normal = alloc_tty_driver(drv->nr); |
2241 | if (!normal) | 2241 | if (!normal) |
2242 | goto out; | 2242 | goto out; |
2243 | 2243 | ||
2244 | drv->tty_driver = normal; | 2244 | drv->tty_driver = normal; |
2245 | 2245 | ||
2246 | normal->owner = drv->owner; | 2246 | normal->owner = drv->owner; |
2247 | normal->driver_name = drv->driver_name; | 2247 | normal->driver_name = drv->driver_name; |
2248 | normal->name = drv->dev_name; | 2248 | normal->name = drv->dev_name; |
2249 | normal->major = drv->major; | 2249 | normal->major = drv->major; |
2250 | normal->minor_start = drv->minor; | 2250 | normal->minor_start = drv->minor; |
2251 | normal->type = TTY_DRIVER_TYPE_SERIAL; | 2251 | normal->type = TTY_DRIVER_TYPE_SERIAL; |
2252 | normal->subtype = SERIAL_TYPE_NORMAL; | 2252 | normal->subtype = SERIAL_TYPE_NORMAL; |
2253 | normal->init_termios = tty_std_termios; | 2253 | normal->init_termios = tty_std_termios; |
2254 | normal->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; | 2254 | normal->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; |
2255 | normal->init_termios.c_ispeed = normal->init_termios.c_ospeed = 9600; | 2255 | normal->init_termios.c_ispeed = normal->init_termios.c_ospeed = 9600; |
2256 | normal->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; | 2256 | normal->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; |
2257 | normal->driver_state = drv; | 2257 | normal->driver_state = drv; |
2258 | tty_set_operations(normal, &uart_ops); | 2258 | tty_set_operations(normal, &uart_ops); |
2259 | 2259 | ||
2260 | /* | 2260 | /* |
2261 | * Initialise the UART state(s). | 2261 | * Initialise the UART state(s). |
2262 | */ | 2262 | */ |
2263 | for (i = 0; i < drv->nr; i++) { | 2263 | for (i = 0; i < drv->nr; i++) { |
2264 | struct uart_state *state = drv->state + i; | 2264 | struct uart_state *state = drv->state + i; |
2265 | 2265 | ||
2266 | state->close_delay = 500; /* .5 seconds */ | 2266 | state->close_delay = 500; /* .5 seconds */ |
2267 | state->closing_wait = 30000; /* 30 seconds */ | 2267 | state->closing_wait = 30000; /* 30 seconds */ |
2268 | 2268 | ||
2269 | mutex_init(&state->mutex); | 2269 | mutex_init(&state->mutex); |
2270 | } | 2270 | } |
2271 | 2271 | ||
2272 | retval = tty_register_driver(normal); | 2272 | retval = tty_register_driver(normal); |
2273 | out: | 2273 | out: |
2274 | if (retval < 0) { | 2274 | if (retval < 0) { |
2275 | put_tty_driver(normal); | 2275 | put_tty_driver(normal); |
2276 | kfree(drv->state); | 2276 | kfree(drv->state); |
2277 | } | 2277 | } |
2278 | return retval; | 2278 | return retval; |
2279 | } | 2279 | } |
2280 | 2280 | ||
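A hedged sketch of the usual registration pattern from a driver's module init; the device names, major/minor numbers and the myuart_console/myuart_platform_driver symbols are illustrative only:

static struct uart_driver myuart_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= "myuart",
	.dev_name	= "ttyMY",
	.major		= 204,			/* example values */
	.minor		= 64,
	.nr		= MYUART_NR_PORTS,
	.cons		= &myuart_console,
};

static int __init myuart_init(void)
{
	int ret;

	ret = uart_register_driver(&myuart_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&myuart_platform_driver);
	if (ret)
		uart_unregister_driver(&myuart_driver);
	return ret;
}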
2281 | /** | 2281 | /** |
2282 | * uart_unregister_driver - remove a driver from the uart core layer | 2282 | * uart_unregister_driver - remove a driver from the uart core layer |
2283 | * @drv: low level driver structure | 2283 | * @drv: low level driver structure |
2284 | * | 2284 | * |
2285 | * Remove all references to a driver from the core driver. The low | 2285 | * Remove all references to a driver from the core driver. The low |
2286 | * level driver must have removed all its ports via the | 2286 | * level driver must have removed all its ports via the |
2287 | * uart_remove_one_port() if it registered them with uart_add_one_port(). | 2287 | * uart_remove_one_port() if it registered them with uart_add_one_port(). |
2288 | * (ie, drv->port == NULL) | 2288 | * (ie, drv->port == NULL) |
2289 | */ | 2289 | */ |
2290 | void uart_unregister_driver(struct uart_driver *drv) | 2290 | void uart_unregister_driver(struct uart_driver *drv) |
2291 | { | 2291 | { |
2292 | struct tty_driver *p = drv->tty_driver; | 2292 | struct tty_driver *p = drv->tty_driver; |
2293 | tty_unregister_driver(p); | 2293 | tty_unregister_driver(p); |
2294 | put_tty_driver(p); | 2294 | put_tty_driver(p); |
2295 | kfree(drv->state); | 2295 | kfree(drv->state); |
2296 | drv->tty_driver = NULL; | 2296 | drv->tty_driver = NULL; |
2297 | } | 2297 | } |
2298 | 2298 | ||
2299 | struct tty_driver *uart_console_device(struct console *co, int *index) | 2299 | struct tty_driver *uart_console_device(struct console *co, int *index) |
2300 | { | 2300 | { |
2301 | struct uart_driver *p = co->data; | 2301 | struct uart_driver *p = co->data; |
2302 | *index = co->index; | 2302 | *index = co->index; |
2303 | return p->tty_driver; | 2303 | return p->tty_driver; |
2304 | } | 2304 | } |
2305 | 2305 | ||
2306 | /** | 2306 | /** |
2307 | * uart_add_one_port - attach a driver-defined port structure | 2307 | * uart_add_one_port - attach a driver-defined port structure |
2308 | * @drv: pointer to the uart low level driver structure for this port | 2308 | * @drv: pointer to the uart low level driver structure for this port |
2309 | * @port: uart port structure to use for this port. | 2309 | * @port: uart port structure to use for this port. |
2310 | * | 2310 | * |
2311 | * This allows the driver to register its own uart_port structure | 2311 | * This allows the driver to register its own uart_port structure |
2312 | * with the core driver. The main purpose is to allow the low | 2312 | * with the core driver. The main purpose is to allow the low |
2313 | * level uart drivers to expand uart_port, rather than having yet | 2313 | * level uart drivers to expand uart_port, rather than having yet |
2314 | * more levels of structures. | 2314 | * more levels of structures. |
2315 | */ | 2315 | */ |
2316 | int uart_add_one_port(struct uart_driver *drv, struct uart_port *port) | 2316 | int uart_add_one_port(struct uart_driver *drv, struct uart_port *port) |
2317 | { | 2317 | { |
2318 | struct uart_state *state; | 2318 | struct uart_state *state; |
2319 | int ret = 0; | 2319 | int ret = 0; |
2320 | struct device *tty_dev; | 2320 | struct device *tty_dev; |
2321 | 2321 | ||
2322 | BUG_ON(in_interrupt()); | 2322 | BUG_ON(in_interrupt()); |
2323 | 2323 | ||
2324 | if (port->line >= drv->nr) | 2324 | if (port->line >= drv->nr) |
2325 | return -EINVAL; | 2325 | return -EINVAL; |
2326 | 2326 | ||
2327 | state = drv->state + port->line; | 2327 | state = drv->state + port->line; |
2328 | 2328 | ||
2329 | mutex_lock(&port_mutex); | 2329 | mutex_lock(&port_mutex); |
2330 | mutex_lock(&state->mutex); | 2330 | mutex_lock(&state->mutex); |
2331 | if (state->port) { | 2331 | if (state->port) { |
2332 | ret = -EINVAL; | 2332 | ret = -EINVAL; |
2333 | goto out; | 2333 | goto out; |
2334 | } | 2334 | } |
2335 | 2335 | ||
2336 | state->port = port; | 2336 | state->port = port; |
2337 | state->pm_state = -1; | 2337 | state->pm_state = -1; |
2338 | 2338 | ||
2339 | port->cons = drv->cons; | 2339 | port->cons = drv->cons; |
2340 | port->info = state->info; | 2340 | port->info = state->info; |
2341 | 2341 | ||
2342 | /* | 2342 | /* |
2343 | * If this port is a console, then the spinlock is already | 2343 | * If this port is a console, then the spinlock is already |
2344 | * initialised. | 2344 | * initialised. |
2345 | */ | 2345 | */ |
2346 | if (!(uart_console(port) && (port->cons->flags & CON_ENABLED))) { | 2346 | if (!(uart_console(port) && (port->cons->flags & CON_ENABLED))) { |
2347 | spin_lock_init(&port->lock); | 2347 | spin_lock_init(&port->lock); |
2348 | lockdep_set_class(&port->lock, &port_lock_key); | 2348 | lockdep_set_class(&port->lock, &port_lock_key); |
2349 | } | 2349 | } |
2350 | 2350 | ||
2351 | uart_configure_port(drv, state, port); | 2351 | uart_configure_port(drv, state, port); |
2352 | 2352 | ||
2353 | /* | 2353 | /* |
2354 | * Register the port whether it's detected or not. This allows | 2354 | * Register the port whether it's detected or not. This allows |
2355 | * setserial to be used to alter this port's parameters. | 2355 | * setserial to be used to alter this port's parameters. |
2356 | */ | 2356 | */ |
2357 | tty_dev = tty_register_device(drv->tty_driver, port->line, port->dev); | 2357 | tty_dev = tty_register_device(drv->tty_driver, port->line, port->dev); |
2358 | if (likely(!IS_ERR(tty_dev))) { | 2358 | if (likely(!IS_ERR(tty_dev))) { |
2359 | device_can_wakeup(tty_dev) = 1; | 2359 | device_can_wakeup(tty_dev) = 1; |
2360 | device_set_wakeup_enable(tty_dev, 0); | 2360 | device_set_wakeup_enable(tty_dev, 0); |
2361 | } else | 2361 | } else |
2362 | printk(KERN_ERR "Cannot register tty device on line %d\n", | 2362 | printk(KERN_ERR "Cannot register tty device on line %d\n", |
2363 | port->line); | 2363 | port->line); |
2364 | 2364 | ||
2365 | /* | 2365 | /* |
2366 | * Ensure UPF_DEAD is not set. | 2366 | * Ensure UPF_DEAD is not set. |
2367 | */ | 2367 | */ |
2368 | port->flags &= ~UPF_DEAD; | 2368 | port->flags &= ~UPF_DEAD; |
2369 | 2369 | ||
2370 | out: | 2370 | out: |
2371 | mutex_unlock(&state->mutex); | 2371 | mutex_unlock(&state->mutex); |
2372 | mutex_unlock(&port_mutex); | 2372 | mutex_unlock(&port_mutex); |
2373 | 2373 | ||
2374 | return ret; | 2374 | return ret; |
2375 | } | 2375 | } |
2376 | 2376 | ||
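A typical probe() fills in the uart_port and hands it to uart_add_one_port(); everything named "myuart_*" below is a hypothetical example, not this driver's code:

static int __devinit myuart_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct uart_port *port = &myuart_ports[pdev->id];

	if (!res)
		return -ENODEV;

	port->dev	= &pdev->dev;
	port->mapbase	= res->start;
	port->irq	= platform_get_irq(pdev, 0);
	port->iotype	= UPIO_MEM;
	port->flags	= UPF_BOOT_AUTOCONF;
	port->line	= pdev->id;
	port->ops	= &myuart_pops;

	platform_set_drvdata(pdev, port);
	return uart_add_one_port(&myuart_driver, port);
}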
2377 | /** | 2377 | /** |
2378 | * uart_remove_one_port - detach a driver defined port structure | 2378 | * uart_remove_one_port - detach a driver defined port structure |
2379 | * @drv: pointer to the uart low level driver structure for this port | 2379 | * @drv: pointer to the uart low level driver structure for this port |
2380 | * @port: uart port structure for this port | 2380 | * @port: uart port structure for this port |
2381 | * | 2381 | * |
2382 | * This unhooks (and hangs up) the specified port structure from the | 2382 | * This unhooks (and hangs up) the specified port structure from the |
2383 | * core driver. No further calls will be made to the low-level code | 2383 | * core driver. No further calls will be made to the low-level code |
2384 | * for this port. | 2384 | * for this port. |
2385 | */ | 2385 | */ |
2386 | int uart_remove_one_port(struct uart_driver *drv, struct uart_port *port) | 2386 | int uart_remove_one_port(struct uart_driver *drv, struct uart_port *port) |
2387 | { | 2387 | { |
2388 | struct uart_state *state = drv->state + port->line; | 2388 | struct uart_state *state = drv->state + port->line; |
2389 | struct uart_info *info; | 2389 | struct uart_info *info; |
2390 | 2390 | ||
2391 | BUG_ON(in_interrupt()); | 2391 | BUG_ON(in_interrupt()); |
2392 | 2392 | ||
2393 | if (state->port != port) | 2393 | if (state->port != port) |
2394 | printk(KERN_ALERT "Removing wrong port: %p != %p\n", | 2394 | printk(KERN_ALERT "Removing wrong port: %p != %p\n", |
2395 | state->port, port); | 2395 | state->port, port); |
2396 | 2396 | ||
2397 | mutex_lock(&port_mutex); | 2397 | mutex_lock(&port_mutex); |
2398 | 2398 | ||
2399 | /* | 2399 | /* |
2400 | * Mark the port "dead" - this prevents any opens from | 2400 | * Mark the port "dead" - this prevents any opens from |
2401 | * succeeding while we shut down the port. | 2401 | * succeeding while we shut down the port. |
2402 | */ | 2402 | */ |
2403 | mutex_lock(&state->mutex); | 2403 | mutex_lock(&state->mutex); |
2404 | port->flags |= UPF_DEAD; | 2404 | port->flags |= UPF_DEAD; |
2405 | mutex_unlock(&state->mutex); | 2405 | mutex_unlock(&state->mutex); |
2406 | 2406 | ||
2407 | /* | 2407 | /* |
2408 | * Remove the devices from the tty layer | 2408 | * Remove the devices from the tty layer |
2409 | */ | 2409 | */ |
2410 | tty_unregister_device(drv->tty_driver, port->line); | 2410 | tty_unregister_device(drv->tty_driver, port->line); |
2411 | 2411 | ||
2412 | info = state->info; | 2412 | info = state->info; |
2413 | if (info && info->tty) | 2413 | if (info && info->tty) |
2414 | tty_vhangup(info->tty); | 2414 | tty_vhangup(info->tty); |
2415 | 2415 | ||
2416 | /* | 2416 | /* |
2417 | * All users of this port should now be disconnected from | 2417 | * All users of this port should now be disconnected from |
2418 | * this driver, and the port shut down. We should be the | 2418 | * this driver, and the port shut down. We should be the |
2419 | * only thread fiddling with this port from now on. | 2419 | * only thread fiddling with this port from now on. |
2420 | */ | 2420 | */ |
2421 | state->info = NULL; | 2421 | state->info = NULL; |
2422 | 2422 | ||
2423 | /* | 2423 | /* |
2424 | * Free the port IO and memory resources, if any. | 2424 | * Free the port IO and memory resources, if any. |
2425 | */ | 2425 | */ |
2426 | if (port->type != PORT_UNKNOWN) | 2426 | if (port->type != PORT_UNKNOWN) |
2427 | port->ops->release_port(port); | 2427 | port->ops->release_port(port); |
2428 | 2428 | ||
2429 | /* | 2429 | /* |
2430 | * Indicate that there isn't a port here anymore. | 2430 | * Indicate that there isn't a port here anymore. |
2431 | */ | 2431 | */ |
2432 | port->type = PORT_UNKNOWN; | 2432 | port->type = PORT_UNKNOWN; |
2433 | 2433 | ||
2434 | /* | 2434 | /* |
2435 | * Kill the tasklet, and free resources. | 2435 | * Kill the tasklet, and free resources. |
2436 | */ | 2436 | */ |
2437 | if (info) { | 2437 | if (info) { |
2438 | tasklet_kill(&info->tlet); | 2438 | tasklet_kill(&info->tlet); |
2439 | kfree(info); | 2439 | kfree(info); |
2440 | } | 2440 | } |
2441 | 2441 | ||
2442 | state->port = NULL; | 2442 | state->port = NULL; |
2443 | mutex_unlock(&port_mutex); | 2443 | mutex_unlock(&port_mutex); |
2444 | 2444 | ||
2445 | return 0; | 2445 | return 0; |
2446 | } | 2446 | } |
2447 | 2447 | ||
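And the matching remove path, under the same hypothetical naming:

static int __devexit myuart_remove(struct platform_device *pdev)
{
	struct uart_port *port = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);
	return uart_remove_one_port(&myuart_driver, port);
}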
2448 | /* | 2448 | /* |
2449 | * Are the two ports equivalent? | 2449 | * Are the two ports equivalent? |
2450 | */ | 2450 | */ |
2451 | int uart_match_port(struct uart_port *port1, struct uart_port *port2) | 2451 | int uart_match_port(struct uart_port *port1, struct uart_port *port2) |
2452 | { | 2452 | { |
2453 | if (port1->iotype != port2->iotype) | 2453 | if (port1->iotype != port2->iotype) |
2454 | return 0; | 2454 | return 0; |
2455 | 2455 | ||
2456 | switch (port1->iotype) { | 2456 | switch (port1->iotype) { |
2457 | case UPIO_PORT: | 2457 | case UPIO_PORT: |
2458 | return (port1->iobase == port2->iobase); | 2458 | return (port1->iobase == port2->iobase); |
2459 | case UPIO_HUB6: | 2459 | case UPIO_HUB6: |
2460 | return (port1->iobase == port2->iobase) && | 2460 | return (port1->iobase == port2->iobase) && |
2461 | (port1->hub6 == port2->hub6); | 2461 | (port1->hub6 == port2->hub6); |
2462 | case UPIO_MEM: | 2462 | case UPIO_MEM: |
2463 | case UPIO_MEM32: | 2463 | case UPIO_MEM32: |
2464 | case UPIO_AU: | 2464 | case UPIO_AU: |
2465 | case UPIO_TSI: | 2465 | case UPIO_TSI: |
2466 | case UPIO_DWAPB: | 2466 | case UPIO_DWAPB: |
2467 | return (port1->mapbase == port2->mapbase); | 2467 | return (port1->mapbase == port2->mapbase); |
2468 | } | 2468 | } |
2469 | return 0; | 2469 | return 0; |
2470 | } | 2470 | } |
2471 | EXPORT_SYMBOL(uart_match_port); | 2471 | EXPORT_SYMBOL(uart_match_port); |
2472 | 2472 | ||
2473 | EXPORT_SYMBOL(uart_write_wakeup); | 2473 | EXPORT_SYMBOL(uart_write_wakeup); |
2474 | EXPORT_SYMBOL(uart_register_driver); | 2474 | EXPORT_SYMBOL(uart_register_driver); |
2475 | EXPORT_SYMBOL(uart_unregister_driver); | 2475 | EXPORT_SYMBOL(uart_unregister_driver); |
2476 | EXPORT_SYMBOL(uart_suspend_port); | 2476 | EXPORT_SYMBOL(uart_suspend_port); |
2477 | EXPORT_SYMBOL(uart_resume_port); | 2477 | EXPORT_SYMBOL(uart_resume_port); |
2478 | EXPORT_SYMBOL(uart_add_one_port); | 2478 | EXPORT_SYMBOL(uart_add_one_port); |
2479 | EXPORT_SYMBOL(uart_remove_one_port); | 2479 | EXPORT_SYMBOL(uart_remove_one_port); |
2480 | 2480 | ||
2481 | MODULE_DESCRIPTION("Serial driver core"); | 2481 | MODULE_DESCRIPTION("Serial driver core"); |
2482 | MODULE_LICENSE("GPL"); | 2482 | MODULE_LICENSE("GPL"); |
2483 | 2483 |
fs/char_dev.c
1 | /* | 1 | /* |
2 | * linux/fs/char_dev.c | 2 | * linux/fs/char_dev.c |
3 | * | 3 | * |
4 | * Copyright (C) 1991, 1992 Linus Torvalds | 4 | * Copyright (C) 1991, 1992 Linus Torvalds |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <linux/init.h> | 7 | #include <linux/init.h> |
8 | #include <linux/fs.h> | 8 | #include <linux/fs.h> |
9 | #include <linux/kdev_t.h> | 9 | #include <linux/kdev_t.h> |
10 | #include <linux/slab.h> | 10 | #include <linux/slab.h> |
11 | #include <linux/string.h> | 11 | #include <linux/string.h> |
12 | 12 | ||
13 | #include <linux/major.h> | 13 | #include <linux/major.h> |
14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/smp_lock.h> | 16 | #include <linux/smp_lock.h> |
17 | #include <linux/seq_file.h> | 17 | #include <linux/seq_file.h> |
18 | 18 | ||
19 | #include <linux/kobject.h> | 19 | #include <linux/kobject.h> |
20 | #include <linux/kobj_map.h> | 20 | #include <linux/kobj_map.h> |
21 | #include <linux/cdev.h> | 21 | #include <linux/cdev.h> |
22 | #include <linux/mutex.h> | 22 | #include <linux/mutex.h> |
23 | #include <linux/backing-dev.h> | 23 | #include <linux/backing-dev.h> |
24 | 24 | ||
25 | #ifdef CONFIG_KMOD | 25 | #ifdef CONFIG_KMOD |
26 | #include <linux/kmod.h> | 26 | #include <linux/kmod.h> |
27 | #endif | 27 | #endif |
28 | #include "internal.h" | 28 | #include "internal.h" |
29 | 29 | ||
30 | /* | 30 | /* |
31 | * capabilities for /dev/mem, /dev/kmem and similar directly mappable character | 31 | * capabilities for /dev/mem, /dev/kmem and similar directly mappable character |
32 | * devices | 32 | * devices |
33 | * - permits shared-mmap for read, write and/or exec | 33 | * - permits shared-mmap for read, write and/or exec |
34 | * - does not permit private mmap in NOMMU mode (can't do COW) | 34 | * - does not permit private mmap in NOMMU mode (can't do COW) |
35 | * - no readahead or I/O queue unplugging required | 35 | * - no readahead or I/O queue unplugging required |
36 | */ | 36 | */ |
37 | struct backing_dev_info directly_mappable_cdev_bdi = { | 37 | struct backing_dev_info directly_mappable_cdev_bdi = { |
38 | .capabilities = ( | 38 | .capabilities = ( |
39 | #ifdef CONFIG_MMU | 39 | #ifdef CONFIG_MMU |
40 | /* permit private copies of the data to be taken */ | 40 | /* permit private copies of the data to be taken */ |
41 | BDI_CAP_MAP_COPY | | 41 | BDI_CAP_MAP_COPY | |
42 | #endif | 42 | #endif |
43 | /* permit direct mmap, for read, write or exec */ | 43 | /* permit direct mmap, for read, write or exec */ |
44 | BDI_CAP_MAP_DIRECT | | 44 | BDI_CAP_MAP_DIRECT | |
45 | BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP), | 45 | BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP), |
46 | }; | 46 | }; |
47 | 47 | ||
48 | static struct kobj_map *cdev_map; | 48 | static struct kobj_map *cdev_map; |
49 | 49 | ||
50 | static DEFINE_MUTEX(chrdevs_lock); | 50 | static DEFINE_MUTEX(chrdevs_lock); |
51 | 51 | ||
52 | static struct char_device_struct { | 52 | static struct char_device_struct { |
53 | struct char_device_struct *next; | 53 | struct char_device_struct *next; |
54 | unsigned int major; | 54 | unsigned int major; |
55 | unsigned int baseminor; | 55 | unsigned int baseminor; |
56 | int minorct; | 56 | int minorct; |
57 | char name[64]; | 57 | char name[64]; |
58 | struct file_operations *fops; | 58 | struct file_operations *fops; |
59 | struct cdev *cdev; /* will die */ | 59 | struct cdev *cdev; /* will die */ |
60 | } *chrdevs[CHRDEV_MAJOR_HASH_SIZE]; | 60 | } *chrdevs[CHRDEV_MAJOR_HASH_SIZE]; |
61 | 61 | ||
62 | /* index in the above */ | 62 | /* index in the above */ |
63 | static inline int major_to_index(int major) | 63 | static inline int major_to_index(int major) |
64 | { | 64 | { |
65 | return major % CHRDEV_MAJOR_HASH_SIZE; | 65 | return major % CHRDEV_MAJOR_HASH_SIZE; |
66 | } | 66 | } |
67 | 67 | ||
68 | #ifdef CONFIG_PROC_FS | 68 | #ifdef CONFIG_PROC_FS |
69 | 69 | ||
70 | void chrdev_show(struct seq_file *f, off_t offset) | 70 | void chrdev_show(struct seq_file *f, off_t offset) |
71 | { | 71 | { |
72 | struct char_device_struct *cd; | 72 | struct char_device_struct *cd; |
73 | 73 | ||
74 | if (offset < CHRDEV_MAJOR_HASH_SIZE) { | 74 | if (offset < CHRDEV_MAJOR_HASH_SIZE) { |
75 | mutex_lock(&chrdevs_lock); | 75 | mutex_lock(&chrdevs_lock); |
76 | for (cd = chrdevs[offset]; cd; cd = cd->next) | 76 | for (cd = chrdevs[offset]; cd; cd = cd->next) |
77 | seq_printf(f, "%3d %s\n", cd->major, cd->name); | 77 | seq_printf(f, "%3d %s\n", cd->major, cd->name); |
78 | mutex_unlock(&chrdevs_lock); | 78 | mutex_unlock(&chrdevs_lock); |
79 | } | 79 | } |
80 | } | 80 | } |
81 | 81 | ||
82 | #endif /* CONFIG_PROC_FS */ | 82 | #endif /* CONFIG_PROC_FS */ |
83 | 83 | ||
84 | /* | 84 | /* |
85 | * Register a single major with a specified minor range. | 85 | * Register a single major with a specified minor range. |
86 | * | 86 | * |
87 | * If major == 0 this function will dynamically allocate a major and return | 87 | * If major == 0 this function will dynamically allocate a major and return |
88 | * its number. | 88 | * its number. |
89 | * | 89 | * |
90 | * If major > 0 this function will attempt to reserve the passed range of | 90 | * If major > 0 this function will attempt to reserve the passed range of |
91 | * minors and will return zero on success. | 91 | * minors and will return zero on success. |
92 | * | 92 | * |
93 | * Returns a -ve errno on failure. | 93 | * Returns a -ve errno on failure. |
94 | */ | 94 | */ |
95 | static struct char_device_struct * | 95 | static struct char_device_struct * |
96 | __register_chrdev_region(unsigned int major, unsigned int baseminor, | 96 | __register_chrdev_region(unsigned int major, unsigned int baseminor, |
97 | int minorct, const char *name) | 97 | int minorct, const char *name) |
98 | { | 98 | { |
99 | struct char_device_struct *cd, **cp; | 99 | struct char_device_struct *cd, **cp; |
100 | int ret = 0; | 100 | int ret = 0; |
101 | int i; | 101 | int i; |
102 | 102 | ||
103 | cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL); | 103 | cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL); |
104 | if (cd == NULL) | 104 | if (cd == NULL) |
105 | return ERR_PTR(-ENOMEM); | 105 | return ERR_PTR(-ENOMEM); |
106 | 106 | ||
107 | mutex_lock(&chrdevs_lock); | 107 | mutex_lock(&chrdevs_lock); |
108 | 108 | ||
109 | /* temporary */ | 109 | /* temporary */ |
110 | if (major == 0) { | 110 | if (major == 0) { |
111 | for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) { | 111 | for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) { |
112 | if (chrdevs[i] == NULL) | 112 | if (chrdevs[i] == NULL) |
113 | break; | 113 | break; |
114 | } | 114 | } |
115 | 115 | ||
116 | if (i == 0) { | 116 | if (i == 0) { |
117 | ret = -EBUSY; | 117 | ret = -EBUSY; |
118 | goto out; | 118 | goto out; |
119 | } | 119 | } |
120 | major = i; | 120 | major = i; |
121 | ret = major; | 121 | ret = major; |
122 | } | 122 | } |
123 | 123 | ||
124 | cd->major = major; | 124 | cd->major = major; |
125 | cd->baseminor = baseminor; | 125 | cd->baseminor = baseminor; |
126 | cd->minorct = minorct; | 126 | cd->minorct = minorct; |
127 | strncpy(cd->name,name, 64); | 127 | strncpy(cd->name,name, 64); |
128 | 128 | ||
129 | i = major_to_index(major); | 129 | i = major_to_index(major); |
130 | 130 | ||
131 | for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next) | 131 | for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next) |
132 | if ((*cp)->major > major || | 132 | if ((*cp)->major > major || |
133 | ((*cp)->major == major && | 133 | ((*cp)->major == major && |
134 | (((*cp)->baseminor >= baseminor) || | 134 | (((*cp)->baseminor >= baseminor) || |
135 | ((*cp)->baseminor + (*cp)->minorct > baseminor)))) | 135 | ((*cp)->baseminor + (*cp)->minorct > baseminor)))) |
136 | break; | 136 | break; |
137 | 137 | ||
138 | /* Check for overlapping minor ranges. */ | 138 | /* Check for overlapping minor ranges. */ |
139 | if (*cp && (*cp)->major == major) { | 139 | if (*cp && (*cp)->major == major) { |
140 | int old_min = (*cp)->baseminor; | 140 | int old_min = (*cp)->baseminor; |
141 | int old_max = (*cp)->baseminor + (*cp)->minorct - 1; | 141 | int old_max = (*cp)->baseminor + (*cp)->minorct - 1; |
142 | int new_min = baseminor; | 142 | int new_min = baseminor; |
143 | int new_max = baseminor + minorct - 1; | 143 | int new_max = baseminor + minorct - 1; |
144 | 144 | ||
145 | /* New driver overlaps from the left. */ | 145 | /* New driver overlaps from the left. */ |
146 | if (new_max >= old_min && new_max <= old_max) { | 146 | if (new_max >= old_min && new_max <= old_max) { |
147 | ret = -EBUSY; | 147 | ret = -EBUSY; |
148 | goto out; | 148 | goto out; |
149 | } | 149 | } |
150 | 150 | ||
151 | /* New driver overlaps from the right. */ | 151 | /* New driver overlaps from the right. */ |
152 | if (new_min <= old_max && new_min >= old_min) { | 152 | if (new_min <= old_max && new_min >= old_min) { |
153 | ret = -EBUSY; | 153 | ret = -EBUSY; |
154 | goto out; | 154 | goto out; |
155 | } | 155 | } |
156 | } | 156 | } |
157 | 157 | ||
158 | cd->next = *cp; | 158 | cd->next = *cp; |
159 | *cp = cd; | 159 | *cp = cd; |
160 | mutex_unlock(&chrdevs_lock); | 160 | mutex_unlock(&chrdevs_lock); |
161 | return cd; | 161 | return cd; |
162 | out: | 162 | out: |
163 | mutex_unlock(&chrdevs_lock); | 163 | mutex_unlock(&chrdevs_lock); |
164 | kfree(cd); | 164 | kfree(cd); |
165 | return ERR_PTR(ret); | 165 | return ERR_PTR(ret); |
166 | } | 166 | } |
167 | 167 | ||
168 | static struct char_device_struct * | 168 | static struct char_device_struct * |
169 | __unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct) | 169 | __unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct) |
170 | { | 170 | { |
171 | struct char_device_struct *cd = NULL, **cp; | 171 | struct char_device_struct *cd = NULL, **cp; |
172 | int i = major_to_index(major); | 172 | int i = major_to_index(major); |
173 | 173 | ||
174 | mutex_lock(&chrdevs_lock); | 174 | mutex_lock(&chrdevs_lock); |
175 | for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next) | 175 | for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next) |
176 | if ((*cp)->major == major && | 176 | if ((*cp)->major == major && |
177 | (*cp)->baseminor == baseminor && | 177 | (*cp)->baseminor == baseminor && |
178 | (*cp)->minorct == minorct) | 178 | (*cp)->minorct == minorct) |
179 | break; | 179 | break; |
180 | if (*cp) { | 180 | if (*cp) { |
181 | cd = *cp; | 181 | cd = *cp; |
182 | *cp = cd->next; | 182 | *cp = cd->next; |
183 | } | 183 | } |
184 | mutex_unlock(&chrdevs_lock); | 184 | mutex_unlock(&chrdevs_lock); |
185 | return cd; | 185 | return cd; |
186 | } | 186 | } |
187 | 187 | ||
188 | /** | 188 | /** |
189 | * register_chrdev_region() - register a range of device numbers | 189 | * register_chrdev_region() - register a range of device numbers |
190 | * @from: the first in the desired range of device numbers; must include | 190 | * @from: the first in the desired range of device numbers; must include |
191 | * the major number. | 191 | * the major number. |
192 | * @count: the number of consecutive device numbers required | 192 | * @count: the number of consecutive device numbers required |
193 | * @name: the name of the device or driver. | 193 | * @name: the name of the device or driver. |
194 | * | 194 | * |
195 | * Return value is zero on success, a negative error code on failure. | 195 | * Return value is zero on success, a negative error code on failure. |
196 | */ | 196 | */ |
197 | int register_chrdev_region(dev_t from, unsigned count, const char *name) | 197 | int register_chrdev_region(dev_t from, unsigned count, const char *name) |
198 | { | 198 | { |
199 | struct char_device_struct *cd; | 199 | struct char_device_struct *cd; |
200 | dev_t to = from + count; | 200 | dev_t to = from + count; |
201 | dev_t n, next; | 201 | dev_t n, next; |
202 | 202 | ||
203 | for (n = from; n < to; n = next) { | 203 | for (n = from; n < to; n = next) { |
204 | next = MKDEV(MAJOR(n)+1, 0); | 204 | next = MKDEV(MAJOR(n)+1, 0); |
205 | if (next > to) | 205 | if (next > to) |
206 | next = to; | 206 | next = to; |
207 | cd = __register_chrdev_region(MAJOR(n), MINOR(n), | 207 | cd = __register_chrdev_region(MAJOR(n), MINOR(n), |
208 | next - n, name); | 208 | next - n, name); |
209 | if (IS_ERR(cd)) | 209 | if (IS_ERR(cd)) |
210 | goto fail; | 210 | goto fail; |
211 | } | 211 | } |
212 | return 0; | 212 | return 0; |
213 | fail: | 213 | fail: |
214 | to = n; | 214 | to = n; |
215 | for (n = from; n < to; n = next) { | 215 | for (n = from; n < to; n = next) { |
216 | next = MKDEV(MAJOR(n)+1, 0); | 216 | next = MKDEV(MAJOR(n)+1, 0); |
217 | kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n)); | 217 | kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n)); |
218 | } | 218 | } |
219 | return PTR_ERR(cd); | 219 | return PTR_ERR(cd); |
220 | } | 220 | } |
221 | 221 | ||
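For illustration (not part of this commit): a driver with a statically assigned major registers its range like this, and releases it with unregister_chrdev_region() on cleanup. MYDRV_MAJOR and MYDRV_NR_DEVS are hypothetical:

	dev_t dev = MKDEV(MYDRV_MAJOR, 0);
	int err;

	err = register_chrdev_region(dev, MYDRV_NR_DEVS, "mydrv");
	if (err)
		return err;
	/* ... on module exit: unregister_chrdev_region(dev, MYDRV_NR_DEVS); */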
222 | /** | 222 | /** |
223 | * alloc_chrdev_region() - register a range of char device numbers | 223 | * alloc_chrdev_region() - register a range of char device numbers |
224 | * @dev: output parameter for first assigned number | 224 | * @dev: output parameter for first assigned number |
225 | * @baseminor: first of the requested range of minor numbers | 225 | * @baseminor: first of the requested range of minor numbers |
226 | * @count: the number of minor numbers required | 226 | * @count: the number of minor numbers required |
227 | * @name: the name of the associated device or driver | 227 | * @name: the name of the associated device or driver |
228 | * | 228 | * |
229 | * Allocates a range of char device numbers. The major number will be | 229 | * Allocates a range of char device numbers. The major number will be |
230 | * chosen dynamically, and returned (along with the first minor number) | 230 | * chosen dynamically, and returned (along with the first minor number) |
231 | * in @dev. Returns zero or a negative error code. | 231 | * in @dev. Returns zero or a negative error code. |
232 | */ | 232 | */ |
233 | int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count, | 233 | int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count, |
234 | const char *name) | 234 | const char *name) |
235 | { | 235 | { |
236 | struct char_device_struct *cd; | 236 | struct char_device_struct *cd; |
237 | cd = __register_chrdev_region(0, baseminor, count, name); | 237 | cd = __register_chrdev_region(0, baseminor, count, name); |
238 | if (IS_ERR(cd)) | 238 | if (IS_ERR(cd)) |
239 | return PTR_ERR(cd); | 239 | return PTR_ERR(cd); |
240 | *dev = MKDEV(cd->major, cd->baseminor); | 240 | *dev = MKDEV(cd->major, cd->baseminor); |
241 | return 0; | 241 | return 0; |
242 | } | 242 | } |
243 | 243 | ||
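For drivers that do not care which major they get, the dynamic path above is the usual choice. A minimal sketch, assuming a hypothetical module called "mydev" (again, not part of this commit):

    #include <linux/module.h>
    #include <linux/init.h>
    #include <linux/fs.h>
    #include <linux/kdev_t.h>

    static dev_t mydev_first;    /* filled in by alloc_chrdev_region() */

    static int __init mydev_init(void)
    {
            int err;

            /* four minors starting at 0, major chosen by the kernel */
            err = alloc_chrdev_region(&mydev_first, 0, 4, "mydev");
            if (err)
                    return err;
            printk(KERN_INFO "mydev: using major %d\n", MAJOR(mydev_first));
            return 0;
    }

    static void __exit mydev_exit(void)
    {
            unregister_chrdev_region(mydev_first, 4);
    }

    module_init(mydev_init);
    module_exit(mydev_exit);
    MODULE_LICENSE("GPL");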
244 | /** | 244 | /** |
245 | * register_chrdev() - Register a major number for character devices. | 245 | * register_chrdev() - Register a major number for character devices. |
246 | * @major: major device number or 0 for dynamic allocation | 246 | * @major: major device number or 0 for dynamic allocation |
247 | * @name: name of this range of devices | 247 | * @name: name of this range of devices |
248 | * @fops: file operations associated with these devices | 248 | * @fops: file operations associated with these devices |
249 | * | 249 | * |
250 | * If @major == 0 this function will dynamically allocate a major and return | 250 | * If @major == 0 this function will dynamically allocate a major and return |
251 | * its number. | 251 | * its number. |
252 | * | 252 | * |
253 | * If @major > 0 this function will attempt to reserve a device with the given | 253 | * If @major > 0 this function will attempt to reserve a device with the given |
254 | * major number and will return zero on success. | 254 | * major number and will return zero on success. |
255 | * | 255 | * |
256 | * Returns a -ve errno on failure. | 256 | * Returns a -ve errno on failure. |
257 | * | 257 | * |
258 | * The name of this device has nothing to do with the name of the device in | 258 | * The name of this device has nothing to do with the name of the device in |
259 | * /dev. It only helps to keep track of the different owners of devices. If | 259 | * /dev. It only helps to keep track of the different owners of devices. If |
260 | * your module has only one type of device, it's ok to use e.g. the name | 260 | * your module has only one type of device, it's ok to use e.g. the name |
261 | * of the module here. | 261 | * of the module here. |
262 | * | 262 | * |
263 | * This function registers a range of 256 minor numbers. The first minor number | 263 | * This function registers a range of 256 minor numbers. The first minor number |
264 | * is 0. | 264 | * is 0. |
265 | */ | 265 | */ |
266 | int register_chrdev(unsigned int major, const char *name, | 266 | int register_chrdev(unsigned int major, const char *name, |
267 | const struct file_operations *fops) | 267 | const struct file_operations *fops) |
268 | { | 268 | { |
269 | struct char_device_struct *cd; | 269 | struct char_device_struct *cd; |
270 | struct cdev *cdev; | 270 | struct cdev *cdev; |
271 | char *s; | 271 | char *s; |
272 | int err = -ENOMEM; | 272 | int err = -ENOMEM; |
273 | 273 | ||
274 | cd = __register_chrdev_region(major, 0, 256, name); | 274 | cd = __register_chrdev_region(major, 0, 256, name); |
275 | if (IS_ERR(cd)) | 275 | if (IS_ERR(cd)) |
276 | return PTR_ERR(cd); | 276 | return PTR_ERR(cd); |
277 | 277 | ||
278 | cdev = cdev_alloc(); | 278 | cdev = cdev_alloc(); |
279 | if (!cdev) | 279 | if (!cdev) |
280 | goto out2; | 280 | goto out2; |
281 | 281 | ||
282 | cdev->owner = fops->owner; | 282 | cdev->owner = fops->owner; |
283 | cdev->ops = fops; | 283 | cdev->ops = fops; |
284 | kobject_set_name(&cdev->kobj, "%s", name); | 284 | kobject_set_name(&cdev->kobj, "%s", name); |
285 | for (s = strchr(kobject_name(&cdev->kobj),'/'); s; s = strchr(s, '/')) | 285 | for (s = strchr(kobject_name(&cdev->kobj),'/'); s; s = strchr(s, '/')) |
286 | *s = '!'; | 286 | *s = '!'; |
287 | 287 | ||
288 | err = cdev_add(cdev, MKDEV(cd->major, 0), 256); | 288 | err = cdev_add(cdev, MKDEV(cd->major, 0), 256); |
289 | if (err) | 289 | if (err) |
290 | goto out; | 290 | goto out; |
291 | 291 | ||
292 | cd->cdev = cdev; | 292 | cd->cdev = cdev; |
293 | 293 | ||
294 | return major ? 0 : cd->major; | 294 | return major ? 0 : cd->major; |
295 | out: | 295 | out: |
296 | kobject_put(&cdev->kobj); | 296 | kobject_put(&cdev->kobj); |
297 | out2: | 297 | out2: |
298 | kfree(__unregister_chrdev_region(cd->major, 0, 256)); | 298 | kfree(__unregister_chrdev_region(cd->major, 0, 256)); |
299 | return err; | 299 | return err; |
300 | } | 300 | } |
301 | 301 | ||
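As the kerneldoc notes, register_chrdev() claims minors 0..255 and, when called with @major == 0, returns the dynamically chosen major rather than 0. A hedged sketch of the old-style usage (mydev_fops and "mydev" are illustrative names, not from this commit):

    #include <linux/module.h>
    #include <linux/init.h>
    #include <linux/fs.h>

    static const struct file_operations mydev_fops = {
            .owner = THIS_MODULE,
    };

    static int mydev_major;

    static int __init mydev_init(void)
    {
            int ret;

            /* 0 = pick a major for me; minors 0..255 are registered behind the scenes */
            ret = register_chrdev(0, "mydev", &mydev_fops);
            if (ret < 0)
                    return ret;
            mydev_major = ret;   /* with a non-zero @major, ret would simply be 0 */
            return 0;
    }

    static void __exit mydev_exit(void)
    {
            unregister_chrdev(mydev_major, "mydev");
    }

    module_init(mydev_init);
    module_exit(mydev_exit);
    MODULE_LICENSE("GPL");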
302 | /** | 302 | /** |
303 | * unregister_chrdev_region() - return a range of device numbers | 303 | * unregister_chrdev_region() - return a range of device numbers |
304 | * @from: the first in the range of numbers to unregister | 304 | * @from: the first in the range of numbers to unregister |
305 | * @count: the number of device numbers to unregister | 305 | * @count: the number of device numbers to unregister |
306 | * | 306 | * |
307 | * This function will unregister a range of @count device numbers, | 307 | * This function will unregister a range of @count device numbers, |
308 | * starting with @from. The caller should normally be the one who | 308 | * starting with @from. The caller should normally be the one who |
309 | * allocated those numbers in the first place... | 309 | * allocated those numbers in the first place... |
310 | */ | 310 | */ |
311 | void unregister_chrdev_region(dev_t from, unsigned count) | 311 | void unregister_chrdev_region(dev_t from, unsigned count) |
312 | { | 312 | { |
313 | dev_t to = from + count; | 313 | dev_t to = from + count; |
314 | dev_t n, next; | 314 | dev_t n, next; |
315 | 315 | ||
316 | for (n = from; n < to; n = next) { | 316 | for (n = from; n < to; n = next) { |
317 | next = MKDEV(MAJOR(n)+1, 0); | 317 | next = MKDEV(MAJOR(n)+1, 0); |
318 | if (next > to) | 318 | if (next > to) |
319 | next = to; | 319 | next = to; |
320 | kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n)); | 320 | kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n)); |
321 | } | 321 | } |
322 | } | 322 | } |
323 | 323 | ||
324 | void unregister_chrdev(unsigned int major, const char *name) | 324 | void unregister_chrdev(unsigned int major, const char *name) |
325 | { | 325 | { |
326 | struct char_device_struct *cd; | 326 | struct char_device_struct *cd; |
327 | cd = __unregister_chrdev_region(major, 0, 256); | 327 | cd = __unregister_chrdev_region(major, 0, 256); |
328 | if (cd && cd->cdev) | 328 | if (cd && cd->cdev) |
329 | cdev_del(cd->cdev); | 329 | cdev_del(cd->cdev); |
330 | kfree(cd); | 330 | kfree(cd); |
331 | } | 331 | } |
332 | 332 | ||
333 | static DEFINE_SPINLOCK(cdev_lock); | 333 | static DEFINE_SPINLOCK(cdev_lock); |
334 | 334 | ||
335 | static struct kobject *cdev_get(struct cdev *p) | 335 | static struct kobject *cdev_get(struct cdev *p) |
336 | { | 336 | { |
337 | struct module *owner = p->owner; | 337 | struct module *owner = p->owner; |
338 | struct kobject *kobj; | 338 | struct kobject *kobj; |
339 | 339 | ||
340 | if (owner && !try_module_get(owner)) | 340 | if (owner && !try_module_get(owner)) |
341 | return NULL; | 341 | return NULL; |
342 | kobj = kobject_get(&p->kobj); | 342 | kobj = kobject_get(&p->kobj); |
343 | if (!kobj) | 343 | if (!kobj) |
344 | module_put(owner); | 344 | module_put(owner); |
345 | return kobj; | 345 | return kobj; |
346 | } | 346 | } |
347 | 347 | ||
348 | void cdev_put(struct cdev *p) | 348 | void cdev_put(struct cdev *p) |
349 | { | 349 | { |
350 | if (p) { | 350 | if (p) { |
351 | struct module *owner = p->owner; | 351 | struct module *owner = p->owner; |
352 | kobject_put(&p->kobj); | 352 | kobject_put(&p->kobj); |
353 | module_put(owner); | 353 | module_put(owner); |
354 | } | 354 | } |
355 | } | 355 | } |
356 | 356 | ||
357 | /* | 357 | /* |
358 | * Called every time a character special file is opened | 358 | * Called every time a character special file is opened |
359 | */ | 359 | */ |
360 | int chrdev_open(struct inode * inode, struct file * filp) | 360 | static int chrdev_open(struct inode *inode, struct file *filp) |
361 | { | 361 | { |
362 | struct cdev *p; | 362 | struct cdev *p; |
363 | struct cdev *new = NULL; | 363 | struct cdev *new = NULL; |
364 | int ret = 0; | 364 | int ret = 0; |
365 | 365 | ||
366 | spin_lock(&cdev_lock); | 366 | spin_lock(&cdev_lock); |
367 | p = inode->i_cdev; | 367 | p = inode->i_cdev; |
368 | if (!p) { | 368 | if (!p) { |
369 | struct kobject *kobj; | 369 | struct kobject *kobj; |
370 | int idx; | 370 | int idx; |
371 | spin_unlock(&cdev_lock); | 371 | spin_unlock(&cdev_lock); |
372 | kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx); | 372 | kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx); |
373 | if (!kobj) | 373 | if (!kobj) |
374 | return -ENXIO; | 374 | return -ENXIO; |
375 | new = container_of(kobj, struct cdev, kobj); | 375 | new = container_of(kobj, struct cdev, kobj); |
376 | spin_lock(&cdev_lock); | 376 | spin_lock(&cdev_lock); |
377 | p = inode->i_cdev; | 377 | p = inode->i_cdev; |
378 | if (!p) { | 378 | if (!p) { |
379 | inode->i_cdev = p = new; | 379 | inode->i_cdev = p = new; |
380 | inode->i_cindex = idx; | 380 | inode->i_cindex = idx; |
381 | list_add(&inode->i_devices, &p->list); | 381 | list_add(&inode->i_devices, &p->list); |
382 | new = NULL; | 382 | new = NULL; |
383 | } else if (!cdev_get(p)) | 383 | } else if (!cdev_get(p)) |
384 | ret = -ENXIO; | 384 | ret = -ENXIO; |
385 | } else if (!cdev_get(p)) | 385 | } else if (!cdev_get(p)) |
386 | ret = -ENXIO; | 386 | ret = -ENXIO; |
387 | spin_unlock(&cdev_lock); | 387 | spin_unlock(&cdev_lock); |
388 | cdev_put(new); | 388 | cdev_put(new); |
389 | if (ret) | 389 | if (ret) |
390 | return ret; | 390 | return ret; |
391 | filp->f_op = fops_get(p->ops); | 391 | filp->f_op = fops_get(p->ops); |
392 | if (!filp->f_op) { | 392 | if (!filp->f_op) { |
393 | cdev_put(p); | 393 | cdev_put(p); |
394 | return -ENXIO; | 394 | return -ENXIO; |
395 | } | 395 | } |
396 | if (filp->f_op->open) { | 396 | if (filp->f_op->open) { |
397 | lock_kernel(); | 397 | lock_kernel(); |
398 | ret = filp->f_op->open(inode,filp); | 398 | ret = filp->f_op->open(inode,filp); |
399 | unlock_kernel(); | 399 | unlock_kernel(); |
400 | } | 400 | } |
401 | if (ret) | 401 | if (ret) |
402 | cdev_put(p); | 402 | cdev_put(p); |
403 | return ret; | 403 | return ret; |
404 | } | 404 | } |
405 | 405 | ||
406 | void cd_forget(struct inode *inode) | 406 | void cd_forget(struct inode *inode) |
407 | { | 407 | { |
408 | spin_lock(&cdev_lock); | 408 | spin_lock(&cdev_lock); |
409 | list_del_init(&inode->i_devices); | 409 | list_del_init(&inode->i_devices); |
410 | inode->i_cdev = NULL; | 410 | inode->i_cdev = NULL; |
411 | spin_unlock(&cdev_lock); | 411 | spin_unlock(&cdev_lock); |
412 | } | 412 | } |
413 | 413 | ||
414 | static void cdev_purge(struct cdev *cdev) | 414 | static void cdev_purge(struct cdev *cdev) |
415 | { | 415 | { |
416 | spin_lock(&cdev_lock); | 416 | spin_lock(&cdev_lock); |
417 | while (!list_empty(&cdev->list)) { | 417 | while (!list_empty(&cdev->list)) { |
418 | struct inode *inode; | 418 | struct inode *inode; |
419 | inode = container_of(cdev->list.next, struct inode, i_devices); | 419 | inode = container_of(cdev->list.next, struct inode, i_devices); |
420 | list_del_init(&inode->i_devices); | 420 | list_del_init(&inode->i_devices); |
421 | inode->i_cdev = NULL; | 421 | inode->i_cdev = NULL; |
422 | } | 422 | } |
423 | spin_unlock(&cdev_lock); | 423 | spin_unlock(&cdev_lock); |
424 | } | 424 | } |
425 | 425 | ||
426 | /* | 426 | /* |
427 | * Dummy default file-operations: the only thing this does | 427 | * Dummy default file-operations: the only thing this does |
428 | * is contain the open that then fills in the correct operations | 428 | * is contain the open that then fills in the correct operations |
429 | * depending on the special file... | 429 | * depending on the special file... |
430 | */ | 430 | */ |
431 | const struct file_operations def_chr_fops = { | 431 | const struct file_operations def_chr_fops = { |
432 | .open = chrdev_open, | 432 | .open = chrdev_open, |
433 | }; | 433 | }; |
434 | 434 | ||
435 | static struct kobject *exact_match(dev_t dev, int *part, void *data) | 435 | static struct kobject *exact_match(dev_t dev, int *part, void *data) |
436 | { | 436 | { |
437 | struct cdev *p = data; | 437 | struct cdev *p = data; |
438 | return &p->kobj; | 438 | return &p->kobj; |
439 | } | 439 | } |
440 | 440 | ||
441 | static int exact_lock(dev_t dev, void *data) | 441 | static int exact_lock(dev_t dev, void *data) |
442 | { | 442 | { |
443 | struct cdev *p = data; | 443 | struct cdev *p = data; |
444 | return cdev_get(p) ? 0 : -1; | 444 | return cdev_get(p) ? 0 : -1; |
445 | } | 445 | } |
446 | 446 | ||
447 | /** | 447 | /** |
448 | * cdev_add() - add a char device to the system | 448 | * cdev_add() - add a char device to the system |
449 | * @p: the cdev structure for the device | 449 | * @p: the cdev structure for the device |
450 | * @dev: the first device number for which this device is responsible | 450 | * @dev: the first device number for which this device is responsible |
451 | * @count: the number of consecutive minor numbers corresponding to this | 451 | * @count: the number of consecutive minor numbers corresponding to this |
452 | * device | 452 | * device |
453 | * | 453 | * |
454 | * cdev_add() adds the device represented by @p to the system, making it | 454 | * cdev_add() adds the device represented by @p to the system, making it |
455 | * live immediately. A negative error code is returned on failure. | 455 | * live immediately. A negative error code is returned on failure. |
456 | */ | 456 | */ |
457 | int cdev_add(struct cdev *p, dev_t dev, unsigned count) | 457 | int cdev_add(struct cdev *p, dev_t dev, unsigned count) |
458 | { | 458 | { |
459 | p->dev = dev; | 459 | p->dev = dev; |
460 | p->count = count; | 460 | p->count = count; |
461 | return kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p); | 461 | return kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p); |
462 | } | 462 | } |
463 | 463 | ||
464 | static void cdev_unmap(dev_t dev, unsigned count) | 464 | static void cdev_unmap(dev_t dev, unsigned count) |
465 | { | 465 | { |
466 | kobj_unmap(cdev_map, dev, count); | 466 | kobj_unmap(cdev_map, dev, count); |
467 | } | 467 | } |
468 | 468 | ||
469 | /** | 469 | /** |
470 | * cdev_del() - remove a cdev from the system | 470 | * cdev_del() - remove a cdev from the system |
471 | * @p: the cdev structure to be removed | 471 | * @p: the cdev structure to be removed |
472 | * | 472 | * |
473 | * cdev_del() removes @p from the system, possibly freeing the structure | 473 | * cdev_del() removes @p from the system, possibly freeing the structure |
474 | * itself. | 474 | * itself. |
475 | */ | 475 | */ |
476 | void cdev_del(struct cdev *p) | 476 | void cdev_del(struct cdev *p) |
477 | { | 477 | { |
478 | cdev_unmap(p->dev, p->count); | 478 | cdev_unmap(p->dev, p->count); |
479 | kobject_put(&p->kobj); | 479 | kobject_put(&p->kobj); |
480 | } | 480 | } |
481 | 481 | ||
482 | 482 | ||
483 | static void cdev_default_release(struct kobject *kobj) | 483 | static void cdev_default_release(struct kobject *kobj) |
484 | { | 484 | { |
485 | struct cdev *p = container_of(kobj, struct cdev, kobj); | 485 | struct cdev *p = container_of(kobj, struct cdev, kobj); |
486 | cdev_purge(p); | 486 | cdev_purge(p); |
487 | } | 487 | } |
488 | 488 | ||
489 | static void cdev_dynamic_release(struct kobject *kobj) | 489 | static void cdev_dynamic_release(struct kobject *kobj) |
490 | { | 490 | { |
491 | struct cdev *p = container_of(kobj, struct cdev, kobj); | 491 | struct cdev *p = container_of(kobj, struct cdev, kobj); |
492 | cdev_purge(p); | 492 | cdev_purge(p); |
493 | kfree(p); | 493 | kfree(p); |
494 | } | 494 | } |
495 | 495 | ||
496 | static struct kobj_type ktype_cdev_default = { | 496 | static struct kobj_type ktype_cdev_default = { |
497 | .release = cdev_default_release, | 497 | .release = cdev_default_release, |
498 | }; | 498 | }; |
499 | 499 | ||
500 | static struct kobj_type ktype_cdev_dynamic = { | 500 | static struct kobj_type ktype_cdev_dynamic = { |
501 | .release = cdev_dynamic_release, | 501 | .release = cdev_dynamic_release, |
502 | }; | 502 | }; |
503 | 503 | ||
504 | /** | 504 | /** |
505 | * cdev_alloc() - allocate a cdev structure | 505 | * cdev_alloc() - allocate a cdev structure |
506 | * | 506 | * |
507 | * Allocates and returns a cdev structure, or NULL on failure. | 507 | * Allocates and returns a cdev structure, or NULL on failure. |
508 | */ | 508 | */ |
509 | struct cdev *cdev_alloc(void) | 509 | struct cdev *cdev_alloc(void) |
510 | { | 510 | { |
511 | struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL); | 511 | struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL); |
512 | if (p) { | 512 | if (p) { |
513 | INIT_LIST_HEAD(&p->list); | 513 | INIT_LIST_HEAD(&p->list); |
514 | kobject_init(&p->kobj, &ktype_cdev_dynamic); | 514 | kobject_init(&p->kobj, &ktype_cdev_dynamic); |
515 | } | 515 | } |
516 | return p; | 516 | return p; |
517 | } | 517 | } |
518 | 518 | ||
519 | /** | 519 | /** |
520 | * cdev_init() - initialize a cdev structure | 520 | * cdev_init() - initialize a cdev structure |
521 | * @cdev: the structure to initialize | 521 | * @cdev: the structure to initialize |
522 | * @fops: the file_operations for this device | 522 | * @fops: the file_operations for this device |
523 | * | 523 | * |
524 | * Initializes @cdev, remembering @fops, making it ready to add to the | 524 | * Initializes @cdev, remembering @fops, making it ready to add to the |
525 | * system with cdev_add(). | 525 | * system with cdev_add(). |
526 | */ | 526 | */ |
527 | void cdev_init(struct cdev *cdev, const struct file_operations *fops) | 527 | void cdev_init(struct cdev *cdev, const struct file_operations *fops) |
528 | { | 528 | { |
529 | memset(cdev, 0, sizeof *cdev); | 529 | memset(cdev, 0, sizeof *cdev); |
530 | INIT_LIST_HEAD(&cdev->list); | 530 | INIT_LIST_HEAD(&cdev->list); |
531 | kobject_init(&cdev->kobj, &ktype_cdev_default); | 531 | kobject_init(&cdev->kobj, &ktype_cdev_default); |
532 | cdev->ops = fops; | 532 | cdev->ops = fops; |
533 | } | 533 | } |
534 | 534 | ||
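cdev_init() is what drivers use when the cdev is embedded in a larger per-device structure (cdev_alloc() covers the standalone case). A sketch of the usual embed-and-add pattern, with all names (struct mydev, mydev_fops, mydev_setup_cdev) being illustrative rather than anything in this commit:

    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <linux/fs.h>
    #include <linux/cdev.h>

    struct mydev {
            struct cdev cdev;
            /* ... per-device state ... */
    };

    static int mydev_open(struct inode *inode, struct file *filp)
    {
            /* recover the enclosing structure from the cdev the VFS looked up */
            filp->private_data = container_of(inode->i_cdev, struct mydev, cdev);
            return 0;
    }

    static const struct file_operations mydev_fops = {
            .owner = THIS_MODULE,
            .open  = mydev_open,
    };

    static int mydev_setup_cdev(struct mydev *dev, dev_t devno)
    {
            cdev_init(&dev->cdev, &mydev_fops);
            dev->cdev.owner = THIS_MODULE;
            /* the device becomes live as soon as cdev_add() succeeds */
            return cdev_add(&dev->cdev, devno, 1);
    }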
535 | static struct kobject *base_probe(dev_t dev, int *part, void *data) | 535 | static struct kobject *base_probe(dev_t dev, int *part, void *data) |
536 | { | 536 | { |
537 | if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0) | 537 | if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0) |
538 | /* Make old-style 2.4 aliases work */ | 538 | /* Make old-style 2.4 aliases work */ |
539 | request_module("char-major-%d", MAJOR(dev)); | 539 | request_module("char-major-%d", MAJOR(dev)); |
540 | return NULL; | 540 | return NULL; |
541 | } | 541 | } |
542 | 542 | ||
543 | void __init chrdev_init(void) | 543 | void __init chrdev_init(void) |
544 | { | 544 | { |
545 | cdev_map = kobj_map_init(base_probe, &chrdevs_lock); | 545 | cdev_map = kobj_map_init(base_probe, &chrdevs_lock); |
546 | bdi_init(&directly_mappable_cdev_bdi); | 546 | bdi_init(&directly_mappable_cdev_bdi); |
547 | } | 547 | } |
548 | 548 | ||
549 | 549 | ||
550 | /* Let modules do char dev stuff */ | 550 | /* Let modules do char dev stuff */ |
551 | EXPORT_SYMBOL(register_chrdev_region); | 551 | EXPORT_SYMBOL(register_chrdev_region); |
552 | EXPORT_SYMBOL(unregister_chrdev_region); | 552 | EXPORT_SYMBOL(unregister_chrdev_region); |
553 | EXPORT_SYMBOL(alloc_chrdev_region); | 553 | EXPORT_SYMBOL(alloc_chrdev_region); |
554 | EXPORT_SYMBOL(cdev_init); | 554 | EXPORT_SYMBOL(cdev_init); |
555 | EXPORT_SYMBOL(cdev_alloc); | 555 | EXPORT_SYMBOL(cdev_alloc); |
556 | EXPORT_SYMBOL(cdev_del); | 556 | EXPORT_SYMBOL(cdev_del); |
557 | EXPORT_SYMBOL(cdev_add); | 557 | EXPORT_SYMBOL(cdev_add); |
558 | EXPORT_SYMBOL(register_chrdev); | 558 | EXPORT_SYMBOL(register_chrdev); |
559 | EXPORT_SYMBOL(unregister_chrdev); | 559 | EXPORT_SYMBOL(unregister_chrdev); |
560 | EXPORT_SYMBOL(directly_mappable_cdev_bdi); | 560 | EXPORT_SYMBOL(directly_mappable_cdev_bdi); |
561 | 561 |
include/linux/fs.h
1 | #ifndef _LINUX_FS_H | 1 | #ifndef _LINUX_FS_H |
2 | #define _LINUX_FS_H | 2 | #define _LINUX_FS_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * This file has definitions for some important file table | 5 | * This file has definitions for some important file table |
6 | * structures etc. | 6 | * structures etc. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/limits.h> | 9 | #include <linux/limits.h> |
10 | #include <linux/ioctl.h> | 10 | #include <linux/ioctl.h> |
11 | 11 | ||
12 | /* | 12 | /* |
13 | * It's silly to have NR_OPEN bigger than NR_FILE, but you can change | 13 | * It's silly to have NR_OPEN bigger than NR_FILE, but you can change |
14 | * the file limit at runtime and only root can increase the per-process | 14 | * the file limit at runtime and only root can increase the per-process |
15 | * nr_file rlimit, so it's safe to set up a ridiculously high absolute | 15 | * nr_file rlimit, so it's safe to set up a ridiculously high absolute |
16 | * upper limit on files-per-process. | 16 | * upper limit on files-per-process. |
17 | * | 17 | * |
18 | * Some programs (notably those using select()) may have to be | 18 | * Some programs (notably those using select()) may have to be |
19 | * recompiled to take full advantage of the new limits.. | 19 | * recompiled to take full advantage of the new limits.. |
20 | */ | 20 | */ |
21 | 21 | ||
22 | /* Fixed constants first: */ | 22 | /* Fixed constants first: */ |
23 | #undef NR_OPEN | 23 | #undef NR_OPEN |
24 | extern int sysctl_nr_open; | 24 | extern int sysctl_nr_open; |
25 | #define INR_OPEN 1024 /* Initial setting for nfile rlimits */ | 25 | #define INR_OPEN 1024 /* Initial setting for nfile rlimits */ |
26 | 26 | ||
27 | #define BLOCK_SIZE_BITS 10 | 27 | #define BLOCK_SIZE_BITS 10 |
28 | #define BLOCK_SIZE (1<<BLOCK_SIZE_BITS) | 28 | #define BLOCK_SIZE (1<<BLOCK_SIZE_BITS) |
29 | 29 | ||
30 | #define SEEK_SET 0 /* seek relative to beginning of file */ | 30 | #define SEEK_SET 0 /* seek relative to beginning of file */ |
31 | #define SEEK_CUR 1 /* seek relative to current file position */ | 31 | #define SEEK_CUR 1 /* seek relative to current file position */ |
32 | #define SEEK_END 2 /* seek relative to end of file */ | 32 | #define SEEK_END 2 /* seek relative to end of file */ |
33 | #define SEEK_MAX SEEK_END | 33 | #define SEEK_MAX SEEK_END |
34 | 34 | ||
35 | /* And dynamically-tunable limits and defaults: */ | 35 | /* And dynamically-tunable limits and defaults: */ |
36 | struct files_stat_struct { | 36 | struct files_stat_struct { |
37 | int nr_files; /* read only */ | 37 | int nr_files; /* read only */ |
38 | int nr_free_files; /* read only */ | 38 | int nr_free_files; /* read only */ |
39 | int max_files; /* tunable */ | 39 | int max_files; /* tunable */ |
40 | }; | 40 | }; |
41 | extern struct files_stat_struct files_stat; | 41 | extern struct files_stat_struct files_stat; |
42 | extern int get_max_files(void); | 42 | extern int get_max_files(void); |
43 | 43 | ||
44 | struct inodes_stat_t { | 44 | struct inodes_stat_t { |
45 | int nr_inodes; | 45 | int nr_inodes; |
46 | int nr_unused; | 46 | int nr_unused; |
47 | int dummy[5]; /* padding for sysctl ABI compatibility */ | 47 | int dummy[5]; /* padding for sysctl ABI compatibility */ |
48 | }; | 48 | }; |
49 | extern struct inodes_stat_t inodes_stat; | 49 | extern struct inodes_stat_t inodes_stat; |
50 | 50 | ||
51 | extern int leases_enable, lease_break_time; | 51 | extern int leases_enable, lease_break_time; |
52 | 52 | ||
53 | #ifdef CONFIG_DNOTIFY | 53 | #ifdef CONFIG_DNOTIFY |
54 | extern int dir_notify_enable; | 54 | extern int dir_notify_enable; |
55 | #endif | 55 | #endif |
56 | 56 | ||
57 | #define NR_FILE 8192 /* this can well be larger on a larger system */ | 57 | #define NR_FILE 8192 /* this can well be larger on a larger system */ |
58 | 58 | ||
59 | #define MAY_EXEC 1 | 59 | #define MAY_EXEC 1 |
60 | #define MAY_WRITE 2 | 60 | #define MAY_WRITE 2 |
61 | #define MAY_READ 4 | 61 | #define MAY_READ 4 |
62 | #define MAY_APPEND 8 | 62 | #define MAY_APPEND 8 |
63 | 63 | ||
64 | #define FMODE_READ 1 | 64 | #define FMODE_READ 1 |
65 | #define FMODE_WRITE 2 | 65 | #define FMODE_WRITE 2 |
66 | 66 | ||
67 | /* Internal kernel extensions */ | 67 | /* Internal kernel extensions */ |
68 | #define FMODE_LSEEK 4 | 68 | #define FMODE_LSEEK 4 |
69 | #define FMODE_PREAD 8 | 69 | #define FMODE_PREAD 8 |
70 | #define FMODE_PWRITE FMODE_PREAD /* These go hand in hand */ | 70 | #define FMODE_PWRITE FMODE_PREAD /* These go hand in hand */ |
71 | 71 | ||
72 | /* File is being opened for execution. Primary users of this flag are | 72 | /* File is being opened for execution. Primary users of this flag are |
73 | distributed filesystems that can use it to achieve correct ETXTBUSY | 73 | distributed filesystems that can use it to achieve correct ETXTBUSY |
74 | behavior for cross-node execution/opening_for_writing of files */ | 74 | behavior for cross-node execution/opening_for_writing of files */ |
75 | #define FMODE_EXEC 16 | 75 | #define FMODE_EXEC 16 |
76 | 76 | ||
77 | #define RW_MASK 1 | 77 | #define RW_MASK 1 |
78 | #define RWA_MASK 2 | 78 | #define RWA_MASK 2 |
79 | #define READ 0 | 79 | #define READ 0 |
80 | #define WRITE 1 | 80 | #define WRITE 1 |
81 | #define READA 2 /* read-ahead - don't block if no resources */ | 81 | #define READA 2 /* read-ahead - don't block if no resources */ |
82 | #define SWRITE 3 /* for ll_rw_block() - wait for buffer lock */ | 82 | #define SWRITE 3 /* for ll_rw_block() - wait for buffer lock */ |
83 | #define READ_SYNC (READ | (1 << BIO_RW_SYNC)) | 83 | #define READ_SYNC (READ | (1 << BIO_RW_SYNC)) |
84 | #define READ_META (READ | (1 << BIO_RW_META)) | 84 | #define READ_META (READ | (1 << BIO_RW_META)) |
85 | #define WRITE_SYNC (WRITE | (1 << BIO_RW_SYNC)) | 85 | #define WRITE_SYNC (WRITE | (1 << BIO_RW_SYNC)) |
86 | #define WRITE_BARRIER ((1 << BIO_RW) | (1 << BIO_RW_BARRIER)) | 86 | #define WRITE_BARRIER ((1 << BIO_RW) | (1 << BIO_RW_BARRIER)) |
87 | 87 | ||
88 | #define SEL_IN 1 | 88 | #define SEL_IN 1 |
89 | #define SEL_OUT 2 | 89 | #define SEL_OUT 2 |
90 | #define SEL_EX 4 | 90 | #define SEL_EX 4 |
91 | 91 | ||
92 | /* public flags for file_system_type */ | 92 | /* public flags for file_system_type */ |
93 | #define FS_REQUIRES_DEV 1 | 93 | #define FS_REQUIRES_DEV 1 |
94 | #define FS_BINARY_MOUNTDATA 2 | 94 | #define FS_BINARY_MOUNTDATA 2 |
95 | #define FS_HAS_SUBTYPE 4 | 95 | #define FS_HAS_SUBTYPE 4 |
96 | #define FS_REVAL_DOT 16384 /* Check the paths ".", ".." for staleness */ | 96 | #define FS_REVAL_DOT 16384 /* Check the paths ".", ".." for staleness */ |
97 | #define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() | 97 | #define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() |
98 | * during rename() internally. | 98 | * during rename() internally. |
99 | */ | 99 | */ |
100 | 100 | ||
101 | /* | 101 | /* |
102 | * These are the fs-independent mount-flags: up to 32 flags are supported | 102 | * These are the fs-independent mount-flags: up to 32 flags are supported |
103 | */ | 103 | */ |
104 | #define MS_RDONLY 1 /* Mount read-only */ | 104 | #define MS_RDONLY 1 /* Mount read-only */ |
105 | #define MS_NOSUID 2 /* Ignore suid and sgid bits */ | 105 | #define MS_NOSUID 2 /* Ignore suid and sgid bits */ |
106 | #define MS_NODEV 4 /* Disallow access to device special files */ | 106 | #define MS_NODEV 4 /* Disallow access to device special files */ |
107 | #define MS_NOEXEC 8 /* Disallow program execution */ | 107 | #define MS_NOEXEC 8 /* Disallow program execution */ |
108 | #define MS_SYNCHRONOUS 16 /* Writes are synced at once */ | 108 | #define MS_SYNCHRONOUS 16 /* Writes are synced at once */ |
109 | #define MS_REMOUNT 32 /* Alter flags of a mounted FS */ | 109 | #define MS_REMOUNT 32 /* Alter flags of a mounted FS */ |
110 | #define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */ | 110 | #define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */ |
111 | #define MS_DIRSYNC 128 /* Directory modifications are synchronous */ | 111 | #define MS_DIRSYNC 128 /* Directory modifications are synchronous */ |
112 | #define MS_NOATIME 1024 /* Do not update access times. */ | 112 | #define MS_NOATIME 1024 /* Do not update access times. */ |
113 | #define MS_NODIRATIME 2048 /* Do not update directory access times */ | 113 | #define MS_NODIRATIME 2048 /* Do not update directory access times */ |
114 | #define MS_BIND 4096 | 114 | #define MS_BIND 4096 |
115 | #define MS_MOVE 8192 | 115 | #define MS_MOVE 8192 |
116 | #define MS_REC 16384 | 116 | #define MS_REC 16384 |
117 | #define MS_VERBOSE 32768 /* War is peace. Verbosity is silence. | 117 | #define MS_VERBOSE 32768 /* War is peace. Verbosity is silence. |
118 | MS_VERBOSE is deprecated. */ | 118 | MS_VERBOSE is deprecated. */ |
119 | #define MS_SILENT 32768 | 119 | #define MS_SILENT 32768 |
120 | #define MS_POSIXACL (1<<16) /* VFS does not apply the umask */ | 120 | #define MS_POSIXACL (1<<16) /* VFS does not apply the umask */ |
121 | #define MS_UNBINDABLE (1<<17) /* change to unbindable */ | 121 | #define MS_UNBINDABLE (1<<17) /* change to unbindable */ |
122 | #define MS_PRIVATE (1<<18) /* change to private */ | 122 | #define MS_PRIVATE (1<<18) /* change to private */ |
123 | #define MS_SLAVE (1<<19) /* change to slave */ | 123 | #define MS_SLAVE (1<<19) /* change to slave */ |
124 | #define MS_SHARED (1<<20) /* change to shared */ | 124 | #define MS_SHARED (1<<20) /* change to shared */ |
125 | #define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. */ | 125 | #define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. */ |
126 | #define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ | 126 | #define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ |
127 | #define MS_I_VERSION (1<<23) /* Update inode I_version field */ | 127 | #define MS_I_VERSION (1<<23) /* Update inode I_version field */ |
128 | #define MS_ACTIVE (1<<30) | 128 | #define MS_ACTIVE (1<<30) |
129 | #define MS_NOUSER (1<<31) | 129 | #define MS_NOUSER (1<<31) |
130 | 130 | ||
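These MS_* values are the same flags userspace passes to mount(2). Purely as an illustrative userspace sketch (device and mount point are made up, not taken from this commit):

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* hypothetical device and mount point; the flag values match the
             * MS_* definitions above */
            if (mount("/dev/sdb1", "/mnt/data", "ext3",
                      MS_NOATIME | MS_NODEV | MS_NOSUID, NULL) != 0) {
                    perror("mount");
                    return 1;
            }
            return 0;
    }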
131 | /* | 131 | /* |
132 | * Superblock flags that can be altered by MS_REMOUNT | 132 | * Superblock flags that can be altered by MS_REMOUNT |
133 | */ | 133 | */ |
134 | #define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK) | 134 | #define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK) |
135 | 135 | ||
136 | /* | 136 | /* |
137 | * Old magic mount flag and mask | 137 | * Old magic mount flag and mask |
138 | */ | 138 | */ |
139 | #define MS_MGC_VAL 0xC0ED0000 | 139 | #define MS_MGC_VAL 0xC0ED0000 |
140 | #define MS_MGC_MSK 0xffff0000 | 140 | #define MS_MGC_MSK 0xffff0000 |
141 | 141 | ||
142 | /* Inode flags - they have nothing to do with superblock flags now */ | 142 | /* Inode flags - they have nothing to do with superblock flags now */ |
143 | 143 | ||
144 | #define S_SYNC 1 /* Writes are synced at once */ | 144 | #define S_SYNC 1 /* Writes are synced at once */ |
145 | #define S_NOATIME 2 /* Do not update access times */ | 145 | #define S_NOATIME 2 /* Do not update access times */ |
146 | #define S_APPEND 4 /* Append-only file */ | 146 | #define S_APPEND 4 /* Append-only file */ |
147 | #define S_IMMUTABLE 8 /* Immutable file */ | 147 | #define S_IMMUTABLE 8 /* Immutable file */ |
148 | #define S_DEAD 16 /* removed, but still open directory */ | 148 | #define S_DEAD 16 /* removed, but still open directory */ |
149 | #define S_NOQUOTA 32 /* Inode is not counted to quota */ | 149 | #define S_NOQUOTA 32 /* Inode is not counted to quota */ |
150 | #define S_DIRSYNC 64 /* Directory modifications are synchronous */ | 150 | #define S_DIRSYNC 64 /* Directory modifications are synchronous */ |
151 | #define S_NOCMTIME 128 /* Do not update file c/mtime */ | 151 | #define S_NOCMTIME 128 /* Do not update file c/mtime */ |
152 | #define S_SWAPFILE 256 /* Do not truncate: swapon got its bmaps */ | 152 | #define S_SWAPFILE 256 /* Do not truncate: swapon got its bmaps */ |
153 | #define S_PRIVATE 512 /* Inode is fs-internal */ | 153 | #define S_PRIVATE 512 /* Inode is fs-internal */ |
154 | 154 | ||
155 | /* | 155 | /* |
156 | * Note that nosuid etc flags are inode-specific: setting some file-system | 156 | * Note that nosuid etc flags are inode-specific: setting some file-system |
157 | * flags just means all the inodes inherit those flags by default. It might be | 157 | * flags just means all the inodes inherit those flags by default. It might be |
158 | * possible to override it selectively if you really wanted to with some | 158 | * possible to override it selectively if you really wanted to with some |
159 | * ioctl() that is not currently implemented. | 159 | * ioctl() that is not currently implemented. |
160 | * | 160 | * |
161 | * Exception: MS_RDONLY is always applied to the entire file system. | 161 | * Exception: MS_RDONLY is always applied to the entire file system. |
162 | * | 162 | * |
163 | * Unfortunately, it is possible to change a filesystem's flags while it is mounted | 163 | * Unfortunately, it is possible to change a filesystem's flags while it is mounted |
164 | * with files in use. This means that all of the inodes will not have their | 164 | * with files in use. This means that all of the inodes will not have their |
165 | * i_flags updated. Hence, i_flags no longer inherit the superblock mount | 165 | * i_flags updated. Hence, i_flags no longer inherit the superblock mount |
166 | * flags, so these have to be checked separately. -- rmk@arm.uk.linux.org | 166 | * flags, so these have to be checked separately. -- rmk@arm.uk.linux.org |
167 | */ | 167 | */ |
168 | #define __IS_FLG(inode,flg) ((inode)->i_sb->s_flags & (flg)) | 168 | #define __IS_FLG(inode,flg) ((inode)->i_sb->s_flags & (flg)) |
169 | 169 | ||
170 | #define IS_RDONLY(inode) ((inode)->i_sb->s_flags & MS_RDONLY) | 170 | #define IS_RDONLY(inode) ((inode)->i_sb->s_flags & MS_RDONLY) |
171 | #define IS_SYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS) || \ | 171 | #define IS_SYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS) || \ |
172 | ((inode)->i_flags & S_SYNC)) | 172 | ((inode)->i_flags & S_SYNC)) |
173 | #define IS_DIRSYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS|MS_DIRSYNC) || \ | 173 | #define IS_DIRSYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS|MS_DIRSYNC) || \ |
174 | ((inode)->i_flags & (S_SYNC|S_DIRSYNC))) | 174 | ((inode)->i_flags & (S_SYNC|S_DIRSYNC))) |
175 | #define IS_MANDLOCK(inode) __IS_FLG(inode, MS_MANDLOCK) | 175 | #define IS_MANDLOCK(inode) __IS_FLG(inode, MS_MANDLOCK) |
176 | #define IS_NOATIME(inode) __IS_FLG(inode, MS_RDONLY|MS_NOATIME) | 176 | #define IS_NOATIME(inode) __IS_FLG(inode, MS_RDONLY|MS_NOATIME) |
177 | #define IS_I_VERSION(inode) __IS_FLG(inode, MS_I_VERSION) | 177 | #define IS_I_VERSION(inode) __IS_FLG(inode, MS_I_VERSION) |
178 | 178 | ||
179 | #define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA) | 179 | #define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA) |
180 | #define IS_APPEND(inode) ((inode)->i_flags & S_APPEND) | 180 | #define IS_APPEND(inode) ((inode)->i_flags & S_APPEND) |
181 | #define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE) | 181 | #define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE) |
182 | #define IS_POSIXACL(inode) __IS_FLG(inode, MS_POSIXACL) | 182 | #define IS_POSIXACL(inode) __IS_FLG(inode, MS_POSIXACL) |
183 | 183 | ||
184 | #define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD) | 184 | #define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD) |
185 | #define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME) | 185 | #define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME) |
186 | #define IS_SWAPFILE(inode) ((inode)->i_flags & S_SWAPFILE) | 186 | #define IS_SWAPFILE(inode) ((inode)->i_flags & S_SWAPFILE) |
187 | #define IS_PRIVATE(inode) ((inode)->i_flags & S_PRIVATE) | 187 | #define IS_PRIVATE(inode) ((inode)->i_flags & S_PRIVATE) |
188 | 188 | ||
189 | /* the read-only stuff doesn't really belong here, but any other place is | 189 | /* the read-only stuff doesn't really belong here, but any other place is |
190 | probably as bad and I don't want to create yet another include file. */ | 190 | probably as bad and I don't want to create yet another include file. */ |
191 | 191 | ||
192 | #define BLKROSET _IO(0x12,93) /* set device read-only (0 = read-write) */ | 192 | #define BLKROSET _IO(0x12,93) /* set device read-only (0 = read-write) */ |
193 | #define BLKROGET _IO(0x12,94) /* get read-only status (0 = read_write) */ | 193 | #define BLKROGET _IO(0x12,94) /* get read-only status (0 = read_write) */ |
194 | #define BLKRRPART _IO(0x12,95) /* re-read partition table */ | 194 | #define BLKRRPART _IO(0x12,95) /* re-read partition table */ |
195 | #define BLKGETSIZE _IO(0x12,96) /* return device size /512 (long *arg) */ | 195 | #define BLKGETSIZE _IO(0x12,96) /* return device size /512 (long *arg) */ |
196 | #define BLKFLSBUF _IO(0x12,97) /* flush buffer cache */ | 196 | #define BLKFLSBUF _IO(0x12,97) /* flush buffer cache */ |
197 | #define BLKRASET _IO(0x12,98) /* set read ahead for block device */ | 197 | #define BLKRASET _IO(0x12,98) /* set read ahead for block device */ |
198 | #define BLKRAGET _IO(0x12,99) /* get current read ahead setting */ | 198 | #define BLKRAGET _IO(0x12,99) /* get current read ahead setting */ |
199 | #define BLKFRASET _IO(0x12,100)/* set filesystem (mm/filemap.c) read-ahead */ | 199 | #define BLKFRASET _IO(0x12,100)/* set filesystem (mm/filemap.c) read-ahead */ |
200 | #define BLKFRAGET _IO(0x12,101)/* get filesystem (mm/filemap.c) read-ahead */ | 200 | #define BLKFRAGET _IO(0x12,101)/* get filesystem (mm/filemap.c) read-ahead */ |
201 | #define BLKSECTSET _IO(0x12,102)/* set max sectors per request (ll_rw_blk.c) */ | 201 | #define BLKSECTSET _IO(0x12,102)/* set max sectors per request (ll_rw_blk.c) */ |
202 | #define BLKSECTGET _IO(0x12,103)/* get max sectors per request (ll_rw_blk.c) */ | 202 | #define BLKSECTGET _IO(0x12,103)/* get max sectors per request (ll_rw_blk.c) */ |
203 | #define BLKSSZGET _IO(0x12,104)/* get block device sector size */ | 203 | #define BLKSSZGET _IO(0x12,104)/* get block device sector size */ |
204 | #if 0 | 204 | #if 0 |
205 | #define BLKPG _IO(0x12,105)/* See blkpg.h */ | 205 | #define BLKPG _IO(0x12,105)/* See blkpg.h */ |
206 | 206 | ||
207 | /* Some people are morons. Do not use sizeof! */ | 207 | /* Some people are morons. Do not use sizeof! */ |
208 | 208 | ||
209 | #define BLKELVGET _IOR(0x12,106,size_t)/* elevator get */ | 209 | #define BLKELVGET _IOR(0x12,106,size_t)/* elevator get */ |
210 | #define BLKELVSET _IOW(0x12,107,size_t)/* elevator set */ | 210 | #define BLKELVSET _IOW(0x12,107,size_t)/* elevator set */ |
211 | /* This was here just to show that the number is taken - | 211 | /* This was here just to show that the number is taken - |
212 | probably all these _IO(0x12,*) ioctls should be moved to blkpg.h. */ | 212 | probably all these _IO(0x12,*) ioctls should be moved to blkpg.h. */ |
213 | #endif | 213 | #endif |
214 | /* A jump here: 108-111 have been used for various private purposes. */ | 214 | /* A jump here: 108-111 have been used for various private purposes. */ |
215 | #define BLKBSZGET _IOR(0x12,112,size_t) | 215 | #define BLKBSZGET _IOR(0x12,112,size_t) |
216 | #define BLKBSZSET _IOW(0x12,113,size_t) | 216 | #define BLKBSZSET _IOW(0x12,113,size_t) |
217 | #define BLKGETSIZE64 _IOR(0x12,114,size_t) /* return device size in bytes (u64 *arg) */ | 217 | #define BLKGETSIZE64 _IOR(0x12,114,size_t) /* return device size in bytes (u64 *arg) */ |
218 | #define BLKTRACESETUP _IOWR(0x12,115,struct blk_user_trace_setup) | 218 | #define BLKTRACESETUP _IOWR(0x12,115,struct blk_user_trace_setup) |
219 | #define BLKTRACESTART _IO(0x12,116) | 219 | #define BLKTRACESTART _IO(0x12,116) |
220 | #define BLKTRACESTOP _IO(0x12,117) | 220 | #define BLKTRACESTOP _IO(0x12,117) |
221 | #define BLKTRACETEARDOWN _IO(0x12,118) | 221 | #define BLKTRACETEARDOWN _IO(0x12,118) |
222 | 222 | ||
223 | #define BMAP_IOCTL 1 /* obsolete - kept for compatibility */ | 223 | #define BMAP_IOCTL 1 /* obsolete - kept for compatibility */ |
224 | #define FIBMAP _IO(0x00,1) /* bmap access */ | 224 | #define FIBMAP _IO(0x00,1) /* bmap access */ |
225 | #define FIGETBSZ _IO(0x00,2) /* get the block size used for bmap */ | 225 | #define FIGETBSZ _IO(0x00,2) /* get the block size used for bmap */ |
226 | 226 | ||
227 | #define FS_IOC_GETFLAGS _IOR('f', 1, long) | 227 | #define FS_IOC_GETFLAGS _IOR('f', 1, long) |
228 | #define FS_IOC_SETFLAGS _IOW('f', 2, long) | 228 | #define FS_IOC_SETFLAGS _IOW('f', 2, long) |
229 | #define FS_IOC_GETVERSION _IOR('v', 1, long) | 229 | #define FS_IOC_GETVERSION _IOR('v', 1, long) |
230 | #define FS_IOC_SETVERSION _IOW('v', 2, long) | 230 | #define FS_IOC_SETVERSION _IOW('v', 2, long) |
231 | #define FS_IOC32_GETFLAGS _IOR('f', 1, int) | 231 | #define FS_IOC32_GETFLAGS _IOR('f', 1, int) |
232 | #define FS_IOC32_SETFLAGS _IOW('f', 2, int) | 232 | #define FS_IOC32_SETFLAGS _IOW('f', 2, int) |
233 | #define FS_IOC32_GETVERSION _IOR('v', 1, int) | 233 | #define FS_IOC32_GETVERSION _IOR('v', 1, int) |
234 | #define FS_IOC32_SETVERSION _IOW('v', 2, int) | 234 | #define FS_IOC32_SETVERSION _IOW('v', 2, int) |
235 | 235 | ||
236 | /* | 236 | /* |
237 | * Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS) | 237 | * Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS) |
238 | */ | 238 | */ |
239 | #define FS_SECRM_FL 0x00000001 /* Secure deletion */ | 239 | #define FS_SECRM_FL 0x00000001 /* Secure deletion */ |
240 | #define FS_UNRM_FL 0x00000002 /* Undelete */ | 240 | #define FS_UNRM_FL 0x00000002 /* Undelete */ |
241 | #define FS_COMPR_FL 0x00000004 /* Compress file */ | 241 | #define FS_COMPR_FL 0x00000004 /* Compress file */ |
242 | #define FS_SYNC_FL 0x00000008 /* Synchronous updates */ | 242 | #define FS_SYNC_FL 0x00000008 /* Synchronous updates */ |
243 | #define FS_IMMUTABLE_FL 0x00000010 /* Immutable file */ | 243 | #define FS_IMMUTABLE_FL 0x00000010 /* Immutable file */ |
244 | #define FS_APPEND_FL 0x00000020 /* writes to file may only append */ | 244 | #define FS_APPEND_FL 0x00000020 /* writes to file may only append */ |
245 | #define FS_NODUMP_FL 0x00000040 /* do not dump file */ | 245 | #define FS_NODUMP_FL 0x00000040 /* do not dump file */ |
246 | #define FS_NOATIME_FL 0x00000080 /* do not update atime */ | 246 | #define FS_NOATIME_FL 0x00000080 /* do not update atime */ |
247 | /* Reserved for compression usage... */ | 247 | /* Reserved for compression usage... */ |
248 | #define FS_DIRTY_FL 0x00000100 | 248 | #define FS_DIRTY_FL 0x00000100 |
249 | #define FS_COMPRBLK_FL 0x00000200 /* One or more compressed clusters */ | 249 | #define FS_COMPRBLK_FL 0x00000200 /* One or more compressed clusters */ |
250 | #define FS_NOCOMP_FL 0x00000400 /* Don't compress */ | 250 | #define FS_NOCOMP_FL 0x00000400 /* Don't compress */ |
251 | #define FS_ECOMPR_FL 0x00000800 /* Compression error */ | 251 | #define FS_ECOMPR_FL 0x00000800 /* Compression error */ |
252 | /* End compression flags --- maybe not all used */ | 252 | /* End compression flags --- maybe not all used */ |
253 | #define FS_BTREE_FL 0x00001000 /* btree format dir */ | 253 | #define FS_BTREE_FL 0x00001000 /* btree format dir */ |
254 | #define FS_INDEX_FL 0x00001000 /* hash-indexed directory */ | 254 | #define FS_INDEX_FL 0x00001000 /* hash-indexed directory */ |
255 | #define FS_IMAGIC_FL 0x00002000 /* AFS directory */ | 255 | #define FS_IMAGIC_FL 0x00002000 /* AFS directory */ |
256 | #define FS_JOURNAL_DATA_FL 0x00004000 /* Reserved for ext3 */ | 256 | #define FS_JOURNAL_DATA_FL 0x00004000 /* Reserved for ext3 */ |
257 | #define FS_NOTAIL_FL 0x00008000 /* file tail should not be merged */ | 257 | #define FS_NOTAIL_FL 0x00008000 /* file tail should not be merged */ |
258 | #define FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */ | 258 | #define FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */ |
259 | #define FS_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/ | 259 | #define FS_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/ |
260 | #define FS_EXTENT_FL 0x00080000 /* Extents */ | 260 | #define FS_EXTENT_FL 0x00080000 /* Extents */ |
261 | #define FS_DIRECTIO_FL 0x00100000 /* Use direct i/o */ | 261 | #define FS_DIRECTIO_FL 0x00100000 /* Use direct i/o */ |
262 | #define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */ | 262 | #define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */ |
263 | 263 | ||
264 | #define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ | 264 | #define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ |
265 | #define FS_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */ | 265 | #define FS_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */ |
266 | 266 | ||
267 | 267 | ||
268 | #define SYNC_FILE_RANGE_WAIT_BEFORE 1 | 268 | #define SYNC_FILE_RANGE_WAIT_BEFORE 1 |
269 | #define SYNC_FILE_RANGE_WRITE 2 | 269 | #define SYNC_FILE_RANGE_WRITE 2 |
270 | #define SYNC_FILE_RANGE_WAIT_AFTER 4 | 270 | #define SYNC_FILE_RANGE_WAIT_AFTER 4 |
271 | 271 | ||
272 | #ifdef __KERNEL__ | 272 | #ifdef __KERNEL__ |
273 | 273 | ||
274 | #include <linux/linkage.h> | 274 | #include <linux/linkage.h> |
275 | #include <linux/wait.h> | 275 | #include <linux/wait.h> |
276 | #include <linux/types.h> | 276 | #include <linux/types.h> |
277 | #include <linux/kdev_t.h> | 277 | #include <linux/kdev_t.h> |
278 | #include <linux/dcache.h> | 278 | #include <linux/dcache.h> |
279 | #include <linux/namei.h> | 279 | #include <linux/namei.h> |
280 | #include <linux/stat.h> | 280 | #include <linux/stat.h> |
281 | #include <linux/cache.h> | 281 | #include <linux/cache.h> |
282 | #include <linux/kobject.h> | 282 | #include <linux/kobject.h> |
283 | #include <linux/list.h> | 283 | #include <linux/list.h> |
284 | #include <linux/radix-tree.h> | 284 | #include <linux/radix-tree.h> |
285 | #include <linux/prio_tree.h> | 285 | #include <linux/prio_tree.h> |
286 | #include <linux/init.h> | 286 | #include <linux/init.h> |
287 | #include <linux/pid.h> | 287 | #include <linux/pid.h> |
288 | #include <linux/mutex.h> | 288 | #include <linux/mutex.h> |
289 | #include <linux/capability.h> | 289 | #include <linux/capability.h> |
290 | 290 | ||
291 | #include <asm/atomic.h> | 291 | #include <asm/atomic.h> |
292 | #include <asm/semaphore.h> | 292 | #include <asm/semaphore.h> |
293 | #include <asm/byteorder.h> | 293 | #include <asm/byteorder.h> |
294 | 294 | ||
295 | struct export_operations; | 295 | struct export_operations; |
296 | struct hd_geometry; | 296 | struct hd_geometry; |
297 | struct iovec; | 297 | struct iovec; |
298 | struct nameidata; | 298 | struct nameidata; |
299 | struct kiocb; | 299 | struct kiocb; |
300 | struct pipe_inode_info; | 300 | struct pipe_inode_info; |
301 | struct poll_table_struct; | 301 | struct poll_table_struct; |
302 | struct kstatfs; | 302 | struct kstatfs; |
303 | struct vm_area_struct; | 303 | struct vm_area_struct; |
304 | struct vfsmount; | 304 | struct vfsmount; |
305 | 305 | ||
306 | extern void __init inode_init(void); | 306 | extern void __init inode_init(void); |
307 | extern void __init inode_init_early(void); | 307 | extern void __init inode_init_early(void); |
308 | extern void __init mnt_init(void); | 308 | extern void __init mnt_init(void); |
309 | extern void __init files_init(unsigned long); | 309 | extern void __init files_init(unsigned long); |
310 | 310 | ||
311 | struct buffer_head; | 311 | struct buffer_head; |
312 | typedef int (get_block_t)(struct inode *inode, sector_t iblock, | 312 | typedef int (get_block_t)(struct inode *inode, sector_t iblock, |
313 | struct buffer_head *bh_result, int create); | 313 | struct buffer_head *bh_result, int create); |
314 | typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, | 314 | typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, |
315 | ssize_t bytes, void *private); | 315 | ssize_t bytes, void *private); |
316 | 316 | ||
317 | /* | 317 | /* |
318 | * Attribute flags. These should be or-ed together to figure out what | 318 | * Attribute flags. These should be or-ed together to figure out what |
319 | * has been changed! | 319 | * has been changed! |
320 | */ | 320 | */ |
321 | #define ATTR_MODE 1 | 321 | #define ATTR_MODE 1 |
322 | #define ATTR_UID 2 | 322 | #define ATTR_UID 2 |
323 | #define ATTR_GID 4 | 323 | #define ATTR_GID 4 |
324 | #define ATTR_SIZE 8 | 324 | #define ATTR_SIZE 8 |
325 | #define ATTR_ATIME 16 | 325 | #define ATTR_ATIME 16 |
326 | #define ATTR_MTIME 32 | 326 | #define ATTR_MTIME 32 |
327 | #define ATTR_CTIME 64 | 327 | #define ATTR_CTIME 64 |
328 | #define ATTR_ATIME_SET 128 | 328 | #define ATTR_ATIME_SET 128 |
329 | #define ATTR_MTIME_SET 256 | 329 | #define ATTR_MTIME_SET 256 |
330 | #define ATTR_FORCE 512 /* Not a change, but force the change */ | 330 | #define ATTR_FORCE 512 /* Not a change, but force the change */ |
331 | #define ATTR_ATTR_FLAG 1024 | 331 | #define ATTR_ATTR_FLAG 1024 |
332 | #define ATTR_KILL_SUID 2048 | 332 | #define ATTR_KILL_SUID 2048 |
333 | #define ATTR_KILL_SGID 4096 | 333 | #define ATTR_KILL_SGID 4096 |
334 | #define ATTR_FILE 8192 | 334 | #define ATTR_FILE 8192 |
335 | #define ATTR_KILL_PRIV 16384 | 335 | #define ATTR_KILL_PRIV 16384 |
336 | #define ATTR_OPEN 32768 /* Truncating from open(O_TRUNC) */ | 336 | #define ATTR_OPEN 32768 /* Truncating from open(O_TRUNC) */ |
337 | 337 | ||
338 | /* | 338 | /* |
339 | * This is the Inode Attributes structure, used for notify_change(). It | 339 | * This is the Inode Attributes structure, used for notify_change(). It |
340 | * uses the above definitions as flags, to know which values have changed. | 340 | * uses the above definitions as flags, to know which values have changed. |
341 | * Also, in this manner, a Filesystem can look at only the values it cares | 341 | * Also, in this manner, a Filesystem can look at only the values it cares |
342 | * about. Basically, these are the attributes that the VFS layer can | 342 | * about. Basically, these are the attributes that the VFS layer can |
343 | * request to change from the FS layer. | 343 | * request to change from the FS layer. |
344 | * | 344 | * |
345 | * Derek Atkins <warlord@MIT.EDU> 94-10-20 | 345 | * Derek Atkins <warlord@MIT.EDU> 94-10-20 |
346 | */ | 346 | */ |
347 | struct iattr { | 347 | struct iattr { |
348 | unsigned int ia_valid; | 348 | unsigned int ia_valid; |
349 | umode_t ia_mode; | 349 | umode_t ia_mode; |
350 | uid_t ia_uid; | 350 | uid_t ia_uid; |
351 | gid_t ia_gid; | 351 | gid_t ia_gid; |
352 | loff_t ia_size; | 352 | loff_t ia_size; |
353 | struct timespec ia_atime; | 353 | struct timespec ia_atime; |
354 | struct timespec ia_mtime; | 354 | struct timespec ia_mtime; |
355 | struct timespec ia_ctime; | 355 | struct timespec ia_ctime; |
356 | 356 | ||
357 | /* | 357 | /* |
358 | * Not an attribute, but auxiliary info for filesystems wanting to | 358 | * Not an attribute, but auxiliary info for filesystems wanting to |
359 | * implement an ftruncate() like method. NOTE: filesystem should | 359 | * implement an ftruncate() like method. NOTE: filesystem should |
360 | * check for (ia_valid & ATTR_FILE), and not for (ia_file != NULL). | 360 | * check for (ia_valid & ATTR_FILE), and not for (ia_file != NULL). |
361 | */ | 361 | */ |
362 | struct file *ia_file; | 362 | struct file *ia_file; |
363 | }; | 363 | }; |
364 | 364 | ||
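A hedged sketch of how a caller fills struct iattr before handing it to notify_change(); "dentry" and "new_size" are assumed to come from the caller (which, in this era, would also hold the inode's i_mutex), and the function name is invented for the example:

    #include <linux/fs.h>
    #include <linux/time.h>

    /* shrink a file and refresh its timestamps in one notify_change() call */
    static int mydev_set_size(struct dentry *dentry, loff_t new_size)
    {
            struct iattr attr;

            attr.ia_valid = ATTR_SIZE | ATTR_CTIME | ATTR_MTIME;
            attr.ia_size  = new_size;
            attr.ia_mtime = attr.ia_ctime = CURRENT_TIME;

            return notify_change(dentry, &attr);
    }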
365 | /* | 365 | /* |
366 | * Includes for diskquotas. | 366 | * Includes for diskquotas. |
367 | */ | 367 | */ |
368 | #include <linux/quota.h> | 368 | #include <linux/quota.h> |
369 | 369 | ||
370 | /** | 370 | /** |
371 | * enum positive_aop_returns - aop return codes with specific semantics | 371 | * enum positive_aop_returns - aop return codes with specific semantics |
372 | * | 372 | * |
373 | * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has | 373 | * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has |
374 | * completed, that the page is still locked, and | 374 | * completed, that the page is still locked, and |
375 | * should be considered active. The VM uses this hint | 375 | * should be considered active. The VM uses this hint |
376 | * to return the page to the active list -- it won't | 376 | * to return the page to the active list -- it won't |
377 | * be a candidate for writeback again in the near | 377 | * be a candidate for writeback again in the near |
378 | * future. Other callers must be careful to unlock | 378 | * future. Other callers must be careful to unlock |
379 | * the page if they get this return. Returned by | 379 | * the page if they get this return. Returned by |
380 | * writepage(); | 380 | * writepage(); |
381 | * | 381 | * |
382 | * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has | 382 | * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has |
383 | * unlocked it and the page might have been truncated. | 383 | * unlocked it and the page might have been truncated. |
384 | * The caller should back up to acquiring a new page and | 384 | * The caller should back up to acquiring a new page and |
385 | * trying again. The aop will be taking reasonable | 385 | * trying again. The aop will be taking reasonable |
386 | * precautions not to livelock. If the caller held a page | 386 | * precautions not to livelock. If the caller held a page |
387 | * reference, it should drop it before retrying. Returned | 387 | * reference, it should drop it before retrying. Returned |
388 | * by readpage(). | 388 | * by readpage(). |
389 | * | 389 | * |
390 | * address_space_operation functions return these large constants to indicate | 390 | * address_space_operation functions return these large constants to indicate |
391 | * special semantics to the caller. These are much larger than the bytes in a | 391 | * special semantics to the caller. These are much larger than the bytes in a |
392 | * page to allow for functions that return the number of bytes operated on in a | 392 | * page to allow for functions that return the number of bytes operated on in a |
393 | * given page. | 393 | * given page. |
394 | */ | 394 | */ |
395 | 395 | ||
396 | enum positive_aop_returns { | 396 | enum positive_aop_returns { |
397 | AOP_WRITEPAGE_ACTIVATE = 0x80000, | 397 | AOP_WRITEPAGE_ACTIVATE = 0x80000, |
398 | AOP_TRUNCATED_PAGE = 0x80001, | 398 | AOP_TRUNCATED_PAGE = 0x80001, |
399 | }; | 399 | }; |
400 | 400 | ||
401 | #define AOP_FLAG_UNINTERRUPTIBLE 0x0001 /* will not do a short write */ | 401 | #define AOP_FLAG_UNINTERRUPTIBLE 0x0001 /* will not do a short write */ |
402 | #define AOP_FLAG_CONT_EXPAND 0x0002 /* called from cont_expand */ | 402 | #define AOP_FLAG_CONT_EXPAND 0x0002 /* called from cont_expand */ |
403 | 403 | ||
404 | /* | 404 | /* |
405 | * oh the beauties of C type declarations. | 405 | * oh the beauties of C type declarations. |
406 | */ | 406 | */ |
407 | struct page; | 407 | struct page; |
408 | struct address_space; | 408 | struct address_space; |
409 | struct writeback_control; | 409 | struct writeback_control; |
410 | 410 | ||
411 | struct iov_iter { | 411 | struct iov_iter { |
412 | const struct iovec *iov; | 412 | const struct iovec *iov; |
413 | unsigned long nr_segs; | 413 | unsigned long nr_segs; |
414 | size_t iov_offset; | 414 | size_t iov_offset; |
415 | size_t count; | 415 | size_t count; |
416 | }; | 416 | }; |
417 | 417 | ||
418 | size_t iov_iter_copy_from_user_atomic(struct page *page, | 418 | size_t iov_iter_copy_from_user_atomic(struct page *page, |
419 | struct iov_iter *i, unsigned long offset, size_t bytes); | 419 | struct iov_iter *i, unsigned long offset, size_t bytes); |
420 | size_t iov_iter_copy_from_user(struct page *page, | 420 | size_t iov_iter_copy_from_user(struct page *page, |
421 | struct iov_iter *i, unsigned long offset, size_t bytes); | 421 | struct iov_iter *i, unsigned long offset, size_t bytes); |
422 | void iov_iter_advance(struct iov_iter *i, size_t bytes); | 422 | void iov_iter_advance(struct iov_iter *i, size_t bytes); |
423 | int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes); | 423 | int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes); |
424 | size_t iov_iter_single_seg_count(struct iov_iter *i); | 424 | size_t iov_iter_single_seg_count(struct iov_iter *i); |
425 | 425 | ||
426 | static inline void iov_iter_init(struct iov_iter *i, | 426 | static inline void iov_iter_init(struct iov_iter *i, |
427 | const struct iovec *iov, unsigned long nr_segs, | 427 | const struct iovec *iov, unsigned long nr_segs, |
428 | size_t count, size_t written) | 428 | size_t count, size_t written) |
429 | { | 429 | { |
430 | i->iov = iov; | 430 | i->iov = iov; |
431 | i->nr_segs = nr_segs; | 431 | i->nr_segs = nr_segs; |
432 | i->iov_offset = 0; | 432 | i->iov_offset = 0; |
433 | i->count = count + written; | 433 | i->count = count + written; |
434 | 434 | ||
435 | iov_iter_advance(i, written); | 435 | iov_iter_advance(i, written); |
436 | } | 436 | } |
437 | 437 | ||
438 | static inline size_t iov_iter_count(struct iov_iter *i) | 438 | static inline size_t iov_iter_count(struct iov_iter *i) |
439 | { | 439 | { |
440 | return i->count; | 440 | return i->count; |
441 | } | 441 | } |
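
The buffered write path drives these helpers in a fixed order: initialise the iterator over the user iovec, fault the source pages in while it is still safe to sleep, copy with pagefaults disabled, then advance by however much was actually copied. A hedged sketch of that loop body (example_copy_loop and its arguments are illustrative; pagefault_disable()/pagefault_enable() come from <linux/uaccess.h>):

	static size_t example_copy_loop(struct page *page, const struct iovec *iov,
					unsigned long nr_segs, size_t count,
					unsigned long offset, size_t bytes)
	{
		struct iov_iter i;
		size_t copied;

		iov_iter_init(&i, iov, nr_segs, count, 0);	/* nothing written yet */

		/* fault in the user buffer before any atomic copy is attempted */
		if (iov_iter_fault_in_readable(&i, bytes))
			return 0;

		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, &i, offset, bytes);
		pagefault_enable();

		/* consume what was copied; a short copy leaves i positioned for retry */
		iov_iter_advance(&i, copied);
		return copied;
	}
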
442 | 442 | ||
443 | 443 | ||
444 | struct address_space_operations { | 444 | struct address_space_operations { |
445 | int (*writepage)(struct page *page, struct writeback_control *wbc); | 445 | int (*writepage)(struct page *page, struct writeback_control *wbc); |
446 | int (*readpage)(struct file *, struct page *); | 446 | int (*readpage)(struct file *, struct page *); |
447 | void (*sync_page)(struct page *); | 447 | void (*sync_page)(struct page *); |
448 | 448 | ||
449 | /* Write back some dirty pages from this mapping. */ | 449 | /* Write back some dirty pages from this mapping. */ |
450 | int (*writepages)(struct address_space *, struct writeback_control *); | 450 | int (*writepages)(struct address_space *, struct writeback_control *); |
451 | 451 | ||
452 | /* Set a page dirty. Return true if this dirtied it */ | 452 | /* Set a page dirty. Return true if this dirtied it */ |
453 | int (*set_page_dirty)(struct page *page); | 453 | int (*set_page_dirty)(struct page *page); |
454 | 454 | ||
455 | int (*readpages)(struct file *filp, struct address_space *mapping, | 455 | int (*readpages)(struct file *filp, struct address_space *mapping, |
456 | struct list_head *pages, unsigned nr_pages); | 456 | struct list_head *pages, unsigned nr_pages); |
457 | 457 | ||
458 | /* | 458 | /* |
459 | * ext3 requires that a successful prepare_write() call be followed | 459 | * ext3 requires that a successful prepare_write() call be followed |
460 | * by a commit_write() call - they must be balanced | 460 | * by a commit_write() call - they must be balanced |
461 | */ | 461 | */ |
462 | int (*prepare_write)(struct file *, struct page *, unsigned, unsigned); | 462 | int (*prepare_write)(struct file *, struct page *, unsigned, unsigned); |
463 | int (*commit_write)(struct file *, struct page *, unsigned, unsigned); | 463 | int (*commit_write)(struct file *, struct page *, unsigned, unsigned); |
464 | 464 | ||
465 | int (*write_begin)(struct file *, struct address_space *mapping, | 465 | int (*write_begin)(struct file *, struct address_space *mapping, |
466 | loff_t pos, unsigned len, unsigned flags, | 466 | loff_t pos, unsigned len, unsigned flags, |
467 | struct page **pagep, void **fsdata); | 467 | struct page **pagep, void **fsdata); |
468 | int (*write_end)(struct file *, struct address_space *mapping, | 468 | int (*write_end)(struct file *, struct address_space *mapping, |
469 | loff_t pos, unsigned len, unsigned copied, | 469 | loff_t pos, unsigned len, unsigned copied, |
470 | struct page *page, void *fsdata); | 470 | struct page *page, void *fsdata); |
471 | 471 | ||
472 | /* Unfortunately this kludge is needed for FIBMAP. Don't use it */ | 472 | /* Unfortunately this kludge is needed for FIBMAP. Don't use it */ |
473 | sector_t (*bmap)(struct address_space *, sector_t); | 473 | sector_t (*bmap)(struct address_space *, sector_t); |
474 | void (*invalidatepage) (struct page *, unsigned long); | 474 | void (*invalidatepage) (struct page *, unsigned long); |
475 | int (*releasepage) (struct page *, gfp_t); | 475 | int (*releasepage) (struct page *, gfp_t); |
476 | ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov, | 476 | ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov, |
477 | loff_t offset, unsigned long nr_segs); | 477 | loff_t offset, unsigned long nr_segs); |
478 | struct page* (*get_xip_page)(struct address_space *, sector_t, | 478 | struct page* (*get_xip_page)(struct address_space *, sector_t, |
479 | int); | 479 | int); |
480 | /* migrate the contents of a page to the specified target */ | 480 | /* migrate the contents of a page to the specified target */ |
481 | int (*migratepage) (struct address_space *, | 481 | int (*migratepage) (struct address_space *, |
482 | struct page *, struct page *); | 482 | struct page *, struct page *); |
483 | int (*launder_page) (struct page *); | 483 | int (*launder_page) (struct page *); |
484 | }; | 484 | }; |
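
A simple block-based filesystem typically fills this table with the generic buffer-cache helpers from <linux/buffer_head.h> and only supplies its own block-mapping routine. A hedged sketch under that assumption (the example_* names are illustrative, and the get_block implementation below is a toy 1:1 mapping):

	static int example_get_block(struct inode *inode, sector_t iblock,
				     struct buffer_head *bh_result, int create)
	{
		/* toy mapping: file block N lives in device block N */
		map_bh(bh_result, inode->i_sb, iblock);
		return 0;
	}

	static int example_readpage(struct file *file, struct page *page)
	{
		return block_read_full_page(page, example_get_block);
	}

	static int example_writepage(struct page *page, struct writeback_control *wbc)
	{
		return block_write_full_page(page, example_get_block, wbc);
	}

	static int example_write_begin(struct file *file, struct address_space *mapping,
				       loff_t pos, unsigned len, unsigned flags,
				       struct page **pagep, void **fsdata)
	{
		*pagep = NULL;
		return block_write_begin(file, mapping, pos, len, flags,
					 pagep, fsdata, example_get_block);
	}

	static const struct address_space_operations example_aops = {
		.readpage	= example_readpage,
		.writepage	= example_writepage,
		.sync_page	= block_sync_page,
		.write_begin	= example_write_begin,
		.write_end	= generic_write_end,
	};
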
485 | 485 | ||
486 | /* | 486 | /* |
487 | * pagecache_write_begin/pagecache_write_end must be used by general code | 487 | * pagecache_write_begin/pagecache_write_end must be used by general code |
488 | * to write into the pagecache. | 488 | * to write into the pagecache. |
489 | */ | 489 | */ |
490 | int pagecache_write_begin(struct file *, struct address_space *mapping, | 490 | int pagecache_write_begin(struct file *, struct address_space *mapping, |
491 | loff_t pos, unsigned len, unsigned flags, | 491 | loff_t pos, unsigned len, unsigned flags, |
492 | struct page **pagep, void **fsdata); | 492 | struct page **pagep, void **fsdata); |
493 | 493 | ||
494 | int pagecache_write_end(struct file *, struct address_space *mapping, | 494 | int pagecache_write_end(struct file *, struct address_space *mapping, |
495 | loff_t pos, unsigned len, unsigned copied, | 495 | loff_t pos, unsigned len, unsigned copied, |
496 | struct page *page, void *fsdata); | 496 | struct page *page, void *fsdata); |
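
For kernel-internal writes the pair above is used back to back: write_begin hands back a locked page plus opaque fsdata, the caller copies into it, and write_end commits the range. A hedged sketch, assuming the write does not cross a page boundary (example_pagecache_write and src are illustrative):

	/* returns bytes committed or a negative errno */
	static int example_pagecache_write(struct file *file, loff_t pos,
					   const char *src, unsigned len)
	{
		struct address_space *mapping = file->f_mapping;
		struct page *page;
		void *fsdata;
		char *kaddr;
		int err;

		err = pagecache_write_begin(file, mapping, pos, len,
					    AOP_FLAG_UNINTERRUPTIBLE,
					    &page, &fsdata);
		if (err)
			return err;

		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr + (pos & (PAGE_CACHE_SIZE - 1)), src, len);
		kunmap_atomic(kaddr, KM_USER0);

		return pagecache_write_end(file, mapping, pos, len, len,
					   page, fsdata);
	}
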
497 | 497 | ||
498 | struct backing_dev_info; | 498 | struct backing_dev_info; |
499 | struct address_space { | 499 | struct address_space { |
500 | struct inode *host; /* owner: inode, block_device */ | 500 | struct inode *host; /* owner: inode, block_device */ |
501 | struct radix_tree_root page_tree; /* radix tree of all pages */ | 501 | struct radix_tree_root page_tree; /* radix tree of all pages */ |
502 | rwlock_t tree_lock; /* and rwlock protecting it */ | 502 | rwlock_t tree_lock; /* and rwlock protecting it */ |
503 | unsigned int i_mmap_writable;/* count VM_SHARED mappings */ | 503 | unsigned int i_mmap_writable;/* count VM_SHARED mappings */ |
504 | struct prio_tree_root i_mmap; /* tree of private and shared mappings */ | 504 | struct prio_tree_root i_mmap; /* tree of private and shared mappings */ |
505 | struct list_head i_mmap_nonlinear;/* list VM_NONLINEAR mappings */ | 505 | struct list_head i_mmap_nonlinear;/* list VM_NONLINEAR mappings */ |
506 | spinlock_t i_mmap_lock; /* protect tree, count, list */ | 506 | spinlock_t i_mmap_lock; /* protect tree, count, list */ |
507 | unsigned int truncate_count; /* Cover race condition with truncate */ | 507 | unsigned int truncate_count; /* Cover race condition with truncate */ |
508 | unsigned long nrpages; /* number of total pages */ | 508 | unsigned long nrpages; /* number of total pages */ |
509 | pgoff_t writeback_index;/* writeback starts here */ | 509 | pgoff_t writeback_index;/* writeback starts here */ |
510 | const struct address_space_operations *a_ops; /* methods */ | 510 | const struct address_space_operations *a_ops; /* methods */ |
511 | unsigned long flags; /* error bits/gfp mask */ | 511 | unsigned long flags; /* error bits/gfp mask */ |
512 | struct backing_dev_info *backing_dev_info; /* device readahead, etc */ | 512 | struct backing_dev_info *backing_dev_info; /* device readahead, etc */ |
513 | spinlock_t private_lock; /* for use by the address_space */ | 513 | spinlock_t private_lock; /* for use by the address_space */ |
514 | struct list_head private_list; /* ditto */ | 514 | struct list_head private_list; /* ditto */ |
515 | struct address_space *assoc_mapping; /* ditto */ | 515 | struct address_space *assoc_mapping; /* ditto */ |
516 | } __attribute__((aligned(sizeof(long)))); | 516 | } __attribute__((aligned(sizeof(long)))); |
517 | /* | 517 | /* |
518 | * On most architectures that alignment is already the case; but | 518 | * On most architectures that alignment is already the case; but |
519 | * must be enforced here for CRIS, to let the least significant bit | 519 | * must be enforced here for CRIS, to let the least significant bit |
520 | * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON. | 520 | * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON. |
521 | */ | 521 | */ |
522 | 522 | ||
523 | struct block_device { | 523 | struct block_device { |
524 | dev_t bd_dev; /* not a kdev_t - it's a search key */ | 524 | dev_t bd_dev; /* not a kdev_t - it's a search key */ |
525 | struct inode * bd_inode; /* will die */ | 525 | struct inode * bd_inode; /* will die */ |
526 | int bd_openers; | 526 | int bd_openers; |
527 | struct mutex bd_mutex; /* open/close mutex */ | 527 | struct mutex bd_mutex; /* open/close mutex */ |
528 | struct semaphore bd_mount_sem; | 528 | struct semaphore bd_mount_sem; |
529 | struct list_head bd_inodes; | 529 | struct list_head bd_inodes; |
530 | void * bd_holder; | 530 | void * bd_holder; |
531 | int bd_holders; | 531 | int bd_holders; |
532 | #ifdef CONFIG_SYSFS | 532 | #ifdef CONFIG_SYSFS |
533 | struct list_head bd_holder_list; | 533 | struct list_head bd_holder_list; |
534 | #endif | 534 | #endif |
535 | struct block_device * bd_contains; | 535 | struct block_device * bd_contains; |
536 | unsigned bd_block_size; | 536 | unsigned bd_block_size; |
537 | struct hd_struct * bd_part; | 537 | struct hd_struct * bd_part; |
538 | /* number of times partitions within this device have been opened. */ | 538 | /* number of times partitions within this device have been opened. */ |
539 | unsigned bd_part_count; | 539 | unsigned bd_part_count; |
540 | int bd_invalidated; | 540 | int bd_invalidated; |
541 | struct gendisk * bd_disk; | 541 | struct gendisk * bd_disk; |
542 | struct list_head bd_list; | 542 | struct list_head bd_list; |
543 | struct backing_dev_info *bd_inode_backing_dev_info; | 543 | struct backing_dev_info *bd_inode_backing_dev_info; |
544 | /* | 544 | /* |
545 | * Private data. You must have bd_claim'ed the block_device | 545 | * Private data. You must have bd_claim'ed the block_device |
546 | * to use this. NOTE: bd_claim allows an owner to claim | 546 | * to use this. NOTE: bd_claim allows an owner to claim |
547 | * the same device multiple times; the owner must take special | 547 | * the same device multiple times; the owner must take special |
548 | * care to not mess up bd_private for that case. | 548 | * care to not mess up bd_private for that case. |
549 | */ | 549 | */ |
550 | unsigned long bd_private; | 550 | unsigned long bd_private; |
551 | }; | 551 | }; |
552 | 552 | ||
553 | /* | 553 | /* |
554 | * Radix-tree tags, for tagging dirty and writeback pages within the pagecache | 554 | * Radix-tree tags, for tagging dirty and writeback pages within the pagecache |
555 | * radix trees | 555 | * radix trees |
556 | */ | 556 | */ |
557 | #define PAGECACHE_TAG_DIRTY 0 | 557 | #define PAGECACHE_TAG_DIRTY 0 |
558 | #define PAGECACHE_TAG_WRITEBACK 1 | 558 | #define PAGECACHE_TAG_WRITEBACK 1 |
559 | 559 | ||
560 | int mapping_tagged(struct address_space *mapping, int tag); | 560 | int mapping_tagged(struct address_space *mapping, int tag); |
561 | 561 | ||
562 | /* | 562 | /* |
563 | * Might pages of this file be mapped into userspace? | 563 | * Might pages of this file be mapped into userspace? |
564 | */ | 564 | */ |
565 | static inline int mapping_mapped(struct address_space *mapping) | 565 | static inline int mapping_mapped(struct address_space *mapping) |
566 | { | 566 | { |
567 | return !prio_tree_empty(&mapping->i_mmap) || | 567 | return !prio_tree_empty(&mapping->i_mmap) || |
568 | !list_empty(&mapping->i_mmap_nonlinear); | 568 | !list_empty(&mapping->i_mmap_nonlinear); |
569 | } | 569 | } |
570 | 570 | ||
571 | /* | 571 | /* |
572 | * Might pages of this file have been modified in userspace? | 572 | * Might pages of this file have been modified in userspace? |
573 | * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff | 573 | * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff |
574 | * marks vma as VM_SHARED if it is shared, and the file was opened for | 574 | * marks vma as VM_SHARED if it is shared, and the file was opened for |
575 | * writing i.e. vma may be mprotected writable even if now readonly. | 575 | * writing i.e. vma may be mprotected writable even if now readonly. |
576 | */ | 576 | */ |
577 | static inline int mapping_writably_mapped(struct address_space *mapping) | 577 | static inline int mapping_writably_mapped(struct address_space *mapping) |
578 | { | 578 | { |
579 | return mapping->i_mmap_writable != 0; | 579 | return mapping->i_mmap_writable != 0; |
580 | } | 580 | } |
581 | 581 | ||
582 | /* | 582 | /* |
583 | * Use sequence counter to get consistent i_size on 32-bit processors. | 583 | * Use sequence counter to get consistent i_size on 32-bit processors. |
584 | */ | 584 | */ |
585 | #if BITS_PER_LONG==32 && defined(CONFIG_SMP) | 585 | #if BITS_PER_LONG==32 && defined(CONFIG_SMP) |
586 | #include <linux/seqlock.h> | 586 | #include <linux/seqlock.h> |
587 | #define __NEED_I_SIZE_ORDERED | 587 | #define __NEED_I_SIZE_ORDERED |
588 | #define i_size_ordered_init(inode) seqcount_init(&inode->i_size_seqcount) | 588 | #define i_size_ordered_init(inode) seqcount_init(&inode->i_size_seqcount) |
589 | #else | 589 | #else |
590 | #define i_size_ordered_init(inode) do { } while (0) | 590 | #define i_size_ordered_init(inode) do { } while (0) |
591 | #endif | 591 | #endif |
592 | 592 | ||
593 | struct inode { | 593 | struct inode { |
594 | struct hlist_node i_hash; | 594 | struct hlist_node i_hash; |
595 | struct list_head i_list; | 595 | struct list_head i_list; |
596 | struct list_head i_sb_list; | 596 | struct list_head i_sb_list; |
597 | struct list_head i_dentry; | 597 | struct list_head i_dentry; |
598 | unsigned long i_ino; | 598 | unsigned long i_ino; |
599 | atomic_t i_count; | 599 | atomic_t i_count; |
600 | unsigned int i_nlink; | 600 | unsigned int i_nlink; |
601 | uid_t i_uid; | 601 | uid_t i_uid; |
602 | gid_t i_gid; | 602 | gid_t i_gid; |
603 | dev_t i_rdev; | 603 | dev_t i_rdev; |
604 | u64 i_version; | 604 | u64 i_version; |
605 | loff_t i_size; | 605 | loff_t i_size; |
606 | #ifdef __NEED_I_SIZE_ORDERED | 606 | #ifdef __NEED_I_SIZE_ORDERED |
607 | seqcount_t i_size_seqcount; | 607 | seqcount_t i_size_seqcount; |
608 | #endif | 608 | #endif |
609 | struct timespec i_atime; | 609 | struct timespec i_atime; |
610 | struct timespec i_mtime; | 610 | struct timespec i_mtime; |
611 | struct timespec i_ctime; | 611 | struct timespec i_ctime; |
612 | unsigned int i_blkbits; | 612 | unsigned int i_blkbits; |
613 | blkcnt_t i_blocks; | 613 | blkcnt_t i_blocks; |
614 | unsigned short i_bytes; | 614 | unsigned short i_bytes; |
615 | umode_t i_mode; | 615 | umode_t i_mode; |
616 | spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ | 616 | spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ |
617 | struct mutex i_mutex; | 617 | struct mutex i_mutex; |
618 | struct rw_semaphore i_alloc_sem; | 618 | struct rw_semaphore i_alloc_sem; |
619 | const struct inode_operations *i_op; | 619 | const struct inode_operations *i_op; |
620 | const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ | 620 | const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ |
621 | struct super_block *i_sb; | 621 | struct super_block *i_sb; |
622 | struct file_lock *i_flock; | 622 | struct file_lock *i_flock; |
623 | struct address_space *i_mapping; | 623 | struct address_space *i_mapping; |
624 | struct address_space i_data; | 624 | struct address_space i_data; |
625 | #ifdef CONFIG_QUOTA | 625 | #ifdef CONFIG_QUOTA |
626 | struct dquot *i_dquot[MAXQUOTAS]; | 626 | struct dquot *i_dquot[MAXQUOTAS]; |
627 | #endif | 627 | #endif |
628 | struct list_head i_devices; | 628 | struct list_head i_devices; |
629 | union { | 629 | union { |
630 | struct pipe_inode_info *i_pipe; | 630 | struct pipe_inode_info *i_pipe; |
631 | struct block_device *i_bdev; | 631 | struct block_device *i_bdev; |
632 | struct cdev *i_cdev; | 632 | struct cdev *i_cdev; |
633 | }; | 633 | }; |
634 | int i_cindex; | 634 | int i_cindex; |
635 | 635 | ||
636 | __u32 i_generation; | 636 | __u32 i_generation; |
637 | 637 | ||
638 | #ifdef CONFIG_DNOTIFY | 638 | #ifdef CONFIG_DNOTIFY |
639 | unsigned long i_dnotify_mask; /* Directory notify events */ | 639 | unsigned long i_dnotify_mask; /* Directory notify events */ |
640 | struct dnotify_struct *i_dnotify; /* for directory notifications */ | 640 | struct dnotify_struct *i_dnotify; /* for directory notifications */ |
641 | #endif | 641 | #endif |
642 | 642 | ||
643 | #ifdef CONFIG_INOTIFY | 643 | #ifdef CONFIG_INOTIFY |
644 | struct list_head inotify_watches; /* watches on this inode */ | 644 | struct list_head inotify_watches; /* watches on this inode */ |
645 | struct mutex inotify_mutex; /* protects the watches list */ | 645 | struct mutex inotify_mutex; /* protects the watches list */ |
646 | #endif | 646 | #endif |
647 | 647 | ||
648 | unsigned long i_state; | 648 | unsigned long i_state; |
649 | unsigned long dirtied_when; /* jiffies of first dirtying */ | 649 | unsigned long dirtied_when; /* jiffies of first dirtying */ |
650 | 650 | ||
651 | unsigned int i_flags; | 651 | unsigned int i_flags; |
652 | 652 | ||
653 | atomic_t i_writecount; | 653 | atomic_t i_writecount; |
654 | #ifdef CONFIG_SECURITY | 654 | #ifdef CONFIG_SECURITY |
655 | void *i_security; | 655 | void *i_security; |
656 | #endif | 656 | #endif |
657 | void *i_private; /* fs or device private pointer */ | 657 | void *i_private; /* fs or device private pointer */ |
658 | }; | 658 | }; |
659 | 659 | ||
660 | /* | 660 | /* |
661 | * inode->i_mutex nesting subclasses for the lock validator: | 661 | * inode->i_mutex nesting subclasses for the lock validator: |
662 | * | 662 | * |
663 | * 0: the object of the current VFS operation | 663 | * 0: the object of the current VFS operation |
664 | * 1: parent | 664 | * 1: parent |
665 | * 2: child/target | 665 | * 2: child/target |
666 | * 3: quota file | 666 | * 3: quota file |
667 | * | 667 | * |
668 | * The locking order between these classes is | 668 | * The locking order between these classes is |
669 | * parent -> child -> normal -> xattr -> quota | 669 | * parent -> child -> normal -> xattr -> quota |
670 | */ | 670 | */ |
671 | enum inode_i_mutex_lock_class | 671 | enum inode_i_mutex_lock_class |
672 | { | 672 | { |
673 | I_MUTEX_NORMAL, | 673 | I_MUTEX_NORMAL, |
674 | I_MUTEX_PARENT, | 674 | I_MUTEX_PARENT, |
675 | I_MUTEX_CHILD, | 675 | I_MUTEX_CHILD, |
676 | I_MUTEX_XATTR, | 676 | I_MUTEX_XATTR, |
677 | I_MUTEX_QUOTA | 677 | I_MUTEX_QUOTA |
678 | }; | 678 | }; |
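
The subclasses exist purely for lockdep: when two related i_mutexes must be held at once, each is taken with mutex_lock_nested() and the appropriate class, always parent before child. A hedged sketch of the common directory/victim pairing (example_lock_parent_and_child is illustrative):

	static void example_lock_parent_and_child(struct inode *dir,
						  struct inode *child)
	{
		/* locking order: parent directory first, then the child inode */
		mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
		mutex_lock_nested(&child->i_mutex, I_MUTEX_CHILD);

		/* ... operate on the pair ... */

		mutex_unlock(&child->i_mutex);
		mutex_unlock(&dir->i_mutex);
	}
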
679 | 679 | ||
680 | extern void inode_double_lock(struct inode *inode1, struct inode *inode2); | 680 | extern void inode_double_lock(struct inode *inode1, struct inode *inode2); |
681 | extern void inode_double_unlock(struct inode *inode1, struct inode *inode2); | 681 | extern void inode_double_unlock(struct inode *inode1, struct inode *inode2); |
682 | 682 | ||
683 | /* | 683 | /* |
684 | * NOTE: in a 32bit arch with a preemptible kernel and | 684 | * NOTE: in a 32bit arch with a preemptible kernel and |
685 | * a UP compile, i_size_read/write must be atomic | 685 | * a UP compile, i_size_read/write must be atomic |
686 | * with respect to the local cpu (unlike with preempt disabled), | 686 | * with respect to the local cpu (unlike with preempt disabled), |
687 | * but they don't need to be atomic with respect to other cpus like in | 687 | * but they don't need to be atomic with respect to other cpus like in |
688 | * true SMP (so they either need to locally disable irq around | 688 | * true SMP (so they either need to locally disable irq around |
689 | * the read or, for example on x86, they can still be implemented as a | 689 | * the read or, for example on x86, they can still be implemented as a |
690 | * cmpxchg8b without the need of the lock prefix). For SMP compiles | 690 | * cmpxchg8b without the need of the lock prefix). For SMP compiles |
691 | * and 64bit archs it makes no difference if preempt is enabled or not. | 691 | * and 64bit archs it makes no difference if preempt is enabled or not. |
692 | */ | 692 | */ |
693 | static inline loff_t i_size_read(const struct inode *inode) | 693 | static inline loff_t i_size_read(const struct inode *inode) |
694 | { | 694 | { |
695 | #if BITS_PER_LONG==32 && defined(CONFIG_SMP) | 695 | #if BITS_PER_LONG==32 && defined(CONFIG_SMP) |
696 | loff_t i_size; | 696 | loff_t i_size; |
697 | unsigned int seq; | 697 | unsigned int seq; |
698 | 698 | ||
699 | do { | 699 | do { |
700 | seq = read_seqcount_begin(&inode->i_size_seqcount); | 700 | seq = read_seqcount_begin(&inode->i_size_seqcount); |
701 | i_size = inode->i_size; | 701 | i_size = inode->i_size; |
702 | } while (read_seqcount_retry(&inode->i_size_seqcount, seq)); | 702 | } while (read_seqcount_retry(&inode->i_size_seqcount, seq)); |
703 | return i_size; | 703 | return i_size; |
704 | #elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT) | 704 | #elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT) |
705 | loff_t i_size; | 705 | loff_t i_size; |
706 | 706 | ||
707 | preempt_disable(); | 707 | preempt_disable(); |
708 | i_size = inode->i_size; | 708 | i_size = inode->i_size; |
709 | preempt_enable(); | 709 | preempt_enable(); |
710 | return i_size; | 710 | return i_size; |
711 | #else | 711 | #else |
712 | return inode->i_size; | 712 | return inode->i_size; |
713 | #endif | 713 | #endif |
714 | } | 714 | } |
715 | 715 | ||
716 | /* | 716 | /* |
717 | * NOTE: unlike i_size_read(), i_size_write() does need locking around it | 717 | * NOTE: unlike i_size_read(), i_size_write() does need locking around it |
718 | * (normally i_mutex), otherwise on 32bit/SMP an update of i_size_seqcount | 718 | * (normally i_mutex), otherwise on 32bit/SMP an update of i_size_seqcount |
719 | * can be lost, resulting in subsequent i_size_read() calls spinning forever. | 719 | * can be lost, resulting in subsequent i_size_read() calls spinning forever. |
720 | */ | 720 | */ |
721 | static inline void i_size_write(struct inode *inode, loff_t i_size) | 721 | static inline void i_size_write(struct inode *inode, loff_t i_size) |
722 | { | 722 | { |
723 | #if BITS_PER_LONG==32 && defined(CONFIG_SMP) | 723 | #if BITS_PER_LONG==32 && defined(CONFIG_SMP) |
724 | write_seqcount_begin(&inode->i_size_seqcount); | 724 | write_seqcount_begin(&inode->i_size_seqcount); |
725 | inode->i_size = i_size; | 725 | inode->i_size = i_size; |
726 | write_seqcount_end(&inode->i_size_seqcount); | 726 | write_seqcount_end(&inode->i_size_seqcount); |
727 | #elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT) | 727 | #elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT) |
728 | preempt_disable(); | 728 | preempt_disable(); |
729 | inode->i_size = i_size; | 729 | inode->i_size = i_size; |
730 | preempt_enable(); | 730 | preempt_enable(); |
731 | #else | 731 | #else |
732 | inode->i_size = i_size; | 732 | inode->i_size = i_size; |
733 | #endif | 733 | #endif |
734 | } | 734 | } |
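
Putting the two together: readers may call i_size_read() locklessly, but anything growing the file serialises its i_size_write() call, normally under i_mutex, so the 32-bit/SMP seqcount is never updated concurrently. A minimal hedged sketch (example_extend_file is illustrative):

	static void example_extend_file(struct inode *inode, loff_t new_size)
	{
		mutex_lock(&inode->i_mutex);
		if (new_size > i_size_read(inode))
			i_size_write(inode, new_size);	/* safe: writers serialised */
		mutex_unlock(&inode->i_mutex);
	}
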
735 | 735 | ||
736 | static inline unsigned iminor(const struct inode *inode) | 736 | static inline unsigned iminor(const struct inode *inode) |
737 | { | 737 | { |
738 | return MINOR(inode->i_rdev); | 738 | return MINOR(inode->i_rdev); |
739 | } | 739 | } |
740 | 740 | ||
741 | static inline unsigned imajor(const struct inode *inode) | 741 | static inline unsigned imajor(const struct inode *inode) |
742 | { | 742 | { |
743 | return MAJOR(inode->i_rdev); | 743 | return MAJOR(inode->i_rdev); |
744 | } | 744 | } |
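
iminor()/imajor() are what character-device open() methods use to tell their instances apart; many drivers instead recover their per-device structure from inode->i_cdev. A hedged sketch of both, assuming <linux/cdev.h> (struct example_dev and example_open are illustrative):

	struct example_dev {
		struct cdev cdev;
		/* driver-private state would follow */
	};

	static int example_open(struct inode *inode, struct file *filp)
	{
		struct example_dev *dev;

		/* i_cdev points at the cdev embedded in the driver's state */
		dev = container_of(inode->i_cdev, struct example_dev, cdev);
		filp->private_data = dev;

		pr_debug("opened %u:%u\n", imajor(inode), iminor(inode));
		return 0;
	}
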
745 | 745 | ||
746 | extern struct block_device *I_BDEV(struct inode *inode); | 746 | extern struct block_device *I_BDEV(struct inode *inode); |
747 | 747 | ||
748 | struct fown_struct { | 748 | struct fown_struct { |
749 | rwlock_t lock; /* protects pid, uid, euid fields */ | 749 | rwlock_t lock; /* protects pid, uid, euid fields */ |
750 | struct pid *pid; /* pid or -pgrp where SIGIO should be sent */ | 750 | struct pid *pid; /* pid or -pgrp where SIGIO should be sent */ |
751 | enum pid_type pid_type; /* Kind of process group SIGIO should be sent to */ | 751 | enum pid_type pid_type; /* Kind of process group SIGIO should be sent to */ |
752 | uid_t uid, euid; /* uid/euid of process setting the owner */ | 752 | uid_t uid, euid; /* uid/euid of process setting the owner */ |
753 | int signum; /* posix.1b rt signal to be delivered on IO */ | 753 | int signum; /* posix.1b rt signal to be delivered on IO */ |
754 | }; | 754 | }; |
755 | 755 | ||
756 | /* | 756 | /* |
757 | * Track a single file's readahead state | 757 | * Track a single file's readahead state |
758 | */ | 758 | */ |
759 | struct file_ra_state { | 759 | struct file_ra_state { |
760 | pgoff_t start; /* where readahead started */ | 760 | pgoff_t start; /* where readahead started */ |
761 | unsigned int size; /* # of readahead pages */ | 761 | unsigned int size; /* # of readahead pages */ |
762 | unsigned int async_size; /* do asynchronous readahead when | 762 | unsigned int async_size; /* do asynchronous readahead when |
763 | there are only # of pages ahead */ | 763 | there are only # of pages ahead */ |
764 | 764 | ||
765 | unsigned int ra_pages; /* Maximum readahead window */ | 765 | unsigned int ra_pages; /* Maximum readahead window */ |
766 | int mmap_miss; /* Cache miss stat for mmap accesses */ | 766 | int mmap_miss; /* Cache miss stat for mmap accesses */ |
767 | loff_t prev_pos; /* Cache last read() position */ | 767 | loff_t prev_pos; /* Cache last read() position */ |
768 | }; | 768 | }; |
769 | 769 | ||
770 | /* | 770 | /* |
771 | * Check if @index falls in the readahead windows. | 771 | * Check if @index falls in the readahead windows. |
772 | */ | 772 | */ |
773 | static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index) | 773 | static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index) |
774 | { | 774 | { |
775 | return (index >= ra->start && | 775 | return (index >= ra->start && |
776 | index < ra->start + ra->size); | 776 | index < ra->start + ra->size); |
777 | } | 777 | } |
778 | 778 | ||
779 | struct file { | 779 | struct file { |
780 | /* | 780 | /* |
781 | * fu_list becomes invalid after file_free is called and queued via | 781 | * fu_list becomes invalid after file_free is called and queued via |
782 | * fu_rcuhead for RCU freeing | 782 | * fu_rcuhead for RCU freeing |
783 | */ | 783 | */ |
784 | union { | 784 | union { |
785 | struct list_head fu_list; | 785 | struct list_head fu_list; |
786 | struct rcu_head fu_rcuhead; | 786 | struct rcu_head fu_rcuhead; |
787 | } f_u; | 787 | } f_u; |
788 | struct path f_path; | 788 | struct path f_path; |
789 | #define f_dentry f_path.dentry | 789 | #define f_dentry f_path.dentry |
790 | #define f_vfsmnt f_path.mnt | 790 | #define f_vfsmnt f_path.mnt |
791 | const struct file_operations *f_op; | 791 | const struct file_operations *f_op; |
792 | atomic_t f_count; | 792 | atomic_t f_count; |
793 | unsigned int f_flags; | 793 | unsigned int f_flags; |
794 | mode_t f_mode; | 794 | mode_t f_mode; |
795 | loff_t f_pos; | 795 | loff_t f_pos; |
796 | struct fown_struct f_owner; | 796 | struct fown_struct f_owner; |
797 | unsigned int f_uid, f_gid; | 797 | unsigned int f_uid, f_gid; |
798 | struct file_ra_state f_ra; | 798 | struct file_ra_state f_ra; |
799 | 799 | ||
800 | u64 f_version; | 800 | u64 f_version; |
801 | #ifdef CONFIG_SECURITY | 801 | #ifdef CONFIG_SECURITY |
802 | void *f_security; | 802 | void *f_security; |
803 | #endif | 803 | #endif |
804 | /* needed for tty driver, and maybe others */ | 804 | /* needed for tty driver, and maybe others */ |
805 | void *private_data; | 805 | void *private_data; |
806 | 806 | ||
807 | #ifdef CONFIG_EPOLL | 807 | #ifdef CONFIG_EPOLL |
808 | /* Used by fs/eventpoll.c to link all the hooks to this file */ | 808 | /* Used by fs/eventpoll.c to link all the hooks to this file */ |
809 | struct list_head f_ep_links; | 809 | struct list_head f_ep_links; |
810 | spinlock_t f_ep_lock; | 810 | spinlock_t f_ep_lock; |
811 | #endif /* #ifdef CONFIG_EPOLL */ | 811 | #endif /* #ifdef CONFIG_EPOLL */ |
812 | struct address_space *f_mapping; | 812 | struct address_space *f_mapping; |
813 | }; | 813 | }; |
814 | extern spinlock_t files_lock; | 814 | extern spinlock_t files_lock; |
815 | #define file_list_lock() spin_lock(&files_lock); | 815 | #define file_list_lock() spin_lock(&files_lock); |
816 | #define file_list_unlock() spin_unlock(&files_lock); | 816 | #define file_list_unlock() spin_unlock(&files_lock); |
817 | 817 | ||
818 | #define get_file(x) atomic_inc(&(x)->f_count) | 818 | #define get_file(x) atomic_inc(&(x)->f_count) |
819 | #define file_count(x) atomic_read(&(x)->f_count) | 819 | #define file_count(x) atomic_read(&(x)->f_count) |
820 | 820 | ||
821 | #define MAX_NON_LFS ((1UL<<31) - 1) | 821 | #define MAX_NON_LFS ((1UL<<31) - 1) |
822 | 822 | ||
823 | /* Page cache limit. The filesystems should put that into their s_maxbytes | 823 | /* Page cache limit. The filesystems should put that into their s_maxbytes |
824 | limits, otherwise bad things can happen in VM. */ | 824 | limits, otherwise bad things can happen in VM. */ |
825 | #if BITS_PER_LONG==32 | 825 | #if BITS_PER_LONG==32 |
826 | #define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) | 826 | #define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) |
827 | #elif BITS_PER_LONG==64 | 827 | #elif BITS_PER_LONG==64 |
828 | #define MAX_LFS_FILESIZE 0x7fffffffffffffffUL | 828 | #define MAX_LFS_FILESIZE 0x7fffffffffffffffUL |
829 | #endif | 829 | #endif |
830 | 830 | ||
831 | #define FL_POSIX 1 | 831 | #define FL_POSIX 1 |
832 | #define FL_FLOCK 2 | 832 | #define FL_FLOCK 2 |
833 | #define FL_ACCESS 8 /* not trying to lock, just looking */ | 833 | #define FL_ACCESS 8 /* not trying to lock, just looking */ |
834 | #define FL_EXISTS 16 /* when unlocking, test for existence */ | 834 | #define FL_EXISTS 16 /* when unlocking, test for existence */ |
835 | #define FL_LEASE 32 /* lease held on this file */ | 835 | #define FL_LEASE 32 /* lease held on this file */ |
836 | #define FL_CLOSE 64 /* unlock on close */ | 836 | #define FL_CLOSE 64 /* unlock on close */ |
837 | #define FL_SLEEP 128 /* A blocking lock */ | 837 | #define FL_SLEEP 128 /* A blocking lock */ |
838 | 838 | ||
839 | /* | 839 | /* |
840 | * The POSIX file lock owner is determined by | 840 | * The POSIX file lock owner is determined by |
841 | * the "struct files_struct" in the thread group | 841 | * the "struct files_struct" in the thread group |
842 | * (or NULL for no owner - BSD locks). | 842 | * (or NULL for no owner - BSD locks). |
843 | * | 843 | * |
844 | * Lockd stuffs a "host" pointer into this. | 844 | * Lockd stuffs a "host" pointer into this. |
845 | */ | 845 | */ |
846 | typedef struct files_struct *fl_owner_t; | 846 | typedef struct files_struct *fl_owner_t; |
847 | 847 | ||
848 | struct file_lock_operations { | 848 | struct file_lock_operations { |
849 | void (*fl_insert)(struct file_lock *); /* lock insertion callback */ | 849 | void (*fl_insert)(struct file_lock *); /* lock insertion callback */ |
850 | void (*fl_remove)(struct file_lock *); /* lock removal callback */ | 850 | void (*fl_remove)(struct file_lock *); /* lock removal callback */ |
851 | void (*fl_copy_lock)(struct file_lock *, struct file_lock *); | 851 | void (*fl_copy_lock)(struct file_lock *, struct file_lock *); |
852 | void (*fl_release_private)(struct file_lock *); | 852 | void (*fl_release_private)(struct file_lock *); |
853 | }; | 853 | }; |
854 | 854 | ||
855 | struct lock_manager_operations { | 855 | struct lock_manager_operations { |
856 | int (*fl_compare_owner)(struct file_lock *, struct file_lock *); | 856 | int (*fl_compare_owner)(struct file_lock *, struct file_lock *); |
857 | void (*fl_notify)(struct file_lock *); /* unblock callback */ | 857 | void (*fl_notify)(struct file_lock *); /* unblock callback */ |
858 | int (*fl_grant)(struct file_lock *, struct file_lock *, int); | 858 | int (*fl_grant)(struct file_lock *, struct file_lock *, int); |
859 | void (*fl_copy_lock)(struct file_lock *, struct file_lock *); | 859 | void (*fl_copy_lock)(struct file_lock *, struct file_lock *); |
860 | void (*fl_release_private)(struct file_lock *); | 860 | void (*fl_release_private)(struct file_lock *); |
861 | void (*fl_break)(struct file_lock *); | 861 | void (*fl_break)(struct file_lock *); |
862 | int (*fl_mylease)(struct file_lock *, struct file_lock *); | 862 | int (*fl_mylease)(struct file_lock *, struct file_lock *); |
863 | int (*fl_change)(struct file_lock **, int); | 863 | int (*fl_change)(struct file_lock **, int); |
864 | }; | 864 | }; |
865 | 865 | ||
866 | /* that will die - we need it for nfs_lock_info */ | 866 | /* that will die - we need it for nfs_lock_info */ |
867 | #include <linux/nfs_fs_i.h> | 867 | #include <linux/nfs_fs_i.h> |
868 | 868 | ||
869 | struct file_lock { | 869 | struct file_lock { |
870 | struct file_lock *fl_next; /* singly linked list for this inode */ | 870 | struct file_lock *fl_next; /* singly linked list for this inode */ |
871 | struct list_head fl_link; /* doubly linked list of all locks */ | 871 | struct list_head fl_link; /* doubly linked list of all locks */ |
872 | struct list_head fl_block; /* circular list of blocked processes */ | 872 | struct list_head fl_block; /* circular list of blocked processes */ |
873 | fl_owner_t fl_owner; | 873 | fl_owner_t fl_owner; |
874 | unsigned int fl_pid; | 874 | unsigned int fl_pid; |
875 | struct pid *fl_nspid; | 875 | struct pid *fl_nspid; |
876 | wait_queue_head_t fl_wait; | 876 | wait_queue_head_t fl_wait; |
877 | struct file *fl_file; | 877 | struct file *fl_file; |
878 | unsigned char fl_flags; | 878 | unsigned char fl_flags; |
879 | unsigned char fl_type; | 879 | unsigned char fl_type; |
880 | loff_t fl_start; | 880 | loff_t fl_start; |
881 | loff_t fl_end; | 881 | loff_t fl_end; |
882 | 882 | ||
883 | struct fasync_struct * fl_fasync; /* for lease break notifications */ | 883 | struct fasync_struct * fl_fasync; /* for lease break notifications */ |
884 | unsigned long fl_break_time; /* for nonblocking lease breaks */ | 884 | unsigned long fl_break_time; /* for nonblocking lease breaks */ |
885 | 885 | ||
886 | struct file_lock_operations *fl_ops; /* Callbacks for filesystems */ | 886 | struct file_lock_operations *fl_ops; /* Callbacks for filesystems */ |
887 | struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */ | 887 | struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */ |
888 | union { | 888 | union { |
889 | struct nfs_lock_info nfs_fl; | 889 | struct nfs_lock_info nfs_fl; |
890 | struct nfs4_lock_info nfs4_fl; | 890 | struct nfs4_lock_info nfs4_fl; |
891 | struct { | 891 | struct { |
892 | struct list_head link; /* link in AFS vnode's pending_locks list */ | 892 | struct list_head link; /* link in AFS vnode's pending_locks list */ |
893 | int state; /* state of grant or error if -ve */ | 893 | int state; /* state of grant or error if -ve */ |
894 | } afs; | 894 | } afs; |
895 | } fl_u; | 895 | } fl_u; |
896 | }; | 896 | }; |
897 | 897 | ||
898 | /* The following constant reflects the upper bound of the file/locking space */ | 898 | /* The following constant reflects the upper bound of the file/locking space */ |
899 | #ifndef OFFSET_MAX | 899 | #ifndef OFFSET_MAX |
900 | #define INT_LIMIT(x) (~((x)1 << (sizeof(x)*8 - 1))) | 900 | #define INT_LIMIT(x) (~((x)1 << (sizeof(x)*8 - 1))) |
901 | #define OFFSET_MAX INT_LIMIT(loff_t) | 901 | #define OFFSET_MAX INT_LIMIT(loff_t) |
902 | #define OFFT_OFFSET_MAX INT_LIMIT(off_t) | 902 | #define OFFT_OFFSET_MAX INT_LIMIT(off_t) |
903 | #endif | 903 | #endif |
904 | 904 | ||
905 | #include <linux/fcntl.h> | 905 | #include <linux/fcntl.h> |
906 | 906 | ||
907 | extern int fcntl_getlk(struct file *, struct flock __user *); | 907 | extern int fcntl_getlk(struct file *, struct flock __user *); |
908 | extern int fcntl_setlk(unsigned int, struct file *, unsigned int, | 908 | extern int fcntl_setlk(unsigned int, struct file *, unsigned int, |
909 | struct flock __user *); | 909 | struct flock __user *); |
910 | 910 | ||
911 | #if BITS_PER_LONG == 32 | 911 | #if BITS_PER_LONG == 32 |
912 | extern int fcntl_getlk64(struct file *, struct flock64 __user *); | 912 | extern int fcntl_getlk64(struct file *, struct flock64 __user *); |
913 | extern int fcntl_setlk64(unsigned int, struct file *, unsigned int, | 913 | extern int fcntl_setlk64(unsigned int, struct file *, unsigned int, |
914 | struct flock64 __user *); | 914 | struct flock64 __user *); |
915 | #endif | 915 | #endif |
916 | 916 | ||
917 | extern void send_sigio(struct fown_struct *fown, int fd, int band); | 917 | extern void send_sigio(struct fown_struct *fown, int fd, int band); |
918 | extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg); | 918 | extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg); |
919 | extern int fcntl_getlease(struct file *filp); | 919 | extern int fcntl_getlease(struct file *filp); |
920 | 920 | ||
921 | /* fs/sync.c */ | 921 | /* fs/sync.c */ |
922 | extern int do_sync_mapping_range(struct address_space *mapping, loff_t offset, | 922 | extern int do_sync_mapping_range(struct address_space *mapping, loff_t offset, |
923 | loff_t endbyte, unsigned int flags); | 923 | loff_t endbyte, unsigned int flags); |
924 | 924 | ||
925 | /* fs/locks.c */ | 925 | /* fs/locks.c */ |
926 | extern void locks_init_lock(struct file_lock *); | 926 | extern void locks_init_lock(struct file_lock *); |
927 | extern void locks_copy_lock(struct file_lock *, struct file_lock *); | 927 | extern void locks_copy_lock(struct file_lock *, struct file_lock *); |
928 | extern void locks_remove_posix(struct file *, fl_owner_t); | 928 | extern void locks_remove_posix(struct file *, fl_owner_t); |
929 | extern void locks_remove_flock(struct file *); | 929 | extern void locks_remove_flock(struct file *); |
930 | extern void posix_test_lock(struct file *, struct file_lock *); | 930 | extern void posix_test_lock(struct file *, struct file_lock *); |
931 | extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *); | 931 | extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *); |
932 | extern int posix_lock_file_wait(struct file *, struct file_lock *); | 932 | extern int posix_lock_file_wait(struct file *, struct file_lock *); |
933 | extern int posix_unblock_lock(struct file *, struct file_lock *); | 933 | extern int posix_unblock_lock(struct file *, struct file_lock *); |
934 | extern int vfs_test_lock(struct file *, struct file_lock *); | 934 | extern int vfs_test_lock(struct file *, struct file_lock *); |
935 | extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *); | 935 | extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *); |
936 | extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl); | 936 | extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl); |
937 | extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl); | 937 | extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl); |
938 | extern int __break_lease(struct inode *inode, unsigned int flags); | 938 | extern int __break_lease(struct inode *inode, unsigned int flags); |
939 | extern void lease_get_mtime(struct inode *, struct timespec *time); | 939 | extern void lease_get_mtime(struct inode *, struct timespec *time); |
940 | extern int generic_setlease(struct file *, long, struct file_lock **); | 940 | extern int generic_setlease(struct file *, long, struct file_lock **); |
941 | extern int vfs_setlease(struct file *, long, struct file_lock **); | 941 | extern int vfs_setlease(struct file *, long, struct file_lock **); |
942 | extern int lease_modify(struct file_lock **, int); | 942 | extern int lease_modify(struct file_lock **, int); |
943 | extern int lock_may_read(struct inode *, loff_t start, unsigned long count); | 943 | extern int lock_may_read(struct inode *, loff_t start, unsigned long count); |
944 | extern int lock_may_write(struct inode *, loff_t start, unsigned long count); | 944 | extern int lock_may_write(struct inode *, loff_t start, unsigned long count); |
945 | extern struct seq_operations locks_seq_operations; | 945 | extern struct seq_operations locks_seq_operations; |
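
As one example of these helpers in use, a conflicting POSIX lock can be probed with an on-stack file_lock and posix_test_lock(), which rewrites fl_type to F_UNLCK when nothing conflicts. A hedged sketch (example_range_is_locked is illustrative; F_WRLCK/F_UNLCK come from <linux/fcntl.h>, included earlier in this header):

	static int example_range_is_locked(struct file *filp, loff_t start, loff_t len)
	{
		struct file_lock fl;

		locks_init_lock(&fl);
		fl.fl_owner = current->files;
		fl.fl_pid = current->tgid;
		fl.fl_flags = FL_POSIX;
		fl.fl_type = F_WRLCK;		/* probe for any conflicting lock */
		fl.fl_start = start;
		fl.fl_end = start + len - 1;

		posix_test_lock(filp, &fl);
		return fl.fl_type != F_UNLCK;	/* still F_UNLCK means no conflict */
	}
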
946 | 946 | ||
947 | struct fasync_struct { | 947 | struct fasync_struct { |
948 | int magic; | 948 | int magic; |
949 | int fa_fd; | 949 | int fa_fd; |
950 | struct fasync_struct *fa_next; /* singly linked list */ | 950 | struct fasync_struct *fa_next; /* singly linked list */ |
951 | struct file *fa_file; | 951 | struct file *fa_file; |
952 | }; | 952 | }; |
953 | 953 | ||
954 | #define FASYNC_MAGIC 0x4601 | 954 | #define FASYNC_MAGIC 0x4601 |
955 | 955 | ||
956 | /* SMP safe fasync helpers: */ | 956 | /* SMP safe fasync helpers: */ |
957 | extern int fasync_helper(int, struct file *, int, struct fasync_struct **); | 957 | extern int fasync_helper(int, struct file *, int, struct fasync_struct **); |
958 | /* can be called from interrupts */ | 958 | /* can be called from interrupts */ |
959 | extern void kill_fasync(struct fasync_struct **, int, int); | 959 | extern void kill_fasync(struct fasync_struct **, int, int); |
960 | /* only for net: no internal synchronization */ | 960 | /* only for net: no internal synchronization */ |
961 | extern void __kill_fasync(struct fasync_struct *, int, int); | 961 | extern void __kill_fasync(struct fasync_struct *, int, int); |
962 | 962 | ||
963 | extern int __f_setown(struct file *filp, struct pid *, enum pid_type, int force); | 963 | extern int __f_setown(struct file *filp, struct pid *, enum pid_type, int force); |
964 | extern int f_setown(struct file *filp, unsigned long arg, int force); | 964 | extern int f_setown(struct file *filp, unsigned long arg, int force); |
965 | extern void f_delown(struct file *filp); | 965 | extern void f_delown(struct file *filp); |
966 | extern pid_t f_getown(struct file *filp); | 966 | extern pid_t f_getown(struct file *filp); |
967 | extern int send_sigurg(struct fown_struct *fown); | 967 | extern int send_sigurg(struct fown_struct *fown); |
968 | 968 | ||
969 | /* | 969 | /* |
970 | * Umount options | 970 | * Umount options |
971 | */ | 971 | */ |
972 | 972 | ||
973 | #define MNT_FORCE 0x00000001 /* Attempt to forcibly umount */ | 973 | #define MNT_FORCE 0x00000001 /* Attempt to forcibly umount */ |
974 | #define MNT_DETACH 0x00000002 /* Just detach from the tree */ | 974 | #define MNT_DETACH 0x00000002 /* Just detach from the tree */ |
975 | #define MNT_EXPIRE 0x00000004 /* Mark for expiry */ | 975 | #define MNT_EXPIRE 0x00000004 /* Mark for expiry */ |
976 | 976 | ||
977 | extern struct list_head super_blocks; | 977 | extern struct list_head super_blocks; |
978 | extern spinlock_t sb_lock; | 978 | extern spinlock_t sb_lock; |
979 | 979 | ||
980 | #define S_BIAS (1<<30) | 980 | #define S_BIAS (1<<30) |
981 | struct super_block { | 981 | struct super_block { |
982 | struct list_head s_list; /* Keep this first */ | 982 | struct list_head s_list; /* Keep this first */ |
983 | dev_t s_dev; /* search index; _not_ kdev_t */ | 983 | dev_t s_dev; /* search index; _not_ kdev_t */ |
984 | unsigned long s_blocksize; | 984 | unsigned long s_blocksize; |
985 | unsigned char s_blocksize_bits; | 985 | unsigned char s_blocksize_bits; |
986 | unsigned char s_dirt; | 986 | unsigned char s_dirt; |
987 | unsigned long long s_maxbytes; /* Max file size */ | 987 | unsigned long long s_maxbytes; /* Max file size */ |
988 | struct file_system_type *s_type; | 988 | struct file_system_type *s_type; |
989 | const struct super_operations *s_op; | 989 | const struct super_operations *s_op; |
990 | struct dquot_operations *dq_op; | 990 | struct dquot_operations *dq_op; |
991 | struct quotactl_ops *s_qcop; | 991 | struct quotactl_ops *s_qcop; |
992 | const struct export_operations *s_export_op; | 992 | const struct export_operations *s_export_op; |
993 | unsigned long s_flags; | 993 | unsigned long s_flags; |
994 | unsigned long s_magic; | 994 | unsigned long s_magic; |
995 | struct dentry *s_root; | 995 | struct dentry *s_root; |
996 | struct rw_semaphore s_umount; | 996 | struct rw_semaphore s_umount; |
997 | struct mutex s_lock; | 997 | struct mutex s_lock; |
998 | int s_count; | 998 | int s_count; |
999 | int s_syncing; | 999 | int s_syncing; |
1000 | int s_need_sync_fs; | 1000 | int s_need_sync_fs; |
1001 | atomic_t s_active; | 1001 | atomic_t s_active; |
1002 | #ifdef CONFIG_SECURITY | 1002 | #ifdef CONFIG_SECURITY |
1003 | void *s_security; | 1003 | void *s_security; |
1004 | #endif | 1004 | #endif |
1005 | struct xattr_handler **s_xattr; | 1005 | struct xattr_handler **s_xattr; |
1006 | 1006 | ||
1007 | struct list_head s_inodes; /* all inodes */ | 1007 | struct list_head s_inodes; /* all inodes */ |
1008 | struct list_head s_dirty; /* dirty inodes */ | 1008 | struct list_head s_dirty; /* dirty inodes */ |
1009 | struct list_head s_io; /* parked for writeback */ | 1009 | struct list_head s_io; /* parked for writeback */ |
1010 | struct list_head s_more_io; /* parked for more writeback */ | 1010 | struct list_head s_more_io; /* parked for more writeback */ |
1011 | struct hlist_head s_anon; /* anonymous dentries for (nfs) exporting */ | 1011 | struct hlist_head s_anon; /* anonymous dentries for (nfs) exporting */ |
1012 | struct list_head s_files; | 1012 | struct list_head s_files; |
1013 | 1013 | ||
1014 | struct block_device *s_bdev; | 1014 | struct block_device *s_bdev; |
1015 | struct mtd_info *s_mtd; | 1015 | struct mtd_info *s_mtd; |
1016 | struct list_head s_instances; | 1016 | struct list_head s_instances; |
1017 | struct quota_info s_dquot; /* Diskquota specific options */ | 1017 | struct quota_info s_dquot; /* Diskquota specific options */ |
1018 | 1018 | ||
1019 | int s_frozen; | 1019 | int s_frozen; |
1020 | wait_queue_head_t s_wait_unfrozen; | 1020 | wait_queue_head_t s_wait_unfrozen; |
1021 | 1021 | ||
1022 | char s_id[32]; /* Informational name */ | 1022 | char s_id[32]; /* Informational name */ |
1023 | 1023 | ||
1024 | void *s_fs_info; /* Filesystem private info */ | 1024 | void *s_fs_info; /* Filesystem private info */ |
1025 | 1025 | ||
1026 | /* | 1026 | /* |
1027 | * The next field is for VFS *only*. No filesystems have any business | 1027 | * The next field is for VFS *only*. No filesystems have any business |
1028 | * even looking at it. You had been warned. | 1028 | * even looking at it. You had been warned. |
1029 | */ | 1029 | */ |
1030 | struct mutex s_vfs_rename_mutex; /* Kludge */ | 1030 | struct mutex s_vfs_rename_mutex; /* Kludge */ |
1031 | 1031 | ||
1032 | /* Granularity of c/m/atime in ns. | 1032 | /* Granularity of c/m/atime in ns. |
1033 | Cannot be worse than a second */ | 1033 | Cannot be worse than a second */ |
1034 | u32 s_time_gran; | 1034 | u32 s_time_gran; |
1035 | 1035 | ||
1036 | /* | 1036 | /* |
1037 | * Filesystem subtype. If non-empty the filesystem type field | 1037 | * Filesystem subtype. If non-empty the filesystem type field |
1038 | * in /proc/mounts will be "type.subtype" | 1038 | * in /proc/mounts will be "type.subtype" |
1039 | */ | 1039 | */ |
1040 | char *s_subtype; | 1040 | char *s_subtype; |
1041 | 1041 | ||
1042 | /* | 1042 | /* |
1043 | * Saved mount options for lazy filesystems using | 1043 | * Saved mount options for lazy filesystems using |
1044 | * generic_show_options() | 1044 | * generic_show_options() |
1045 | */ | 1045 | */ |
1046 | char *s_options; | 1046 | char *s_options; |
1047 | }; | 1047 | }; |
1048 | 1048 | ||
1049 | extern struct timespec current_fs_time(struct super_block *sb); | 1049 | extern struct timespec current_fs_time(struct super_block *sb); |
1050 | 1050 | ||
1051 | /* | 1051 | /* |
1052 | * Snapshotting support. | 1052 | * Snapshotting support. |
1053 | */ | 1053 | */ |
1054 | enum { | 1054 | enum { |
1055 | SB_UNFROZEN = 0, | 1055 | SB_UNFROZEN = 0, |
1056 | SB_FREEZE_WRITE = 1, | 1056 | SB_FREEZE_WRITE = 1, |
1057 | SB_FREEZE_TRANS = 2, | 1057 | SB_FREEZE_TRANS = 2, |
1058 | }; | 1058 | }; |
1059 | 1059 | ||
1060 | #define vfs_check_frozen(sb, level) \ | 1060 | #define vfs_check_frozen(sb, level) \ |
1061 | wait_event((sb)->s_wait_unfrozen, ((sb)->s_frozen < (level))) | 1061 | wait_event((sb)->s_wait_unfrozen, ((sb)->s_frozen < (level))) |
1062 | 1062 | ||
1063 | #define get_fs_excl() atomic_inc(¤t->fs_excl) | 1063 | #define get_fs_excl() atomic_inc(¤t->fs_excl) |
1064 | #define put_fs_excl() atomic_dec(¤t->fs_excl) | 1064 | #define put_fs_excl() atomic_dec(¤t->fs_excl) |
1065 | #define has_fs_excl() atomic_read(¤t->fs_excl) | 1065 | #define has_fs_excl() atomic_read(¤t->fs_excl) |
1066 | 1066 | ||
1067 | #define is_owner_or_cap(inode) \ | 1067 | #define is_owner_or_cap(inode) \ |
1068 | ((current->fsuid == (inode)->i_uid) || capable(CAP_FOWNER)) | 1068 | ((current->fsuid == (inode)->i_uid) || capable(CAP_FOWNER)) |
1069 | 1069 | ||
1070 | /* not quite ready to be deprecated, but... */ | 1070 | /* not quite ready to be deprecated, but... */ |
1071 | extern void lock_super(struct super_block *); | 1071 | extern void lock_super(struct super_block *); |
1072 | extern void unlock_super(struct super_block *); | 1072 | extern void unlock_super(struct super_block *); |
1073 | 1073 | ||
1074 | /* | 1074 | /* |
1075 | * VFS helper functions. | 1075 | * VFS helper functions. |
1076 | */ | 1076 | */ |
1077 | extern int vfs_permission(struct nameidata *, int); | 1077 | extern int vfs_permission(struct nameidata *, int); |
1078 | extern int vfs_create(struct inode *, struct dentry *, int, struct nameidata *); | 1078 | extern int vfs_create(struct inode *, struct dentry *, int, struct nameidata *); |
1079 | extern int vfs_mkdir(struct inode *, struct dentry *, int); | 1079 | extern int vfs_mkdir(struct inode *, struct dentry *, int); |
1080 | extern int vfs_mknod(struct inode *, struct dentry *, int, dev_t); | 1080 | extern int vfs_mknod(struct inode *, struct dentry *, int, dev_t); |
1081 | extern int vfs_symlink(struct inode *, struct dentry *, const char *, int); | 1081 | extern int vfs_symlink(struct inode *, struct dentry *, const char *, int); |
1082 | extern int vfs_link(struct dentry *, struct inode *, struct dentry *); | 1082 | extern int vfs_link(struct dentry *, struct inode *, struct dentry *); |
1083 | extern int vfs_rmdir(struct inode *, struct dentry *); | 1083 | extern int vfs_rmdir(struct inode *, struct dentry *); |
1084 | extern int vfs_unlink(struct inode *, struct dentry *); | 1084 | extern int vfs_unlink(struct inode *, struct dentry *); |
1085 | extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *); | 1085 | extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *); |
1086 | 1086 | ||
1087 | /* | 1087 | /* |
1088 | * VFS dentry helper functions. | 1088 | * VFS dentry helper functions. |
1089 | */ | 1089 | */ |
1090 | extern void dentry_unhash(struct dentry *dentry); | 1090 | extern void dentry_unhash(struct dentry *dentry); |
1091 | 1091 | ||
1092 | /* | 1092 | /* |
1093 | * VFS file helper functions. | 1093 | * VFS file helper functions. |
1094 | */ | 1094 | */ |
1095 | extern int file_permission(struct file *, int); | 1095 | extern int file_permission(struct file *, int); |
1096 | 1096 | ||
1097 | /* | 1097 | /* |
1098 | * File types | 1098 | * File types |
1099 | * | 1099 | * |
1100 | * NOTE! These match bits 12..15 of stat.st_mode | 1100 | * NOTE! These match bits 12..15 of stat.st_mode |
1101 | * (ie "(i_mode >> 12) & 15"). | 1101 | * (ie "(i_mode >> 12) & 15"). |
1102 | */ | 1102 | */ |
1103 | #define DT_UNKNOWN 0 | 1103 | #define DT_UNKNOWN 0 |
1104 | #define DT_FIFO 1 | 1104 | #define DT_FIFO 1 |
1105 | #define DT_CHR 2 | 1105 | #define DT_CHR 2 |
1106 | #define DT_DIR 4 | 1106 | #define DT_DIR 4 |
1107 | #define DT_BLK 6 | 1107 | #define DT_BLK 6 |
1108 | #define DT_REG 8 | 1108 | #define DT_REG 8 |
1109 | #define DT_LNK 10 | 1109 | #define DT_LNK 10 |
1110 | #define DT_SOCK 12 | 1110 | #define DT_SOCK 12 |
1111 | #define DT_WHT 14 | 1111 | #define DT_WHT 14 |
1112 | 1112 | ||
1113 | #define OSYNC_METADATA (1<<0) | 1113 | #define OSYNC_METADATA (1<<0) |
1114 | #define OSYNC_DATA (1<<1) | 1114 | #define OSYNC_DATA (1<<1) |
1115 | #define OSYNC_INODE (1<<2) | 1115 | #define OSYNC_INODE (1<<2) |
1116 | int generic_osync_inode(struct inode *, struct address_space *, int); | 1116 | int generic_osync_inode(struct inode *, struct address_space *, int); |
1117 | 1117 | ||
1118 | /* | 1118 | /* |
1119 | * This is the "filldir" function type, used by readdir() to let | 1119 | * This is the "filldir" function type, used by readdir() to let |
1120 | * the kernel specify what kind of dirent layout it wants to have. | 1120 | * the kernel specify what kind of dirent layout it wants to have. |
1121 | * This allows the kernel to read directories into kernel space or | 1121 | * This allows the kernel to read directories into kernel space or |
1122 | * to have different dirent layouts depending on the binary type. | 1122 | * to have different dirent layouts depending on the binary type. |
1123 | */ | 1123 | */ |
1124 | typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned); | 1124 | typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned); |
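
A ->readdir() implementation simply calls the filldir callback once per entry and stops as soon as it returns a negative value (the destination buffer is full). A hedged sketch that only emits "." and ".." (example_readdir is illustrative; a real filesystem would continue with its own entries):

	static int example_readdir(struct file *filp, void *dirent, filldir_t filldir)
	{
		struct dentry *dentry = filp->f_path.dentry;
		struct inode *inode = dentry->d_inode;

		if (filp->f_pos == 0) {
			if (filldir(dirent, ".", 1, filp->f_pos,
				    inode->i_ino, DT_DIR) < 0)
				return 0;
			filp->f_pos = 1;
		}
		if (filp->f_pos == 1) {
			if (filldir(dirent, "..", 2, filp->f_pos,
				    parent_ino(dentry), DT_DIR) < 0)
				return 0;
			filp->f_pos = 2;
		}
		return 0;	/* no further entries in this toy directory */
	}
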
1125 | 1125 | ||
1126 | struct block_device_operations { | 1126 | struct block_device_operations { |
1127 | int (*open) (struct inode *, struct file *); | 1127 | int (*open) (struct inode *, struct file *); |
1128 | int (*release) (struct inode *, struct file *); | 1128 | int (*release) (struct inode *, struct file *); |
1129 | int (*ioctl) (struct inode *, struct file *, unsigned, unsigned long); | 1129 | int (*ioctl) (struct inode *, struct file *, unsigned, unsigned long); |
1130 | long (*unlocked_ioctl) (struct file *, unsigned, unsigned long); | 1130 | long (*unlocked_ioctl) (struct file *, unsigned, unsigned long); |
1131 | long (*compat_ioctl) (struct file *, unsigned, unsigned long); | 1131 | long (*compat_ioctl) (struct file *, unsigned, unsigned long); |
1132 | int (*direct_access) (struct block_device *, sector_t, unsigned long *); | 1132 | int (*direct_access) (struct block_device *, sector_t, unsigned long *); |
1133 | int (*media_changed) (struct gendisk *); | 1133 | int (*media_changed) (struct gendisk *); |
1134 | int (*revalidate_disk) (struct gendisk *); | 1134 | int (*revalidate_disk) (struct gendisk *); |
1135 | int (*getgeo)(struct block_device *, struct hd_geometry *); | 1135 | int (*getgeo)(struct block_device *, struct hd_geometry *); |
1136 | struct module *owner; | 1136 | struct module *owner; |
1137 | }; | 1137 | }; |
1138 | 1138 | ||
1139 | /* | 1139 | /* |
1140 | * "descriptor" for what we're up to with a read. | 1140 | * "descriptor" for what we're up to with a read. |
1141 | * This allows us to use the same read code yet | 1141 | * This allows us to use the same read code yet |
1142 | * have multiple different users of the data that | 1142 | * have multiple different users of the data that |
1143 | * we read from a file. | 1143 | * we read from a file. |
1144 | * | 1144 | * |
1145 | * The simplest case just copies the data to user | 1145 | * The simplest case just copies the data to user |
1146 | * mode. | 1146 | * mode. |
1147 | */ | 1147 | */ |
1148 | typedef struct { | 1148 | typedef struct { |
1149 | size_t written; | 1149 | size_t written; |
1150 | size_t count; | 1150 | size_t count; |
1151 | union { | 1151 | union { |
1152 | char __user * buf; | 1152 | char __user * buf; |
1153 | void *data; | 1153 | void *data; |
1154 | } arg; | 1154 | } arg; |
1155 | int error; | 1155 | int error; |
1156 | } read_descriptor_t; | 1156 | } read_descriptor_t; |
1157 | 1157 | ||
1158 | typedef int (*read_actor_t)(read_descriptor_t *, struct page *, unsigned long, unsigned long); | 1158 | typedef int (*read_actor_t)(read_descriptor_t *, struct page *, unsigned long, unsigned long); |
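A read_actor_t is what lets the common page-cache read loop serve different consumers of the data; file_read_actor, declared further down, is the usual copy-to-user-space consumer. A hypothetical actor that only accounts for the bytes it is offered, showing how the descriptor fields are meant to be updated:

static int count_only_actor(read_descriptor_t *desc, struct page *page,
                            unsigned long offset, unsigned long size)
{
        unsigned long count = desc->count;

        if (size > count)
                size = count;           /* never consume more than is wanted */
        desc->count = count - size;
        desc->written += size;
        return size;                    /* a short return stops the read loop */
}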
1159 | 1159 | ||
1160 | /* These macros are for out of kernel modules to test that | 1160 | /* These macros are for out of kernel modules to test that |
1161 | * the kernel supports the unlocked_ioctl and compat_ioctl | 1161 | * the kernel supports the unlocked_ioctl and compat_ioctl |
1162 | * fields in struct file_operations. */ | 1162 | * fields in struct file_operations. */ |
1163 | #define HAVE_COMPAT_IOCTL 1 | 1163 | #define HAVE_COMPAT_IOCTL 1 |
1164 | #define HAVE_UNLOCKED_IOCTL 1 | 1164 | #define HAVE_UNLOCKED_IOCTL 1 |
1165 | 1165 | ||
1166 | /* | 1166 | /* |
1167 | * NOTE: | 1167 | * NOTE: |
1168 | * read, write, poll, fsync, readv, writev, unlocked_ioctl and compat_ioctl | 1168 | * read, write, poll, fsync, readv, writev, unlocked_ioctl and compat_ioctl |
1169 | * can be called without the big kernel lock held in all filesystems. | 1169 | * can be called without the big kernel lock held in all filesystems. |
1170 | */ | 1170 | */ |
1171 | struct file_operations { | 1171 | struct file_operations { |
1172 | struct module *owner; | 1172 | struct module *owner; |
1173 | loff_t (*llseek) (struct file *, loff_t, int); | 1173 | loff_t (*llseek) (struct file *, loff_t, int); |
1174 | ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); | 1174 | ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); |
1175 | ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); | 1175 | ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); |
1176 | ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t); | 1176 | ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t); |
1177 | ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t); | 1177 | ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t); |
1178 | int (*readdir) (struct file *, void *, filldir_t); | 1178 | int (*readdir) (struct file *, void *, filldir_t); |
1179 | unsigned int (*poll) (struct file *, struct poll_table_struct *); | 1179 | unsigned int (*poll) (struct file *, struct poll_table_struct *); |
1180 | int (*ioctl) (struct inode *, struct file *, unsigned int, unsigned long); | 1180 | int (*ioctl) (struct inode *, struct file *, unsigned int, unsigned long); |
1181 | long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); | 1181 | long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); |
1182 | long (*compat_ioctl) (struct file *, unsigned int, unsigned long); | 1182 | long (*compat_ioctl) (struct file *, unsigned int, unsigned long); |
1183 | int (*mmap) (struct file *, struct vm_area_struct *); | 1183 | int (*mmap) (struct file *, struct vm_area_struct *); |
1184 | int (*open) (struct inode *, struct file *); | 1184 | int (*open) (struct inode *, struct file *); |
1185 | int (*flush) (struct file *, fl_owner_t id); | 1185 | int (*flush) (struct file *, fl_owner_t id); |
1186 | int (*release) (struct inode *, struct file *); | 1186 | int (*release) (struct inode *, struct file *); |
1187 | int (*fsync) (struct file *, struct dentry *, int datasync); | 1187 | int (*fsync) (struct file *, struct dentry *, int datasync); |
1188 | int (*aio_fsync) (struct kiocb *, int datasync); | 1188 | int (*aio_fsync) (struct kiocb *, int datasync); |
1189 | int (*fasync) (int, struct file *, int); | 1189 | int (*fasync) (int, struct file *, int); |
1190 | int (*lock) (struct file *, int, struct file_lock *); | 1190 | int (*lock) (struct file *, int, struct file_lock *); |
1191 | ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int); | 1191 | ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int); |
1192 | unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); | 1192 | unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); |
1193 | int (*check_flags)(int); | 1193 | int (*check_flags)(int); |
1194 | int (*dir_notify)(struct file *filp, unsigned long arg); | 1194 | int (*dir_notify)(struct file *filp, unsigned long arg); |
1195 | int (*flock) (struct file *, int, struct file_lock *); | 1195 | int (*flock) (struct file *, int, struct file_lock *); |
1196 | ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); | 1196 | ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); |
1197 | ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); | 1197 | ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); |
1198 | int (*setlease)(struct file *, long, struct file_lock **); | 1198 | int (*setlease)(struct file *, long, struct file_lock **); |
1199 | }; | 1199 | }; |
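Out-of-tree drivers typically combine the feature-test macros above with a designated initializer for this struct, so one source file builds against kernels with and without the newer ioctl hooks. A hypothetical, stripped-down fragment (the mydrv_* names are illustrative and the handlers are stubs):

#include <linux/fs.h>
#include <linux/module.h>

#ifdef HAVE_UNLOCKED_IOCTL
static long mydrv_unlocked_ioctl(struct file *filp, unsigned int cmd,
                                 unsigned long arg)
{
        return -ENOTTY;         /* no commands implemented in this stub */
}
#else
static int mydrv_ioctl(struct inode *inode, struct file *filp,
                       unsigned int cmd, unsigned long arg)
{
        return -ENOTTY;
}
#endif

static const struct file_operations mydrv_fops = {
        .owner          = THIS_MODULE,
#ifdef HAVE_UNLOCKED_IOCTL
        .unlocked_ioctl = mydrv_unlocked_ioctl,
#else
        .ioctl          = mydrv_ioctl,
#endif
};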
1200 | 1200 | ||
1201 | struct inode_operations { | 1201 | struct inode_operations { |
1202 | int (*create) (struct inode *,struct dentry *,int, struct nameidata *); | 1202 | int (*create) (struct inode *,struct dentry *,int, struct nameidata *); |
1203 | struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *); | 1203 | struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *); |
1204 | int (*link) (struct dentry *,struct inode *,struct dentry *); | 1204 | int (*link) (struct dentry *,struct inode *,struct dentry *); |
1205 | int (*unlink) (struct inode *,struct dentry *); | 1205 | int (*unlink) (struct inode *,struct dentry *); |
1206 | int (*symlink) (struct inode *,struct dentry *,const char *); | 1206 | int (*symlink) (struct inode *,struct dentry *,const char *); |
1207 | int (*mkdir) (struct inode *,struct dentry *,int); | 1207 | int (*mkdir) (struct inode *,struct dentry *,int); |
1208 | int (*rmdir) (struct inode *,struct dentry *); | 1208 | int (*rmdir) (struct inode *,struct dentry *); |
1209 | int (*mknod) (struct inode *,struct dentry *,int,dev_t); | 1209 | int (*mknod) (struct inode *,struct dentry *,int,dev_t); |
1210 | int (*rename) (struct inode *, struct dentry *, | 1210 | int (*rename) (struct inode *, struct dentry *, |
1211 | struct inode *, struct dentry *); | 1211 | struct inode *, struct dentry *); |
1212 | int (*readlink) (struct dentry *, char __user *,int); | 1212 | int (*readlink) (struct dentry *, char __user *,int); |
1213 | void * (*follow_link) (struct dentry *, struct nameidata *); | 1213 | void * (*follow_link) (struct dentry *, struct nameidata *); |
1214 | void (*put_link) (struct dentry *, struct nameidata *, void *); | 1214 | void (*put_link) (struct dentry *, struct nameidata *, void *); |
1215 | void (*truncate) (struct inode *); | 1215 | void (*truncate) (struct inode *); |
1216 | int (*permission) (struct inode *, int, struct nameidata *); | 1216 | int (*permission) (struct inode *, int, struct nameidata *); |
1217 | int (*setattr) (struct dentry *, struct iattr *); | 1217 | int (*setattr) (struct dentry *, struct iattr *); |
1218 | int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *); | 1218 | int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *); |
1219 | int (*setxattr) (struct dentry *, const char *,const void *,size_t,int); | 1219 | int (*setxattr) (struct dentry *, const char *,const void *,size_t,int); |
1220 | ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t); | 1220 | ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t); |
1221 | ssize_t (*listxattr) (struct dentry *, char *, size_t); | 1221 | ssize_t (*listxattr) (struct dentry *, char *, size_t); |
1222 | int (*removexattr) (struct dentry *, const char *); | 1222 | int (*removexattr) (struct dentry *, const char *); |
1223 | void (*truncate_range)(struct inode *, loff_t, loff_t); | 1223 | void (*truncate_range)(struct inode *, loff_t, loff_t); |
1224 | long (*fallocate)(struct inode *inode, int mode, loff_t offset, | 1224 | long (*fallocate)(struct inode *inode, int mode, loff_t offset, |
1225 | loff_t len); | 1225 | loff_t len); |
1226 | }; | 1226 | }; |
1227 | 1227 | ||
1228 | struct seq_file; | 1228 | struct seq_file; |
1229 | 1229 | ||
1230 | ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, | 1230 | ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, |
1231 | unsigned long nr_segs, unsigned long fast_segs, | 1231 | unsigned long nr_segs, unsigned long fast_segs, |
1232 | struct iovec *fast_pointer, | 1232 | struct iovec *fast_pointer, |
1233 | struct iovec **ret_pointer); | 1233 | struct iovec **ret_pointer); |
1234 | 1234 | ||
1235 | extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *); | 1235 | extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *); |
1236 | extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *); | 1236 | extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *); |
1237 | extern ssize_t vfs_readv(struct file *, const struct iovec __user *, | 1237 | extern ssize_t vfs_readv(struct file *, const struct iovec __user *, |
1238 | unsigned long, loff_t *); | 1238 | unsigned long, loff_t *); |
1239 | extern ssize_t vfs_writev(struct file *, const struct iovec __user *, | 1239 | extern ssize_t vfs_writev(struct file *, const struct iovec __user *, |
1240 | unsigned long, loff_t *); | 1240 | unsigned long, loff_t *); |
1241 | 1241 | ||
1242 | /* | 1242 | /* |
1243 | * NOTE: write_inode, delete_inode, clear_inode, put_inode can be called | 1243 | * NOTE: write_inode, delete_inode, clear_inode, put_inode can be called |
1244 | * without the big kernel lock held in all filesystems. | 1244 | * without the big kernel lock held in all filesystems. |
1245 | */ | 1245 | */ |
1246 | struct super_operations { | 1246 | struct super_operations { |
1247 | struct inode *(*alloc_inode)(struct super_block *sb); | 1247 | struct inode *(*alloc_inode)(struct super_block *sb); |
1248 | void (*destroy_inode)(struct inode *); | 1248 | void (*destroy_inode)(struct inode *); |
1249 | 1249 | ||
1250 | void (*dirty_inode) (struct inode *); | 1250 | void (*dirty_inode) (struct inode *); |
1251 | int (*write_inode) (struct inode *, int); | 1251 | int (*write_inode) (struct inode *, int); |
1252 | void (*put_inode) (struct inode *); | 1252 | void (*put_inode) (struct inode *); |
1253 | void (*drop_inode) (struct inode *); | 1253 | void (*drop_inode) (struct inode *); |
1254 | void (*delete_inode) (struct inode *); | 1254 | void (*delete_inode) (struct inode *); |
1255 | void (*put_super) (struct super_block *); | 1255 | void (*put_super) (struct super_block *); |
1256 | void (*write_super) (struct super_block *); | 1256 | void (*write_super) (struct super_block *); |
1257 | int (*sync_fs)(struct super_block *sb, int wait); | 1257 | int (*sync_fs)(struct super_block *sb, int wait); |
1258 | void (*write_super_lockfs) (struct super_block *); | 1258 | void (*write_super_lockfs) (struct super_block *); |
1259 | void (*unlockfs) (struct super_block *); | 1259 | void (*unlockfs) (struct super_block *); |
1260 | int (*statfs) (struct dentry *, struct kstatfs *); | 1260 | int (*statfs) (struct dentry *, struct kstatfs *); |
1261 | int (*remount_fs) (struct super_block *, int *, char *); | 1261 | int (*remount_fs) (struct super_block *, int *, char *); |
1262 | void (*clear_inode) (struct inode *); | 1262 | void (*clear_inode) (struct inode *); |
1263 | void (*umount_begin) (struct vfsmount *, int); | 1263 | void (*umount_begin) (struct vfsmount *, int); |
1264 | 1264 | ||
1265 | int (*show_options)(struct seq_file *, struct vfsmount *); | 1265 | int (*show_options)(struct seq_file *, struct vfsmount *); |
1266 | int (*show_stats)(struct seq_file *, struct vfsmount *); | 1266 | int (*show_stats)(struct seq_file *, struct vfsmount *); |
1267 | #ifdef CONFIG_QUOTA | 1267 | #ifdef CONFIG_QUOTA |
1268 | ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); | 1268 | ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); |
1269 | ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); | 1269 | ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); |
1270 | #endif | 1270 | #endif |
1271 | }; | 1271 | }; |
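Simple filesystems usually point these superblock hooks at generic helpers instead of writing their own. A hypothetical minimal table, assuming simple_statfs() from libfs and generic_delete_inode(), both declared elsewhere in this header:

static const struct super_operations examplefs_sops = {
        .statfs         = simple_statfs,        /* generic minimal statfs */
        .drop_inode     = generic_delete_inode, /* delete inodes on last iput() */
};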
1272 | 1272 | ||
1273 | /* | 1273 | /* |
1274 | * Inode state bits. Protected by inode_lock. | 1274 | * Inode state bits. Protected by inode_lock. |
1275 | * | 1275 | * |
1276 | * Three bits determine the dirty state of the inode, I_DIRTY_SYNC, | 1276 | * Three bits determine the dirty state of the inode, I_DIRTY_SYNC, |
1277 | * I_DIRTY_DATASYNC and I_DIRTY_PAGES. | 1277 | * I_DIRTY_DATASYNC and I_DIRTY_PAGES. |
1278 | * | 1278 | * |
1279 | * Four bits define the lifetime of an inode. Initially, inodes are I_NEW, | 1279 | * Four bits define the lifetime of an inode. Initially, inodes are I_NEW, |
1280 | * until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at | 1280 | * until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at |
1281 | * various stages of removing an inode. | 1281 | * various stages of removing an inode. |
1282 | * | 1282 | * |
1283 | * Two bits are used for locking and completion notification, I_LOCK and I_SYNC. | 1283 | * Two bits are used for locking and completion notification, I_LOCK and I_SYNC. |
1284 | * | 1284 | * |
1285 | * I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on | 1285 | * I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on |
1286 | * fdatasync(). i_atime is the usual cause. | 1286 | * fdatasync(). i_atime is the usual cause. |
1287 | * I_DIRTY_DATASYNC Inode is dirty and must be written on fdatasync(), f.e. | 1287 | * I_DIRTY_DATASYNC Inode is dirty and must be written on fdatasync(), f.e. |
1288 | * because i_size changed. | 1288 | * because i_size changed. |
1289 | * I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean. | 1289 | * I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean. |
1290 | * I_NEW get_new_inode() sets i_state to I_LOCK|I_NEW. Both | 1290 | * I_NEW get_new_inode() sets i_state to I_LOCK|I_NEW. Both |
1291 | * are cleared by unlock_new_inode(), called from iget(). | 1291 | * are cleared by unlock_new_inode(), called from iget(). |
1292 | * I_WILL_FREE Must be set when calling write_inode_now() if i_count | 1292 | * I_WILL_FREE Must be set when calling write_inode_now() if i_count |
1293 | * is zero. I_FREEING must be set when I_WILL_FREE is | 1293 | * is zero. I_FREEING must be set when I_WILL_FREE is |
1294 | * cleared. | 1294 | * cleared. |
1295 | * I_FREEING Set when inode is about to be freed but still has dirty | 1295 | * I_FREEING Set when inode is about to be freed but still has dirty |
1296 | * pages or buffers attached or the inode itself is still | 1296 | * pages or buffers attached or the inode itself is still |
1297 | * dirty. | 1297 | * dirty. |
1298 | * I_CLEAR Set by clear_inode(). In this state the inode is clean | 1298 | * I_CLEAR Set by clear_inode(). In this state the inode is clean |
1299 | * and can be destroyed. | 1299 | * and can be destroyed. |
1300 | * | 1300 | * |
1301 | * Inodes that are I_WILL_FREE, I_FREEING or I_CLEAR are | 1301 | * Inodes that are I_WILL_FREE, I_FREEING or I_CLEAR are |
1302 | * prohibited for many purposes. iget() must wait for | 1302 | * prohibited for many purposes. iget() must wait for |
1303 | * the inode to be completely released, then create it | 1303 | * the inode to be completely released, then create it |
1304 | * anew. Other functions will just ignore such inodes, | 1304 | * anew. Other functions will just ignore such inodes, |
1305 | * if appropriate. I_LOCK is used for waiting. | 1305 | * if appropriate. I_LOCK is used for waiting. |
1306 | * | 1306 | * |
1307 | * I_LOCK Serves as both a mutex and completion notification. | 1307 | * I_LOCK Serves as both a mutex and completion notification. |
1308 | * New inodes set I_LOCK. If two processes both create | 1308 | * New inodes set I_LOCK. If two processes both create |
1309 | * the same inode, one of them will release its inode and | 1309 | * the same inode, one of them will release its inode and |
1310 | * wait for I_LOCK to be released before returning. | 1310 | * wait for I_LOCK to be released before returning. |
1311 | * Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can | 1311 | * Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can |
1312 | * also cause waiting on I_LOCK, without I_LOCK actually | 1312 | * also cause waiting on I_LOCK, without I_LOCK actually |
1313 | * being set. find_inode() uses this to prevent returning | 1313 | * being set. find_inode() uses this to prevent returning |
1314 | * nearly-dead inodes. | 1314 | * nearly-dead inodes. |
1315 | * I_SYNC Similar to I_LOCK, but limited in scope to writeback | 1315 | * I_SYNC Similar to I_LOCK, but limited in scope to writeback |
1316 | * of inode dirty data. Having a separate lock for this | 1316 | * of inode dirty data. Having a separate lock for this |
1317 | * purpose reduces latency and prevents some filesystem- | 1317 | * purpose reduces latency and prevents some filesystem- |
1318 | * specific deadlocks. | 1318 | * specific deadlocks. |
1319 | * | 1319 | * |
1320 | * Q: What is the difference between I_WILL_FREE and I_FREEING? | 1320 | * Q: What is the difference between I_WILL_FREE and I_FREEING? |
1321 | * Q: igrab() only checks on (I_FREEING|I_WILL_FREE). Should it also check on | 1321 | * Q: igrab() only checks on (I_FREEING|I_WILL_FREE). Should it also check on |
1322 | * I_CLEAR? If not, why? | 1322 | * I_CLEAR? If not, why? |
1323 | */ | 1323 | */ |
1324 | #define I_DIRTY_SYNC 1 | 1324 | #define I_DIRTY_SYNC 1 |
1325 | #define I_DIRTY_DATASYNC 2 | 1325 | #define I_DIRTY_DATASYNC 2 |
1326 | #define I_DIRTY_PAGES 4 | 1326 | #define I_DIRTY_PAGES 4 |
1327 | #define I_NEW 8 | 1327 | #define I_NEW 8 |
1328 | #define I_WILL_FREE 16 | 1328 | #define I_WILL_FREE 16 |
1329 | #define I_FREEING 32 | 1329 | #define I_FREEING 32 |
1330 | #define I_CLEAR 64 | 1330 | #define I_CLEAR 64 |
1331 | #define __I_LOCK 7 | 1331 | #define __I_LOCK 7 |
1332 | #define I_LOCK (1 << __I_LOCK) | 1332 | #define I_LOCK (1 << __I_LOCK) |
1333 | #define __I_SYNC 8 | 1333 | #define __I_SYNC 8 |
1334 | #define I_SYNC (1 << __I_SYNC) | 1334 | #define I_SYNC (1 << __I_SYNC) |
1335 | 1335 | ||
1336 | #define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES) | 1336 | #define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES) |
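As the comment block above spells out, inodes in any of the teardown states must not be handed out again; a hypothetical helper that makes the check explicit (i_state is the struct inode field these bits live in):

static inline int example_inode_is_going_away(struct inode *inode)
{
        /* I_WILL_FREE, I_FREEING and I_CLEAR are the teardown stages above */
        return inode->i_state & (I_WILL_FREE | I_FREEING | I_CLEAR);
}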
1337 | 1337 | ||
1338 | extern void __mark_inode_dirty(struct inode *, int); | 1338 | extern void __mark_inode_dirty(struct inode *, int); |
1339 | static inline void mark_inode_dirty(struct inode *inode) | 1339 | static inline void mark_inode_dirty(struct inode *inode) |
1340 | { | 1340 | { |
1341 | __mark_inode_dirty(inode, I_DIRTY); | 1341 | __mark_inode_dirty(inode, I_DIRTY); |
1342 | } | 1342 | } |
1343 | 1343 | ||
1344 | static inline void mark_inode_dirty_sync(struct inode *inode) | 1344 | static inline void mark_inode_dirty_sync(struct inode *inode) |
1345 | { | 1345 | { |
1346 | __mark_inode_dirty(inode, I_DIRTY_SYNC); | 1346 | __mark_inode_dirty(inode, I_DIRTY_SYNC); |
1347 | } | 1347 | } |
1348 | 1348 | ||
1349 | /** | 1349 | /** |
1350 | * inc_nlink - directly increment an inode's link count | 1350 | * inc_nlink - directly increment an inode's link count |
1351 | * @inode: inode | 1351 | * @inode: inode |
1352 | * | 1352 | * |
1353 | * This is a low-level filesystem helper to replace any | 1353 | * This is a low-level filesystem helper to replace any |
1354 | * direct filesystem manipulation of i_nlink. Currently, | 1354 | * direct filesystem manipulation of i_nlink. Currently, |
1355 | * it is only here for parity with drop_nlink(). | 1355 | * it is only here for parity with drop_nlink(). |
1356 | */ | 1356 | */ |
1357 | static inline void inc_nlink(struct inode *inode) | 1357 | static inline void inc_nlink(struct inode *inode) |
1358 | { | 1358 | { |
1359 | inode->i_nlink++; | 1359 | inode->i_nlink++; |
1360 | } | 1360 | } |
1361 | 1361 | ||
1362 | static inline void inode_inc_link_count(struct inode *inode) | 1362 | static inline void inode_inc_link_count(struct inode *inode) |
1363 | { | 1363 | { |
1364 | inc_nlink(inode); | 1364 | inc_nlink(inode); |
1365 | mark_inode_dirty(inode); | 1365 | mark_inode_dirty(inode); |
1366 | } | 1366 | } |
1367 | 1367 | ||
1368 | /** | 1368 | /** |
1369 | * drop_nlink - directly drop an inode's link count | 1369 | * drop_nlink - directly drop an inode's link count |
1370 | * @inode: inode | 1370 | * @inode: inode |
1371 | * | 1371 | * |
1372 | * This is a low-level filesystem helper to replace any | 1372 | * This is a low-level filesystem helper to replace any |
1373 | * direct filesystem manipulation of i_nlink. In cases | 1373 | * direct filesystem manipulation of i_nlink. In cases |
1374 | * where we are attempting to track writes to the | 1374 | * where we are attempting to track writes to the |
1375 | * filesystem, a decrement to zero means an imminent | 1375 | * filesystem, a decrement to zero means an imminent |
1376 | * write when the file is truncated and actually unlinked | 1376 | * write when the file is truncated and actually unlinked |
1377 | * on the filesystem. | 1377 | * on the filesystem. |
1378 | */ | 1378 | */ |
1379 | static inline void drop_nlink(struct inode *inode) | 1379 | static inline void drop_nlink(struct inode *inode) |
1380 | { | 1380 | { |
1381 | inode->i_nlink--; | 1381 | inode->i_nlink--; |
1382 | } | 1382 | } |
1383 | 1383 | ||
1384 | /** | 1384 | /** |
1385 | * clear_nlink - directly zero an inode's link count | 1385 | * clear_nlink - directly zero an inode's link count |
1386 | * @inode: inode | 1386 | * @inode: inode |
1387 | * | 1387 | * |
1388 | * This is a low-level filesystem helper to replace any | 1388 | * This is a low-level filesystem helper to replace any |
1389 | * direct filesystem manipulation of i_nlink. See | 1389 | * direct filesystem manipulation of i_nlink. See |
1390 | * drop_nlink() for why we care about i_nlink hitting zero. | 1390 | * drop_nlink() for why we care about i_nlink hitting zero. |
1391 | */ | 1391 | */ |
1392 | static inline void clear_nlink(struct inode *inode) | 1392 | static inline void clear_nlink(struct inode *inode) |
1393 | { | 1393 | { |
1394 | inode->i_nlink = 0; | 1394 | inode->i_nlink = 0; |
1395 | } | 1395 | } |
1396 | 1396 | ||
1397 | static inline void inode_dec_link_count(struct inode *inode) | 1397 | static inline void inode_dec_link_count(struct inode *inode) |
1398 | { | 1398 | { |
1399 | drop_nlink(inode); | 1399 | drop_nlink(inode); |
1400 | mark_inode_dirty(inode); | 1400 | mark_inode_dirty(inode); |
1401 | } | 1401 | } |
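These helpers exist so filesystems stop manipulating i_nlink by hand. A hypothetical ->unlink() sketch in the style of the libfs one, using inode_dec_link_count() rather than an open-coded decrement (CURRENT_TIME comes from <linux/time.h>):

static int examplefs_unlink(struct inode *dir, struct dentry *dentry)
{
        struct inode *inode = dentry->d_inode;

        inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
        inode_dec_link_count(inode);    /* drop_nlink() + mark_inode_dirty() */
        return 0;
}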
1402 | 1402 | ||
1403 | /** | 1403 | /** |
1404 | * inode_inc_iversion - increments i_version | 1404 | * inode_inc_iversion - increments i_version |
1405 | * @inode: inode that needs to be updated | 1405 | * @inode: inode that needs to be updated |
1406 | * | 1406 | * |
1407 | * Every time the inode is modified, the i_version field will be incremented. | 1407 | * Every time the inode is modified, the i_version field will be incremented. |
1408 | * The filesystem has to be mounted with the i_version flag. | 1408 | * The filesystem has to be mounted with the i_version flag. |
1409 | */ | 1409 | */ |
1410 | 1410 | ||
1411 | static inline void inode_inc_iversion(struct inode *inode) | 1411 | static inline void inode_inc_iversion(struct inode *inode) |
1412 | { | 1412 | { |
1413 | spin_lock(&inode->i_lock); | 1413 | spin_lock(&inode->i_lock); |
1414 | inode->i_version++; | 1414 | inode->i_version++; |
1415 | spin_unlock(&inode->i_lock); | 1415 | spin_unlock(&inode->i_lock); |
1416 | } | 1416 | } |
1417 | 1417 | ||
1418 | extern void touch_atime(struct vfsmount *mnt, struct dentry *dentry); | 1418 | extern void touch_atime(struct vfsmount *mnt, struct dentry *dentry); |
1419 | static inline void file_accessed(struct file *file) | 1419 | static inline void file_accessed(struct file *file) |
1420 | { | 1420 | { |
1421 | if (!(file->f_flags & O_NOATIME)) | 1421 | if (!(file->f_flags & O_NOATIME)) |
1422 | touch_atime(file->f_path.mnt, file->f_path.dentry); | 1422 | touch_atime(file->f_path.mnt, file->f_path.dentry); |
1423 | } | 1423 | } |
1424 | 1424 | ||
1425 | int sync_inode(struct inode *inode, struct writeback_control *wbc); | 1425 | int sync_inode(struct inode *inode, struct writeback_control *wbc); |
1426 | 1426 | ||
1427 | struct file_system_type { | 1427 | struct file_system_type { |
1428 | const char *name; | 1428 | const char *name; |
1429 | int fs_flags; | 1429 | int fs_flags; |
1430 | int (*get_sb) (struct file_system_type *, int, | 1430 | int (*get_sb) (struct file_system_type *, int, |
1431 | const char *, void *, struct vfsmount *); | 1431 | const char *, void *, struct vfsmount *); |
1432 | void (*kill_sb) (struct super_block *); | 1432 | void (*kill_sb) (struct super_block *); |
1433 | struct module *owner; | 1433 | struct module *owner; |
1434 | struct file_system_type * next; | 1434 | struct file_system_type * next; |
1435 | struct list_head fs_supers; | 1435 | struct list_head fs_supers; |
1436 | 1436 | ||
1437 | struct lock_class_key s_lock_key; | 1437 | struct lock_class_key s_lock_key; |
1438 | struct lock_class_key s_umount_key; | 1438 | struct lock_class_key s_umount_key; |
1439 | 1439 | ||
1440 | struct lock_class_key i_lock_key; | 1440 | struct lock_class_key i_lock_key; |
1441 | struct lock_class_key i_mutex_key; | 1441 | struct lock_class_key i_mutex_key; |
1442 | struct lock_class_key i_mutex_dir_key; | 1442 | struct lock_class_key i_mutex_dir_key; |
1443 | struct lock_class_key i_alloc_sem_key; | 1443 | struct lock_class_key i_alloc_sem_key; |
1444 | }; | 1444 | }; |
1445 | 1445 | ||
1446 | extern int get_sb_bdev(struct file_system_type *fs_type, | 1446 | extern int get_sb_bdev(struct file_system_type *fs_type, |
1447 | int flags, const char *dev_name, void *data, | 1447 | int flags, const char *dev_name, void *data, |
1448 | int (*fill_super)(struct super_block *, void *, int), | 1448 | int (*fill_super)(struct super_block *, void *, int), |
1449 | struct vfsmount *mnt); | 1449 | struct vfsmount *mnt); |
1450 | extern int get_sb_single(struct file_system_type *fs_type, | 1450 | extern int get_sb_single(struct file_system_type *fs_type, |
1451 | int flags, void *data, | 1451 | int flags, void *data, |
1452 | int (*fill_super)(struct super_block *, void *, int), | 1452 | int (*fill_super)(struct super_block *, void *, int), |
1453 | struct vfsmount *mnt); | 1453 | struct vfsmount *mnt); |
1454 | extern int get_sb_nodev(struct file_system_type *fs_type, | 1454 | extern int get_sb_nodev(struct file_system_type *fs_type, |
1455 | int flags, void *data, | 1455 | int flags, void *data, |
1456 | int (*fill_super)(struct super_block *, void *, int), | 1456 | int (*fill_super)(struct super_block *, void *, int), |
1457 | struct vfsmount *mnt); | 1457 | struct vfsmount *mnt); |
1458 | void generic_shutdown_super(struct super_block *sb); | 1458 | void generic_shutdown_super(struct super_block *sb); |
1459 | void kill_block_super(struct super_block *sb); | 1459 | void kill_block_super(struct super_block *sb); |
1460 | void kill_anon_super(struct super_block *sb); | 1460 | void kill_anon_super(struct super_block *sb); |
1461 | void kill_litter_super(struct super_block *sb); | 1461 | void kill_litter_super(struct super_block *sb); |
1462 | void deactivate_super(struct super_block *sb); | 1462 | void deactivate_super(struct super_block *sb); |
1463 | int set_anon_super(struct super_block *s, void *data); | 1463 | int set_anon_super(struct super_block *s, void *data); |
1464 | struct super_block *sget(struct file_system_type *type, | 1464 | struct super_block *sget(struct file_system_type *type, |
1465 | int (*test)(struct super_block *,void *), | 1465 | int (*test)(struct super_block *,void *), |
1466 | int (*set)(struct super_block *,void *), | 1466 | int (*set)(struct super_block *,void *), |
1467 | void *data); | 1467 | void *data); |
1468 | extern int get_sb_pseudo(struct file_system_type *, char *, | 1468 | extern int get_sb_pseudo(struct file_system_type *, char *, |
1469 | const struct super_operations *ops, unsigned long, | 1469 | const struct super_operations *ops, unsigned long, |
1470 | struct vfsmount *mnt); | 1470 | struct vfsmount *mnt); |
1471 | extern int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb); | 1471 | extern int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb); |
1472 | int __put_super(struct super_block *sb); | 1472 | int __put_super(struct super_block *sb); |
1473 | int __put_super_and_need_restart(struct super_block *sb); | 1473 | int __put_super_and_need_restart(struct super_block *sb); |
1474 | void unnamed_dev_init(void); | 1474 | void unnamed_dev_init(void); |
1475 | 1475 | ||
1476 | /* Alas, no aliases. Too much hassle with bringing module.h everywhere */ | 1476 | /* Alas, no aliases. Too much hassle with bringing module.h everywhere */ |
1477 | #define fops_get(fops) \ | 1477 | #define fops_get(fops) \ |
1478 | (((fops) && try_module_get((fops)->owner) ? (fops) : NULL)) | 1478 | (((fops) && try_module_get((fops)->owner) ? (fops) : NULL)) |
1479 | #define fops_put(fops) \ | 1479 | #define fops_put(fops) \ |
1480 | do { if (fops) module_put((fops)->owner); } while(0) | 1480 | do { if (fops) module_put((fops)->owner); } while(0) |
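fops_get()/fops_put() pin the module that owns a file_operations table for as long as a pointer to it is kept; the character-device open path uses this pattern before calling into the driver's own open method. A hypothetical sketch of swapping a file's operations under that protection:

static int example_install_fops(struct file *filp,
                                const struct file_operations *ops)
{
        const struct file_operations *old = filp->f_op;

        filp->f_op = fops_get(ops);
        if (!filp->f_op) {
                filp->f_op = old;
                return -ENODEV;         /* owning module is on its way out */
        }
        fops_put(old);
        return 0;
}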
1481 | 1481 | ||
1482 | extern int register_filesystem(struct file_system_type *); | 1482 | extern int register_filesystem(struct file_system_type *); |
1483 | extern int unregister_filesystem(struct file_system_type *); | 1483 | extern int unregister_filesystem(struct file_system_type *); |
1484 | extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data); | 1484 | extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data); |
1485 | #define kern_mount(type) kern_mount_data(type, NULL) | 1485 | #define kern_mount(type) kern_mount_data(type, NULL) |
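Tying struct file_system_type, get_sb_nodev() and register_filesystem() together, a hypothetical RAM-backed filesystem would register itself roughly as below; the examplefs_* names are illustrative and the fill_super stub only marks where the real superblock setup would go.

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>

static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
        return -ENOSYS;         /* placeholder: set s_op, build the root dentry, ... */
}

static int examplefs_get_sb(struct file_system_type *fs_type, int flags,
                            const char *dev_name, void *data,
                            struct vfsmount *mnt)
{
        return get_sb_nodev(fs_type, flags, data, examplefs_fill_super, mnt);
}

static struct file_system_type examplefs_type = {
        .owner          = THIS_MODULE,
        .name           = "examplefs",
        .get_sb         = examplefs_get_sb,
        .kill_sb        = kill_anon_super,
};

static int __init examplefs_init(void)
{
        return register_filesystem(&examplefs_type);
}

static void __exit examplefs_exit(void)
{
        unregister_filesystem(&examplefs_type);
}

module_init(examplefs_init);
module_exit(examplefs_exit);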
1486 | extern int may_umount_tree(struct vfsmount *); | 1486 | extern int may_umount_tree(struct vfsmount *); |
1487 | extern int may_umount(struct vfsmount *); | 1487 | extern int may_umount(struct vfsmount *); |
1488 | extern void umount_tree(struct vfsmount *, int, struct list_head *); | 1488 | extern void umount_tree(struct vfsmount *, int, struct list_head *); |
1489 | extern void release_mounts(struct list_head *); | 1489 | extern void release_mounts(struct list_head *); |
1490 | extern long do_mount(char *, char *, char *, unsigned long, void *); | 1490 | extern long do_mount(char *, char *, char *, unsigned long, void *); |
1491 | extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int); | 1491 | extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int); |
1492 | extern void mnt_set_mountpoint(struct vfsmount *, struct dentry *, | 1492 | extern void mnt_set_mountpoint(struct vfsmount *, struct dentry *, |
1493 | struct vfsmount *); | 1493 | struct vfsmount *); |
1494 | extern struct vfsmount *collect_mounts(struct vfsmount *, struct dentry *); | 1494 | extern struct vfsmount *collect_mounts(struct vfsmount *, struct dentry *); |
1495 | extern void drop_collected_mounts(struct vfsmount *); | 1495 | extern void drop_collected_mounts(struct vfsmount *); |
1496 | 1496 | ||
1497 | extern int vfs_statfs(struct dentry *, struct kstatfs *); | 1497 | extern int vfs_statfs(struct dentry *, struct kstatfs *); |
1498 | 1498 | ||
1499 | /* /sys/fs */ | 1499 | /* /sys/fs */ |
1500 | extern struct kobject *fs_kobj; | 1500 | extern struct kobject *fs_kobj; |
1501 | 1501 | ||
1502 | #define FLOCK_VERIFY_READ 1 | 1502 | #define FLOCK_VERIFY_READ 1 |
1503 | #define FLOCK_VERIFY_WRITE 2 | 1503 | #define FLOCK_VERIFY_WRITE 2 |
1504 | 1504 | ||
1505 | extern int locks_mandatory_locked(struct inode *); | 1505 | extern int locks_mandatory_locked(struct inode *); |
1506 | extern int locks_mandatory_area(int, struct inode *, struct file *, loff_t, size_t); | 1506 | extern int locks_mandatory_area(int, struct inode *, struct file *, loff_t, size_t); |
1507 | 1507 | ||
1508 | /* | 1508 | /* |
1509 | * Candidates for mandatory locking have the setgid bit set | 1509 | * Candidates for mandatory locking have the setgid bit set |
1510 | * but no group execute bit - an otherwise meaningless combination. | 1510 | * but no group execute bit - an otherwise meaningless combination. |
1511 | */ | 1511 | */ |
1512 | 1512 | ||
1513 | static inline int __mandatory_lock(struct inode *ino) | 1513 | static inline int __mandatory_lock(struct inode *ino) |
1514 | { | 1514 | { |
1515 | return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID; | 1515 | return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID; |
1516 | } | 1516 | } |
1517 | 1517 | ||
1518 | /* | 1518 | /* |
1519 | * ... and these candidates should be on MS_MANDLOCK mounted fs, | 1519 | * ... and these candidates should be on MS_MANDLOCK mounted fs, |
1520 | * otherwise these will be advisory locks | 1520 | * otherwise these will be advisory locks |
1521 | */ | 1521 | */ |
1522 | 1522 | ||
1523 | static inline int mandatory_lock(struct inode *ino) | 1523 | static inline int mandatory_lock(struct inode *ino) |
1524 | { | 1524 | { |
1525 | return IS_MANDLOCK(ino) && __mandatory_lock(ino); | 1525 | return IS_MANDLOCK(ino) && __mandatory_lock(ino); |
1526 | } | 1526 | } |
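The two comments above encode a user-visible rule: a file is a candidate for mandatory locking when its setgid bit is set and its group-execute bit is clear, and locks are only actually enforced if the filesystem is mounted with MS_MANDLOCK (mount -o mand). A hypothetical user-space illustration of putting a file into that state:

#include <sys/stat.h>

int make_mandatory_candidate(const char *path)
{
        struct stat st;

        if (stat(path, &st) != 0)
                return -1;
        /* setgid on, group-execute off; keep only mode bits chmod understands */
        return chmod(path, ((st.st_mode | S_ISGID) & ~S_IXGRP) & 07777);
}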
1527 | 1527 | ||
1528 | static inline int locks_verify_locked(struct inode *inode) | 1528 | static inline int locks_verify_locked(struct inode *inode) |
1529 | { | 1529 | { |
1530 | if (mandatory_lock(inode)) | 1530 | if (mandatory_lock(inode)) |
1531 | return locks_mandatory_locked(inode); | 1531 | return locks_mandatory_locked(inode); |
1532 | return 0; | 1532 | return 0; |
1533 | } | 1533 | } |
1534 | 1534 | ||
1535 | extern int rw_verify_area(int, struct file *, loff_t *, size_t); | 1535 | extern int rw_verify_area(int, struct file *, loff_t *, size_t); |
1536 | 1536 | ||
1537 | static inline int locks_verify_truncate(struct inode *inode, | 1537 | static inline int locks_verify_truncate(struct inode *inode, |
1538 | struct file *filp, | 1538 | struct file *filp, |
1539 | loff_t size) | 1539 | loff_t size) |
1540 | { | 1540 | { |
1541 | if (inode->i_flock && mandatory_lock(inode)) | 1541 | if (inode->i_flock && mandatory_lock(inode)) |
1542 | return locks_mandatory_area( | 1542 | return locks_mandatory_area( |
1543 | FLOCK_VERIFY_WRITE, inode, filp, | 1543 | FLOCK_VERIFY_WRITE, inode, filp, |
1544 | size < inode->i_size ? size : inode->i_size, | 1544 | size < inode->i_size ? size : inode->i_size, |
1545 | (size < inode->i_size ? inode->i_size - size | 1545 | (size < inode->i_size ? inode->i_size - size |
1546 | : size - inode->i_size) | 1546 | : size - inode->i_size) |
1547 | ); | 1547 | ); |
1548 | return 0; | 1548 | return 0; |
1549 | } | 1549 | } |
1550 | 1550 | ||
1551 | static inline int break_lease(struct inode *inode, unsigned int mode) | 1551 | static inline int break_lease(struct inode *inode, unsigned int mode) |
1552 | { | 1552 | { |
1553 | if (inode->i_flock) | 1553 | if (inode->i_flock) |
1554 | return __break_lease(inode, mode); | 1554 | return __break_lease(inode, mode); |
1555 | return 0; | 1555 | return 0; |
1556 | } | 1556 | } |
1557 | 1557 | ||
1558 | /* fs/open.c */ | 1558 | /* fs/open.c */ |
1559 | 1559 | ||
1560 | extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs, | 1560 | extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs, |
1561 | struct file *filp); | 1561 | struct file *filp); |
1562 | extern long do_sys_open(int dfd, const char __user *filename, int flags, | 1562 | extern long do_sys_open(int dfd, const char __user *filename, int flags, |
1563 | int mode); | 1563 | int mode); |
1564 | extern struct file *filp_open(const char *, int, int); | 1564 | extern struct file *filp_open(const char *, int, int); |
1565 | extern struct file * dentry_open(struct dentry *, struct vfsmount *, int); | 1565 | extern struct file * dentry_open(struct dentry *, struct vfsmount *, int); |
1566 | extern int filp_close(struct file *, fl_owner_t id); | 1566 | extern int filp_close(struct file *, fl_owner_t id); |
1567 | extern char * getname(const char __user *); | 1567 | extern char * getname(const char __user *); |
1568 | 1568 | ||
1569 | /* fs/dcache.c */ | 1569 | /* fs/dcache.c */ |
1570 | extern void __init vfs_caches_init_early(void); | 1570 | extern void __init vfs_caches_init_early(void); |
1571 | extern void __init vfs_caches_init(unsigned long); | 1571 | extern void __init vfs_caches_init(unsigned long); |
1572 | 1572 | ||
1573 | extern struct kmem_cache *names_cachep; | 1573 | extern struct kmem_cache *names_cachep; |
1574 | 1574 | ||
1575 | #define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL) | 1575 | #define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL) |
1576 | #define __putname(name) kmem_cache_free(names_cachep, (void *)(name)) | 1576 | #define __putname(name) kmem_cache_free(names_cachep, (void *)(name)) |
1577 | #ifndef CONFIG_AUDITSYSCALL | 1577 | #ifndef CONFIG_AUDITSYSCALL |
1578 | #define putname(name) __putname(name) | 1578 | #define putname(name) __putname(name) |
1579 | #else | 1579 | #else |
1580 | extern void putname(const char *name); | 1580 | extern void putname(const char *name); |
1581 | #endif | 1581 | #endif |
1582 | 1582 | ||
1583 | #ifdef CONFIG_BLOCK | 1583 | #ifdef CONFIG_BLOCK |
1584 | extern int register_blkdev(unsigned int, const char *); | 1584 | extern int register_blkdev(unsigned int, const char *); |
1585 | extern void unregister_blkdev(unsigned int, const char *); | 1585 | extern void unregister_blkdev(unsigned int, const char *); |
1586 | extern struct block_device *bdget(dev_t); | 1586 | extern struct block_device *bdget(dev_t); |
1587 | extern void bd_set_size(struct block_device *, loff_t size); | 1587 | extern void bd_set_size(struct block_device *, loff_t size); |
1588 | extern void bd_forget(struct inode *inode); | 1588 | extern void bd_forget(struct inode *inode); |
1589 | extern void bdput(struct block_device *); | 1589 | extern void bdput(struct block_device *); |
1590 | extern struct block_device *open_by_devnum(dev_t, unsigned); | 1590 | extern struct block_device *open_by_devnum(dev_t, unsigned); |
1591 | extern const struct address_space_operations def_blk_aops; | 1591 | extern const struct address_space_operations def_blk_aops; |
1592 | #else | 1592 | #else |
1593 | static inline void bd_forget(struct inode *inode) {} | 1593 | static inline void bd_forget(struct inode *inode) {} |
1594 | #endif | 1594 | #endif |
1595 | extern const struct file_operations def_blk_fops; | 1595 | extern const struct file_operations def_blk_fops; |
1596 | extern const struct file_operations def_chr_fops; | 1596 | extern const struct file_operations def_chr_fops; |
1597 | extern const struct file_operations bad_sock_fops; | 1597 | extern const struct file_operations bad_sock_fops; |
1598 | extern const struct file_operations def_fifo_fops; | 1598 | extern const struct file_operations def_fifo_fops; |
1599 | #ifdef CONFIG_BLOCK | 1599 | #ifdef CONFIG_BLOCK |
1600 | extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long); | 1600 | extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long); |
1601 | extern int blkdev_ioctl(struct inode *, struct file *, unsigned, unsigned long); | 1601 | extern int blkdev_ioctl(struct inode *, struct file *, unsigned, unsigned long); |
1602 | extern int blkdev_driver_ioctl(struct inode *inode, struct file *file, | 1602 | extern int blkdev_driver_ioctl(struct inode *inode, struct file *file, |
1603 | struct gendisk *disk, unsigned cmd, | 1603 | struct gendisk *disk, unsigned cmd, |
1604 | unsigned long arg); | 1604 | unsigned long arg); |
1605 | extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); | 1605 | extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); |
1606 | extern int blkdev_get(struct block_device *, mode_t, unsigned); | 1606 | extern int blkdev_get(struct block_device *, mode_t, unsigned); |
1607 | extern int blkdev_put(struct block_device *); | 1607 | extern int blkdev_put(struct block_device *); |
1608 | extern int bd_claim(struct block_device *, void *); | 1608 | extern int bd_claim(struct block_device *, void *); |
1609 | extern void bd_release(struct block_device *); | 1609 | extern void bd_release(struct block_device *); |
1610 | #ifdef CONFIG_SYSFS | 1610 | #ifdef CONFIG_SYSFS |
1611 | extern int bd_claim_by_disk(struct block_device *, void *, struct gendisk *); | 1611 | extern int bd_claim_by_disk(struct block_device *, void *, struct gendisk *); |
1612 | extern void bd_release_from_disk(struct block_device *, struct gendisk *); | 1612 | extern void bd_release_from_disk(struct block_device *, struct gendisk *); |
1613 | #else | 1613 | #else |
1614 | #define bd_claim_by_disk(bdev, holder, disk) bd_claim(bdev, holder) | 1614 | #define bd_claim_by_disk(bdev, holder, disk) bd_claim(bdev, holder) |
1615 | #define bd_release_from_disk(bdev, disk) bd_release(bdev) | 1615 | #define bd_release_from_disk(bdev, disk) bd_release(bdev) |
1616 | #endif | 1616 | #endif |
1617 | #endif | 1617 | #endif |
1618 | 1618 | ||
1619 | /* fs/char_dev.c */ | 1619 | /* fs/char_dev.c */ |
1620 | #define CHRDEV_MAJOR_HASH_SIZE 255 | 1620 | #define CHRDEV_MAJOR_HASH_SIZE 255 |
1621 | extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *); | 1621 | extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *); |
1622 | extern int register_chrdev_region(dev_t, unsigned, const char *); | 1622 | extern int register_chrdev_region(dev_t, unsigned, const char *); |
1623 | extern int register_chrdev(unsigned int, const char *, | 1623 | extern int register_chrdev(unsigned int, const char *, |
1624 | const struct file_operations *); | 1624 | const struct file_operations *); |
1625 | extern void unregister_chrdev(unsigned int, const char *); | 1625 | extern void unregister_chrdev(unsigned int, const char *); |
1626 | extern void unregister_chrdev_region(dev_t, unsigned); | 1626 | extern void unregister_chrdev_region(dev_t, unsigned); |
1627 | extern int chrdev_open(struct inode *, struct file *); | ||
1628 | extern void chrdev_show(struct seq_file *,off_t); | 1627 | extern void chrdev_show(struct seq_file *,off_t); |
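chrdev_open() no longer needs a declaration here because drivers never call it directly; opens of character special files reach it through def_chr_fops inside fs/char_dev.c. What drivers do use is the registration API above, roughly as in this hypothetical minimal module (the mychar_* names are illustrative):

#include <linux/fs.h>
#include <linux/module.h>

static const struct file_operations mychar_fops = {
        .owner  = THIS_MODULE,
        /* .open/.read/... would go here; left empty in this stub */
};

static int mychar_major;

static int __init mychar_init(void)
{
        /* major == 0 asks for a dynamically allocated major number */
        mychar_major = register_chrdev(0, "mychar", &mychar_fops);
        return (mychar_major < 0) ? mychar_major : 0;
}

static void __exit mychar_exit(void)
{
        unregister_chrdev(mychar_major, "mychar");
}

module_init(mychar_init);
module_exit(mychar_exit);

Once loaded, a device node created with the allocated major routes opens through def_chr_fops and on to mychar_fops.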
1629 | 1628 | ||
1630 | /* fs/block_dev.c */ | 1629 | /* fs/block_dev.c */ |
1631 | #define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */ | 1630 | #define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */ |
1632 | 1631 | ||
1633 | #ifdef CONFIG_BLOCK | 1632 | #ifdef CONFIG_BLOCK |
1634 | #define BLKDEV_MAJOR_HASH_SIZE 255 | 1633 | #define BLKDEV_MAJOR_HASH_SIZE 255 |
1635 | extern const char *__bdevname(dev_t, char *buffer); | 1634 | extern const char *__bdevname(dev_t, char *buffer); |
1636 | extern const char *bdevname(struct block_device *bdev, char *buffer); | 1635 | extern const char *bdevname(struct block_device *bdev, char *buffer); |
1637 | extern struct block_device *lookup_bdev(const char *); | 1636 | extern struct block_device *lookup_bdev(const char *); |
1638 | extern struct block_device *open_bdev_excl(const char *, int, void *); | 1637 | extern struct block_device *open_bdev_excl(const char *, int, void *); |
1639 | extern void close_bdev_excl(struct block_device *); | 1638 | extern void close_bdev_excl(struct block_device *); |
1640 | extern void blkdev_show(struct seq_file *,off_t); | 1639 | extern void blkdev_show(struct seq_file *,off_t); |
1641 | #else | 1640 | #else |
1642 | #define BLKDEV_MAJOR_HASH_SIZE 0 | 1641 | #define BLKDEV_MAJOR_HASH_SIZE 0 |
1643 | #endif | 1642 | #endif |
1644 | 1643 | ||
1645 | extern void init_special_inode(struct inode *, umode_t, dev_t); | 1644 | extern void init_special_inode(struct inode *, umode_t, dev_t); |
1646 | 1645 | ||
1647 | /* Invalid inode operations -- fs/bad_inode.c */ | 1646 | /* Invalid inode operations -- fs/bad_inode.c */ |
1648 | extern void make_bad_inode(struct inode *); | 1647 | extern void make_bad_inode(struct inode *); |
1649 | extern int is_bad_inode(struct inode *); | 1648 | extern int is_bad_inode(struct inode *); |
1650 | 1649 | ||
1651 | extern const struct file_operations read_fifo_fops; | 1650 | extern const struct file_operations read_fifo_fops; |
1652 | extern const struct file_operations write_fifo_fops; | 1651 | extern const struct file_operations write_fifo_fops; |
1653 | extern const struct file_operations rdwr_fifo_fops; | 1652 | extern const struct file_operations rdwr_fifo_fops; |
1654 | 1653 | ||
1655 | extern int fs_may_remount_ro(struct super_block *); | 1654 | extern int fs_may_remount_ro(struct super_block *); |
1656 | 1655 | ||
1657 | #ifdef CONFIG_BLOCK | 1656 | #ifdef CONFIG_BLOCK |
1658 | /* | 1657 | /* |
1659 | * return READ, READA, or WRITE | 1658 | * return READ, READA, or WRITE |
1660 | */ | 1659 | */ |
1661 | #define bio_rw(bio) ((bio)->bi_rw & (RW_MASK | RWA_MASK)) | 1660 | #define bio_rw(bio) ((bio)->bi_rw & (RW_MASK | RWA_MASK)) |
1662 | 1661 | ||
1663 | /* | 1662 | /* |
1664 | * return data direction, READ or WRITE | 1663 | * return data direction, READ or WRITE |
1665 | */ | 1664 | */ |
1666 | #define bio_data_dir(bio) ((bio)->bi_rw & 1) | 1665 | #define bio_data_dir(bio) ((bio)->bi_rw & 1) |
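A hypothetical helper showing how these two macros are typically consulted when classifying a request (assumes <linux/bio.h> for struct bio; READ, WRITE and READA are defined earlier in this header):

#include <linux/fs.h>
#include <linux/bio.h>

static const char *example_bio_kind(struct bio *bio)
{
        if (bio_data_dir(bio) == WRITE)
                return "write";
        return (bio_rw(bio) == READA) ? "readahead" : "read";
}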
1667 | 1666 | ||
1668 | extern int check_disk_change(struct block_device *); | 1667 | extern int check_disk_change(struct block_device *); |
1669 | extern int __invalidate_device(struct block_device *); | 1668 | extern int __invalidate_device(struct block_device *); |
1670 | extern int invalidate_partition(struct gendisk *, int); | 1669 | extern int invalidate_partition(struct gendisk *, int); |
1671 | #endif | 1670 | #endif |
1672 | extern int invalidate_inodes(struct super_block *); | 1671 | extern int invalidate_inodes(struct super_block *); |
1673 | unsigned long __invalidate_mapping_pages(struct address_space *mapping, | 1672 | unsigned long __invalidate_mapping_pages(struct address_space *mapping, |
1674 | pgoff_t start, pgoff_t end, | 1673 | pgoff_t start, pgoff_t end, |
1675 | bool be_atomic); | 1674 | bool be_atomic); |
1676 | unsigned long invalidate_mapping_pages(struct address_space *mapping, | 1675 | unsigned long invalidate_mapping_pages(struct address_space *mapping, |
1677 | pgoff_t start, pgoff_t end); | 1676 | pgoff_t start, pgoff_t end); |
1678 | 1677 | ||
1679 | static inline unsigned long __deprecated | 1678 | static inline unsigned long __deprecated |
1680 | invalidate_inode_pages(struct address_space *mapping) | 1679 | invalidate_inode_pages(struct address_space *mapping) |
1681 | { | 1680 | { |
1682 | return invalidate_mapping_pages(mapping, 0, ~0UL); | 1681 | return invalidate_mapping_pages(mapping, 0, ~0UL); |
1683 | } | 1682 | } |
1684 | 1683 | ||
1685 | static inline void invalidate_remote_inode(struct inode *inode) | 1684 | static inline void invalidate_remote_inode(struct inode *inode) |
1686 | { | 1685 | { |
1687 | if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || | 1686 | if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || |
1688 | S_ISLNK(inode->i_mode)) | 1687 | S_ISLNK(inode->i_mode)) |
1689 | invalidate_mapping_pages(inode->i_mapping, 0, -1); | 1688 | invalidate_mapping_pages(inode->i_mapping, 0, -1); |
1690 | } | 1689 | } |
1691 | extern int invalidate_inode_pages2(struct address_space *mapping); | 1690 | extern int invalidate_inode_pages2(struct address_space *mapping); |
1692 | extern int invalidate_inode_pages2_range(struct address_space *mapping, | 1691 | extern int invalidate_inode_pages2_range(struct address_space *mapping, |
1693 | pgoff_t start, pgoff_t end); | 1692 | pgoff_t start, pgoff_t end); |
1694 | extern int write_inode_now(struct inode *, int); | 1693 | extern int write_inode_now(struct inode *, int); |
1695 | extern int filemap_fdatawrite(struct address_space *); | 1694 | extern int filemap_fdatawrite(struct address_space *); |
1696 | extern int filemap_flush(struct address_space *); | 1695 | extern int filemap_flush(struct address_space *); |
1697 | extern int filemap_fdatawait(struct address_space *); | 1696 | extern int filemap_fdatawait(struct address_space *); |
1698 | extern int filemap_write_and_wait(struct address_space *mapping); | 1697 | extern int filemap_write_and_wait(struct address_space *mapping); |
1699 | extern int filemap_write_and_wait_range(struct address_space *mapping, | 1698 | extern int filemap_write_and_wait_range(struct address_space *mapping, |
1700 | loff_t lstart, loff_t lend); | 1699 | loff_t lstart, loff_t lend); |
1701 | extern int wait_on_page_writeback_range(struct address_space *mapping, | 1700 | extern int wait_on_page_writeback_range(struct address_space *mapping, |
1702 | pgoff_t start, pgoff_t end); | 1701 | pgoff_t start, pgoff_t end); |
1703 | extern int __filemap_fdatawrite_range(struct address_space *mapping, | 1702 | extern int __filemap_fdatawrite_range(struct address_space *mapping, |
1704 | loff_t start, loff_t end, int sync_mode); | 1703 | loff_t start, loff_t end, int sync_mode); |
1705 | 1704 | ||
1706 | extern long do_fsync(struct file *file, int datasync); | 1705 | extern long do_fsync(struct file *file, int datasync); |
1707 | extern void sync_supers(void); | 1706 | extern void sync_supers(void); |
1708 | extern void sync_filesystems(int wait); | 1707 | extern void sync_filesystems(int wait); |
1709 | extern void __fsync_super(struct super_block *sb); | 1708 | extern void __fsync_super(struct super_block *sb); |
1710 | extern void emergency_sync(void); | 1709 | extern void emergency_sync(void); |
1711 | extern void emergency_remount(void); | 1710 | extern void emergency_remount(void); |
1712 | extern int do_remount_sb(struct super_block *sb, int flags, | 1711 | extern int do_remount_sb(struct super_block *sb, int flags, |
1713 | void *data, int force); | 1712 | void *data, int force); |
1714 | #ifdef CONFIG_BLOCK | 1713 | #ifdef CONFIG_BLOCK |
1715 | extern sector_t bmap(struct inode *, sector_t); | 1714 | extern sector_t bmap(struct inode *, sector_t); |
1716 | #endif | 1715 | #endif |
1717 | extern int notify_change(struct dentry *, struct iattr *); | 1716 | extern int notify_change(struct dentry *, struct iattr *); |
1718 | extern int permission(struct inode *, int, struct nameidata *); | 1717 | extern int permission(struct inode *, int, struct nameidata *); |
1719 | extern int generic_permission(struct inode *, int, | 1718 | extern int generic_permission(struct inode *, int, |
1720 | int (*check_acl)(struct inode *, int)); | 1719 | int (*check_acl)(struct inode *, int)); |
1721 | 1720 | ||
1722 | extern int get_write_access(struct inode *); | 1721 | extern int get_write_access(struct inode *); |
1723 | extern int deny_write_access(struct file *); | 1722 | extern int deny_write_access(struct file *); |
1724 | static inline void put_write_access(struct inode * inode) | 1723 | static inline void put_write_access(struct inode * inode) |
1725 | { | 1724 | { |
1726 | atomic_dec(&inode->i_writecount); | 1725 | atomic_dec(&inode->i_writecount); |
1727 | } | 1726 | } |
1728 | static inline void allow_write_access(struct file *file) | 1727 | static inline void allow_write_access(struct file *file) |
1729 | { | 1728 | { |
1730 | if (file) | 1729 | if (file) |
1731 | atomic_inc(&file->f_path.dentry->d_inode->i_writecount); | 1730 | atomic_inc(&file->f_path.dentry->d_inode->i_writecount); |
1732 | } | 1731 | } |
1733 | extern int do_pipe(int *); | 1732 | extern int do_pipe(int *); |
1734 | extern struct file *create_read_pipe(struct file *f); | 1733 | extern struct file *create_read_pipe(struct file *f); |
1735 | extern struct file *create_write_pipe(void); | 1734 | extern struct file *create_write_pipe(void); |
1736 | extern void free_write_pipe(struct file *); | 1735 | extern void free_write_pipe(struct file *); |
1737 | 1736 | ||
1738 | extern int open_namei(int dfd, const char *, int, int, struct nameidata *); | 1737 | extern int open_namei(int dfd, const char *, int, int, struct nameidata *); |
1739 | extern int may_open(struct nameidata *, int, int); | 1738 | extern int may_open(struct nameidata *, int, int); |
1740 | 1739 | ||
1741 | extern int kernel_read(struct file *, unsigned long, char *, unsigned long); | 1740 | extern int kernel_read(struct file *, unsigned long, char *, unsigned long); |
1742 | extern struct file * open_exec(const char *); | 1741 | extern struct file * open_exec(const char *); |
1743 | 1742 | ||
1744 | /* fs/dcache.c -- generic fs support functions */ | 1743 | /* fs/dcache.c -- generic fs support functions */ |
1745 | extern int is_subdir(struct dentry *, struct dentry *); | 1744 | extern int is_subdir(struct dentry *, struct dentry *); |
1746 | extern ino_t find_inode_number(struct dentry *, struct qstr *); | 1745 | extern ino_t find_inode_number(struct dentry *, struct qstr *); |
1747 | 1746 | ||
1748 | #include <linux/err.h> | 1747 | #include <linux/err.h> |
1749 | 1748 | ||
1750 | /* needed for stackable file system support */ | 1749 | /* needed for stackable file system support */ |
1751 | extern loff_t default_llseek(struct file *file, loff_t offset, int origin); | 1750 | extern loff_t default_llseek(struct file *file, loff_t offset, int origin); |
1752 | 1751 | ||
1753 | extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin); | 1752 | extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin); |
1754 | 1753 | ||
1755 | extern void inode_init_once(struct inode *); | 1754 | extern void inode_init_once(struct inode *); |
1756 | extern void iput(struct inode *); | 1755 | extern void iput(struct inode *); |
1757 | extern struct inode * igrab(struct inode *); | 1756 | extern struct inode * igrab(struct inode *); |
1758 | extern ino_t iunique(struct super_block *, ino_t); | 1757 | extern ino_t iunique(struct super_block *, ino_t); |
1759 | extern int inode_needs_sync(struct inode *inode); | 1758 | extern int inode_needs_sync(struct inode *inode); |
1760 | extern void generic_delete_inode(struct inode *inode); | 1759 | extern void generic_delete_inode(struct inode *inode); |
1761 | extern void generic_drop_inode(struct inode *inode); | 1760 | extern void generic_drop_inode(struct inode *inode); |
1762 | 1761 | ||
1763 | extern struct inode *ilookup5_nowait(struct super_block *sb, | 1762 | extern struct inode *ilookup5_nowait(struct super_block *sb, |
1764 | unsigned long hashval, int (*test)(struct inode *, void *), | 1763 | unsigned long hashval, int (*test)(struct inode *, void *), |
1765 | void *data); | 1764 | void *data); |
1766 | extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval, | 1765 | extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval, |
1767 | int (*test)(struct inode *, void *), void *data); | 1766 | int (*test)(struct inode *, void *), void *data); |
1768 | extern struct inode *ilookup(struct super_block *sb, unsigned long ino); | 1767 | extern struct inode *ilookup(struct super_block *sb, unsigned long ino); |
1769 | 1768 | ||
1770 | extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *); | 1769 | extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *); |
1771 | extern struct inode * iget_locked(struct super_block *, unsigned long); | 1770 | extern struct inode * iget_locked(struct super_block *, unsigned long); |
1772 | extern void unlock_new_inode(struct inode *); | 1771 | extern void unlock_new_inode(struct inode *); |
1773 | 1772 | ||
1774 | extern void __iget(struct inode * inode); | 1773 | extern void __iget(struct inode * inode); |
1775 | extern void iget_failed(struct inode *); | 1774 | extern void iget_failed(struct inode *); |
1776 | extern void clear_inode(struct inode *); | 1775 | extern void clear_inode(struct inode *); |
1777 | extern void destroy_inode(struct inode *); | 1776 | extern void destroy_inode(struct inode *); |
1778 | extern struct inode *new_inode(struct super_block *); | 1777 | extern struct inode *new_inode(struct super_block *); |
1779 | extern int __remove_suid(struct dentry *, int); | 1778 | extern int __remove_suid(struct dentry *, int); |
1780 | extern int should_remove_suid(struct dentry *); | 1779 | extern int should_remove_suid(struct dentry *); |
1781 | extern int remove_suid(struct dentry *); | 1780 | extern int remove_suid(struct dentry *); |
1782 | 1781 | ||
1783 | extern void __insert_inode_hash(struct inode *, unsigned long hashval); | 1782 | extern void __insert_inode_hash(struct inode *, unsigned long hashval); |
1784 | extern void remove_inode_hash(struct inode *); | 1783 | extern void remove_inode_hash(struct inode *); |
1785 | static inline void insert_inode_hash(struct inode *inode) { | 1784 | static inline void insert_inode_hash(struct inode *inode) { |
1786 | __insert_inode_hash(inode, inode->i_ino); | 1785 | __insert_inode_hash(inode, inode->i_ino); |
1787 | } | 1786 | } |
1788 | 1787 | ||
1789 | extern struct file * get_empty_filp(void); | 1788 | extern struct file * get_empty_filp(void); |
1790 | extern void file_move(struct file *f, struct list_head *list); | 1789 | extern void file_move(struct file *f, struct list_head *list); |
1791 | extern void file_kill(struct file *f); | 1790 | extern void file_kill(struct file *f); |
1792 | #ifdef CONFIG_BLOCK | 1791 | #ifdef CONFIG_BLOCK |
1793 | struct bio; | 1792 | struct bio; |
1794 | extern void submit_bio(int, struct bio *); | 1793 | extern void submit_bio(int, struct bio *); |
1795 | extern int bdev_read_only(struct block_device *); | 1794 | extern int bdev_read_only(struct block_device *); |
1796 | #endif | 1795 | #endif |
1797 | extern int set_blocksize(struct block_device *, int); | 1796 | extern int set_blocksize(struct block_device *, int); |
1798 | extern int sb_set_blocksize(struct super_block *, int); | 1797 | extern int sb_set_blocksize(struct super_block *, int); |
1799 | extern int sb_min_blocksize(struct super_block *, int); | 1798 | extern int sb_min_blocksize(struct super_block *, int); |
1800 | extern int sb_has_dirty_inodes(struct super_block *); | 1799 | extern int sb_has_dirty_inodes(struct super_block *); |
1801 | 1800 | ||
1802 | extern int generic_file_mmap(struct file *, struct vm_area_struct *); | 1801 | extern int generic_file_mmap(struct file *, struct vm_area_struct *); |
1803 | extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *); | 1802 | extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *); |
1804 | extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size); | 1803 | extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size); |
1805 | int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk); | 1804 | int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk); |
1806 | extern ssize_t generic_file_aio_read(struct kiocb *, const struct iovec *, unsigned long, loff_t); | 1805 | extern ssize_t generic_file_aio_read(struct kiocb *, const struct iovec *, unsigned long, loff_t); |
1807 | extern ssize_t generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, loff_t); | 1806 | extern ssize_t generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, loff_t); |
1808 | extern ssize_t generic_file_aio_write_nolock(struct kiocb *, const struct iovec *, | 1807 | extern ssize_t generic_file_aio_write_nolock(struct kiocb *, const struct iovec *, |
1809 | unsigned long, loff_t); | 1808 | unsigned long, loff_t); |
1810 | extern ssize_t generic_file_direct_write(struct kiocb *, const struct iovec *, | 1809 | extern ssize_t generic_file_direct_write(struct kiocb *, const struct iovec *, |
1811 | unsigned long *, loff_t, loff_t *, size_t, size_t); | 1810 | unsigned long *, loff_t, loff_t *, size_t, size_t); |
1812 | extern ssize_t generic_file_buffered_write(struct kiocb *, const struct iovec *, | 1811 | extern ssize_t generic_file_buffered_write(struct kiocb *, const struct iovec *, |
1813 | unsigned long, loff_t, loff_t *, size_t, ssize_t); | 1812 | unsigned long, loff_t, loff_t *, size_t, ssize_t); |
1814 | extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos); | 1813 | extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos); |
1815 | extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos); | 1814 | extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos); |
1816 | extern int generic_segment_checks(const struct iovec *iov, | 1815 | extern int generic_segment_checks(const struct iovec *iov, |
1817 | unsigned long *nr_segs, size_t *count, int access_flags); | 1816 | unsigned long *nr_segs, size_t *count, int access_flags); |
1818 | 1817 | ||
1819 | /* fs/splice.c */ | 1818 | /* fs/splice.c */ |
1820 | extern ssize_t generic_file_splice_read(struct file *, loff_t *, | 1819 | extern ssize_t generic_file_splice_read(struct file *, loff_t *, |
1821 | struct pipe_inode_info *, size_t, unsigned int); | 1820 | struct pipe_inode_info *, size_t, unsigned int); |
1822 | extern ssize_t generic_file_splice_write(struct pipe_inode_info *, | 1821 | extern ssize_t generic_file_splice_write(struct pipe_inode_info *, |
1823 | struct file *, loff_t *, size_t, unsigned int); | 1822 | struct file *, loff_t *, size_t, unsigned int); |
1824 | extern ssize_t generic_file_splice_write_nolock(struct pipe_inode_info *, | 1823 | extern ssize_t generic_file_splice_write_nolock(struct pipe_inode_info *, |
1825 | struct file *, loff_t *, size_t, unsigned int); | 1824 | struct file *, loff_t *, size_t, unsigned int); |
1826 | extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, | 1825 | extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, |
1827 | struct file *out, loff_t *, size_t len, unsigned int flags); | 1826 | struct file *out, loff_t *, size_t len, unsigned int flags); |
1828 | extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, | 1827 | extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, |
1829 | size_t len, unsigned int flags); | 1828 | size_t len, unsigned int flags); |
1830 | 1829 | ||
1831 | extern void | 1830 | extern void |
1832 | file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); | 1831 | file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); |
1833 | extern loff_t no_llseek(struct file *file, loff_t offset, int origin); | 1832 | extern loff_t no_llseek(struct file *file, loff_t offset, int origin); |
1834 | extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin); | 1833 | extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin); |
1835 | extern loff_t remote_llseek(struct file *file, loff_t offset, int origin); | 1834 | extern loff_t remote_llseek(struct file *file, loff_t offset, int origin); |
1836 | extern int generic_file_open(struct inode * inode, struct file * filp); | 1835 | extern int generic_file_open(struct inode * inode, struct file * filp); |
1837 | extern int nonseekable_open(struct inode * inode, struct file * filp); | 1836 | extern int nonseekable_open(struct inode * inode, struct file * filp); |
1838 | 1837 | ||
1839 | #ifdef CONFIG_FS_XIP | 1838 | #ifdef CONFIG_FS_XIP |
1840 | extern ssize_t xip_file_read(struct file *filp, char __user *buf, size_t len, | 1839 | extern ssize_t xip_file_read(struct file *filp, char __user *buf, size_t len, |
1841 | loff_t *ppos); | 1840 | loff_t *ppos); |
1842 | extern int xip_file_mmap(struct file * file, struct vm_area_struct * vma); | 1841 | extern int xip_file_mmap(struct file * file, struct vm_area_struct * vma); |
1843 | extern ssize_t xip_file_write(struct file *filp, const char __user *buf, | 1842 | extern ssize_t xip_file_write(struct file *filp, const char __user *buf, |
1844 | size_t len, loff_t *ppos); | 1843 | size_t len, loff_t *ppos); |
1845 | extern int xip_truncate_page(struct address_space *mapping, loff_t from); | 1844 | extern int xip_truncate_page(struct address_space *mapping, loff_t from); |
1846 | #else | 1845 | #else |
1847 | static inline int xip_truncate_page(struct address_space *mapping, loff_t from) | 1846 | static inline int xip_truncate_page(struct address_space *mapping, loff_t from) |
1848 | { | 1847 | { |
1849 | return 0; | 1848 | return 0; |
1850 | } | 1849 | } |
1851 | #endif | 1850 | #endif |
1852 | 1851 | ||
1853 | #ifdef CONFIG_BLOCK | 1852 | #ifdef CONFIG_BLOCK |
1854 | ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, | 1853 | ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, |
1855 | struct block_device *bdev, const struct iovec *iov, loff_t offset, | 1854 | struct block_device *bdev, const struct iovec *iov, loff_t offset, |
1856 | unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io, | 1855 | unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io, |
1857 | int lock_type); | 1856 | int lock_type); |
1858 | 1857 | ||
1859 | enum { | 1858 | enum { |
1860 | DIO_LOCKING = 1, /* need locking between buffered and direct access */ | 1859 | DIO_LOCKING = 1, /* need locking between buffered and direct access */ |
1861 | DIO_NO_LOCKING, /* bdev; no locking at all between buffered/direct */ | 1860 | DIO_NO_LOCKING, /* bdev; no locking at all between buffered/direct */ |
1862 | DIO_OWN_LOCKING, /* filesystem locks buffered and direct internally */ | 1861 | DIO_OWN_LOCKING, /* filesystem locks buffered and direct internally */ |
1863 | }; | 1862 | }; |
1864 | 1863 | ||
1865 | static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, | 1864 | static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, |
1866 | struct inode *inode, struct block_device *bdev, const struct iovec *iov, | 1865 | struct inode *inode, struct block_device *bdev, const struct iovec *iov, |
1867 | loff_t offset, unsigned long nr_segs, get_block_t get_block, | 1866 | loff_t offset, unsigned long nr_segs, get_block_t get_block, |
1868 | dio_iodone_t end_io) | 1867 | dio_iodone_t end_io) |
1869 | { | 1868 | { |
1870 | return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, | 1869 | return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, |
1871 | nr_segs, get_block, end_io, DIO_LOCKING); | 1870 | nr_segs, get_block, end_io, DIO_LOCKING); |
1872 | } | 1871 | } |
1873 | 1872 | ||
1874 | static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb, | 1873 | static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb, |
1875 | struct inode *inode, struct block_device *bdev, const struct iovec *iov, | 1874 | struct inode *inode, struct block_device *bdev, const struct iovec *iov, |
1876 | loff_t offset, unsigned long nr_segs, get_block_t get_block, | 1875 | loff_t offset, unsigned long nr_segs, get_block_t get_block, |
1877 | dio_iodone_t end_io) | 1876 | dio_iodone_t end_io) |
1878 | { | 1877 | { |
1879 | return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, | 1878 | return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, |
1880 | nr_segs, get_block, end_io, DIO_NO_LOCKING); | 1879 | nr_segs, get_block, end_io, DIO_NO_LOCKING); |
1881 | } | 1880 | } |
1882 | 1881 | ||
1883 | static inline ssize_t blockdev_direct_IO_own_locking(int rw, struct kiocb *iocb, | 1882 | static inline ssize_t blockdev_direct_IO_own_locking(int rw, struct kiocb *iocb, |
1884 | struct inode *inode, struct block_device *bdev, const struct iovec *iov, | 1883 | struct inode *inode, struct block_device *bdev, const struct iovec *iov, |
1885 | loff_t offset, unsigned long nr_segs, get_block_t get_block, | 1884 | loff_t offset, unsigned long nr_segs, get_block_t get_block, |
1886 | dio_iodone_t end_io) | 1885 | dio_iodone_t end_io) |
1887 | { | 1886 | { |
1888 | return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, | 1887 | return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, |
1889 | nr_segs, get_block, end_io, DIO_OWN_LOCKING); | 1888 | nr_segs, get_block, end_io, DIO_OWN_LOCKING); |
1890 | } | 1889 | } |
1891 | #endif | 1890 | #endif |
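
The DIO_* flags above select how __blockdev_direct_IO() serialises buffered and direct I/O; a typical disk filesystem of this era simply wrapped the DIO_LOCKING helper from its ->direct_IO address_space operation. A minimal sketch (not part of this commit; myfs_direct_IO and myfs_get_block are hypothetical names for a filesystem's own callback):

static ssize_t
myfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
	       loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	/* DIO_LOCKING: i_mutex serialises this against buffered I/O */
	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
				  iov, offset, nr_segs, myfs_get_block, NULL);
}

Filesystems that do their own serialisation would call the _own_locking or _no_locking variant instead, with the same arguments.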
1892 | 1891 | ||
1893 | extern const struct file_operations generic_ro_fops; | 1892 | extern const struct file_operations generic_ro_fops; |
1894 | 1893 | ||
1895 | #define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m)) | 1894 | #define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m)) |
1896 | 1895 | ||
1897 | extern int vfs_readlink(struct dentry *, char __user *, int, const char *); | 1896 | extern int vfs_readlink(struct dentry *, char __user *, int, const char *); |
1898 | extern int vfs_follow_link(struct nameidata *, const char *); | 1897 | extern int vfs_follow_link(struct nameidata *, const char *); |
1899 | extern int page_readlink(struct dentry *, char __user *, int); | 1898 | extern int page_readlink(struct dentry *, char __user *, int); |
1900 | extern void *page_follow_link_light(struct dentry *, struct nameidata *); | 1899 | extern void *page_follow_link_light(struct dentry *, struct nameidata *); |
1901 | extern void page_put_link(struct dentry *, struct nameidata *, void *); | 1900 | extern void page_put_link(struct dentry *, struct nameidata *, void *); |
1902 | extern int __page_symlink(struct inode *inode, const char *symname, int len, | 1901 | extern int __page_symlink(struct inode *inode, const char *symname, int len, |
1903 | gfp_t gfp_mask); | 1902 | gfp_t gfp_mask); |
1904 | extern int page_symlink(struct inode *inode, const char *symname, int len); | 1903 | extern int page_symlink(struct inode *inode, const char *symname, int len); |
1905 | extern const struct inode_operations page_symlink_inode_operations; | 1904 | extern const struct inode_operations page_symlink_inode_operations; |
1906 | extern int generic_readlink(struct dentry *, char __user *, int); | 1905 | extern int generic_readlink(struct dentry *, char __user *, int); |
1907 | extern void generic_fillattr(struct inode *, struct kstat *); | 1906 | extern void generic_fillattr(struct inode *, struct kstat *); |
1908 | extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); | 1907 | extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); |
1909 | void inode_add_bytes(struct inode *inode, loff_t bytes); | 1908 | void inode_add_bytes(struct inode *inode, loff_t bytes); |
1910 | void inode_sub_bytes(struct inode *inode, loff_t bytes); | 1909 | void inode_sub_bytes(struct inode *inode, loff_t bytes); |
1911 | loff_t inode_get_bytes(struct inode *inode); | 1910 | loff_t inode_get_bytes(struct inode *inode); |
1912 | void inode_set_bytes(struct inode *inode, loff_t bytes); | 1911 | void inode_set_bytes(struct inode *inode, loff_t bytes); |
1913 | 1912 | ||
1914 | extern int vfs_readdir(struct file *, filldir_t, void *); | 1913 | extern int vfs_readdir(struct file *, filldir_t, void *); |
1915 | 1914 | ||
1916 | extern int vfs_stat(char __user *, struct kstat *); | 1915 | extern int vfs_stat(char __user *, struct kstat *); |
1917 | extern int vfs_lstat(char __user *, struct kstat *); | 1916 | extern int vfs_lstat(char __user *, struct kstat *); |
1918 | extern int vfs_stat_fd(int dfd, char __user *, struct kstat *); | 1917 | extern int vfs_stat_fd(int dfd, char __user *, struct kstat *); |
1919 | extern int vfs_lstat_fd(int dfd, char __user *, struct kstat *); | 1918 | extern int vfs_lstat_fd(int dfd, char __user *, struct kstat *); |
1920 | extern int vfs_fstat(unsigned int, struct kstat *); | 1919 | extern int vfs_fstat(unsigned int, struct kstat *); |
1921 | 1920 | ||
1922 | extern long vfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); | 1921 | extern long vfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); |
1923 | extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, | 1922 | extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, |
1924 | unsigned long arg); | 1923 | unsigned long arg); |
1925 | 1924 | ||
1926 | extern void get_filesystem(struct file_system_type *fs); | 1925 | extern void get_filesystem(struct file_system_type *fs); |
1927 | extern void put_filesystem(struct file_system_type *fs); | 1926 | extern void put_filesystem(struct file_system_type *fs); |
1928 | extern struct file_system_type *get_fs_type(const char *name); | 1927 | extern struct file_system_type *get_fs_type(const char *name); |
1929 | extern struct super_block *get_super(struct block_device *); | 1928 | extern struct super_block *get_super(struct block_device *); |
1930 | extern struct super_block *user_get_super(dev_t); | 1929 | extern struct super_block *user_get_super(dev_t); |
1931 | extern void drop_super(struct super_block *sb); | 1930 | extern void drop_super(struct super_block *sb); |
1932 | 1931 | ||
1933 | extern int dcache_dir_open(struct inode *, struct file *); | 1932 | extern int dcache_dir_open(struct inode *, struct file *); |
1934 | extern int dcache_dir_close(struct inode *, struct file *); | 1933 | extern int dcache_dir_close(struct inode *, struct file *); |
1935 | extern loff_t dcache_dir_lseek(struct file *, loff_t, int); | 1934 | extern loff_t dcache_dir_lseek(struct file *, loff_t, int); |
1936 | extern int dcache_readdir(struct file *, void *, filldir_t); | 1935 | extern int dcache_readdir(struct file *, void *, filldir_t); |
1937 | extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *); | 1936 | extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *); |
1938 | extern int simple_statfs(struct dentry *, struct kstatfs *); | 1937 | extern int simple_statfs(struct dentry *, struct kstatfs *); |
1939 | extern int simple_link(struct dentry *, struct inode *, struct dentry *); | 1938 | extern int simple_link(struct dentry *, struct inode *, struct dentry *); |
1940 | extern int simple_unlink(struct inode *, struct dentry *); | 1939 | extern int simple_unlink(struct inode *, struct dentry *); |
1941 | extern int simple_rmdir(struct inode *, struct dentry *); | 1940 | extern int simple_rmdir(struct inode *, struct dentry *); |
1942 | extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *); | 1941 | extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *); |
1943 | extern int simple_sync_file(struct file *, struct dentry *, int); | 1942 | extern int simple_sync_file(struct file *, struct dentry *, int); |
1944 | extern int simple_empty(struct dentry *); | 1943 | extern int simple_empty(struct dentry *); |
1945 | extern int simple_readpage(struct file *file, struct page *page); | 1944 | extern int simple_readpage(struct file *file, struct page *page); |
1946 | extern int simple_prepare_write(struct file *file, struct page *page, | 1945 | extern int simple_prepare_write(struct file *file, struct page *page, |
1947 | unsigned offset, unsigned to); | 1946 | unsigned offset, unsigned to); |
1948 | extern int simple_write_begin(struct file *file, struct address_space *mapping, | 1947 | extern int simple_write_begin(struct file *file, struct address_space *mapping, |
1949 | loff_t pos, unsigned len, unsigned flags, | 1948 | loff_t pos, unsigned len, unsigned flags, |
1950 | struct page **pagep, void **fsdata); | 1949 | struct page **pagep, void **fsdata); |
1951 | extern int simple_write_end(struct file *file, struct address_space *mapping, | 1950 | extern int simple_write_end(struct file *file, struct address_space *mapping, |
1952 | loff_t pos, unsigned len, unsigned copied, | 1951 | loff_t pos, unsigned len, unsigned copied, |
1953 | struct page *page, void *fsdata); | 1952 | struct page *page, void *fsdata); |
1954 | 1953 | ||
1955 | extern struct dentry *simple_lookup(struct inode *, struct dentry *, struct nameidata *); | 1954 | extern struct dentry *simple_lookup(struct inode *, struct dentry *, struct nameidata *); |
1956 | extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *); | 1955 | extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *); |
1957 | extern const struct file_operations simple_dir_operations; | 1956 | extern const struct file_operations simple_dir_operations; |
1958 | extern const struct inode_operations simple_dir_inode_operations; | 1957 | extern const struct inode_operations simple_dir_inode_operations; |
1959 | struct tree_descr { char *name; const struct file_operations *ops; int mode; }; | 1958 | struct tree_descr { char *name; const struct file_operations *ops; int mode; }; |
1960 | struct dentry *d_alloc_name(struct dentry *, const char *); | 1959 | struct dentry *d_alloc_name(struct dentry *, const char *); |
1961 | extern int simple_fill_super(struct super_block *, int, struct tree_descr *); | 1960 | extern int simple_fill_super(struct super_block *, int, struct tree_descr *); |
1962 | extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count); | 1961 | extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count); |
1963 | extern void simple_release_fs(struct vfsmount **mount, int *count); | 1962 | extern void simple_release_fs(struct vfsmount **mount, int *count); |
1964 | 1963 | ||
1965 | extern ssize_t simple_read_from_buffer(void __user *, size_t, loff_t *, const void *, size_t); | 1964 | extern ssize_t simple_read_from_buffer(void __user *, size_t, loff_t *, const void *, size_t); |
1966 | 1965 | ||
1967 | #ifdef CONFIG_MIGRATION | 1966 | #ifdef CONFIG_MIGRATION |
1968 | extern int buffer_migrate_page(struct address_space *, | 1967 | extern int buffer_migrate_page(struct address_space *, |
1969 | struct page *, struct page *); | 1968 | struct page *, struct page *); |
1970 | #else | 1969 | #else |
1971 | #define buffer_migrate_page NULL | 1970 | #define buffer_migrate_page NULL |
1972 | #endif | 1971 | #endif |
1973 | 1972 | ||
1974 | extern int inode_change_ok(struct inode *, struct iattr *); | 1973 | extern int inode_change_ok(struct inode *, struct iattr *); |
1975 | extern int __must_check inode_setattr(struct inode *, struct iattr *); | 1974 | extern int __must_check inode_setattr(struct inode *, struct iattr *); |
1976 | 1975 | ||
1977 | extern void file_update_time(struct file *file); | 1976 | extern void file_update_time(struct file *file); |
1978 | 1977 | ||
1979 | extern int generic_show_options(struct seq_file *m, struct vfsmount *mnt); | 1978 | extern int generic_show_options(struct seq_file *m, struct vfsmount *mnt); |
1980 | extern void save_mount_options(struct super_block *sb, char *options); | 1979 | extern void save_mount_options(struct super_block *sb, char *options); |
1981 | 1980 | ||
1982 | static inline ino_t parent_ino(struct dentry *dentry) | 1981 | static inline ino_t parent_ino(struct dentry *dentry) |
1983 | { | 1982 | { |
1984 | ino_t res; | 1983 | ino_t res; |
1985 | 1984 | ||
1986 | spin_lock(&dentry->d_lock); | 1985 | spin_lock(&dentry->d_lock); |
1987 | res = dentry->d_parent->d_inode->i_ino; | 1986 | res = dentry->d_parent->d_inode->i_ino; |
1988 | spin_unlock(&dentry->d_lock); | 1987 | spin_unlock(&dentry->d_lock); |
1989 | return res; | 1988 | return res; |
1990 | } | 1989 | } |
1991 | 1990 | ||
1992 | /* kernel/fork.c */ | 1991 | /* kernel/fork.c */ |
1993 | extern int unshare_files(void); | 1992 | extern int unshare_files(void); |
1994 | 1993 | ||
1995 | /* Transaction based IO helpers */ | 1994 | /* Transaction based IO helpers */ |
1996 | 1995 | ||
1997 | /* | 1996 | /* |
1998 | * An argresp is stored in an allocated page and holds the | 1997 | * An argresp is stored in an allocated page and holds the |
1999 | * size of the argument or response, along with its content | 1998 | * size of the argument or response, along with its content |
2000 | */ | 1999 | */ |
2001 | struct simple_transaction_argresp { | 2000 | struct simple_transaction_argresp { |
2002 | ssize_t size; | 2001 | ssize_t size; |
2003 | char data[0]; | 2002 | char data[0]; |
2004 | }; | 2003 | }; |
2005 | 2004 | ||
2006 | #define SIMPLE_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct simple_transaction_argresp)) | 2005 | #define SIMPLE_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct simple_transaction_argresp)) |
2007 | 2006 | ||
2008 | char *simple_transaction_get(struct file *file, const char __user *buf, | 2007 | char *simple_transaction_get(struct file *file, const char __user *buf, |
2009 | size_t size); | 2008 | size_t size); |
2010 | ssize_t simple_transaction_read(struct file *file, char __user *buf, | 2009 | ssize_t simple_transaction_read(struct file *file, char __user *buf, |
2011 | size_t size, loff_t *pos); | 2010 | size_t size, loff_t *pos); |
2012 | int simple_transaction_release(struct inode *inode, struct file *file); | 2011 | int simple_transaction_release(struct inode *inode, struct file *file); |
2013 | 2012 | ||
2014 | static inline void simple_transaction_set(struct file *file, size_t n) | 2013 | static inline void simple_transaction_set(struct file *file, size_t n) |
2015 | { | 2014 | { |
2016 | struct simple_transaction_argresp *ar = file->private_data; | 2015 | struct simple_transaction_argresp *ar = file->private_data; |
2017 | 2016 | ||
2018 | BUG_ON(n > SIMPLE_TRANSACTION_LIMIT); | 2017 | BUG_ON(n > SIMPLE_TRANSACTION_LIMIT); |
2019 | 2018 | ||
2020 | /* | 2019 | /* |
2021 | * The barrier ensures that ar->size will really remain zero until | 2020 | * The barrier ensures that ar->size will really remain zero until |
2022 | * ar->data is ready for reading. | 2021 | * ar->data is ready for reading. |
2023 | */ | 2022 | */ |
2024 | smp_mb(); | 2023 | smp_mb(); |
2025 | ar->size = n; | 2024 | ar->size = n; |
2026 | } | 2025 | } |
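
The transaction helpers above implement a simple request/response protocol over one allocated page: simple_transaction_get() copies the userspace write into the page, the handler overwrites it with its reply, and simple_transaction_set() publishes the reply size (the smp_mb() keeps ->size zero until ->data is complete). A sketch of the usual write-side pattern (not part of this commit; my_handle_request is a hypothetical handler that fills the buffer with its reply and returns the reply length):

static ssize_t my_transaction_write(struct file *file, const char __user *buf,
				    size_t size, loff_t *pos)
{
	ssize_t rv;
	char *data = simple_transaction_get(file, buf, size);

	if (IS_ERR(data))
		return PTR_ERR(data);

	rv = my_handle_request(file, data, size);	/* reply written into 'data' */
	if (rv >= 0) {
		simple_transaction_set(file, rv);	/* make reply readable */
		rv = size;				/* whole write consumed */
	}
	return rv;
}

A subsequent read() on the same open file then returns the reply via simple_transaction_read(), and simple_transaction_release() frees the page on close.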
2027 | 2026 | ||
2028 | /* | 2027 | /* |
2029 | * simple attribute files | 2028 | * simple attribute files |
2030 | * | 2029 | * |
2031 | * These attributes behave similarly to those in sysfs: | 2030 | * These attributes behave similarly to those in sysfs: |
2032 | * | 2031 | * |
2033 | * Writing to an attribute immediately sets a value, an open file can be | 2032 | * Writing to an attribute immediately sets a value, an open file can be |
2034 | * written to multiple times. | 2033 | * written to multiple times. |
2035 | * | 2034 | * |
2036 | * Reading from an attribute creates a buffer from the value that might get | 2035 | * Reading from an attribute creates a buffer from the value that might get |
2037 | * read with multiple read calls. When the attribute has been read | 2036 | * read with multiple read calls. When the attribute has been read |
2038 | * completely, no further read calls are possible until the file is opened | 2037 | * completely, no further read calls are possible until the file is opened |
2039 | * again. | 2038 | * again. |
2040 | * | 2039 | * |
2041 | * All attributes contain a text representation of a numeric value | 2040 | * All attributes contain a text representation of a numeric value |
2042 | * that is accessed with the get() and set() functions. | 2041 | * that is accessed with the get() and set() functions. |
2043 | */ | 2042 | */ |
2044 | #define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \ | 2043 | #define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \ |
2045 | static int __fops ## _open(struct inode *inode, struct file *file) \ | 2044 | static int __fops ## _open(struct inode *inode, struct file *file) \ |
2046 | { \ | 2045 | { \ |
2047 | __simple_attr_check_format(__fmt, 0ull); \ | 2046 | __simple_attr_check_format(__fmt, 0ull); \ |
2048 | return simple_attr_open(inode, file, __get, __set, __fmt); \ | 2047 | return simple_attr_open(inode, file, __get, __set, __fmt); \ |
2049 | } \ | 2048 | } \ |
2050 | static struct file_operations __fops = { \ | 2049 | static struct file_operations __fops = { \ |
2051 | .owner = THIS_MODULE, \ | 2050 | .owner = THIS_MODULE, \ |
2052 | .open = __fops ## _open, \ | 2051 | .open = __fops ## _open, \ |
2053 | .release = simple_attr_release, \ | 2052 | .release = simple_attr_release, \ |
2054 | .read = simple_attr_read, \ | 2053 | .read = simple_attr_read, \ |
2055 | .write = simple_attr_write, \ | 2054 | .write = simple_attr_write, \ |
2056 | }; | 2055 | }; |
2057 | 2056 | ||
2058 | static inline void __attribute__((format(printf, 1, 2))) | 2057 | static inline void __attribute__((format(printf, 1, 2))) |
2059 | __simple_attr_check_format(const char *fmt, ...) | 2058 | __simple_attr_check_format(const char *fmt, ...) |
2060 | { | 2059 | { |
2061 | /* don't do anything, just let the compiler check the arguments; */ | 2060 | /* don't do anything, just let the compiler check the arguments; */ |
2062 | } | 2061 | } |
2063 | 2062 | ||
2064 | int simple_attr_open(struct inode *inode, struct file *file, | 2063 | int simple_attr_open(struct inode *inode, struct file *file, |
2065 | int (*get)(void *, u64 *), int (*set)(void *, u64), | 2064 | int (*get)(void *, u64 *), int (*set)(void *, u64), |
2066 | const char *fmt); | 2065 | const char *fmt); |
2067 | int simple_attr_release(struct inode *inode, struct file *file); | 2066 | int simple_attr_release(struct inode *inode, struct file *file); |
2068 | ssize_t simple_attr_read(struct file *file, char __user *buf, | 2067 | ssize_t simple_attr_read(struct file *file, char __user *buf, |
2069 | size_t len, loff_t *ppos); | 2068 | size_t len, loff_t *ppos); |
2070 | ssize_t simple_attr_write(struct file *file, const char __user *buf, | 2069 | ssize_t simple_attr_write(struct file *file, const char __user *buf, |
2071 | size_t len, loff_t *ppos); | 2070 | size_t len, loff_t *ppos); |
2072 | 2071 | ||
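In practice DEFINE_SIMPLE_ATTRIBUTE() is paired with debugfs to expose a single numeric value as a file. A minimal sketch (not part of this commit; my_counter, my_counter_fops and the registration call site are hypothetical):

static u64 my_counter;

static int my_counter_get(void *data, u64 *val)
{
	*val = my_counter;
	return 0;
}

static int my_counter_set(void *data, u64 val)
{
	my_counter = val;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(my_counter_fops, my_counter_get, my_counter_set, "%llu\n");

/* registered once at init time, e.g.:
 *	debugfs_create_file("my_counter", 0644, NULL, NULL, &my_counter_fops);
 */
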
2073 | 2072 | ||
2074 | #ifdef CONFIG_SECURITY | 2073 | #ifdef CONFIG_SECURITY |
2075 | static inline char *alloc_secdata(void) | 2074 | static inline char *alloc_secdata(void) |
2076 | { | 2075 | { |
2077 | return (char *)get_zeroed_page(GFP_KERNEL); | 2076 | return (char *)get_zeroed_page(GFP_KERNEL); |
2078 | } | 2077 | } |
2079 | 2078 | ||
2080 | static inline void free_secdata(void *secdata) | 2079 | static inline void free_secdata(void *secdata) |
2081 | { | 2080 | { |
2082 | free_page((unsigned long)secdata); | 2081 | free_page((unsigned long)secdata); |
2083 | } | 2082 | } |
2084 | #else | 2083 | #else |
2085 | static inline char *alloc_secdata(void) | 2084 | static inline char *alloc_secdata(void) |
2086 | { | 2085 | { |
2087 | return (char *)1; | 2086 | return (char *)1; |
2088 | } | 2087 | } |
2089 | 2088 | ||
2090 | static inline void free_secdata(void *secdata) | 2089 | static inline void free_secdata(void *secdata) |
2091 | { } | 2090 | { } |
2092 | #endif /* CONFIG_SECURITY */ | 2091 | #endif /* CONFIG_SECURITY */ |
2093 | 2092 | ||
2094 | struct ctl_table; | 2093 | struct ctl_table; |
2095 | int proc_nr_files(struct ctl_table *table, int write, struct file *filp, | 2094 | int proc_nr_files(struct ctl_table *table, int write, struct file *filp, |
2096 | void __user *buffer, size_t *lenp, loff_t *ppos); | 2095 | void __user *buffer, size_t *lenp, loff_t *ppos); |
2097 | 2096 | ||
2098 | int get_filesystem_list(char * buf); | 2097 | int get_filesystem_list(char * buf); |
2099 | 2098 | ||
2100 | #endif /* __KERNEL__ */ | 2099 | #endif /* __KERNEL__ */ |
2101 | #endif /* _LINUX_FS_H */ | 2100 | #endif /* _LINUX_FS_H */ |
2102 | 2101 |