Commit 2223cbec33ef3a26e7678be89de75cb60c4c257b
Committed by Greg Kroah-Hartman
1 parent 4dde2d2f3a
Exists in smarc-l5.0.0_1.0.0-ga and in 5 other branches
char: remove use of __devinit
CONFIG_HOTPLUG is going away as an option so __devinit is no longer needed.

Signed-off-by: Bill Pemberton <wfp5p@virginia.edu>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Mattia Dongili <malattia@linux.it>
Cc: Amit Shah <amit.shah@redhat.com>
Cc: openipmi-developer@lists.sourceforge.net
Cc: linuxppc-dev@lists.ozlabs.org
Cc: cbe-oss-dev@lists.ozlabs.org
Cc: platform-driver-x86@vger.kernel.org
Cc: virtualization@lists.linux-foundation.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Showing 6 changed files with 23 additions and 23 deletions
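The change pattern applied across these six files is simply the removal of the __devinit annotation from driver probe/init routines. As a rough sketch of what that looks like (the function name below is made up for illustration and is not taken from any hunk in this commit):

#include <linux/init.h>
#include <linux/platform_device.h>

/*
 * Hypothetical probe routine used only to illustrate the change pattern.
 * Before this series it would have been declared as:
 *
 *     static int __devinit example_probe(struct platform_device *pdev)
 *
 * With CONFIG_HOTPLUG (and therefore __devinit) going away, the annotation
 * is dropped and the body is left untouched.
 */
static int example_probe(struct platform_device *pdev)
{
        return 0;
}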
drivers/char/ipmi/ipmi_si_intf.c
/*
 * ipmi_si.c
 *
 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
 * BT).
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * This file holds the "policy" for the interface to the SMI state
 * machine. It does the configuration, handles timers and interrupts,
 * and drives the real SMI state machine.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/irq.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <asm/io.h>
#include "ipmi_si_sm.h"
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/pnp.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#define PFX "ipmi_si: "

/* Measure times between events in the driver. */
#undef DEBUG_TIMING

/* Call every 10 ms. */
#define SI_TIMEOUT_TIME_USEC 10000
#define SI_USEC_PER_JIFFY (1000000/HZ)
#define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
#define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a
                                     short timeout */

enum si_intf_state {
        SI_NORMAL,
        SI_GETTING_FLAGS,
        SI_GETTING_EVENTS,
        SI_CLEARING_FLAGS,
        SI_CLEARING_FLAGS_THEN_SET_IRQ,
        SI_GETTING_MESSAGES,
        SI_ENABLE_INTERRUPTS1,
        SI_ENABLE_INTERRUPTS2,
        SI_DISABLE_INTERRUPTS1,
        SI_DISABLE_INTERRUPTS2
        /* FIXME - add watchdog stuff. */
};

/* Some BT-specific defines we need here. */
#define IPMI_BT_INTMASK_REG 2
#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT 2
#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT 1

enum si_type {
        SI_KCS, SI_SMIC, SI_BT
};
static char *si_to_str[] = { "kcs", "smic", "bt" };

static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI",
                                        "ACPI", "SMBIOS", "PCI",
                                        "device-tree", "default" };

#define DEVICE_NAME "ipmi_si"

static struct platform_driver ipmi_driver;

/*
 * Indexes into stats[] in smi_info below.
 */
enum si_stat_indexes {
        /*
         * Number of times the driver requested a timer while an operation
         * was in progress.
         */
        SI_STAT_short_timeouts = 0,

        /*
         * Number of times the driver requested a timer while nothing was in
         * progress.
         */
        SI_STAT_long_timeouts,

        /* Number of times the interface was idle while being polled. */
        SI_STAT_idles,

        /* Number of interrupts the driver handled. */
        SI_STAT_interrupts,

        /* Number of time the driver got an ATTN from the hardware. */
        SI_STAT_attentions,

        /* Number of times the driver requested flags from the hardware. */
        SI_STAT_flag_fetches,

        /* Number of times the hardware didn't follow the state machine. */
        SI_STAT_hosed_count,

        /* Number of completed messages. */
        SI_STAT_complete_transactions,

        /* Number of IPMI events received from the hardware. */
        SI_STAT_events,

        /* Number of watchdog pretimeouts. */
        SI_STAT_watchdog_pretimeouts,

        /* Number of asyncronous messages received. */
        SI_STAT_incoming_messages,


        /* This *must* remain last, add new values above this. */
        SI_NUM_STATS
};

struct smi_info {
        int intf_num;
        ipmi_smi_t intf;
        struct si_sm_data *si_sm;
        struct si_sm_handlers *handlers;
        enum si_type si_type;
        spinlock_t si_lock;
        struct list_head xmit_msgs;
        struct list_head hp_xmit_msgs;
        struct ipmi_smi_msg *curr_msg;
        enum si_intf_state si_state;

        /*
         * Used to handle the various types of I/O that can occur with
         * IPMI
         */
        struct si_sm_io io;
        int (*io_setup)(struct smi_info *info);
        void (*io_cleanup)(struct smi_info *info);
        int (*irq_setup)(struct smi_info *info);
        void (*irq_cleanup)(struct smi_info *info);
        unsigned int io_size;
        enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
        void (*addr_source_cleanup)(struct smi_info *info);
        void *addr_source_data;

        /*
         * Per-OEM handler, called from handle_flags().  Returns 1
         * when handle_flags() needs to be re-run or 0 indicating it
         * set si_state itself.
         */
        int (*oem_data_avail_handler)(struct smi_info *smi_info);

        /*
         * Flags from the last GET_MSG_FLAGS command, used when an ATTN
         * is set to hold the flags until we are done handling everything
         * from the flags.
         */
#define RECEIVE_MSG_AVAIL 0x01
#define EVENT_MSG_BUFFER_FULL 0x02
#define WDT_PRE_TIMEOUT_INT 0x08
#define OEM0_DATA_AVAIL 0x20
#define OEM1_DATA_AVAIL 0x40
#define OEM2_DATA_AVAIL 0x80
#define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \
                        OEM1_DATA_AVAIL | \
                        OEM2_DATA_AVAIL)
        unsigned char msg_flags;

        /* Does the BMC have an event buffer? */
        char has_event_buffer;

        /*
         * If set to true, this will request events the next time the
         * state machine is idle.
         */
        atomic_t req_events;

        /*
         * If true, run the state machine to completion on every send
         * call.  Generally used after a panic to make sure stuff goes
         * out.
         */
        int run_to_completion;

        /* The I/O port of an SI interface. */
        int port;

        /*
         * The space between start addresses of the two ports.  For
         * instance, if the first port is 0xca2 and the spacing is 4, then
         * the second port is 0xca6.
         */
        unsigned int spacing;

        /* zero if no irq; */
        int irq;

        /* The timer for this si. */
        struct timer_list si_timer;

        /* The time (in jiffies) the last timeout occurred at. */
        unsigned long last_timeout_jiffies;

        /* Used to gracefully stop the timer without race conditions. */
        atomic_t stop_operation;

        /*
         * The driver will disable interrupts when it gets into a
         * situation where it cannot handle messages due to lack of
         * memory.  Once that situation clears up, it will re-enable
         * interrupts.
         */
        int interrupt_disabled;

        /* From the get device id response... */
        struct ipmi_device_id device_id;

        /* Driver model stuff. */
        struct device *dev;
        struct platform_device *pdev;

        /*
         * True if we allocated the device, false if it came from
         * someplace else (like PCI).
         */
        int dev_registered;

        /* Slave address, could be reported from DMI. */
        unsigned char slave_addr;

        /* Counters and things for the proc filesystem. */
        atomic_t stats[SI_NUM_STATS];

        struct task_struct *thread;

        struct list_head link;
        union ipmi_smi_info_union addr_info;
};

#define smi_inc_stat(smi, stat) \
        atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
#define smi_get_stat(smi, stat) \
        ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))

#define SI_MAX_PARMS 4

static int force_kipmid[SI_MAX_PARMS];
static int num_force_kipmid;
#ifdef CONFIG_PCI
static int pci_registered;
#endif
#ifdef CONFIG_ACPI
static int pnp_registered;
#endif

static unsigned int kipmid_max_busy_us[SI_MAX_PARMS];
static int num_max_busy_us;

static int unload_when_empty = 1;

static int add_smi(struct smi_info *smi);
static int try_smi_init(struct smi_info *smi);
static void cleanup_one_si(struct smi_info *to_clean);
static void cleanup_ipmi_si(void);

static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
static int register_xaction_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&xaction_notifier_list, nb);
}

static void deliver_recv_msg(struct smi_info *smi_info,
                             struct ipmi_smi_msg *msg)
{
        /* Deliver the message to the upper layer. */
        ipmi_smi_msg_received(smi_info->intf, msg);
}

static void return_hosed_msg(struct smi_info *smi_info, int cCode)
{
        struct ipmi_smi_msg *msg = smi_info->curr_msg;

        if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
                cCode = IPMI_ERR_UNSPECIFIED;
        /* else use it as is */

        /* Make it a response */
        msg->rsp[0] = msg->data[0] | 4;
        msg->rsp[1] = msg->data[1];
        msg->rsp[2] = cCode;
        msg->rsp_size = 3;

        smi_info->curr_msg = NULL;
        deliver_recv_msg(smi_info, msg);
}

static enum si_sm_result start_next_msg(struct smi_info *smi_info)
{
        int rv;
        struct list_head *entry = NULL;
#ifdef DEBUG_TIMING
        struct timeval t;
#endif

        /* Pick the high priority queue first. */
        if (!list_empty(&(smi_info->hp_xmit_msgs))) {
                entry = smi_info->hp_xmit_msgs.next;
        } else if (!list_empty(&(smi_info->xmit_msgs))) {
                entry = smi_info->xmit_msgs.next;
        }

        if (!entry) {
                smi_info->curr_msg = NULL;
                rv = SI_SM_IDLE;
        } else {
                int err;

                list_del(entry);
                smi_info->curr_msg = list_entry(entry,
                                                struct ipmi_smi_msg,
                                                link);
#ifdef DEBUG_TIMING
                do_gettimeofday(&t);
                printk(KERN_DEBUG "**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
                err = atomic_notifier_call_chain(&xaction_notifier_list,
                                                 0, smi_info);
                if (err & NOTIFY_STOP_MASK) {
                        rv = SI_SM_CALL_WITHOUT_DELAY;
                        goto out;
                }
                err = smi_info->handlers->start_transaction(
                        smi_info->si_sm,
                        smi_info->curr_msg->data,
                        smi_info->curr_msg->data_size);
                if (err)
                        return_hosed_msg(smi_info, err);

                rv = SI_SM_CALL_WITHOUT_DELAY;
        }
out:
        return rv;
}

static void start_enable_irq(struct smi_info *smi_info)
{
        unsigned char msg[2];

        /*
         * If we are enabling interrupts, we have to tell the
         * BMC to use them.
         */
        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
        msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;

        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
        smi_info->si_state = SI_ENABLE_INTERRUPTS1;
}

static void start_disable_irq(struct smi_info *smi_info)
{
        unsigned char msg[2];

        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
        msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;

        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
        smi_info->si_state = SI_DISABLE_INTERRUPTS1;
}

static void start_clear_flags(struct smi_info *smi_info)
{
        unsigned char msg[3];

        /* Make sure the watchdog pre-timeout flag is not set at startup. */
        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
        msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
        msg[2] = WDT_PRE_TIMEOUT_INT;

        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
        smi_info->si_state = SI_CLEARING_FLAGS;
}

/*
 * When we have a situtaion where we run out of memory and cannot
 * allocate messages, we just leave them in the BMC and run the system
 * polled until we can allocate some memory.  Once we have some
 * memory, we will re-enable the interrupt.
 */
static inline void disable_si_irq(struct smi_info *smi_info)
{
        if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
                start_disable_irq(smi_info);
                smi_info->interrupt_disabled = 1;
                if (!atomic_read(&smi_info->stop_operation))
                        mod_timer(&smi_info->si_timer,
                                  jiffies + SI_TIMEOUT_JIFFIES);
        }
}

static inline void enable_si_irq(struct smi_info *smi_info)
{
        if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
                start_enable_irq(smi_info);
                smi_info->interrupt_disabled = 0;
        }
}

static void handle_flags(struct smi_info *smi_info)
{
retry:
        if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
                /* Watchdog pre-timeout */
                smi_inc_stat(smi_info, watchdog_pretimeouts);

                start_clear_flags(smi_info);
                smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
                ipmi_smi_watchdog_pretimeout(smi_info->intf);
        } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
                /* Messages available. */
                smi_info->curr_msg = ipmi_alloc_smi_msg();
                if (!smi_info->curr_msg) {
                        disable_si_irq(smi_info);
                        smi_info->si_state = SI_NORMAL;
                        return;
                }
                enable_si_irq(smi_info);

                smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
                smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
                smi_info->curr_msg->data_size = 2;

                smi_info->handlers->start_transaction(
                        smi_info->si_sm,
                        smi_info->curr_msg->data,
                        smi_info->curr_msg->data_size);
                smi_info->si_state = SI_GETTING_MESSAGES;
        } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
                /* Events available. */
                smi_info->curr_msg = ipmi_alloc_smi_msg();
                if (!smi_info->curr_msg) {
                        disable_si_irq(smi_info);
                        smi_info->si_state = SI_NORMAL;
                        return;
                }
                enable_si_irq(smi_info);

                smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
                smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
                smi_info->curr_msg->data_size = 2;

                smi_info->handlers->start_transaction(
                        smi_info->si_sm,
                        smi_info->curr_msg->data,
                        smi_info->curr_msg->data_size);
                smi_info->si_state = SI_GETTING_EVENTS;
        } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
                   smi_info->oem_data_avail_handler) {
                if (smi_info->oem_data_avail_handler(smi_info))
                        goto retry;
        } else
                smi_info->si_state = SI_NORMAL;
}

static void handle_transaction_done(struct smi_info *smi_info)
{
        struct ipmi_smi_msg *msg;
#ifdef DEBUG_TIMING
        struct timeval t;

        do_gettimeofday(&t);
        printk(KERN_DEBUG "**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
        switch (smi_info->si_state) {
        case SI_NORMAL:
                if (!smi_info->curr_msg)
                        break;

                smi_info->curr_msg->rsp_size
                        = smi_info->handlers->get_result(
                                smi_info->si_sm,
                                smi_info->curr_msg->rsp,
                                IPMI_MAX_MSG_LENGTH);

                /*
                 * Do this here becase deliver_recv_msg() releases the
                 * lock, and a new message can be put in during the
                 * time the lock is released.
                 */
                msg = smi_info->curr_msg;
                smi_info->curr_msg = NULL;
                deliver_recv_msg(smi_info, msg);
                break;

        case SI_GETTING_FLAGS:
        {
                unsigned char msg[4];
                unsigned int len;

                /* We got the flags from the SMI, now handle them. */
                len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
                if (msg[2] != 0) {
                        /* Error fetching flags, just give up for now. */
                        smi_info->si_state = SI_NORMAL;
                } else if (len < 4) {
                        /*
                         * Hmm, no flags.  That's technically illegal, but
                         * don't use uninitialized data.
                         */
                        smi_info->si_state = SI_NORMAL;
                } else {
                        smi_info->msg_flags = msg[3];
                        handle_flags(smi_info);
                }
                break;
        }

        case SI_CLEARING_FLAGS:
        case SI_CLEARING_FLAGS_THEN_SET_IRQ:
        {
                unsigned char msg[3];

                /* We cleared the flags. */
                smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
                if (msg[2] != 0) {
                        /* Error clearing flags */
                        dev_warn(smi_info->dev,
                                 "Error clearing flags: %2.2x\n", msg[2]);
                }
                if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
                        start_enable_irq(smi_info);
                else
                        smi_info->si_state = SI_NORMAL;
                break;
        }

        case SI_GETTING_EVENTS:
        {
                smi_info->curr_msg->rsp_size
                        = smi_info->handlers->get_result(
                                smi_info->si_sm,
                                smi_info->curr_msg->rsp,
                                IPMI_MAX_MSG_LENGTH);

                /*
                 * Do this here becase deliver_recv_msg() releases the
                 * lock, and a new message can be put in during the
                 * time the lock is released.
                 */
                msg = smi_info->curr_msg;
                smi_info->curr_msg = NULL;
                if (msg->rsp[2] != 0) {
                        /* Error getting event, probably done. */
                        msg->done(msg);

                        /* Take off the event flag. */
                        smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
                        handle_flags(smi_info);
                } else {
                        smi_inc_stat(smi_info, events);

                        /*
                         * Do this before we deliver the message
                         * because delivering the message releases the
                         * lock and something else can mess with the
                         * state.
                         */
                        handle_flags(smi_info);

                        deliver_recv_msg(smi_info, msg);
                }
                break;
        }

        case SI_GETTING_MESSAGES:
        {
                smi_info->curr_msg->rsp_size
                        = smi_info->handlers->get_result(
                                smi_info->si_sm,
                                smi_info->curr_msg->rsp,
                                IPMI_MAX_MSG_LENGTH);

                /*
                 * Do this here becase deliver_recv_msg() releases the
                 * lock, and a new message can be put in during the
                 * time the lock is released.
                 */
                msg = smi_info->curr_msg;
                smi_info->curr_msg = NULL;
                if (msg->rsp[2] != 0) {
                        /* Error getting event, probably done. */
                        msg->done(msg);

                        /* Take off the msg flag. */
                        smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
                        handle_flags(smi_info);
                } else {
                        smi_inc_stat(smi_info, incoming_messages);

                        /*
                         * Do this before we deliver the message
                         * because delivering the message releases the
                         * lock and something else can mess with the
                         * state.
                         */
                        handle_flags(smi_info);

                        deliver_recv_msg(smi_info, msg);
                }
                break;
        }

        case SI_ENABLE_INTERRUPTS1:
        {
                unsigned char msg[4];

                /* We got the flags from the SMI, now handle them. */
                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
                if (msg[2] != 0) {
                        dev_warn(smi_info->dev, "Could not enable interrupts"
                                 ", failed get, using polled mode.\n");
                        smi_info->si_state = SI_NORMAL;
                } else {
                        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
                        msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
                        msg[2] = (msg[3] |
                                  IPMI_BMC_RCV_MSG_INTR |
                                  IPMI_BMC_EVT_MSG_INTR);
                        smi_info->handlers->start_transaction(
                                smi_info->si_sm, msg, 3);
                        smi_info->si_state = SI_ENABLE_INTERRUPTS2;
                }
                break;
        }

        case SI_ENABLE_INTERRUPTS2:
        {
                unsigned char msg[4];

                /* We got the flags from the SMI, now handle them. */
                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
                if (msg[2] != 0)
                        dev_warn(smi_info->dev, "Could not enable interrupts"
                                 ", failed set, using polled mode.\n");
                else
                        smi_info->interrupt_disabled = 0;
                smi_info->si_state = SI_NORMAL;
                break;
        }

        case SI_DISABLE_INTERRUPTS1:
        {
                unsigned char msg[4];

                /* We got the flags from the SMI, now handle them. */
                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
                if (msg[2] != 0) {
                        dev_warn(smi_info->dev, "Could not disable interrupts"
                                 ", failed get.\n");
                        smi_info->si_state = SI_NORMAL;
                } else {
                        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
                        msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
                        msg[2] = (msg[3] &
                                  ~(IPMI_BMC_RCV_MSG_INTR |
                                    IPMI_BMC_EVT_MSG_INTR));
                        smi_info->handlers->start_transaction(
                                smi_info->si_sm, msg, 3);
                        smi_info->si_state = SI_DISABLE_INTERRUPTS2;
                }
                break;
        }

        case SI_DISABLE_INTERRUPTS2:
        {
                unsigned char msg[4];

                /* We got the flags from the SMI, now handle them. */
                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
                if (msg[2] != 0) {
                        dev_warn(smi_info->dev, "Could not disable interrupts"
                                 ", failed set.\n");
                }
                smi_info->si_state = SI_NORMAL;
                break;
        }
        }
}

/*
 * Called on timeouts and events.  Timeouts should pass the elapsed
 * time, interrupts should pass in zero.  Must be called with
 * si_lock held and interrupts disabled.
 */
static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
                                           int time)
{
        enum si_sm_result si_sm_result;

restart:
        /*
         * There used to be a loop here that waited a little while
         * (around 25us) before giving up.  That turned out to be
         * pointless, the minimum delays I was seeing were in the 300us
         * range, which is far too long to wait in an interrupt.  So
         * we just run until the state machine tells us something
         * happened or it needs a delay.
         */
        si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
        time = 0;
        while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
                si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);

        if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
                smi_inc_stat(smi_info, complete_transactions);

                handle_transaction_done(smi_info);
                si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
        } else if (si_sm_result == SI_SM_HOSED) {
                smi_inc_stat(smi_info, hosed_count);

                /*
                 * Do the before return_hosed_msg, because that
                 * releases the lock.
                 */
                smi_info->si_state = SI_NORMAL;
                if (smi_info->curr_msg != NULL) {
                        /*
                         * If we were handling a user message, format
                         * a response to send to the upper layer to
                         * tell it about the error.
                         */
                        return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
                }
                si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
        }

        /*
         * We prefer handling attn over new messages.  But don't do
         * this if there is not yet an upper layer to handle anything.
         */
        if (likely(smi_info->intf) && si_sm_result == SI_SM_ATTN) {
                unsigned char msg[2];

                smi_inc_stat(smi_info, attentions);

                /*
                 * Got a attn, send down a get message flags to see
                 * what's causing it.  It would be better to handle
                 * this in the upper layer, but due to the way
                 * interrupts work with the SMI, that's not really
                 * possible.
                 */
                msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
                msg[1] = IPMI_GET_MSG_FLAGS_CMD;

                smi_info->handlers->start_transaction(
                        smi_info->si_sm, msg, 2);
                smi_info->si_state = SI_GETTING_FLAGS;
                goto restart;
        }

        /* If we are currently idle, try to start the next message. */
        if (si_sm_result == SI_SM_IDLE) {
                smi_inc_stat(smi_info, idles);

                si_sm_result = start_next_msg(smi_info);
                if (si_sm_result != SI_SM_IDLE)
                        goto restart;
        }

        if ((si_sm_result == SI_SM_IDLE)
            && (atomic_read(&smi_info->req_events))) {
                /*
                 * We are idle and the upper layer requested that I fetch
                 * events, so do so.
                 */
                atomic_set(&smi_info->req_events, 0);

                smi_info->curr_msg = ipmi_alloc_smi_msg();
                if (!smi_info->curr_msg)
                        goto out;

                smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
                smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
                smi_info->curr_msg->data_size = 2;

                smi_info->handlers->start_transaction(
                        smi_info->si_sm,
                        smi_info->curr_msg->data,
                        smi_info->curr_msg->data_size);
                smi_info->si_state = SI_GETTING_EVENTS;
                goto restart;
        }
out:
        return si_sm_result;
}

static void sender(void *send_info,
                   struct ipmi_smi_msg *msg,
                   int priority)
{
        struct smi_info *smi_info = send_info;
        enum si_sm_result result;
        unsigned long flags;
#ifdef DEBUG_TIMING
        struct timeval t;
#endif

        if (atomic_read(&smi_info->stop_operation)) {
                msg->rsp[0] = msg->data[0] | 4;
                msg->rsp[1] = msg->data[1];
                msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
                msg->rsp_size = 3;
                deliver_recv_msg(smi_info, msg);
                return;
        }

#ifdef DEBUG_TIMING
        do_gettimeofday(&t);
        printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif

        if (smi_info->run_to_completion) {
                /*
                 * If we are running to completion, then throw it in
                 * the list and run transactions until everything is
                 * clear.  Priority doesn't matter here.
                 */

                /*
                 * Run to completion means we are single-threaded, no
                 * need for locks.
                 */
                list_add_tail(&(msg->link), &(smi_info->xmit_msgs));

                result = smi_event_handler(smi_info, 0);
                while (result != SI_SM_IDLE) {
                        udelay(SI_SHORT_TIMEOUT_USEC);
                        result = smi_event_handler(smi_info,
                                                   SI_SHORT_TIMEOUT_USEC);
                }
                return;
        }

        spin_lock_irqsave(&smi_info->si_lock, flags);
        if (priority > 0)
                list_add_tail(&msg->link, &smi_info->hp_xmit_msgs);
        else
                list_add_tail(&msg->link, &smi_info->xmit_msgs);

        if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
                /*
                 * last_timeout_jiffies is updated here to avoid
                 * smi_timeout() handler passing very large time_diff
                 * value to smi_event_handler() that causes
                 * the send command to abort.
                 */
                smi_info->last_timeout_jiffies = jiffies;

                mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);

                if (smi_info->thread)
                        wake_up_process(smi_info->thread);

                start_next_msg(smi_info);
913 | smi_event_handler(smi_info, 0); | 913 | smi_event_handler(smi_info, 0); |
914 | } | 914 | } |
915 | spin_unlock_irqrestore(&smi_info->si_lock, flags); | 915 | spin_unlock_irqrestore(&smi_info->si_lock, flags); |
916 | } | 916 | } |
917 | 917 | ||
918 | static void set_run_to_completion(void *send_info, int i_run_to_completion) | 918 | static void set_run_to_completion(void *send_info, int i_run_to_completion) |
919 | { | 919 | { |
920 | struct smi_info *smi_info = send_info; | 920 | struct smi_info *smi_info = send_info; |
921 | enum si_sm_result result; | 921 | enum si_sm_result result; |
922 | 922 | ||
923 | smi_info->run_to_completion = i_run_to_completion; | 923 | smi_info->run_to_completion = i_run_to_completion; |
924 | if (i_run_to_completion) { | 924 | if (i_run_to_completion) { |
925 | result = smi_event_handler(smi_info, 0); | 925 | result = smi_event_handler(smi_info, 0); |
926 | while (result != SI_SM_IDLE) { | 926 | while (result != SI_SM_IDLE) { |
927 | udelay(SI_SHORT_TIMEOUT_USEC); | 927 | udelay(SI_SHORT_TIMEOUT_USEC); |
928 | result = smi_event_handler(smi_info, | 928 | result = smi_event_handler(smi_info, |
929 | SI_SHORT_TIMEOUT_USEC); | 929 | SI_SHORT_TIMEOUT_USEC); |
930 | } | 930 | } |
931 | } | 931 | } |
932 | } | 932 | } |
933 | 933 | ||
934 | /* | 934 | /* |
935 | * Use -1 in the nsec value of the busy waiting timespec to tell that | 935 | * Use -1 in the nsec value of the busy waiting timespec to tell that |
936 | * we are spinning in kipmid looking for something and not delaying | 936 | * we are spinning in kipmid looking for something and not delaying |
937 | * between checks | 937 | * between checks |
938 | */ | 938 | */ |
939 | static inline void ipmi_si_set_not_busy(struct timespec *ts) | 939 | static inline void ipmi_si_set_not_busy(struct timespec *ts) |
940 | { | 940 | { |
941 | ts->tv_nsec = -1; | 941 | ts->tv_nsec = -1; |
942 | } | 942 | } |
943 | static inline int ipmi_si_is_busy(struct timespec *ts) | 943 | static inline int ipmi_si_is_busy(struct timespec *ts) |
944 | { | 944 | { |
945 | return ts->tv_nsec != -1; | 945 | return ts->tv_nsec != -1; |
946 | } | 946 | } |
947 | 947 | ||
948 | static int ipmi_thread_busy_wait(enum si_sm_result smi_result, | 948 | static int ipmi_thread_busy_wait(enum si_sm_result smi_result, |
949 | const struct smi_info *smi_info, | 949 | const struct smi_info *smi_info, |
950 | struct timespec *busy_until) | 950 | struct timespec *busy_until) |
951 | { | 951 | { |
952 | unsigned int max_busy_us = 0; | 952 | unsigned int max_busy_us = 0; |
953 | 953 | ||
954 | if (smi_info->intf_num < num_max_busy_us) | 954 | if (smi_info->intf_num < num_max_busy_us) |
955 | max_busy_us = kipmid_max_busy_us[smi_info->intf_num]; | 955 | max_busy_us = kipmid_max_busy_us[smi_info->intf_num]; |
956 | if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY) | 956 | if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY) |
957 | ipmi_si_set_not_busy(busy_until); | 957 | ipmi_si_set_not_busy(busy_until); |
958 | else if (!ipmi_si_is_busy(busy_until)) { | 958 | else if (!ipmi_si_is_busy(busy_until)) { |
959 | getnstimeofday(busy_until); | 959 | getnstimeofday(busy_until); |
960 | timespec_add_ns(busy_until, max_busy_us*NSEC_PER_USEC); | 960 | timespec_add_ns(busy_until, max_busy_us*NSEC_PER_USEC); |
961 | } else { | 961 | } else { |
962 | struct timespec now; | 962 | struct timespec now; |
963 | getnstimeofday(&now); | 963 | getnstimeofday(&now); |
964 | if (unlikely(timespec_compare(&now, busy_until) > 0)) { | 964 | if (unlikely(timespec_compare(&now, busy_until) > 0)) { |
965 | ipmi_si_set_not_busy(busy_until); | 965 | ipmi_si_set_not_busy(busy_until); |
966 | return 0; | 966 | return 0; |
967 | } | 967 | } |
968 | } | 968 | } |
969 | return 1; | 969 | return 1; |
970 | } | 970 | } |
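ipmi_thread_busy_wait() implements a simple "busy window": the first poll that returns SI_SM_CALL_WITH_DELAY arms a deadline max_busy_us microseconds in the future, later polls keep spinning until that deadline passes, and tv_nsec == -1 marks the window as disarmed. Below is a minimal userspace sketch of the same pattern; the names (keep_spinning, MAX_BUSY_US) and the 500 us budget are assumptions and do not exist in the driver.

/*
 * Illustrative userspace sketch of the busy-window pattern above.
 */
#include <stdbool.h>
#include <time.h>

#define MAX_BUSY_US 500

static bool armed;
static struct timespec deadline;

static void add_us(struct timespec *ts, long us)
{
	ts->tv_nsec += us * 1000L;
	while (ts->tv_nsec >= 1000000000L) {
		ts->tv_nsec -= 1000000000L;
		ts->tv_sec++;
	}
}

static bool keep_spinning(void)		/* true while spinning is still allowed */
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	if (!armed) {			/* first busy poll arms the window */
		deadline = now;
		add_us(&deadline, MAX_BUSY_US);
		armed = true;
		return true;
	}
	if (now.tv_sec > deadline.tv_sec ||
	    (now.tv_sec == deadline.tv_sec && now.tv_nsec > deadline.tv_nsec)) {
		armed = false;		/* window expired, go back to sleep */
		return false;
	}
	return true;
}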
971 | 971 | ||
972 | 972 | ||
973 | /* | 973 | /* |
974 | * A busy-waiting loop for speeding up IPMI operation. | 974 | * A busy-waiting loop for speeding up IPMI operation. |
975 | * | 975 | * |
976 | * Lousy hardware makes this hard. This is only enabled for systems | 976 | * Lousy hardware makes this hard. This is only enabled for systems |
977 | * that are not BT and do not have interrupts. It keeps spinning | 977 | * that are not BT and do not have interrupts. It keeps spinning |
978 | * until an operation is complete or until max_busy tells it to stop | 978 | * until an operation is complete or until max_busy tells it to stop |
979 | * (if that is enabled). See the paragraph on kipmid_max_busy_us in | 979 | * (if that is enabled). See the paragraph on kipmid_max_busy_us in |
980 | * Documentation/IPMI.txt for details. | 980 | * Documentation/IPMI.txt for details. |
981 | */ | 981 | */ |
982 | static int ipmi_thread(void *data) | 982 | static int ipmi_thread(void *data) |
983 | { | 983 | { |
984 | struct smi_info *smi_info = data; | 984 | struct smi_info *smi_info = data; |
985 | unsigned long flags; | 985 | unsigned long flags; |
986 | enum si_sm_result smi_result; | 986 | enum si_sm_result smi_result; |
987 | struct timespec busy_until; | 987 | struct timespec busy_until; |
988 | 988 | ||
989 | ipmi_si_set_not_busy(&busy_until); | 989 | ipmi_si_set_not_busy(&busy_until); |
990 | set_user_nice(current, 19); | 990 | set_user_nice(current, 19); |
991 | while (!kthread_should_stop()) { | 991 | while (!kthread_should_stop()) { |
992 | int busy_wait; | 992 | int busy_wait; |
993 | 993 | ||
994 | spin_lock_irqsave(&(smi_info->si_lock), flags); | 994 | spin_lock_irqsave(&(smi_info->si_lock), flags); |
995 | smi_result = smi_event_handler(smi_info, 0); | 995 | smi_result = smi_event_handler(smi_info, 0); |
996 | spin_unlock_irqrestore(&(smi_info->si_lock), flags); | 996 | spin_unlock_irqrestore(&(smi_info->si_lock), flags); |
997 | busy_wait = ipmi_thread_busy_wait(smi_result, smi_info, | 997 | busy_wait = ipmi_thread_busy_wait(smi_result, smi_info, |
998 | &busy_until); | 998 | &busy_until); |
999 | if (smi_result == SI_SM_CALL_WITHOUT_DELAY) | 999 | if (smi_result == SI_SM_CALL_WITHOUT_DELAY) |
1000 | ; /* do nothing */ | 1000 | ; /* do nothing */ |
1001 | else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) | 1001 | else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) |
1002 | schedule(); | 1002 | schedule(); |
1003 | else if (smi_result == SI_SM_IDLE) | 1003 | else if (smi_result == SI_SM_IDLE) |
1004 | schedule_timeout_interruptible(100); | 1004 | schedule_timeout_interruptible(100); |
1005 | else | 1005 | else |
1006 | schedule_timeout_interruptible(1); | 1006 | schedule_timeout_interruptible(1); |
1007 | } | 1007 | } |
1008 | return 0; | 1008 | return 0; |
1009 | } | 1009 | } |
1010 | 1010 | ||
1011 | 1011 | ||
1012 | static void poll(void *send_info) | 1012 | static void poll(void *send_info) |
1013 | { | 1013 | { |
1014 | struct smi_info *smi_info = send_info; | 1014 | struct smi_info *smi_info = send_info; |
1015 | unsigned long flags = 0; | 1015 | unsigned long flags = 0; |
1016 | int run_to_completion = smi_info->run_to_completion; | 1016 | int run_to_completion = smi_info->run_to_completion; |
1017 | 1017 | ||
1018 | /* | 1018 | /* |
1019 | * Make sure there is some delay in the poll loop so we can | 1019 | * Make sure there is some delay in the poll loop so we can |
1020 | * drive time forward and timeout things. | 1020 | * drive time forward and timeout things. |
1021 | */ | 1021 | */ |
1022 | udelay(10); | 1022 | udelay(10); |
1023 | if (!run_to_completion) | 1023 | if (!run_to_completion) |
1024 | spin_lock_irqsave(&smi_info->si_lock, flags); | 1024 | spin_lock_irqsave(&smi_info->si_lock, flags); |
1025 | smi_event_handler(smi_info, 10); | 1025 | smi_event_handler(smi_info, 10); |
1026 | if (!run_to_completion) | 1026 | if (!run_to_completion) |
1027 | spin_unlock_irqrestore(&smi_info->si_lock, flags); | 1027 | spin_unlock_irqrestore(&smi_info->si_lock, flags); |
1028 | } | 1028 | } |
1029 | 1029 | ||
1030 | static void request_events(void *send_info) | 1030 | static void request_events(void *send_info) |
1031 | { | 1031 | { |
1032 | struct smi_info *smi_info = send_info; | 1032 | struct smi_info *smi_info = send_info; |
1033 | 1033 | ||
1034 | if (atomic_read(&smi_info->stop_operation) || | 1034 | if (atomic_read(&smi_info->stop_operation) || |
1035 | !smi_info->has_event_buffer) | 1035 | !smi_info->has_event_buffer) |
1036 | return; | 1036 | return; |
1037 | 1037 | ||
1038 | atomic_set(&smi_info->req_events, 1); | 1038 | atomic_set(&smi_info->req_events, 1); |
1039 | } | 1039 | } |
1040 | 1040 | ||
1041 | static int initialized; | 1041 | static int initialized; |
1042 | 1042 | ||
1043 | static void smi_timeout(unsigned long data) | 1043 | static void smi_timeout(unsigned long data) |
1044 | { | 1044 | { |
1045 | struct smi_info *smi_info = (struct smi_info *) data; | 1045 | struct smi_info *smi_info = (struct smi_info *) data; |
1046 | enum si_sm_result smi_result; | 1046 | enum si_sm_result smi_result; |
1047 | unsigned long flags; | 1047 | unsigned long flags; |
1048 | unsigned long jiffies_now; | 1048 | unsigned long jiffies_now; |
1049 | long time_diff; | 1049 | long time_diff; |
1050 | long timeout; | 1050 | long timeout; |
1051 | #ifdef DEBUG_TIMING | 1051 | #ifdef DEBUG_TIMING |
1052 | struct timeval t; | 1052 | struct timeval t; |
1053 | #endif | 1053 | #endif |
1054 | 1054 | ||
1055 | spin_lock_irqsave(&(smi_info->si_lock), flags); | 1055 | spin_lock_irqsave(&(smi_info->si_lock), flags); |
1056 | #ifdef DEBUG_TIMING | 1056 | #ifdef DEBUG_TIMING |
1057 | do_gettimeofday(&t); | 1057 | do_gettimeofday(&t); |
1058 | printk(KERN_DEBUG "**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec); | 1058 | printk(KERN_DEBUG "**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec); |
1059 | #endif | 1059 | #endif |
1060 | jiffies_now = jiffies; | 1060 | jiffies_now = jiffies; |
1061 | time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) | 1061 | time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) |
1062 | * SI_USEC_PER_JIFFY); | 1062 | * SI_USEC_PER_JIFFY); |
1063 | smi_result = smi_event_handler(smi_info, time_diff); | 1063 | smi_result = smi_event_handler(smi_info, time_diff); |
1064 | 1064 | ||
1065 | spin_unlock_irqrestore(&(smi_info->si_lock), flags); | 1065 | spin_unlock_irqrestore(&(smi_info->si_lock), flags); |
1066 | 1066 | ||
1067 | smi_info->last_timeout_jiffies = jiffies_now; | 1067 | smi_info->last_timeout_jiffies = jiffies_now; |
1068 | 1068 | ||
1069 | if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { | 1069 | if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { |
1070 | /* Running with interrupts, only do long timeouts. */ | 1070 | /* Running with interrupts, only do long timeouts. */ |
1071 | timeout = jiffies + SI_TIMEOUT_JIFFIES; | 1071 | timeout = jiffies + SI_TIMEOUT_JIFFIES; |
1072 | smi_inc_stat(smi_info, long_timeouts); | 1072 | smi_inc_stat(smi_info, long_timeouts); |
1073 | goto do_mod_timer; | 1073 | goto do_mod_timer; |
1074 | } | 1074 | } |
1075 | 1075 | ||
1076 | /* | 1076 | /* |
1077 | * If the state machine asks for a short delay, then shorten | 1077 | * If the state machine asks for a short delay, then shorten |
1078 | * the timer timeout. | 1078 | * the timer timeout. |
1079 | */ | 1079 | */ |
1080 | if (smi_result == SI_SM_CALL_WITH_DELAY) { | 1080 | if (smi_result == SI_SM_CALL_WITH_DELAY) { |
1081 | smi_inc_stat(smi_info, short_timeouts); | 1081 | smi_inc_stat(smi_info, short_timeouts); |
1082 | timeout = jiffies + 1; | 1082 | timeout = jiffies + 1; |
1083 | } else { | 1083 | } else { |
1084 | smi_inc_stat(smi_info, long_timeouts); | 1084 | smi_inc_stat(smi_info, long_timeouts); |
1085 | timeout = jiffies + SI_TIMEOUT_JIFFIES; | 1085 | timeout = jiffies + SI_TIMEOUT_JIFFIES; |
1086 | } | 1086 | } |
1087 | 1087 | ||
1088 | do_mod_timer: | 1088 | do_mod_timer: |
1089 | if (smi_result != SI_SM_IDLE) | 1089 | if (smi_result != SI_SM_IDLE) |
1090 | mod_timer(&(smi_info->si_timer), timeout); | 1090 | mod_timer(&(smi_info->si_timer), timeout); |
1091 | } | 1091 | } |
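smi_timeout() passes smi_event_handler() the elapsed time in microseconds, computed as the jiffy delta since last_timeout_jiffies scaled by SI_USEC_PER_JIFFY. A worked example, assuming HZ = 250 (so one jiffy is 4000 us); the constant name matches the driver, the scaffolding around it is illustrative only:

#include <stdio.h>

#define HZ                 250			/* assumed tick rate */
#define SI_USEC_PER_JIFFY  (1000000 / HZ)	/* 4000 us per jiffy here */

int main(void)
{
	long last_timeout_jiffies = 1000;
	long jiffies_now = 1003;		/* three timer ticks later */
	long time_diff = (jiffies_now - last_timeout_jiffies)
			 * SI_USEC_PER_JIFFY;

	printf("time_diff = %ld us\n", time_diff);	/* prints 12000 us */
	return 0;
}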
1092 | 1092 | ||
1093 | static irqreturn_t si_irq_handler(int irq, void *data) | 1093 | static irqreturn_t si_irq_handler(int irq, void *data) |
1094 | { | 1094 | { |
1095 | struct smi_info *smi_info = data; | 1095 | struct smi_info *smi_info = data; |
1096 | unsigned long flags; | 1096 | unsigned long flags; |
1097 | #ifdef DEBUG_TIMING | 1097 | #ifdef DEBUG_TIMING |
1098 | struct timeval t; | 1098 | struct timeval t; |
1099 | #endif | 1099 | #endif |
1100 | 1100 | ||
1101 | spin_lock_irqsave(&(smi_info->si_lock), flags); | 1101 | spin_lock_irqsave(&(smi_info->si_lock), flags); |
1102 | 1102 | ||
1103 | smi_inc_stat(smi_info, interrupts); | 1103 | smi_inc_stat(smi_info, interrupts); |
1104 | 1104 | ||
1105 | #ifdef DEBUG_TIMING | 1105 | #ifdef DEBUG_TIMING |
1106 | do_gettimeofday(&t); | 1106 | do_gettimeofday(&t); |
1107 | printk(KERN_DEBUG "**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec); | 1107 | printk(KERN_DEBUG "**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec); |
1108 | #endif | 1108 | #endif |
1109 | smi_event_handler(smi_info, 0); | 1109 | smi_event_handler(smi_info, 0); |
1110 | spin_unlock_irqrestore(&(smi_info->si_lock), flags); | 1110 | spin_unlock_irqrestore(&(smi_info->si_lock), flags); |
1111 | return IRQ_HANDLED; | 1111 | return IRQ_HANDLED; |
1112 | } | 1112 | } |
1113 | 1113 | ||
1114 | static irqreturn_t si_bt_irq_handler(int irq, void *data) | 1114 | static irqreturn_t si_bt_irq_handler(int irq, void *data) |
1115 | { | 1115 | { |
1116 | struct smi_info *smi_info = data; | 1116 | struct smi_info *smi_info = data; |
1117 | /* We need to clear the IRQ flag for the BT interface. */ | 1117 | /* We need to clear the IRQ flag for the BT interface. */ |
1118 | smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, | 1118 | smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, |
1119 | IPMI_BT_INTMASK_CLEAR_IRQ_BIT | 1119 | IPMI_BT_INTMASK_CLEAR_IRQ_BIT |
1120 | | IPMI_BT_INTMASK_ENABLE_IRQ_BIT); | 1120 | | IPMI_BT_INTMASK_ENABLE_IRQ_BIT); |
1121 | return si_irq_handler(irq, data); | 1121 | return si_irq_handler(irq, data); |
1122 | } | 1122 | } |
1123 | 1123 | ||
1124 | static int smi_start_processing(void *send_info, | 1124 | static int smi_start_processing(void *send_info, |
1125 | ipmi_smi_t intf) | 1125 | ipmi_smi_t intf) |
1126 | { | 1126 | { |
1127 | struct smi_info *new_smi = send_info; | 1127 | struct smi_info *new_smi = send_info; |
1128 | int enable = 0; | 1128 | int enable = 0; |
1129 | 1129 | ||
1130 | new_smi->intf = intf; | 1130 | new_smi->intf = intf; |
1131 | 1131 | ||
1132 | /* Try to claim any interrupts. */ | 1132 | /* Try to claim any interrupts. */ |
1133 | if (new_smi->irq_setup) | 1133 | if (new_smi->irq_setup) |
1134 | new_smi->irq_setup(new_smi); | 1134 | new_smi->irq_setup(new_smi); |
1135 | 1135 | ||
1136 | /* Set up the timer that drives the interface. */ | 1136 | /* Set up the timer that drives the interface. */ |
1137 | setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi); | 1137 | setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi); |
1138 | new_smi->last_timeout_jiffies = jiffies; | 1138 | new_smi->last_timeout_jiffies = jiffies; |
1139 | mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES); | 1139 | mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES); |
1140 | 1140 | ||
1141 | /* | 1141 | /* |
1142 | * Check if the user forcefully enabled the daemon. | 1142 | * Check if the user forcefully enabled the daemon. |
1143 | */ | 1143 | */ |
1144 | if (new_smi->intf_num < num_force_kipmid) | 1144 | if (new_smi->intf_num < num_force_kipmid) |
1145 | enable = force_kipmid[new_smi->intf_num]; | 1145 | enable = force_kipmid[new_smi->intf_num]; |
1146 | /* | 1146 | /* |
1147 | * The BT interface is efficient enough to not need a thread, | 1147 | * The BT interface is efficient enough to not need a thread, |
1148 | * and there is no need for a thread if we have interrupts. | 1148 | * and there is no need for a thread if we have interrupts. |
1149 | */ | 1149 | */ |
1150 | else if ((new_smi->si_type != SI_BT) && (!new_smi->irq)) | 1150 | else if ((new_smi->si_type != SI_BT) && (!new_smi->irq)) |
1151 | enable = 1; | 1151 | enable = 1; |
1152 | 1152 | ||
1153 | if (enable) { | 1153 | if (enable) { |
1154 | new_smi->thread = kthread_run(ipmi_thread, new_smi, | 1154 | new_smi->thread = kthread_run(ipmi_thread, new_smi, |
1155 | "kipmi%d", new_smi->intf_num); | 1155 | "kipmi%d", new_smi->intf_num); |
1156 | if (IS_ERR(new_smi->thread)) { | 1156 | if (IS_ERR(new_smi->thread)) { |
1157 | dev_notice(new_smi->dev, "Could not start" | 1157 | dev_notice(new_smi->dev, "Could not start" |
1158 | " kernel thread due to error %ld, only using" | 1158 | " kernel thread due to error %ld, only using" |
1159 | " timers to drive the interface\n", | 1159 | " timers to drive the interface\n", |
1160 | PTR_ERR(new_smi->thread)); | 1160 | PTR_ERR(new_smi->thread)); |
1161 | new_smi->thread = NULL; | 1161 | new_smi->thread = NULL; |
1162 | } | 1162 | } |
1163 | } | 1163 | } |
1164 | 1164 | ||
1165 | return 0; | 1165 | return 0; |
1166 | } | 1166 | } |
1167 | 1167 | ||
1168 | static int get_smi_info(void *send_info, struct ipmi_smi_info *data) | 1168 | static int get_smi_info(void *send_info, struct ipmi_smi_info *data) |
1169 | { | 1169 | { |
1170 | struct smi_info *smi = send_info; | 1170 | struct smi_info *smi = send_info; |
1171 | 1171 | ||
1172 | data->addr_src = smi->addr_source; | 1172 | data->addr_src = smi->addr_source; |
1173 | data->dev = smi->dev; | 1173 | data->dev = smi->dev; |
1174 | data->addr_info = smi->addr_info; | 1174 | data->addr_info = smi->addr_info; |
1175 | get_device(smi->dev); | 1175 | get_device(smi->dev); |
1176 | 1176 | ||
1177 | return 0; | 1177 | return 0; |
1178 | } | 1178 | } |
1179 | 1179 | ||
1180 | static void set_maintenance_mode(void *send_info, int enable) | 1180 | static void set_maintenance_mode(void *send_info, int enable) |
1181 | { | 1181 | { |
1182 | struct smi_info *smi_info = send_info; | 1182 | struct smi_info *smi_info = send_info; |
1183 | 1183 | ||
1184 | if (!enable) | 1184 | if (!enable) |
1185 | atomic_set(&smi_info->req_events, 0); | 1185 | atomic_set(&smi_info->req_events, 0); |
1186 | } | 1186 | } |
1187 | 1187 | ||
1188 | static struct ipmi_smi_handlers handlers = { | 1188 | static struct ipmi_smi_handlers handlers = { |
1189 | .owner = THIS_MODULE, | 1189 | .owner = THIS_MODULE, |
1190 | .start_processing = smi_start_processing, | 1190 | .start_processing = smi_start_processing, |
1191 | .get_smi_info = get_smi_info, | 1191 | .get_smi_info = get_smi_info, |
1192 | .sender = sender, | 1192 | .sender = sender, |
1193 | .request_events = request_events, | 1193 | .request_events = request_events, |
1194 | .set_maintenance_mode = set_maintenance_mode, | 1194 | .set_maintenance_mode = set_maintenance_mode, |
1195 | .set_run_to_completion = set_run_to_completion, | 1195 | .set_run_to_completion = set_run_to_completion, |
1196 | .poll = poll, | 1196 | .poll = poll, |
1197 | }; | 1197 | }; |
1198 | 1198 | ||
1199 | /* | 1199 | /* |
1200 | * There can be 4 IO ports passed in (with or without IRQs), 4 addresses, | 1200 | * There can be 4 IO ports passed in (with or without IRQs), 4 addresses, |
1201 | * a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS. | 1201 | * a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS. |
1202 | */ | 1202 | */ |
1203 | 1203 | ||
1204 | static LIST_HEAD(smi_infos); | 1204 | static LIST_HEAD(smi_infos); |
1205 | static DEFINE_MUTEX(smi_infos_lock); | 1205 | static DEFINE_MUTEX(smi_infos_lock); |
1206 | static int smi_num; /* Used to sequence the SMIs */ | 1206 | static int smi_num; /* Used to sequence the SMIs */ |
1207 | 1207 | ||
1208 | #define DEFAULT_REGSPACING 1 | 1208 | #define DEFAULT_REGSPACING 1 |
1209 | #define DEFAULT_REGSIZE 1 | 1209 | #define DEFAULT_REGSIZE 1 |
1210 | 1210 | ||
1211 | static bool si_trydefaults = 1; | 1211 | static bool si_trydefaults = 1; |
1212 | static char *si_type[SI_MAX_PARMS]; | 1212 | static char *si_type[SI_MAX_PARMS]; |
1213 | #define MAX_SI_TYPE_STR 30 | 1213 | #define MAX_SI_TYPE_STR 30 |
1214 | static char si_type_str[MAX_SI_TYPE_STR]; | 1214 | static char si_type_str[MAX_SI_TYPE_STR]; |
1215 | static unsigned long addrs[SI_MAX_PARMS]; | 1215 | static unsigned long addrs[SI_MAX_PARMS]; |
1216 | static unsigned int num_addrs; | 1216 | static unsigned int num_addrs; |
1217 | static unsigned int ports[SI_MAX_PARMS]; | 1217 | static unsigned int ports[SI_MAX_PARMS]; |
1218 | static unsigned int num_ports; | 1218 | static unsigned int num_ports; |
1219 | static int irqs[SI_MAX_PARMS]; | 1219 | static int irqs[SI_MAX_PARMS]; |
1220 | static unsigned int num_irqs; | 1220 | static unsigned int num_irqs; |
1221 | static int regspacings[SI_MAX_PARMS]; | 1221 | static int regspacings[SI_MAX_PARMS]; |
1222 | static unsigned int num_regspacings; | 1222 | static unsigned int num_regspacings; |
1223 | static int regsizes[SI_MAX_PARMS]; | 1223 | static int regsizes[SI_MAX_PARMS]; |
1224 | static unsigned int num_regsizes; | 1224 | static unsigned int num_regsizes; |
1225 | static int regshifts[SI_MAX_PARMS]; | 1225 | static int regshifts[SI_MAX_PARMS]; |
1226 | static unsigned int num_regshifts; | 1226 | static unsigned int num_regshifts; |
1227 | static int slave_addrs[SI_MAX_PARMS]; /* Leaving 0 chooses the default value */ | 1227 | static int slave_addrs[SI_MAX_PARMS]; /* Leaving 0 chooses the default value */ |
1228 | static unsigned int num_slave_addrs; | 1228 | static unsigned int num_slave_addrs; |
1229 | 1229 | ||
1230 | #define IPMI_IO_ADDR_SPACE 0 | 1230 | #define IPMI_IO_ADDR_SPACE 0 |
1231 | #define IPMI_MEM_ADDR_SPACE 1 | 1231 | #define IPMI_MEM_ADDR_SPACE 1 |
1232 | static char *addr_space_to_str[] = { "i/o", "mem" }; | 1232 | static char *addr_space_to_str[] = { "i/o", "mem" }; |
1233 | 1233 | ||
1234 | static int hotmod_handler(const char *val, struct kernel_param *kp); | 1234 | static int hotmod_handler(const char *val, struct kernel_param *kp); |
1235 | 1235 | ||
1236 | module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200); | 1236 | module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200); |
1237 | MODULE_PARM_DESC(hotmod, "Add and remove interfaces. See" | 1237 | MODULE_PARM_DESC(hotmod, "Add and remove interfaces. See" |
1238 | " Documentation/IPMI.txt in the kernel sources for the" | 1238 | " Documentation/IPMI.txt in the kernel sources for the" |
1239 | " gory details."); | 1239 | " gory details."); |
1240 | 1240 | ||
1241 | module_param_named(trydefaults, si_trydefaults, bool, 0); | 1241 | module_param_named(trydefaults, si_trydefaults, bool, 0); |
1242 | MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the" | 1242 | MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the" |
1243 | " default scan of the KCS and SMIC interface at the standard" | 1243 | " default scan of the KCS and SMIC interface at the standard" |
1244 | " address"); | 1244 | " address"); |
1245 | module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0); | 1245 | module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0); |
1246 | MODULE_PARM_DESC(type, "Defines the type of each interface, each" | 1246 | MODULE_PARM_DESC(type, "Defines the type of each interface, each" |
1247 | " interface separated by commas. The types are 'kcs'," | 1247 | " interface separated by commas. The types are 'kcs'," |
1248 | " 'smic', and 'bt'. For example si_type=kcs,bt will set" | 1248 | " 'smic', and 'bt'. For example si_type=kcs,bt will set" |
1249 | " the first interface to kcs and the second to bt"); | 1249 | " the first interface to kcs and the second to bt"); |
1250 | module_param_array(addrs, ulong, &num_addrs, 0); | 1250 | module_param_array(addrs, ulong, &num_addrs, 0); |
1251 | MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the" | 1251 | MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the" |
1252 | " addresses separated by commas. Only use if an interface" | 1252 | " addresses separated by commas. Only use if an interface" |
1253 | " is in memory. Otherwise, set it to zero or leave" | 1253 | " is in memory. Otherwise, set it to zero or leave" |
1254 | " it blank."); | 1254 | " it blank."); |
1255 | module_param_array(ports, uint, &num_ports, 0); | 1255 | module_param_array(ports, uint, &num_ports, 0); |
1256 | MODULE_PARM_DESC(ports, "Sets the port address of each interface, the" | 1256 | MODULE_PARM_DESC(ports, "Sets the port address of each interface, the" |
1257 | " addresses separated by commas. Only use if an interface" | 1257 | " addresses separated by commas. Only use if an interface" |
1258 | " is a port. Otherwise, set it to zero or leave" | 1258 | " is a port. Otherwise, set it to zero or leave" |
1259 | " it blank."); | 1259 | " it blank."); |
1260 | module_param_array(irqs, int, &num_irqs, 0); | 1260 | module_param_array(irqs, int, &num_irqs, 0); |
1261 | MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the" | 1261 | MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the" |
1262 | " addresses separated by commas. Only use if an interface" | 1262 | " addresses separated by commas. Only use if an interface" |
1263 | " has an interrupt. Otherwise, set it to zero or leave" | 1263 | " has an interrupt. Otherwise, set it to zero or leave" |
1264 | " it blank."); | 1264 | " it blank."); |
1265 | module_param_array(regspacings, int, &num_regspacings, 0); | 1265 | module_param_array(regspacings, int, &num_regspacings, 0); |
1266 | MODULE_PARM_DESC(regspacings, "The number of bytes between the start address" | 1266 | MODULE_PARM_DESC(regspacings, "The number of bytes between the start address" |
1267 | " and each successive register used by the interface. For" | 1267 | " and each successive register used by the interface. For" |
1268 | " instance, if the start address is 0xca2 and the spacing" | 1268 | " instance, if the start address is 0xca2 and the spacing" |
1269 | " is 2, then the second address is at 0xca4. Defaults" | 1269 | " is 2, then the second address is at 0xca4. Defaults" |
1270 | " to 1."); | 1270 | " to 1."); |
1271 | module_param_array(regsizes, int, &num_regsizes, 0); | 1271 | module_param_array(regsizes, int, &num_regsizes, 0); |
1272 | MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes." | 1272 | MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes." |
1273 | " This should generally be 1, 2, 4, or 8 for an 8-bit," | 1273 | " This should generally be 1, 2, 4, or 8 for an 8-bit," |
1274 | " 16-bit, 32-bit, or 64-bit register. Use this if you" | 1274 | " 16-bit, 32-bit, or 64-bit register. Use this if you" |
1275 | " the 8-bit IPMI register has to be read from a larger" | 1275 | " the 8-bit IPMI register has to be read from a larger" |
1276 | " register."); | 1276 | " register."); |
1277 | module_param_array(regshifts, int, &num_regshifts, 0); | 1277 | module_param_array(regshifts, int, &num_regshifts, 0); |
1278 | MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the" | 1278 | MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the" |
1279 | " IPMI register, in bits. For instance, if the data" | 1279 | " IPMI register, in bits. For instance, if the data" |
1280 | " is read from a 32-bit word and the IPMI data is in" | 1280 | " is read from a 32-bit word and the IPMI data is in" |
1281 | " bit 8-15, then the shift would be 8"); | 1281 | " bit 8-15, then the shift would be 8"); |
1282 | module_param_array(slave_addrs, int, &num_slave_addrs, 0); | 1282 | module_param_array(slave_addrs, int, &num_slave_addrs, 0); |
1283 | MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for" | 1283 | MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for" |
1284 | " the controller. Normally this is 0x20, but can be" | 1284 | " the controller. Normally this is 0x20, but can be" |
1285 | " overridden by this parm. This is an array indexed" | 1285 | " overridden by this parm. This is an array indexed" |
1286 | " by interface number."); | 1286 | " by interface number."); |
1287 | module_param_array(force_kipmid, int, &num_force_kipmid, 0); | 1287 | module_param_array(force_kipmid, int, &num_force_kipmid, 0); |
1288 | MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or" | 1288 | MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or" |
1289 | " disabled(0). Normally the IPMI driver auto-detects" | 1289 | " disabled(0). Normally the IPMI driver auto-detects" |
1290 | " this, but the value may be overridden by this parm."); | 1290 | " this, but the value may be overridden by this parm."); |
1291 | module_param(unload_when_empty, int, 0); | 1291 | module_param(unload_when_empty, int, 0); |
1292 | MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are" | 1292 | MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are" |
1293 | " specified or found, default is 1. Setting to 0" | 1293 | " specified or found, default is 1. Setting to 0" |
1294 | " is useful for hot add of devices using hotmod."); | 1294 | " is useful for hot add of devices using hotmod."); |
1295 | module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644); | 1295 | module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644); |
1296 | MODULE_PARM_DESC(kipmid_max_busy_us, | 1296 | MODULE_PARM_DESC(kipmid_max_busy_us, |
1297 | "Max time (in microseconds) to busy-wait for IPMI data before" | 1297 | "Max time (in microseconds) to busy-wait for IPMI data before" |
1298 | " sleeping. 0 (default) means to wait forever. Set to 100-500" | 1298 | " sleeping. 0 (default) means to wait forever. Set to 100-500" |
1299 | " if kipmid is using up a lot of CPU time."); | 1299 | " if kipmid is using up a lot of CPU time."); |
1300 | 1300 | ||
1301 | 1301 | ||
1302 | static void std_irq_cleanup(struct smi_info *info) | 1302 | static void std_irq_cleanup(struct smi_info *info) |
1303 | { | 1303 | { |
1304 | if (info->si_type == SI_BT) | 1304 | if (info->si_type == SI_BT) |
1305 | /* Disable the interrupt in the BT interface. */ | 1305 | /* Disable the interrupt in the BT interface. */ |
1306 | info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0); | 1306 | info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0); |
1307 | free_irq(info->irq, info); | 1307 | free_irq(info->irq, info); |
1308 | } | 1308 | } |
1309 | 1309 | ||
1310 | static int std_irq_setup(struct smi_info *info) | 1310 | static int std_irq_setup(struct smi_info *info) |
1311 | { | 1311 | { |
1312 | int rv; | 1312 | int rv; |
1313 | 1313 | ||
1314 | if (!info->irq) | 1314 | if (!info->irq) |
1315 | return 0; | 1315 | return 0; |
1316 | 1316 | ||
1317 | if (info->si_type == SI_BT) { | 1317 | if (info->si_type == SI_BT) { |
1318 | rv = request_irq(info->irq, | 1318 | rv = request_irq(info->irq, |
1319 | si_bt_irq_handler, | 1319 | si_bt_irq_handler, |
1320 | IRQF_SHARED | IRQF_DISABLED, | 1320 | IRQF_SHARED | IRQF_DISABLED, |
1321 | DEVICE_NAME, | 1321 | DEVICE_NAME, |
1322 | info); | 1322 | info); |
1323 | if (!rv) | 1323 | if (!rv) |
1324 | /* Enable the interrupt in the BT interface. */ | 1324 | /* Enable the interrupt in the BT interface. */ |
1325 | info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, | 1325 | info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, |
1326 | IPMI_BT_INTMASK_ENABLE_IRQ_BIT); | 1326 | IPMI_BT_INTMASK_ENABLE_IRQ_BIT); |
1327 | } else | 1327 | } else |
1328 | rv = request_irq(info->irq, | 1328 | rv = request_irq(info->irq, |
1329 | si_irq_handler, | 1329 | si_irq_handler, |
1330 | IRQF_SHARED | IRQF_DISABLED, | 1330 | IRQF_SHARED | IRQF_DISABLED, |
1331 | DEVICE_NAME, | 1331 | DEVICE_NAME, |
1332 | info); | 1332 | info); |
1333 | if (rv) { | 1333 | if (rv) { |
1334 | dev_warn(info->dev, "%s unable to claim interrupt %d," | 1334 | dev_warn(info->dev, "%s unable to claim interrupt %d," |
1335 | " running polled\n", | 1335 | " running polled\n", |
1336 | DEVICE_NAME, info->irq); | 1336 | DEVICE_NAME, info->irq); |
1337 | info->irq = 0; | 1337 | info->irq = 0; |
1338 | } else { | 1338 | } else { |
1339 | info->irq_cleanup = std_irq_cleanup; | 1339 | info->irq_cleanup = std_irq_cleanup; |
1340 | dev_info(info->dev, "Using irq %d\n", info->irq); | 1340 | dev_info(info->dev, "Using irq %d\n", info->irq); |
1341 | } | 1341 | } |
1342 | 1342 | ||
1343 | return rv; | 1343 | return rv; |
1344 | } | 1344 | } |
1345 | 1345 | ||
1346 | static unsigned char port_inb(struct si_sm_io *io, unsigned int offset) | 1346 | static unsigned char port_inb(struct si_sm_io *io, unsigned int offset) |
1347 | { | 1347 | { |
1348 | unsigned int addr = io->addr_data; | 1348 | unsigned int addr = io->addr_data; |
1349 | 1349 | ||
1350 | return inb(addr + (offset * io->regspacing)); | 1350 | return inb(addr + (offset * io->regspacing)); |
1351 | } | 1351 | } |
1352 | 1352 | ||
1353 | static void port_outb(struct si_sm_io *io, unsigned int offset, | 1353 | static void port_outb(struct si_sm_io *io, unsigned int offset, |
1354 | unsigned char b) | 1354 | unsigned char b) |
1355 | { | 1355 | { |
1356 | unsigned int addr = io->addr_data; | 1356 | unsigned int addr = io->addr_data; |
1357 | 1357 | ||
1358 | outb(b, addr + (offset * io->regspacing)); | 1358 | outb(b, addr + (offset * io->regspacing)); |
1359 | } | 1359 | } |
1360 | 1360 | ||
1361 | static unsigned char port_inw(struct si_sm_io *io, unsigned int offset) | 1361 | static unsigned char port_inw(struct si_sm_io *io, unsigned int offset) |
1362 | { | 1362 | { |
1363 | unsigned int addr = io->addr_data; | 1363 | unsigned int addr = io->addr_data; |
1364 | 1364 | ||
1365 | return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff; | 1365 | return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff; |
1366 | } | 1366 | } |
1367 | 1367 | ||
1368 | static void port_outw(struct si_sm_io *io, unsigned int offset, | 1368 | static void port_outw(struct si_sm_io *io, unsigned int offset, |
1369 | unsigned char b) | 1369 | unsigned char b) |
1370 | { | 1370 | { |
1371 | unsigned int addr = io->addr_data; | 1371 | unsigned int addr = io->addr_data; |
1372 | 1372 | ||
1373 | outw(b << io->regshift, addr + (offset * io->regspacing)); | 1373 | outw(b << io->regshift, addr + (offset * io->regspacing)); |
1374 | } | 1374 | } |
1375 | 1375 | ||
1376 | static unsigned char port_inl(struct si_sm_io *io, unsigned int offset) | 1376 | static unsigned char port_inl(struct si_sm_io *io, unsigned int offset) |
1377 | { | 1377 | { |
1378 | unsigned int addr = io->addr_data; | 1378 | unsigned int addr = io->addr_data; |
1379 | 1379 | ||
1380 | return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff; | 1380 | return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff; |
1381 | } | 1381 | } |
1382 | 1382 | ||
1383 | static void port_outl(struct si_sm_io *io, unsigned int offset, | 1383 | static void port_outl(struct si_sm_io *io, unsigned int offset, |
1384 | unsigned char b) | 1384 | unsigned char b) |
1385 | { | 1385 | { |
1386 | unsigned int addr = io->addr_data; | 1386 | unsigned int addr = io->addr_data; |
1387 | 1387 | ||
1388 | outl(b << io->regshift, addr+(offset * io->regspacing)); | 1388 | outl(b << io->regshift, addr+(offset * io->regspacing)); |
1389 | } | 1389 | } |
1390 | 1390 | ||
1391 | static void port_cleanup(struct smi_info *info) | 1391 | static void port_cleanup(struct smi_info *info) |
1392 | { | 1392 | { |
1393 | unsigned int addr = info->io.addr_data; | 1393 | unsigned int addr = info->io.addr_data; |
1394 | int idx; | 1394 | int idx; |
1395 | 1395 | ||
1396 | if (addr) { | 1396 | if (addr) { |
1397 | for (idx = 0; idx < info->io_size; idx++) | 1397 | for (idx = 0; idx < info->io_size; idx++) |
1398 | release_region(addr + idx * info->io.regspacing, | 1398 | release_region(addr + idx * info->io.regspacing, |
1399 | info->io.regsize); | 1399 | info->io.regsize); |
1400 | } | 1400 | } |
1401 | } | 1401 | } |
1402 | 1402 | ||
1403 | static int port_setup(struct smi_info *info) | 1403 | static int port_setup(struct smi_info *info) |
1404 | { | 1404 | { |
1405 | unsigned int addr = info->io.addr_data; | 1405 | unsigned int addr = info->io.addr_data; |
1406 | int idx; | 1406 | int idx; |
1407 | 1407 | ||
1408 | if (!addr) | 1408 | if (!addr) |
1409 | return -ENODEV; | 1409 | return -ENODEV; |
1410 | 1410 | ||
1411 | info->io_cleanup = port_cleanup; | 1411 | info->io_cleanup = port_cleanup; |
1412 | 1412 | ||
1413 | /* | 1413 | /* |
1414 | * Figure out the actual inb/inw/inl/etc routine to use based | 1414 | * Figure out the actual inb/inw/inl/etc routine to use based |
1415 | * upon the register size. | 1415 | * upon the register size. |
1416 | */ | 1416 | */ |
1417 | switch (info->io.regsize) { | 1417 | switch (info->io.regsize) { |
1418 | case 1: | 1418 | case 1: |
1419 | info->io.inputb = port_inb; | 1419 | info->io.inputb = port_inb; |
1420 | info->io.outputb = port_outb; | 1420 | info->io.outputb = port_outb; |
1421 | break; | 1421 | break; |
1422 | case 2: | 1422 | case 2: |
1423 | info->io.inputb = port_inw; | 1423 | info->io.inputb = port_inw; |
1424 | info->io.outputb = port_outw; | 1424 | info->io.outputb = port_outw; |
1425 | break; | 1425 | break; |
1426 | case 4: | 1426 | case 4: |
1427 | info->io.inputb = port_inl; | 1427 | info->io.inputb = port_inl; |
1428 | info->io.outputb = port_outl; | 1428 | info->io.outputb = port_outl; |
1429 | break; | 1429 | break; |
1430 | default: | 1430 | default: |
1431 | dev_warn(info->dev, "Invalid register size: %d\n", | 1431 | dev_warn(info->dev, "Invalid register size: %d\n", |
1432 | info->io.regsize); | 1432 | info->io.regsize); |
1433 | return -EINVAL; | 1433 | return -EINVAL; |
1434 | } | 1434 | } |
1435 | 1435 | ||
1436 | /* | 1436 | /* |
1437 | * Some BIOSes reserve disjoint I/O regions in their ACPI | 1437 | * Some BIOSes reserve disjoint I/O regions in their ACPI |
1438 | * tables. This causes problems when trying to register the | 1438 | * tables. This causes problems when trying to register the |
1439 | * entire I/O region. Therefore we must register each I/O | 1439 | * entire I/O region. Therefore we must register each I/O |
1440 | * port separately. | 1440 | * port separately. |
1441 | */ | 1441 | */ |
1442 | for (idx = 0; idx < info->io_size; idx++) { | 1442 | for (idx = 0; idx < info->io_size; idx++) { |
1443 | if (request_region(addr + idx * info->io.regspacing, | 1443 | if (request_region(addr + idx * info->io.regspacing, |
1444 | info->io.regsize, DEVICE_NAME) == NULL) { | 1444 | info->io.regsize, DEVICE_NAME) == NULL) { |
1445 | /* Undo allocations */ | 1445 | /* Undo allocations */ |
1446 | while (idx--) { | 1446 | while (idx--) { |
1447 | release_region(addr + idx * info->io.regspacing, | 1447 | release_region(addr + idx * info->io.regspacing, |
1448 | info->io.regsize); | 1448 | info->io.regsize); |
1449 | } | 1449 | } |
1450 | return -EIO; | 1450 | return -EIO; |
1451 | } | 1451 | } |
1452 | } | 1452 | } |
1453 | return 0; | 1453 | return 0; |
1454 | } | 1454 | } |
1455 | 1455 | ||
1456 | static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset) | 1456 | static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset) |
1457 | { | 1457 | { |
1458 | return readb((io->addr)+(offset * io->regspacing)); | 1458 | return readb((io->addr)+(offset * io->regspacing)); |
1459 | } | 1459 | } |
1460 | 1460 | ||
1461 | static void intf_mem_outb(struct si_sm_io *io, unsigned int offset, | 1461 | static void intf_mem_outb(struct si_sm_io *io, unsigned int offset, |
1462 | unsigned char b) | 1462 | unsigned char b) |
1463 | { | 1463 | { |
1464 | writeb(b, (io->addr)+(offset * io->regspacing)); | 1464 | writeb(b, (io->addr)+(offset * io->regspacing)); |
1465 | } | 1465 | } |
1466 | 1466 | ||
1467 | static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset) | 1467 | static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset) |
1468 | { | 1468 | { |
1469 | return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift) | 1469 | return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift) |
1470 | & 0xff; | 1470 | & 0xff; |
1471 | } | 1471 | } |
1472 | 1472 | ||
1473 | static void intf_mem_outw(struct si_sm_io *io, unsigned int offset, | 1473 | static void intf_mem_outw(struct si_sm_io *io, unsigned int offset, |
1474 | unsigned char b) | 1474 | unsigned char b) |
1475 | { | 1475 | { |
1476 | writeb(b << io->regshift, (io->addr)+(offset * io->regspacing)); | 1476 | writeb(b << io->regshift, (io->addr)+(offset * io->regspacing)); |
1477 | } | 1477 | } |
1478 | 1478 | ||
1479 | static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset) | 1479 | static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset) |
1480 | { | 1480 | { |
1481 | return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift) | 1481 | return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift) |
1482 | & 0xff; | 1482 | & 0xff; |
1483 | } | 1483 | } |
1484 | 1484 | ||
1485 | static void intf_mem_outl(struct si_sm_io *io, unsigned int offset, | 1485 | static void intf_mem_outl(struct si_sm_io *io, unsigned int offset, |
1486 | unsigned char b) | 1486 | unsigned char b) |
1487 | { | 1487 | { |
1488 | writel(b << io->regshift, (io->addr)+(offset * io->regspacing)); | 1488 | writel(b << io->regshift, (io->addr)+(offset * io->regspacing)); |
1489 | } | 1489 | } |
1490 | 1490 | ||
1491 | #ifdef readq | 1491 | #ifdef readq |
1492 | static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset) | 1492 | static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset) |
1493 | { | 1493 | { |
1494 | return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift) | 1494 | return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift) |
1495 | & 0xff; | 1495 | & 0xff; |
1496 | } | 1496 | } |
1497 | 1497 | ||
1498 | static void mem_outq(struct si_sm_io *io, unsigned int offset, | 1498 | static void mem_outq(struct si_sm_io *io, unsigned int offset, |
1499 | unsigned char b) | 1499 | unsigned char b) |
1500 | { | 1500 | { |
1501 | writeq(b << io->regshift, (io->addr)+(offset * io->regspacing)); | 1501 | writeq(b << io->regshift, (io->addr)+(offset * io->regspacing)); |
1502 | } | 1502 | } |
1503 | #endif | 1503 | #endif |
1504 | 1504 | ||
1505 | static void mem_cleanup(struct smi_info *info) | 1505 | static void mem_cleanup(struct smi_info *info) |
1506 | { | 1506 | { |
1507 | unsigned long addr = info->io.addr_data; | 1507 | unsigned long addr = info->io.addr_data; |
1508 | int mapsize; | 1508 | int mapsize; |
1509 | 1509 | ||
1510 | if (info->io.addr) { | 1510 | if (info->io.addr) { |
1511 | iounmap(info->io.addr); | 1511 | iounmap(info->io.addr); |
1512 | 1512 | ||
1513 | mapsize = ((info->io_size * info->io.regspacing) | 1513 | mapsize = ((info->io_size * info->io.regspacing) |
1514 | - (info->io.regspacing - info->io.regsize)); | 1514 | - (info->io.regspacing - info->io.regsize)); |
1515 | 1515 | ||
1516 | release_mem_region(addr, mapsize); | 1516 | release_mem_region(addr, mapsize); |
1517 | } | 1517 | } |
1518 | } | 1518 | } |
1519 | 1519 | ||
1520 | static int mem_setup(struct smi_info *info) | 1520 | static int mem_setup(struct smi_info *info) |
1521 | { | 1521 | { |
1522 | unsigned long addr = info->io.addr_data; | 1522 | unsigned long addr = info->io.addr_data; |
1523 | int mapsize; | 1523 | int mapsize; |
1524 | 1524 | ||
1525 | if (!addr) | 1525 | if (!addr) |
1526 | return -ENODEV; | 1526 | return -ENODEV; |
1527 | 1527 | ||
1528 | info->io_cleanup = mem_cleanup; | 1528 | info->io_cleanup = mem_cleanup; |
1529 | 1529 | ||
1530 | /* | 1530 | /* |
1531 | * Figure out the actual readb/readw/readl/etc routine to use based | 1531 | * Figure out the actual readb/readw/readl/etc routine to use based |
1532 | * upon the register size. | 1532 | * upon the register size. |
1533 | */ | 1533 | */ |
1534 | switch (info->io.regsize) { | 1534 | switch (info->io.regsize) { |
1535 | case 1: | 1535 | case 1: |
1536 | info->io.inputb = intf_mem_inb; | 1536 | info->io.inputb = intf_mem_inb; |
1537 | info->io.outputb = intf_mem_outb; | 1537 | info->io.outputb = intf_mem_outb; |
1538 | break; | 1538 | break; |
1539 | case 2: | 1539 | case 2: |
1540 | info->io.inputb = intf_mem_inw; | 1540 | info->io.inputb = intf_mem_inw; |
1541 | info->io.outputb = intf_mem_outw; | 1541 | info->io.outputb = intf_mem_outw; |
1542 | break; | 1542 | break; |
1543 | case 4: | 1543 | case 4: |
1544 | info->io.inputb = intf_mem_inl; | 1544 | info->io.inputb = intf_mem_inl; |
1545 | info->io.outputb = intf_mem_outl; | 1545 | info->io.outputb = intf_mem_outl; |
1546 | break; | 1546 | break; |
1547 | #ifdef readq | 1547 | #ifdef readq |
1548 | case 8: | 1548 | case 8: |
1549 | info->io.inputb = mem_inq; | 1549 | info->io.inputb = mem_inq; |
1550 | info->io.outputb = mem_outq; | 1550 | info->io.outputb = mem_outq; |
1551 | break; | 1551 | break; |
1552 | #endif | 1552 | #endif |
1553 | default: | 1553 | default: |
1554 | dev_warn(info->dev, "Invalid register size: %d\n", | 1554 | dev_warn(info->dev, "Invalid register size: %d\n", |
1555 | info->io.regsize); | 1555 | info->io.regsize); |
1556 | return -EINVAL; | 1556 | return -EINVAL; |
1557 | } | 1557 | } |
1558 | 1558 | ||
1559 | /* | 1559 | /* |
1560 | * Calculate the total amount of memory to claim. This is an | 1560 | * Calculate the total amount of memory to claim. This is an |
1561 | * unusual looking calculation, but it avoids claiming any | 1561 | * unusual looking calculation, but it avoids claiming any |
1562 | * more memory than it has to. It will claim everything | 1562 | * more memory than it has to. It will claim everything |
1563 | * between the first address to the end of the last full | 1563 | * between the first address to the end of the last full |
1564 | * register. | 1564 | * register. |
1565 | */ | 1565 | */ |
1566 | mapsize = ((info->io_size * info->io.regspacing) | 1566 | mapsize = ((info->io_size * info->io.regspacing) |
1567 | - (info->io.regspacing - info->io.regsize)); | 1567 | - (info->io.regspacing - info->io.regsize)); |
1568 | 1568 | ||
1569 | if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL) | 1569 | if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL) |
1570 | return -EIO; | 1570 | return -EIO; |
1571 | 1571 | ||
1572 | info->io.addr = ioremap(addr, mapsize); | 1572 | info->io.addr = ioremap(addr, mapsize); |
1573 | if (info->io.addr == NULL) { | 1573 | if (info->io.addr == NULL) { |
1574 | release_mem_region(addr, mapsize); | 1574 | release_mem_region(addr, mapsize); |
1575 | return -EIO; | 1575 | return -EIO; |
1576 | } | 1576 | } |
1577 | return 0; | 1577 | return 0; |
1578 | } | 1578 | } |
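The mapsize expression in mem_setup()/mem_cleanup() claims memory only up to the end of the last full register rather than a full final spacing stride. A worked example with illustrative values: three registers (io_size = 3), spaced 4 bytes apart, each 1 byte wide, need bytes 0..8, i.e. 9 bytes rather than 12:

#include <stdio.h>

int main(void)
{
	int io_size = 3, regspacing = 4, regsize = 1;	/* illustrative values */
	int mapsize = (io_size * regspacing) - (regspacing - regsize);

	printf("mapsize = %d bytes\n", mapsize);	/* prints 9 */
	return 0;
}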
1579 | 1579 | ||
1580 | /* | 1580 | /* |
1581 | * Parms come in as <op1>[:op2[:op3...]]. ops are: | 1581 | * Parms come in as <op1>[:op2[:op3...]]. ops are: |
1582 | * add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]] | 1582 | * add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]] |
1583 | * Options are: | 1583 | * Options are: |
1584 | * rsp=<regspacing> | 1584 | * rsp=<regspacing> |
1585 | * rsi=<regsize> | 1585 | * rsi=<regsize> |
1586 | * rsh=<regshift> | 1586 | * rsh=<regshift> |
1587 | * irq=<irq> | 1587 | * irq=<irq> |
1588 | * ipmb=<ipmb addr> | 1588 | * ipmb=<ipmb addr> |
1589 | */ | 1589 | */ |
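The hotmod parameter accepts one or more colon-separated operations in the comma-separated format described in the comment above. Below is a hedged userspace sketch that adds a KCS interface at I/O port 0xca2 with IRQ 9; the /sys/module/ipmi_si/parameters/hotmod path follows the usual sysfs layout for writable module parameters and, like the option string itself, should be checked against Documentation/IPMI.txt rather than taken from here.

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/module/ipmi_si/parameters/hotmod", "w");

	if (!f) {
		perror("hotmod");
		return 1;
	}
	fprintf(f, "add,kcs,i/o,0xca2,irq=9\n");	/* one hotmod operation */
	return fclose(f) ? 1 : 0;
}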
1590 | enum hotmod_op { HM_ADD, HM_REMOVE }; | 1590 | enum hotmod_op { HM_ADD, HM_REMOVE }; |
1591 | struct hotmod_vals { | 1591 | struct hotmod_vals { |
1592 | char *name; | 1592 | char *name; |
1593 | int val; | 1593 | int val; |
1594 | }; | 1594 | }; |
1595 | static struct hotmod_vals hotmod_ops[] = { | 1595 | static struct hotmod_vals hotmod_ops[] = { |
1596 | { "add", HM_ADD }, | 1596 | { "add", HM_ADD }, |
1597 | { "remove", HM_REMOVE }, | 1597 | { "remove", HM_REMOVE }, |
1598 | { NULL } | 1598 | { NULL } |
1599 | }; | 1599 | }; |
1600 | static struct hotmod_vals hotmod_si[] = { | 1600 | static struct hotmod_vals hotmod_si[] = { |
1601 | { "kcs", SI_KCS }, | 1601 | { "kcs", SI_KCS }, |
1602 | { "smic", SI_SMIC }, | 1602 | { "smic", SI_SMIC }, |
1603 | { "bt", SI_BT }, | 1603 | { "bt", SI_BT }, |
1604 | { NULL } | 1604 | { NULL } |
1605 | }; | 1605 | }; |
1606 | static struct hotmod_vals hotmod_as[] = { | 1606 | static struct hotmod_vals hotmod_as[] = { |
1607 | { "mem", IPMI_MEM_ADDR_SPACE }, | 1607 | { "mem", IPMI_MEM_ADDR_SPACE }, |
1608 | { "i/o", IPMI_IO_ADDR_SPACE }, | 1608 | { "i/o", IPMI_IO_ADDR_SPACE }, |
1609 | { NULL } | 1609 | { NULL } |
1610 | }; | 1610 | }; |
1611 | 1611 | ||
1612 | static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr) | 1612 | static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr) |
1613 | { | 1613 | { |
1614 | char *s; | 1614 | char *s; |
1615 | int i; | 1615 | int i; |
1616 | 1616 | ||
1617 | s = strchr(*curr, ','); | 1617 | s = strchr(*curr, ','); |
1618 | if (!s) { | 1618 | if (!s) { |
1619 | printk(KERN_WARNING PFX "No hotmod %s given.\n", name); | 1619 | printk(KERN_WARNING PFX "No hotmod %s given.\n", name); |
1620 | return -EINVAL; | 1620 | return -EINVAL; |
1621 | } | 1621 | } |
1622 | *s = '\0'; | 1622 | *s = '\0'; |
1623 | s++; | 1623 | s++; |
1624 | for (i = 0; v[i].name; i++) { | 1624 | for (i = 0; v[i].name; i++) { |
1625 | if (strcmp(*curr, v[i].name) == 0) { | 1625 | if (strcmp(*curr, v[i].name) == 0) { |
1626 | *val = v[i].val; | 1626 | *val = v[i].val; |
1627 | *curr = s; | 1627 | *curr = s; |
1628 | return 0; | 1628 | return 0; |
1629 | } | 1629 | } |
1630 | } | 1630 | } |
1631 | 1631 | ||
1632 | printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr); | 1632 | printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr); |
1633 | return -EINVAL; | 1633 | return -EINVAL; |
1634 | } | 1634 | } |
1635 | 1635 | ||
1636 | static int check_hotmod_int_op(const char *curr, const char *option, | 1636 | static int check_hotmod_int_op(const char *curr, const char *option, |
1637 | const char *name, int *val) | 1637 | const char *name, int *val) |
1638 | { | 1638 | { |
1639 | char *n; | 1639 | char *n; |
1640 | 1640 | ||
1641 | if (strcmp(curr, name) == 0) { | 1641 | if (strcmp(curr, name) == 0) { |
1642 | if (!option) { | 1642 | if (!option) { |
1643 | printk(KERN_WARNING PFX | 1643 | printk(KERN_WARNING PFX |
1644 | "No option given for '%s'\n", | 1644 | "No option given for '%s'\n", |
1645 | curr); | 1645 | curr); |
1646 | return -EINVAL; | 1646 | return -EINVAL; |
1647 | } | 1647 | } |
1648 | *val = simple_strtoul(option, &n, 0); | 1648 | *val = simple_strtoul(option, &n, 0); |
1649 | if ((*n != '\0') || (*option == '\0')) { | 1649 | if ((*n != '\0') || (*option == '\0')) { |
1650 | printk(KERN_WARNING PFX | 1650 | printk(KERN_WARNING PFX |
1651 | "Bad option given for '%s'\n", | 1651 | "Bad option given for '%s'\n", |
1652 | curr); | 1652 | curr); |
1653 | return -EINVAL; | 1653 | return -EINVAL; |
1654 | } | 1654 | } |
1655 | return 1; | 1655 | return 1; |
1656 | } | 1656 | } |
1657 | return 0; | 1657 | return 0; |
1658 | } | 1658 | } |
1659 | 1659 | ||
1660 | static struct smi_info *smi_info_alloc(void) | 1660 | static struct smi_info *smi_info_alloc(void) |
1661 | { | 1661 | { |
1662 | struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL); | 1662 | struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL); |
1663 | 1663 | ||
1664 | if (info) | 1664 | if (info) |
1665 | spin_lock_init(&info->si_lock); | 1665 | spin_lock_init(&info->si_lock); |
1666 | return info; | 1666 | return info; |
1667 | } | 1667 | } |
1668 | 1668 | ||
1669 | static int hotmod_handler(const char *val, struct kernel_param *kp) | 1669 | static int hotmod_handler(const char *val, struct kernel_param *kp) |
1670 | { | 1670 | { |
1671 | char *str = kstrdup(val, GFP_KERNEL); | 1671 | char *str = kstrdup(val, GFP_KERNEL); |
1672 | int rv; | 1672 | int rv; |
1673 | char *next, *curr, *s, *n, *o; | 1673 | char *next, *curr, *s, *n, *o; |
1674 | enum hotmod_op op; | 1674 | enum hotmod_op op; |
1675 | enum si_type si_type; | 1675 | enum si_type si_type; |
1676 | int addr_space; | 1676 | int addr_space; |
1677 | unsigned long addr; | 1677 | unsigned long addr; |
1678 | int regspacing; | 1678 | int regspacing; |
1679 | int regsize; | 1679 | int regsize; |
1680 | int regshift; | 1680 | int regshift; |
1681 | int irq; | 1681 | int irq; |
1682 | int ipmb; | 1682 | int ipmb; |
1683 | int ival; | 1683 | int ival; |
1684 | int len; | 1684 | int len; |
1685 | struct smi_info *info; | 1685 | struct smi_info *info; |
1686 | 1686 | ||
1687 | if (!str) | 1687 | if (!str) |
1688 | return -ENOMEM; | 1688 | return -ENOMEM; |
1689 | 1689 | ||
1690 | /* Kill any trailing spaces, as we can get a "\n" from echo. */ | 1690 | /* Kill any trailing spaces, as we can get a "\n" from echo. */ |
1691 | len = strlen(str); | 1691 | len = strlen(str); |
1692 | ival = len - 1; | 1692 | ival = len - 1; |
1693 | while ((ival >= 0) && isspace(str[ival])) { | 1693 | while ((ival >= 0) && isspace(str[ival])) { |
1694 | str[ival] = '\0'; | 1694 | str[ival] = '\0'; |
1695 | ival--; | 1695 | ival--; |
1696 | } | 1696 | } |
1697 | 1697 | ||
1698 | for (curr = str; curr; curr = next) { | 1698 | for (curr = str; curr; curr = next) { |
1699 | regspacing = 1; | 1699 | regspacing = 1; |
1700 | regsize = 1; | 1700 | regsize = 1; |
1701 | regshift = 0; | 1701 | regshift = 0; |
1702 | irq = 0; | 1702 | irq = 0; |
1703 | ipmb = 0; /* Choose the default if not specified */ | 1703 | ipmb = 0; /* Choose the default if not specified */ |
1704 | 1704 | ||
1705 | next = strchr(curr, ':'); | 1705 | next = strchr(curr, ':'); |
1706 | if (next) { | 1706 | if (next) { |
1707 | *next = '\0'; | 1707 | *next = '\0'; |
1708 | next++; | 1708 | next++; |
1709 | } | 1709 | } |
1710 | 1710 | ||
1711 | rv = parse_str(hotmod_ops, &ival, "operation", &curr); | 1711 | rv = parse_str(hotmod_ops, &ival, "operation", &curr); |
1712 | if (rv) | 1712 | if (rv) |
1713 | break; | 1713 | break; |
1714 | op = ival; | 1714 | op = ival; |
1715 | 1715 | ||
1716 | rv = parse_str(hotmod_si, &ival, "interface type", &curr); | 1716 | rv = parse_str(hotmod_si, &ival, "interface type", &curr); |
1717 | if (rv) | 1717 | if (rv) |
1718 | break; | 1718 | break; |
1719 | si_type = ival; | 1719 | si_type = ival; |
1720 | 1720 | ||
1721 | rv = parse_str(hotmod_as, &addr_space, "address space", &curr); | 1721 | rv = parse_str(hotmod_as, &addr_space, "address space", &curr); |
1722 | if (rv) | 1722 | if (rv) |
1723 | break; | 1723 | break; |
1724 | 1724 | ||
1725 | s = strchr(curr, ','); | 1725 | s = strchr(curr, ','); |
1726 | if (s) { | 1726 | if (s) { |
1727 | *s = '\0'; | 1727 | *s = '\0'; |
1728 | s++; | 1728 | s++; |
1729 | } | 1729 | } |
1730 | addr = simple_strtoul(curr, &n, 0); | 1730 | addr = simple_strtoul(curr, &n, 0); |
1731 | if ((*n != '\0') || (*curr == '\0')) { | 1731 | if ((*n != '\0') || (*curr == '\0')) { |
1732 | printk(KERN_WARNING PFX "Invalid hotmod address" | 1732 | printk(KERN_WARNING PFX "Invalid hotmod address" |
1733 | " '%s'\n", curr); | 1733 | " '%s'\n", curr); |
1734 | break; | 1734 | break; |
1735 | } | 1735 | } |
1736 | 1736 | ||
1737 | while (s) { | 1737 | while (s) { |
1738 | curr = s; | 1738 | curr = s; |
1739 | s = strchr(curr, ','); | 1739 | s = strchr(curr, ','); |
1740 | if (s) { | 1740 | if (s) { |
1741 | *s = '\0'; | 1741 | *s = '\0'; |
1742 | s++; | 1742 | s++; |
1743 | } | 1743 | } |
1744 | o = strchr(curr, '='); | 1744 | o = strchr(curr, '='); |
1745 | if (o) { | 1745 | if (o) { |
1746 | *o = '\0'; | 1746 | *o = '\0'; |
1747 | o++; | 1747 | o++; |
1748 | } | 1748 | } |
1749 | rv = check_hotmod_int_op(curr, o, "rsp", &regspacing); | 1749 | rv = check_hotmod_int_op(curr, o, "rsp", &regspacing); |
1750 | if (rv < 0) | 1750 | if (rv < 0) |
1751 | goto out; | 1751 | goto out; |
1752 | else if (rv) | 1752 | else if (rv) |
1753 | continue; | 1753 | continue; |
1754 | rv = check_hotmod_int_op(curr, o, "rsi", &regsize); | 1754 | rv = check_hotmod_int_op(curr, o, "rsi", &regsize); |
1755 | if (rv < 0) | 1755 | if (rv < 0) |
1756 | goto out; | 1756 | goto out; |
1757 | else if (rv) | 1757 | else if (rv) |
1758 | continue; | 1758 | continue; |
1759 | rv = check_hotmod_int_op(curr, o, "rsh", &regshift); | 1759 | rv = check_hotmod_int_op(curr, o, "rsh", &regshift); |
1760 | if (rv < 0) | 1760 | if (rv < 0) |
1761 | goto out; | 1761 | goto out; |
1762 | else if (rv) | 1762 | else if (rv) |
1763 | continue; | 1763 | continue; |
1764 | rv = check_hotmod_int_op(curr, o, "irq", &irq); | 1764 | rv = check_hotmod_int_op(curr, o, "irq", &irq); |
1765 | if (rv < 0) | 1765 | if (rv < 0) |
1766 | goto out; | 1766 | goto out; |
1767 | else if (rv) | 1767 | else if (rv) |
1768 | continue; | 1768 | continue; |
1769 | rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb); | 1769 | rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb); |
1770 | if (rv < 0) | 1770 | if (rv < 0) |
1771 | goto out; | 1771 | goto out; |
1772 | else if (rv) | 1772 | else if (rv) |
1773 | continue; | 1773 | continue; |
1774 | 1774 | ||
1775 | rv = -EINVAL; | 1775 | rv = -EINVAL; |
1776 | printk(KERN_WARNING PFX | 1776 | printk(KERN_WARNING PFX |
1777 | "Invalid hotmod option '%s'\n", | 1777 | "Invalid hotmod option '%s'\n", |
1778 | curr); | 1778 | curr); |
1779 | goto out; | 1779 | goto out; |
1780 | } | 1780 | } |
1781 | 1781 | ||
1782 | if (op == HM_ADD) { | 1782 | if (op == HM_ADD) { |
1783 | info = smi_info_alloc(); | 1783 | info = smi_info_alloc(); |
1784 | if (!info) { | 1784 | if (!info) { |
1785 | rv = -ENOMEM; | 1785 | rv = -ENOMEM; |
1786 | goto out; | 1786 | goto out; |
1787 | } | 1787 | } |
1788 | 1788 | ||
1789 | info->addr_source = SI_HOTMOD; | 1789 | info->addr_source = SI_HOTMOD; |
1790 | info->si_type = si_type; | 1790 | info->si_type = si_type; |
1791 | info->io.addr_data = addr; | 1791 | info->io.addr_data = addr; |
1792 | info->io.addr_type = addr_space; | 1792 | info->io.addr_type = addr_space; |
1793 | if (addr_space == IPMI_MEM_ADDR_SPACE) | 1793 | if (addr_space == IPMI_MEM_ADDR_SPACE) |
1794 | info->io_setup = mem_setup; | 1794 | info->io_setup = mem_setup; |
1795 | else | 1795 | else |
1796 | info->io_setup = port_setup; | 1796 | info->io_setup = port_setup; |
1797 | 1797 | ||
1798 | info->io.addr = NULL; | 1798 | info->io.addr = NULL; |
1799 | info->io.regspacing = regspacing; | 1799 | info->io.regspacing = regspacing; |
1800 | if (!info->io.regspacing) | 1800 | if (!info->io.regspacing) |
1801 | info->io.regspacing = DEFAULT_REGSPACING; | 1801 | info->io.regspacing = DEFAULT_REGSPACING; |
1802 | info->io.regsize = regsize; | 1802 | info->io.regsize = regsize; |
1803 | if (!info->io.regsize) | 1803 | if (!info->io.regsize) |
1804 | info->io.regsize = DEFAULT_REGSPACING; | 1804 | info->io.regsize = DEFAULT_REGSPACING; |
1805 | info->io.regshift = regshift; | 1805 | info->io.regshift = regshift; |
1806 | info->irq = irq; | 1806 | info->irq = irq; |
1807 | if (info->irq) | 1807 | if (info->irq) |
1808 | info->irq_setup = std_irq_setup; | 1808 | info->irq_setup = std_irq_setup; |
1809 | info->slave_addr = ipmb; | 1809 | info->slave_addr = ipmb; |
1810 | 1810 | ||
1811 | if (!add_smi(info)) { | 1811 | if (!add_smi(info)) { |
1812 | if (try_smi_init(info)) | 1812 | if (try_smi_init(info)) |
1813 | cleanup_one_si(info); | 1813 | cleanup_one_si(info); |
1814 | } else { | 1814 | } else { |
1815 | kfree(info); | 1815 | kfree(info); |
1816 | } | 1816 | } |
1817 | } else { | 1817 | } else { |
1818 | /* remove */ | 1818 | /* remove */ |
1819 | struct smi_info *e, *tmp_e; | 1819 | struct smi_info *e, *tmp_e; |
1820 | 1820 | ||
1821 | mutex_lock(&smi_infos_lock); | 1821 | mutex_lock(&smi_infos_lock); |
1822 | list_for_each_entry_safe(e, tmp_e, &smi_infos, link) { | 1822 | list_for_each_entry_safe(e, tmp_e, &smi_infos, link) { |
1823 | if (e->io.addr_type != addr_space) | 1823 | if (e->io.addr_type != addr_space) |
1824 | continue; | 1824 | continue; |
1825 | if (e->si_type != si_type) | 1825 | if (e->si_type != si_type) |
1826 | continue; | 1826 | continue; |
1827 | if (e->io.addr_data == addr) | 1827 | if (e->io.addr_data == addr) |
1828 | cleanup_one_si(e); | 1828 | cleanup_one_si(e); |
1829 | } | 1829 | } |
1830 | mutex_unlock(&smi_infos_lock); | 1830 | mutex_unlock(&smi_infos_lock); |
1831 | } | 1831 | } |
1832 | } | 1832 | } |
1833 | rv = len; | 1833 | rv = len; |
1834 | out: | 1834 | out: |
1835 | kfree(str); | 1835 | kfree(str); |
1836 | return rv; | 1836 | return rv; |
1837 | } | 1837 | } |
1838 | 1838 | ||
1839 | static int __devinit hardcode_find_bmc(void) | 1839 | static int hardcode_find_bmc(void) |
1840 | { | 1840 | { |
1841 | int ret = -ENODEV; | 1841 | int ret = -ENODEV; |
1842 | int i; | 1842 | int i; |
1843 | struct smi_info *info; | 1843 | struct smi_info *info; |
1844 | 1844 | ||
1845 | for (i = 0; i < SI_MAX_PARMS; i++) { | 1845 | for (i = 0; i < SI_MAX_PARMS; i++) { |
1846 | if (!ports[i] && !addrs[i]) | 1846 | if (!ports[i] && !addrs[i]) |
1847 | continue; | 1847 | continue; |
1848 | 1848 | ||
1849 | info = smi_info_alloc(); | 1849 | info = smi_info_alloc(); |
1850 | if (!info) | 1850 | if (!info) |
1851 | return -ENOMEM; | 1851 | return -ENOMEM; |
1852 | 1852 | ||
1853 | info->addr_source = SI_HARDCODED; | 1853 | info->addr_source = SI_HARDCODED; |
1854 | printk(KERN_INFO PFX "probing via hardcoded address\n"); | 1854 | printk(KERN_INFO PFX "probing via hardcoded address\n"); |
1855 | 1855 | ||
1856 | if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) { | 1856 | if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) { |
1857 | info->si_type = SI_KCS; | 1857 | info->si_type = SI_KCS; |
1858 | } else if (strcmp(si_type[i], "smic") == 0) { | 1858 | } else if (strcmp(si_type[i], "smic") == 0) { |
1859 | info->si_type = SI_SMIC; | 1859 | info->si_type = SI_SMIC; |
1860 | } else if (strcmp(si_type[i], "bt") == 0) { | 1860 | } else if (strcmp(si_type[i], "bt") == 0) { |
1861 | info->si_type = SI_BT; | 1861 | info->si_type = SI_BT; |
1862 | } else { | 1862 | } else { |
1863 | printk(KERN_WARNING PFX "Interface type specified " | 1863 | printk(KERN_WARNING PFX "Interface type specified " |
1864 | "for interface %d, was invalid: %s\n", | 1864 | "for interface %d, was invalid: %s\n", |
1865 | i, si_type[i]); | 1865 | i, si_type[i]); |
1866 | kfree(info); | 1866 | kfree(info); |
1867 | continue; | 1867 | continue; |
1868 | } | 1868 | } |
1869 | 1869 | ||
1870 | if (ports[i]) { | 1870 | if (ports[i]) { |
1871 | /* An I/O port */ | 1871 | /* An I/O port */ |
1872 | info->io_setup = port_setup; | 1872 | info->io_setup = port_setup; |
1873 | info->io.addr_data = ports[i]; | 1873 | info->io.addr_data = ports[i]; |
1874 | info->io.addr_type = IPMI_IO_ADDR_SPACE; | 1874 | info->io.addr_type = IPMI_IO_ADDR_SPACE; |
1875 | } else if (addrs[i]) { | 1875 | } else if (addrs[i]) { |
1876 | /* A memory port */ | 1876 | /* A memory port */ |
1877 | info->io_setup = mem_setup; | 1877 | info->io_setup = mem_setup; |
1878 | info->io.addr_data = addrs[i]; | 1878 | info->io.addr_data = addrs[i]; |
1879 | info->io.addr_type = IPMI_MEM_ADDR_SPACE; | 1879 | info->io.addr_type = IPMI_MEM_ADDR_SPACE; |
1880 | } else { | 1880 | } else { |
1881 | printk(KERN_WARNING PFX "Interface type specified " | 1881 | printk(KERN_WARNING PFX "Interface type specified " |
1882 | "for interface %d, but port and address were " | 1882 | "for interface %d, but port and address were " |
1883 | "not set or set to zero.\n", i); | 1883 | "not set or set to zero.\n", i); |
1884 | kfree(info); | 1884 | kfree(info); |
1885 | continue; | 1885 | continue; |
1886 | } | 1886 | } |
1887 | 1887 | ||
1888 | info->io.addr = NULL; | 1888 | info->io.addr = NULL; |
1889 | info->io.regspacing = regspacings[i]; | 1889 | info->io.regspacing = regspacings[i]; |
1890 | if (!info->io.regspacing) | 1890 | if (!info->io.regspacing) |
1891 | info->io.regspacing = DEFAULT_REGSPACING; | 1891 | info->io.regspacing = DEFAULT_REGSPACING; |
1892 | info->io.regsize = regsizes[i]; | 1892 | info->io.regsize = regsizes[i]; |
1893 | if (!info->io.regsize) | 1893 | if (!info->io.regsize) |
1894 | info->io.regsize = DEFAULT_REGSPACING; | 1894 | info->io.regsize = DEFAULT_REGSPACING; |
1895 | info->io.regshift = regshifts[i]; | 1895 | info->io.regshift = regshifts[i]; |
1896 | info->irq = irqs[i]; | 1896 | info->irq = irqs[i]; |
1897 | if (info->irq) | 1897 | if (info->irq) |
1898 | info->irq_setup = std_irq_setup; | 1898 | info->irq_setup = std_irq_setup; |
1899 | info->slave_addr = slave_addrs[i]; | 1899 | info->slave_addr = slave_addrs[i]; |
1900 | 1900 | ||
1901 | if (!add_smi(info)) { | 1901 | if (!add_smi(info)) { |
1902 | if (try_smi_init(info)) | 1902 | if (try_smi_init(info)) |
1903 | cleanup_one_si(info); | 1903 | cleanup_one_si(info); |
1904 | ret = 0; | 1904 | ret = 0; |
1905 | } else { | 1905 | } else { |
1906 | kfree(info); | 1906 | kfree(info); |
1907 | } | 1907 | } |
1908 | } | 1908 | } |
1909 | return ret; | 1909 | return ret; |
1910 | } | 1910 | } |
1911 | 1911 | ||
1912 | #ifdef CONFIG_ACPI | 1912 | #ifdef CONFIG_ACPI |
1913 | 1913 | ||
1914 | #include <linux/acpi.h> | 1914 | #include <linux/acpi.h> |
1915 | 1915 | ||
1916 | /* | 1916 | /* |
1917 | * Once we get an ACPI failure, we don't try any more, because we go | 1917 | * Once we get an ACPI failure, we don't try any more, because we go |
1918 | * through the tables sequentially. Once we don't find a table, there | 1918 | * through the tables sequentially. Once we don't find a table, there |
1919 | * are no more. | 1919 | * are no more. |
1920 | */ | 1920 | */ |
1921 | static int acpi_failure; | 1921 | static int acpi_failure; |
1922 | 1922 | ||
1923 | /* For GPE-type interrupts. */ | 1923 | /* For GPE-type interrupts. */ |
1924 | static u32 ipmi_acpi_gpe(acpi_handle gpe_device, | 1924 | static u32 ipmi_acpi_gpe(acpi_handle gpe_device, |
1925 | u32 gpe_number, void *context) | 1925 | u32 gpe_number, void *context) |
1926 | { | 1926 | { |
1927 | struct smi_info *smi_info = context; | 1927 | struct smi_info *smi_info = context; |
1928 | unsigned long flags; | 1928 | unsigned long flags; |
1929 | #ifdef DEBUG_TIMING | 1929 | #ifdef DEBUG_TIMING |
1930 | struct timeval t; | 1930 | struct timeval t; |
1931 | #endif | 1931 | #endif |
1932 | 1932 | ||
1933 | spin_lock_irqsave(&(smi_info->si_lock), flags); | 1933 | spin_lock_irqsave(&(smi_info->si_lock), flags); |
1934 | 1934 | ||
1935 | smi_inc_stat(smi_info, interrupts); | 1935 | smi_inc_stat(smi_info, interrupts); |
1936 | 1936 | ||
1937 | #ifdef DEBUG_TIMING | 1937 | #ifdef DEBUG_TIMING |
1938 | do_gettimeofday(&t); | 1938 | do_gettimeofday(&t); |
1939 | printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec); | 1939 | printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec); |
1940 | #endif | 1940 | #endif |
1941 | smi_event_handler(smi_info, 0); | 1941 | smi_event_handler(smi_info, 0); |
1942 | spin_unlock_irqrestore(&(smi_info->si_lock), flags); | 1942 | spin_unlock_irqrestore(&(smi_info->si_lock), flags); |
1943 | 1943 | ||
1944 | return ACPI_INTERRUPT_HANDLED; | 1944 | return ACPI_INTERRUPT_HANDLED; |
1945 | } | 1945 | } |
1946 | 1946 | ||
1947 | static void acpi_gpe_irq_cleanup(struct smi_info *info) | 1947 | static void acpi_gpe_irq_cleanup(struct smi_info *info) |
1948 | { | 1948 | { |
1949 | if (!info->irq) | 1949 | if (!info->irq) |
1950 | return; | 1950 | return; |
1951 | 1951 | ||
1952 | acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe); | 1952 | acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe); |
1953 | } | 1953 | } |
1954 | 1954 | ||
1955 | static int acpi_gpe_irq_setup(struct smi_info *info) | 1955 | static int acpi_gpe_irq_setup(struct smi_info *info) |
1956 | { | 1956 | { |
1957 | acpi_status status; | 1957 | acpi_status status; |
1958 | 1958 | ||
1959 | if (!info->irq) | 1959 | if (!info->irq) |
1960 | return 0; | 1960 | return 0; |
1961 | 1961 | ||
1962 | /* FIXME - is level triggered right? */ | 1962 | /* FIXME - is level triggered right? */ |
1963 | status = acpi_install_gpe_handler(NULL, | 1963 | status = acpi_install_gpe_handler(NULL, |
1964 | info->irq, | 1964 | info->irq, |
1965 | ACPI_GPE_LEVEL_TRIGGERED, | 1965 | ACPI_GPE_LEVEL_TRIGGERED, |
1966 | &ipmi_acpi_gpe, | 1966 | &ipmi_acpi_gpe, |
1967 | info); | 1967 | info); |
1968 | if (status != AE_OK) { | 1968 | if (status != AE_OK) { |
1969 | dev_warn(info->dev, "%s unable to claim ACPI GPE %d," | 1969 | dev_warn(info->dev, "%s unable to claim ACPI GPE %d," |
1970 | " running polled\n", DEVICE_NAME, info->irq); | 1970 | " running polled\n", DEVICE_NAME, info->irq); |
1971 | info->irq = 0; | 1971 | info->irq = 0; |
1972 | return -EINVAL; | 1972 | return -EINVAL; |
1973 | } else { | 1973 | } else { |
1974 | info->irq_cleanup = acpi_gpe_irq_cleanup; | 1974 | info->irq_cleanup = acpi_gpe_irq_cleanup; |
1975 | dev_info(info->dev, "Using ACPI GPE %d\n", info->irq); | 1975 | dev_info(info->dev, "Using ACPI GPE %d\n", info->irq); |
1976 | return 0; | 1976 | return 0; |
1977 | } | 1977 | } |
1978 | } | 1978 | } |
1979 | 1979 | ||
1980 | /* | 1980 | /* |
1981 | * Defined at | 1981 | * Defined at |
1982 | * http://h21007.www2.hp.com/portal/download/files/unprot/hpspmi.pdf | 1982 | * http://h21007.www2.hp.com/portal/download/files/unprot/hpspmi.pdf |
1983 | */ | 1983 | */ |
1984 | struct SPMITable { | 1984 | struct SPMITable { |
1985 | s8 Signature[4]; | 1985 | s8 Signature[4]; |
1986 | u32 Length; | 1986 | u32 Length; |
1987 | u8 Revision; | 1987 | u8 Revision; |
1988 | u8 Checksum; | 1988 | u8 Checksum; |
1989 | s8 OEMID[6]; | 1989 | s8 OEMID[6]; |
1990 | s8 OEMTableID[8]; | 1990 | s8 OEMTableID[8]; |
1991 | s8 OEMRevision[4]; | 1991 | s8 OEMRevision[4]; |
1992 | s8 CreatorID[4]; | 1992 | s8 CreatorID[4]; |
1993 | s8 CreatorRevision[4]; | 1993 | s8 CreatorRevision[4]; |
1994 | u8 InterfaceType; | 1994 | u8 InterfaceType; |
1995 | u8 IPMIlegacy; | 1995 | u8 IPMIlegacy; |
1996 | s16 SpecificationRevision; | 1996 | s16 SpecificationRevision; |
1997 | 1997 | ||
1998 | /* | 1998 | /* |
1999 | * Bit 0 - SCI interrupt supported | 1999 | * Bit 0 - SCI interrupt supported |
2000 | * Bit 1 - I/O APIC/SAPIC | 2000 | * Bit 1 - I/O APIC/SAPIC |
2001 | */ | 2001 | */ |
2002 | u8 InterruptType; | 2002 | u8 InterruptType; |
2003 | 2003 | ||
2004 | /* | 2004 | /* |
2005 | * If bit 0 of InterruptType is set, then this is the SCI | 2005 | * If bit 0 of InterruptType is set, then this is the SCI |
2006 | * interrupt in the GPEx_STS register. | 2006 | * interrupt in the GPEx_STS register. |
2007 | */ | 2007 | */ |
2008 | u8 GPE; | 2008 | u8 GPE; |
2009 | 2009 | ||
2010 | s16 Reserved; | 2010 | s16 Reserved; |
2011 | 2011 | ||
2012 | /* | 2012 | /* |
2013 | * If bit 1 of InterruptType is set, then this is the I/O | 2013 | * If bit 1 of InterruptType is set, then this is the I/O |
2014 | * APIC/SAPIC interrupt. | 2014 | * APIC/SAPIC interrupt. |
2015 | */ | 2015 | */ |
2016 | u32 GlobalSystemInterrupt; | 2016 | u32 GlobalSystemInterrupt; |
2017 | 2017 | ||
2018 | /* The actual register address. */ | 2018 | /* The actual register address. */ |
2019 | struct acpi_generic_address addr; | 2019 | struct acpi_generic_address addr; |
2020 | 2020 | ||
2021 | u8 UID[4]; | 2021 | u8 UID[4]; |
2022 | 2022 | ||
2023 | s8 spmi_id[1]; /* A '\0' terminated array starts here. */ | 2023 | s8 spmi_id[1]; /* A '\0' terminated array starts here. */ |
2024 | }; | 2024 | }; |
2025 | 2025 | ||
2026 | static int __devinit try_init_spmi(struct SPMITable *spmi) | 2026 | static int try_init_spmi(struct SPMITable *spmi) |
2027 | { | 2027 | { |
2028 | struct smi_info *info; | 2028 | struct smi_info *info; |
2029 | 2029 | ||
2030 | if (spmi->IPMIlegacy != 1) { | 2030 | if (spmi->IPMIlegacy != 1) { |
2031 | printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy); | 2031 | printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy); |
2032 | return -ENODEV; | 2032 | return -ENODEV; |
2033 | } | 2033 | } |
2034 | 2034 | ||
2035 | info = smi_info_alloc(); | 2035 | info = smi_info_alloc(); |
2036 | if (!info) { | 2036 | if (!info) { |
2037 | printk(KERN_ERR PFX "Could not allocate SI data (3)\n"); | 2037 | printk(KERN_ERR PFX "Could not allocate SI data (3)\n"); |
2038 | return -ENOMEM; | 2038 | return -ENOMEM; |
2039 | } | 2039 | } |
2040 | 2040 | ||
2041 | info->addr_source = SI_SPMI; | 2041 | info->addr_source = SI_SPMI; |
2042 | printk(KERN_INFO PFX "probing via SPMI\n"); | 2042 | printk(KERN_INFO PFX "probing via SPMI\n"); |
2043 | 2043 | ||
2044 | /* Figure out the interface type. */ | 2044 | /* Figure out the interface type. */ |
2045 | switch (spmi->InterfaceType) { | 2045 | switch (spmi->InterfaceType) { |
2046 | case 1: /* KCS */ | 2046 | case 1: /* KCS */ |
2047 | info->si_type = SI_KCS; | 2047 | info->si_type = SI_KCS; |
2048 | break; | 2048 | break; |
2049 | case 2: /* SMIC */ | 2049 | case 2: /* SMIC */ |
2050 | info->si_type = SI_SMIC; | 2050 | info->si_type = SI_SMIC; |
2051 | break; | 2051 | break; |
2052 | case 3: /* BT */ | 2052 | case 3: /* BT */ |
2053 | info->si_type = SI_BT; | 2053 | info->si_type = SI_BT; |
2054 | break; | 2054 | break; |
2055 | default: | 2055 | default: |
2056 | printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n", | 2056 | printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n", |
2057 | spmi->InterfaceType); | 2057 | spmi->InterfaceType); |
2058 | kfree(info); | 2058 | kfree(info); |
2059 | return -EIO; | 2059 | return -EIO; |
2060 | } | 2060 | } |
2061 | 2061 | ||
2062 | if (spmi->InterruptType & 1) { | 2062 | if (spmi->InterruptType & 1) { |
2063 | /* We've got a GPE interrupt. */ | 2063 | /* We've got a GPE interrupt. */ |
2064 | info->irq = spmi->GPE; | 2064 | info->irq = spmi->GPE; |
2065 | info->irq_setup = acpi_gpe_irq_setup; | 2065 | info->irq_setup = acpi_gpe_irq_setup; |
2066 | } else if (spmi->InterruptType & 2) { | 2066 | } else if (spmi->InterruptType & 2) { |
2067 | /* We've got an APIC/SAPIC interrupt. */ | 2067 | /* We've got an APIC/SAPIC interrupt. */ |
2068 | info->irq = spmi->GlobalSystemInterrupt; | 2068 | info->irq = spmi->GlobalSystemInterrupt; |
2069 | info->irq_setup = std_irq_setup; | 2069 | info->irq_setup = std_irq_setup; |
2070 | } else { | 2070 | } else { |
2071 | /* Use the default interrupt setting. */ | 2071 | /* Use the default interrupt setting. */ |
2072 | info->irq = 0; | 2072 | info->irq = 0; |
2073 | info->irq_setup = NULL; | 2073 | info->irq_setup = NULL; |
2074 | } | 2074 | } |
2075 | 2075 | ||
2076 | if (spmi->addr.bit_width) { | 2076 | if (spmi->addr.bit_width) { |
2077 | /* A (hopefully) properly formed register bit width. */ | 2077 | /* A (hopefully) properly formed register bit width. */ |
2078 | info->io.regspacing = spmi->addr.bit_width / 8; | 2078 | info->io.regspacing = spmi->addr.bit_width / 8; |
2079 | } else { | 2079 | } else { |
2080 | info->io.regspacing = DEFAULT_REGSPACING; | 2080 | info->io.regspacing = DEFAULT_REGSPACING; |
2081 | } | 2081 | } |
2082 | info->io.regsize = info->io.regspacing; | 2082 | info->io.regsize = info->io.regspacing; |
2083 | info->io.regshift = spmi->addr.bit_offset; | 2083 | info->io.regshift = spmi->addr.bit_offset; |
2084 | 2084 | ||
2085 | if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { | 2085 | if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { |
2086 | info->io_setup = mem_setup; | 2086 | info->io_setup = mem_setup; |
2087 | info->io.addr_type = IPMI_MEM_ADDR_SPACE; | 2087 | info->io.addr_type = IPMI_MEM_ADDR_SPACE; |
2088 | } else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) { | 2088 | } else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) { |
2089 | info->io_setup = port_setup; | 2089 | info->io_setup = port_setup; |
2090 | info->io.addr_type = IPMI_IO_ADDR_SPACE; | 2090 | info->io.addr_type = IPMI_IO_ADDR_SPACE; |
2091 | } else { | 2091 | } else { |
2092 | kfree(info); | 2092 | kfree(info); |
2093 | printk(KERN_WARNING PFX "Unknown ACPI I/O Address type\n"); | 2093 | printk(KERN_WARNING PFX "Unknown ACPI I/O Address type\n"); |
2094 | return -EIO; | 2094 | return -EIO; |
2095 | } | 2095 | } |
2096 | info->io.addr_data = spmi->addr.address; | 2096 | info->io.addr_data = spmi->addr.address; |
2097 | 2097 | ||
2098 | pr_info("ipmi_si: SPMI: %s %#lx regsize %d spacing %d irq %d\n", | 2098 | pr_info("ipmi_si: SPMI: %s %#lx regsize %d spacing %d irq %d\n", |
2099 | (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem", | 2099 | (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem", |
2100 | info->io.addr_data, info->io.regsize, info->io.regspacing, | 2100 | info->io.addr_data, info->io.regsize, info->io.regspacing, |
2101 | info->irq); | 2101 | info->irq); |
2102 | 2102 | ||
2103 | if (add_smi(info)) | 2103 | if (add_smi(info)) |
2104 | kfree(info); | 2104 | kfree(info); |
2105 | 2105 | ||
2106 | return 0; | 2106 | return 0; |
2107 | } | 2107 | } |
2108 | 2108 | ||
2109 | static void __devinit spmi_find_bmc(void) | 2109 | static void spmi_find_bmc(void) |
2110 | { | 2110 | { |
2111 | acpi_status status; | 2111 | acpi_status status; |
2112 | struct SPMITable *spmi; | 2112 | struct SPMITable *spmi; |
2113 | int i; | 2113 | int i; |
2114 | 2114 | ||
2115 | if (acpi_disabled) | 2115 | if (acpi_disabled) |
2116 | return; | 2116 | return; |
2117 | 2117 | ||
2118 | if (acpi_failure) | 2118 | if (acpi_failure) |
2119 | return; | 2119 | return; |
2120 | 2120 | ||
2121 | for (i = 0; ; i++) { | 2121 | for (i = 0; ; i++) { |
2122 | status = acpi_get_table(ACPI_SIG_SPMI, i+1, | 2122 | status = acpi_get_table(ACPI_SIG_SPMI, i+1, |
2123 | (struct acpi_table_header **)&spmi); | 2123 | (struct acpi_table_header **)&spmi); |
2124 | if (status != AE_OK) | 2124 | if (status != AE_OK) |
2125 | return; | 2125 | return; |
2126 | 2126 | ||
2127 | try_init_spmi(spmi); | 2127 | try_init_spmi(spmi); |
2128 | } | 2128 | } |
2129 | } | 2129 | } |
2130 | 2130 | ||
2131 | static int __devinit ipmi_pnp_probe(struct pnp_dev *dev, | 2131 | static int ipmi_pnp_probe(struct pnp_dev *dev, |
2132 | const struct pnp_device_id *dev_id) | 2132 | const struct pnp_device_id *dev_id) |
2133 | { | 2133 | { |
2134 | struct acpi_device *acpi_dev; | 2134 | struct acpi_device *acpi_dev; |
2135 | struct smi_info *info; | 2135 | struct smi_info *info; |
2136 | struct resource *res, *res_second; | 2136 | struct resource *res, *res_second; |
2137 | acpi_handle handle; | 2137 | acpi_handle handle; |
2138 | acpi_status status; | 2138 | acpi_status status; |
2139 | unsigned long long tmp; | 2139 | unsigned long long tmp; |
2140 | 2140 | ||
2141 | acpi_dev = pnp_acpi_device(dev); | 2141 | acpi_dev = pnp_acpi_device(dev); |
2142 | if (!acpi_dev) | 2142 | if (!acpi_dev) |
2143 | return -ENODEV; | 2143 | return -ENODEV; |
2144 | 2144 | ||
2145 | info = smi_info_alloc(); | 2145 | info = smi_info_alloc(); |
2146 | if (!info) | 2146 | if (!info) |
2147 | return -ENOMEM; | 2147 | return -ENOMEM; |
2148 | 2148 | ||
2149 | info->addr_source = SI_ACPI; | 2149 | info->addr_source = SI_ACPI; |
2150 | printk(KERN_INFO PFX "probing via ACPI\n"); | 2150 | printk(KERN_INFO PFX "probing via ACPI\n"); |
2151 | 2151 | ||
2152 | handle = acpi_dev->handle; | 2152 | handle = acpi_dev->handle; |
2153 | info->addr_info.acpi_info.acpi_handle = handle; | 2153 | info->addr_info.acpi_info.acpi_handle = handle; |
2154 | 2154 | ||
2155 | /* _IFT tells us the interface type: KCS, BT, etc */ | 2155 | /* _IFT tells us the interface type: KCS, BT, etc */ |
2156 | status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp); | 2156 | status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp); |
2157 | if (ACPI_FAILURE(status)) | 2157 | if (ACPI_FAILURE(status)) |
2158 | goto err_free; | 2158 | goto err_free; |
2159 | 2159 | ||
2160 | switch (tmp) { | 2160 | switch (tmp) { |
2161 | case 1: | 2161 | case 1: |
2162 | info->si_type = SI_KCS; | 2162 | info->si_type = SI_KCS; |
2163 | break; | 2163 | break; |
2164 | case 2: | 2164 | case 2: |
2165 | info->si_type = SI_SMIC; | 2165 | info->si_type = SI_SMIC; |
2166 | break; | 2166 | break; |
2167 | case 3: | 2167 | case 3: |
2168 | info->si_type = SI_BT; | 2168 | info->si_type = SI_BT; |
2169 | break; | 2169 | break; |
2170 | default: | 2170 | default: |
2171 | dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp); | 2171 | dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp); |
2172 | goto err_free; | 2172 | goto err_free; |
2173 | } | 2173 | } |
2174 | 2174 | ||
2175 | res = pnp_get_resource(dev, IORESOURCE_IO, 0); | 2175 | res = pnp_get_resource(dev, IORESOURCE_IO, 0); |
2176 | if (res) { | 2176 | if (res) { |
2177 | info->io_setup = port_setup; | 2177 | info->io_setup = port_setup; |
2178 | info->io.addr_type = IPMI_IO_ADDR_SPACE; | 2178 | info->io.addr_type = IPMI_IO_ADDR_SPACE; |
2179 | } else { | 2179 | } else { |
2180 | res = pnp_get_resource(dev, IORESOURCE_MEM, 0); | 2180 | res = pnp_get_resource(dev, IORESOURCE_MEM, 0); |
2181 | if (res) { | 2181 | if (res) { |
2182 | info->io_setup = mem_setup; | 2182 | info->io_setup = mem_setup; |
2183 | info->io.addr_type = IPMI_MEM_ADDR_SPACE; | 2183 | info->io.addr_type = IPMI_MEM_ADDR_SPACE; |
2184 | } | 2184 | } |
2185 | } | 2185 | } |
2186 | if (!res) { | 2186 | if (!res) { |
2187 | dev_err(&dev->dev, "no I/O or memory address\n"); | 2187 | dev_err(&dev->dev, "no I/O or memory address\n"); |
2188 | goto err_free; | 2188 | goto err_free; |
2189 | } | 2189 | } |
2190 | info->io.addr_data = res->start; | 2190 | info->io.addr_data = res->start; |
2191 | 2191 | ||
2192 | info->io.regspacing = DEFAULT_REGSPACING; | 2192 | info->io.regspacing = DEFAULT_REGSPACING; |
2193 | res_second = pnp_get_resource(dev, | 2193 | res_second = pnp_get_resource(dev, |
2194 | (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? | 2194 | (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? |
2195 | IORESOURCE_IO : IORESOURCE_MEM, | 2195 | IORESOURCE_IO : IORESOURCE_MEM, |
2196 | 1); | 2196 | 1); |
2197 | if (res_second) { | 2197 | if (res_second) { |
2198 | if (res_second->start > info->io.addr_data) | 2198 | if (res_second->start > info->io.addr_data) |
2199 | info->io.regspacing = res_second->start - info->io.addr_data; | 2199 | info->io.regspacing = res_second->start - info->io.addr_data; |
2200 | } | 2200 | } |
2201 | info->io.regsize = DEFAULT_REGSPACING; | 2201 | info->io.regsize = DEFAULT_REGSPACING; |
2202 | info->io.regshift = 0; | 2202 | info->io.regshift = 0; |
2203 | 2203 | ||
2204 | /* If _GPE exists, use it; otherwise use standard interrupts */ | 2204 | /* If _GPE exists, use it; otherwise use standard interrupts */ |
2205 | status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp); | 2205 | status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp); |
2206 | if (ACPI_SUCCESS(status)) { | 2206 | if (ACPI_SUCCESS(status)) { |
2207 | info->irq = tmp; | 2207 | info->irq = tmp; |
2208 | info->irq_setup = acpi_gpe_irq_setup; | 2208 | info->irq_setup = acpi_gpe_irq_setup; |
2209 | } else if (pnp_irq_valid(dev, 0)) { | 2209 | } else if (pnp_irq_valid(dev, 0)) { |
2210 | info->irq = pnp_irq(dev, 0); | 2210 | info->irq = pnp_irq(dev, 0); |
2211 | info->irq_setup = std_irq_setup; | 2211 | info->irq_setup = std_irq_setup; |
2212 | } | 2212 | } |
2213 | 2213 | ||
2214 | info->dev = &dev->dev; | 2214 | info->dev = &dev->dev; |
2215 | pnp_set_drvdata(dev, info); | 2215 | pnp_set_drvdata(dev, info); |
2216 | 2216 | ||
2217 | dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n", | 2217 | dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n", |
2218 | res, info->io.regsize, info->io.regspacing, | 2218 | res, info->io.regsize, info->io.regspacing, |
2219 | info->irq); | 2219 | info->irq); |
2220 | 2220 | ||
2221 | if (add_smi(info)) | 2221 | if (add_smi(info)) |
2222 | goto err_free; | 2222 | goto err_free; |
2223 | 2223 | ||
2224 | return 0; | 2224 | return 0; |
2225 | 2225 | ||
2226 | err_free: | 2226 | err_free: |
2227 | kfree(info); | 2227 | kfree(info); |
2228 | return -EINVAL; | 2228 | return -EINVAL; |
2229 | } | 2229 | } |
2230 | 2230 | ||
2231 | static void __devexit ipmi_pnp_remove(struct pnp_dev *dev) | 2231 | static void __devexit ipmi_pnp_remove(struct pnp_dev *dev) |
2232 | { | 2232 | { |
2233 | struct smi_info *info = pnp_get_drvdata(dev); | 2233 | struct smi_info *info = pnp_get_drvdata(dev); |
2234 | 2234 | ||
2235 | cleanup_one_si(info); | 2235 | cleanup_one_si(info); |
2236 | } | 2236 | } |
2237 | 2237 | ||
2238 | static const struct pnp_device_id pnp_dev_table[] = { | 2238 | static const struct pnp_device_id pnp_dev_table[] = { |
2239 | {"IPI0001", 0}, | 2239 | {"IPI0001", 0}, |
2240 | {"", 0}, | 2240 | {"", 0}, |
2241 | }; | 2241 | }; |
2242 | 2242 | ||
2243 | static struct pnp_driver ipmi_pnp_driver = { | 2243 | static struct pnp_driver ipmi_pnp_driver = { |
2244 | .name = DEVICE_NAME, | 2244 | .name = DEVICE_NAME, |
2245 | .probe = ipmi_pnp_probe, | 2245 | .probe = ipmi_pnp_probe, |
2246 | .remove = __devexit_p(ipmi_pnp_remove), | 2246 | .remove = __devexit_p(ipmi_pnp_remove), |
2247 | .id_table = pnp_dev_table, | 2247 | .id_table = pnp_dev_table, |
2248 | }; | 2248 | }; |
2249 | #endif | 2249 | #endif |
2250 | 2250 | ||
2251 | #ifdef CONFIG_DMI | 2251 | #ifdef CONFIG_DMI |
2252 | struct dmi_ipmi_data { | 2252 | struct dmi_ipmi_data { |
2253 | u8 type; | 2253 | u8 type; |
2254 | u8 addr_space; | 2254 | u8 addr_space; |
2255 | unsigned long base_addr; | 2255 | unsigned long base_addr; |
2256 | u8 irq; | 2256 | u8 irq; |
2257 | u8 offset; | 2257 | u8 offset; |
2258 | u8 slave_addr; | 2258 | u8 slave_addr; |
2259 | }; | 2259 | }; |
2260 | 2260 | ||
2261 | static int __devinit decode_dmi(const struct dmi_header *dm, | 2261 | static int decode_dmi(const struct dmi_header *dm, |
2262 | struct dmi_ipmi_data *dmi) | 2262 | struct dmi_ipmi_data *dmi) |
2263 | { | 2263 | { |
2264 | const u8 *data = (const u8 *)dm; | 2264 | const u8 *data = (const u8 *)dm; |
2265 | unsigned long base_addr; | 2265 | unsigned long base_addr; |
2266 | u8 reg_spacing; | 2266 | u8 reg_spacing; |
2267 | u8 len = dm->length; | 2267 | u8 len = dm->length; |
2268 | 2268 | ||
2269 | dmi->type = data[4]; | 2269 | dmi->type = data[4]; |
2270 | 2270 | ||
2271 | memcpy(&base_addr, data+8, sizeof(unsigned long)); | 2271 | memcpy(&base_addr, data+8, sizeof(unsigned long)); |
2272 | if (len >= 0x11) { | 2272 | if (len >= 0x11) { |
2273 | if (base_addr & 1) { | 2273 | if (base_addr & 1) { |
2274 | /* I/O */ | 2274 | /* I/O */ |
2275 | base_addr &= 0xFFFE; | 2275 | base_addr &= 0xFFFE; |
2276 | dmi->addr_space = IPMI_IO_ADDR_SPACE; | 2276 | dmi->addr_space = IPMI_IO_ADDR_SPACE; |
2277 | } else | 2277 | } else |
2278 | /* Memory */ | 2278 | /* Memory */ |
2279 | dmi->addr_space = IPMI_MEM_ADDR_SPACE; | 2279 | dmi->addr_space = IPMI_MEM_ADDR_SPACE; |
2280 | 2280 | ||
2281 | /* If bit 4 of byte 0x10 is set, then the lsb for the address | 2281 | /* If bit 4 of byte 0x10 is set, then the lsb for the address |
2282 | is odd. */ | 2282 | is odd. */ |
2283 | dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4); | 2283 | dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4); |
2284 | 2284 | ||
2285 | dmi->irq = data[0x11]; | 2285 | dmi->irq = data[0x11]; |
2286 | 2286 | ||
2287 | /* The top two bits of byte 0x10 hold the register spacing. */ | 2287 | /* The top two bits of byte 0x10 hold the register spacing. */ |
2288 | reg_spacing = (data[0x10] & 0xC0) >> 6; | 2288 | reg_spacing = (data[0x10] & 0xC0) >> 6; |
2289 | switch (reg_spacing) { | 2289 | switch (reg_spacing) { |
2290 | case 0x00: /* Byte boundaries */ | 2290 | case 0x00: /* Byte boundaries */ |
2291 | dmi->offset = 1; | 2291 | dmi->offset = 1; |
2292 | break; | 2292 | break; |
2293 | case 0x01: /* 32-bit boundaries */ | 2293 | case 0x01: /* 32-bit boundaries */ |
2294 | dmi->offset = 4; | 2294 | dmi->offset = 4; |
2295 | break; | 2295 | break; |
2296 | case 0x02: /* 16-byte boundaries */ | 2296 | case 0x02: /* 16-byte boundaries */ |
2297 | dmi->offset = 16; | 2297 | dmi->offset = 16; |
2298 | break; | 2298 | break; |
2299 | default: | 2299 | default: |
2300 | /* Some other interface, just ignore it. */ | 2300 | /* Some other interface, just ignore it. */ |
2301 | return -EIO; | 2301 | return -EIO; |
2302 | } | 2302 | } |
2303 | } else { | 2303 | } else { |
2304 | /* Old DMI spec. */ | 2304 | /* Old DMI spec. */ |
2305 | /* | 2305 | /* |
2306 | * Note that technically, the lower bit of the base | 2306 | * Note that technically, the lower bit of the base |
2307 | * address should be 1 if the address is I/O and 0 if | 2307 | * address should be 1 if the address is I/O and 0 if |
2308 | * the address is in memory. So many systems get that | 2308 | * the address is in memory. So many systems get that |
2309 | * wrong (and all that I have seen are I/O) so we just | 2309 | * wrong (and all that I have seen are I/O) so we just |
2310 | * ignore that bit and assume I/O. Systems that use | 2310 | * ignore that bit and assume I/O. Systems that use |
2311 | * memory should use the newer spec, anyway. | 2311 | * memory should use the newer spec, anyway. |
2312 | */ | 2312 | */ |
2313 | dmi->base_addr = base_addr & 0xfffe; | 2313 | dmi->base_addr = base_addr & 0xfffe; |
2314 | dmi->addr_space = IPMI_IO_ADDR_SPACE; | 2314 | dmi->addr_space = IPMI_IO_ADDR_SPACE; |
2315 | dmi->offset = 1; | 2315 | dmi->offset = 1; |
2316 | } | 2316 | } |
2317 | 2317 | ||
2318 | dmi->slave_addr = data[6]; | 2318 | dmi->slave_addr = data[6]; |
2319 | 2319 | ||
2320 | return 0; | 2320 | return 0; |
2321 | } | 2321 | } |
2322 | 2322 | ||
2323 | static void __devinit try_init_dmi(struct dmi_ipmi_data *ipmi_data) | 2323 | static void try_init_dmi(struct dmi_ipmi_data *ipmi_data) |
2324 | { | 2324 | { |
2325 | struct smi_info *info; | 2325 | struct smi_info *info; |
2326 | 2326 | ||
2327 | info = smi_info_alloc(); | 2327 | info = smi_info_alloc(); |
2328 | if (!info) { | 2328 | if (!info) { |
2329 | printk(KERN_ERR PFX "Could not allocate SI data\n"); | 2329 | printk(KERN_ERR PFX "Could not allocate SI data\n"); |
2330 | return; | 2330 | return; |
2331 | } | 2331 | } |
2332 | 2332 | ||
2333 | info->addr_source = SI_SMBIOS; | 2333 | info->addr_source = SI_SMBIOS; |
2334 | printk(KERN_INFO PFX "probing via SMBIOS\n"); | 2334 | printk(KERN_INFO PFX "probing via SMBIOS\n"); |
2335 | 2335 | ||
2336 | switch (ipmi_data->type) { | 2336 | switch (ipmi_data->type) { |
2337 | case 0x01: /* KCS */ | 2337 | case 0x01: /* KCS */ |
2338 | info->si_type = SI_KCS; | 2338 | info->si_type = SI_KCS; |
2339 | break; | 2339 | break; |
2340 | case 0x02: /* SMIC */ | 2340 | case 0x02: /* SMIC */ |
2341 | info->si_type = SI_SMIC; | 2341 | info->si_type = SI_SMIC; |
2342 | break; | 2342 | break; |
2343 | case 0x03: /* BT */ | 2343 | case 0x03: /* BT */ |
2344 | info->si_type = SI_BT; | 2344 | info->si_type = SI_BT; |
2345 | break; | 2345 | break; |
2346 | default: | 2346 | default: |
2347 | kfree(info); | 2347 | kfree(info); |
2348 | return; | 2348 | return; |
2349 | } | 2349 | } |
2350 | 2350 | ||
2351 | switch (ipmi_data->addr_space) { | 2351 | switch (ipmi_data->addr_space) { |
2352 | case IPMI_MEM_ADDR_SPACE: | 2352 | case IPMI_MEM_ADDR_SPACE: |
2353 | info->io_setup = mem_setup; | 2353 | info->io_setup = mem_setup; |
2354 | info->io.addr_type = IPMI_MEM_ADDR_SPACE; | 2354 | info->io.addr_type = IPMI_MEM_ADDR_SPACE; |
2355 | break; | 2355 | break; |
2356 | 2356 | ||
2357 | case IPMI_IO_ADDR_SPACE: | 2357 | case IPMI_IO_ADDR_SPACE: |
2358 | info->io_setup = port_setup; | 2358 | info->io_setup = port_setup; |
2359 | info->io.addr_type = IPMI_IO_ADDR_SPACE; | 2359 | info->io.addr_type = IPMI_IO_ADDR_SPACE; |
2360 | break; | 2360 | break; |
2361 | 2361 | ||
2362 | default: | 2362 | default: |
2363 | kfree(info); | 2363 | kfree(info); |
2364 | printk(KERN_WARNING PFX "Unknown SMBIOS I/O Address type: %d\n", | 2364 | printk(KERN_WARNING PFX "Unknown SMBIOS I/O Address type: %d\n", |
2365 | ipmi_data->addr_space); | 2365 | ipmi_data->addr_space); |
2366 | return; | 2366 | return; |
2367 | } | 2367 | } |
2368 | info->io.addr_data = ipmi_data->base_addr; | 2368 | info->io.addr_data = ipmi_data->base_addr; |
2369 | 2369 | ||
2370 | info->io.regspacing = ipmi_data->offset; | 2370 | info->io.regspacing = ipmi_data->offset; |
2371 | if (!info->io.regspacing) | 2371 | if (!info->io.regspacing) |
2372 | info->io.regspacing = DEFAULT_REGSPACING; | 2372 | info->io.regspacing = DEFAULT_REGSPACING; |
2373 | info->io.regsize = DEFAULT_REGSPACING; | 2373 | info->io.regsize = DEFAULT_REGSPACING; |
2374 | info->io.regshift = 0; | 2374 | info->io.regshift = 0; |
2375 | 2375 | ||
2376 | info->slave_addr = ipmi_data->slave_addr; | 2376 | info->slave_addr = ipmi_data->slave_addr; |
2377 | 2377 | ||
2378 | info->irq = ipmi_data->irq; | 2378 | info->irq = ipmi_data->irq; |
2379 | if (info->irq) | 2379 | if (info->irq) |
2380 | info->irq_setup = std_irq_setup; | 2380 | info->irq_setup = std_irq_setup; |
2381 | 2381 | ||
2382 | pr_info("ipmi_si: SMBIOS: %s %#lx regsize %d spacing %d irq %d\n", | 2382 | pr_info("ipmi_si: SMBIOS: %s %#lx regsize %d spacing %d irq %d\n", |
2383 | (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem", | 2383 | (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem", |
2384 | info->io.addr_data, info->io.regsize, info->io.regspacing, | 2384 | info->io.addr_data, info->io.regsize, info->io.regspacing, |
2385 | info->irq); | 2385 | info->irq); |
2386 | 2386 | ||
2387 | if (add_smi(info)) | 2387 | if (add_smi(info)) |
2388 | kfree(info); | 2388 | kfree(info); |
2389 | } | 2389 | } |
2390 | 2390 | ||
2391 | static void __devinit dmi_find_bmc(void) | 2391 | static void dmi_find_bmc(void) |
2392 | { | 2392 | { |
2393 | const struct dmi_device *dev = NULL; | 2393 | const struct dmi_device *dev = NULL; |
2394 | struct dmi_ipmi_data data; | 2394 | struct dmi_ipmi_data data; |
2395 | int rv; | 2395 | int rv; |
2396 | 2396 | ||
2397 | while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) { | 2397 | while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) { |
2398 | memset(&data, 0, sizeof(data)); | 2398 | memset(&data, 0, sizeof(data)); |
2399 | rv = decode_dmi((const struct dmi_header *) dev->device_data, | 2399 | rv = decode_dmi((const struct dmi_header *) dev->device_data, |
2400 | &data); | 2400 | &data); |
2401 | if (!rv) | 2401 | if (!rv) |
2402 | try_init_dmi(&data); | 2402 | try_init_dmi(&data); |
2403 | } | 2403 | } |
2404 | } | 2404 | } |
2405 | #endif /* CONFIG_DMI */ | 2405 | #endif /* CONFIG_DMI */ |
2406 | 2406 | ||
2407 | #ifdef CONFIG_PCI | 2407 | #ifdef CONFIG_PCI |
2408 | 2408 | ||
2409 | #define PCI_ERMC_CLASSCODE 0x0C0700 | 2409 | #define PCI_ERMC_CLASSCODE 0x0C0700 |
2410 | #define PCI_ERMC_CLASSCODE_MASK 0xffffff00 | 2410 | #define PCI_ERMC_CLASSCODE_MASK 0xffffff00 |
2411 | #define PCI_ERMC_CLASSCODE_TYPE_MASK 0xff | 2411 | #define PCI_ERMC_CLASSCODE_TYPE_MASK 0xff |
2412 | #define PCI_ERMC_CLASSCODE_TYPE_SMIC 0x00 | 2412 | #define PCI_ERMC_CLASSCODE_TYPE_SMIC 0x00 |
2413 | #define PCI_ERMC_CLASSCODE_TYPE_KCS 0x01 | 2413 | #define PCI_ERMC_CLASSCODE_TYPE_KCS 0x01 |
2414 | #define PCI_ERMC_CLASSCODE_TYPE_BT 0x02 | 2414 | #define PCI_ERMC_CLASSCODE_TYPE_BT 0x02 |
2415 | 2415 | ||
2416 | #define PCI_HP_VENDOR_ID 0x103C | 2416 | #define PCI_HP_VENDOR_ID 0x103C |
2417 | #define PCI_MMC_DEVICE_ID 0x121A | 2417 | #define PCI_MMC_DEVICE_ID 0x121A |
2418 | #define PCI_MMC_ADDR_CW 0x10 | 2418 | #define PCI_MMC_ADDR_CW 0x10 |
2419 | 2419 | ||
2420 | static void ipmi_pci_cleanup(struct smi_info *info) | 2420 | static void ipmi_pci_cleanup(struct smi_info *info) |
2421 | { | 2421 | { |
2422 | struct pci_dev *pdev = info->addr_source_data; | 2422 | struct pci_dev *pdev = info->addr_source_data; |
2423 | 2423 | ||
2424 | pci_disable_device(pdev); | 2424 | pci_disable_device(pdev); |
2425 | } | 2425 | } |
2426 | 2426 | ||
2427 | static int __devinit ipmi_pci_probe_regspacing(struct smi_info *info) | 2427 | static int ipmi_pci_probe_regspacing(struct smi_info *info) |
2428 | { | 2428 | { |
2429 | if (info->si_type == SI_KCS) { | 2429 | if (info->si_type == SI_KCS) { |
2430 | unsigned char status; | 2430 | unsigned char status; |
2431 | int regspacing; | 2431 | int regspacing; |
2432 | 2432 | ||
2433 | info->io.regsize = DEFAULT_REGSIZE; | 2433 | info->io.regsize = DEFAULT_REGSIZE; |
2434 | info->io.regshift = 0; | 2434 | info->io.regshift = 0; |
2435 | info->io_size = 2; | 2435 | info->io_size = 2; |
2436 | info->handlers = &kcs_smi_handlers; | 2436 | info->handlers = &kcs_smi_handlers; |
2437 | 2437 | ||
2438 | /* detect 1, 4, 16byte spacing */ | 2438 | /* detect 1, 4, 16byte spacing */ |
2439 | for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) { | 2439 | for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) { |
2440 | info->io.regspacing = regspacing; | 2440 | info->io.regspacing = regspacing; |
2441 | if (info->io_setup(info)) { | 2441 | if (info->io_setup(info)) { |
2442 | dev_err(info->dev, | 2442 | dev_err(info->dev, |
2443 | "Could not setup I/O space\n"); | 2443 | "Could not setup I/O space\n"); |
2444 | return DEFAULT_REGSPACING; | 2444 | return DEFAULT_REGSPACING; |
2445 | } | 2445 | } |
2446 | /* write invalid cmd */ | 2446 | /* write invalid cmd */ |
2447 | info->io.outputb(&info->io, 1, 0x10); | 2447 | info->io.outputb(&info->io, 1, 0x10); |
2448 | /* read status back */ | 2448 | /* read status back */ |
2449 | status = info->io.inputb(&info->io, 1); | 2449 | status = info->io.inputb(&info->io, 1); |
2450 | info->io_cleanup(info); | 2450 | info->io_cleanup(info); |
2451 | if (status) | 2451 | if (status) |
2452 | return regspacing; | 2452 | return regspacing; |
2453 | regspacing *= 4; | 2453 | regspacing *= 4; |
2454 | } | 2454 | } |
2455 | } | 2455 | } |
2456 | return DEFAULT_REGSPACING; | 2456 | return DEFAULT_REGSPACING; |
2457 | } | 2457 | } |
2458 | 2458 | ||
2459 | static int __devinit ipmi_pci_probe(struct pci_dev *pdev, | 2459 | static int ipmi_pci_probe(struct pci_dev *pdev, |
2460 | const struct pci_device_id *ent) | 2460 | const struct pci_device_id *ent) |
2461 | { | 2461 | { |
2462 | int rv; | 2462 | int rv; |
2463 | int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK; | 2463 | int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK; |
2464 | struct smi_info *info; | 2464 | struct smi_info *info; |
2465 | 2465 | ||
2466 | info = smi_info_alloc(); | 2466 | info = smi_info_alloc(); |
2467 | if (!info) | 2467 | if (!info) |
2468 | return -ENOMEM; | 2468 | return -ENOMEM; |
2469 | 2469 | ||
2470 | info->addr_source = SI_PCI; | 2470 | info->addr_source = SI_PCI; |
2471 | dev_info(&pdev->dev, "probing via PCI"); | 2471 | dev_info(&pdev->dev, "probing via PCI"); |
2472 | 2472 | ||
2473 | switch (class_type) { | 2473 | switch (class_type) { |
2474 | case PCI_ERMC_CLASSCODE_TYPE_SMIC: | 2474 | case PCI_ERMC_CLASSCODE_TYPE_SMIC: |
2475 | info->si_type = SI_SMIC; | 2475 | info->si_type = SI_SMIC; |
2476 | break; | 2476 | break; |
2477 | 2477 | ||
2478 | case PCI_ERMC_CLASSCODE_TYPE_KCS: | 2478 | case PCI_ERMC_CLASSCODE_TYPE_KCS: |
2479 | info->si_type = SI_KCS; | 2479 | info->si_type = SI_KCS; |
2480 | break; | 2480 | break; |
2481 | 2481 | ||
2482 | case PCI_ERMC_CLASSCODE_TYPE_BT: | 2482 | case PCI_ERMC_CLASSCODE_TYPE_BT: |
2483 | info->si_type = SI_BT; | 2483 | info->si_type = SI_BT; |
2484 | break; | 2484 | break; |
2485 | 2485 | ||
2486 | default: | 2486 | default: |
2487 | kfree(info); | 2487 | kfree(info); |
2488 | dev_info(&pdev->dev, "Unknown IPMI type: %d\n", class_type); | 2488 | dev_info(&pdev->dev, "Unknown IPMI type: %d\n", class_type); |
2489 | return -ENOMEM; | 2489 | return -ENOMEM; |
2490 | } | 2490 | } |
2491 | 2491 | ||
2492 | rv = pci_enable_device(pdev); | 2492 | rv = pci_enable_device(pdev); |
2493 | if (rv) { | 2493 | if (rv) { |
2494 | dev_err(&pdev->dev, "couldn't enable PCI device\n"); | 2494 | dev_err(&pdev->dev, "couldn't enable PCI device\n"); |
2495 | kfree(info); | 2495 | kfree(info); |
2496 | return rv; | 2496 | return rv; |
2497 | } | 2497 | } |
2498 | 2498 | ||
2499 | info->addr_source_cleanup = ipmi_pci_cleanup; | 2499 | info->addr_source_cleanup = ipmi_pci_cleanup; |
2500 | info->addr_source_data = pdev; | 2500 | info->addr_source_data = pdev; |
2501 | 2501 | ||
2502 | if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) { | 2502 | if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) { |
2503 | info->io_setup = port_setup; | 2503 | info->io_setup = port_setup; |
2504 | info->io.addr_type = IPMI_IO_ADDR_SPACE; | 2504 | info->io.addr_type = IPMI_IO_ADDR_SPACE; |
2505 | } else { | 2505 | } else { |
2506 | info->io_setup = mem_setup; | 2506 | info->io_setup = mem_setup; |
2507 | info->io.addr_type = IPMI_MEM_ADDR_SPACE; | 2507 | info->io.addr_type = IPMI_MEM_ADDR_SPACE; |
2508 | } | 2508 | } |
2509 | info->io.addr_data = pci_resource_start(pdev, 0); | 2509 | info->io.addr_data = pci_resource_start(pdev, 0); |
2510 | 2510 | ||
2511 | info->io.regspacing = ipmi_pci_probe_regspacing(info); | 2511 | info->io.regspacing = ipmi_pci_probe_regspacing(info); |
2512 | info->io.regsize = DEFAULT_REGSIZE; | 2512 | info->io.regsize = DEFAULT_REGSIZE; |
2513 | info->io.regshift = 0; | 2513 | info->io.regshift = 0; |
2514 | 2514 | ||
2515 | info->irq = pdev->irq; | 2515 | info->irq = pdev->irq; |
2516 | if (info->irq) | 2516 | if (info->irq) |
2517 | info->irq_setup = std_irq_setup; | 2517 | info->irq_setup = std_irq_setup; |
2518 | 2518 | ||
2519 | info->dev = &pdev->dev; | 2519 | info->dev = &pdev->dev; |
2520 | pci_set_drvdata(pdev, info); | 2520 | pci_set_drvdata(pdev, info); |
2521 | 2521 | ||
2522 | dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n", | 2522 | dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n", |
2523 | &pdev->resource[0], info->io.regsize, info->io.regspacing, | 2523 | &pdev->resource[0], info->io.regsize, info->io.regspacing, |
2524 | info->irq); | 2524 | info->irq); |
2525 | 2525 | ||
2526 | if (add_smi(info)) | 2526 | if (add_smi(info)) |
2527 | kfree(info); | 2527 | kfree(info); |
2528 | 2528 | ||
2529 | return 0; | 2529 | return 0; |
2530 | } | 2530 | } |
2531 | 2531 | ||
2532 | static void __devexit ipmi_pci_remove(struct pci_dev *pdev) | 2532 | static void __devexit ipmi_pci_remove(struct pci_dev *pdev) |
2533 | { | 2533 | { |
2534 | struct smi_info *info = pci_get_drvdata(pdev); | 2534 | struct smi_info *info = pci_get_drvdata(pdev); |
2535 | cleanup_one_si(info); | 2535 | cleanup_one_si(info); |
2536 | } | 2536 | } |
2537 | 2537 | ||
2538 | static struct pci_device_id ipmi_pci_devices[] = { | 2538 | static struct pci_device_id ipmi_pci_devices[] = { |
2539 | { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) }, | 2539 | { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) }, |
2540 | { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) }, | 2540 | { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) }, |
2541 | { 0, } | 2541 | { 0, } |
2542 | }; | 2542 | }; |
2543 | MODULE_DEVICE_TABLE(pci, ipmi_pci_devices); | 2543 | MODULE_DEVICE_TABLE(pci, ipmi_pci_devices); |
2544 | 2544 | ||
2545 | static struct pci_driver ipmi_pci_driver = { | 2545 | static struct pci_driver ipmi_pci_driver = { |
2546 | .name = DEVICE_NAME, | 2546 | .name = DEVICE_NAME, |
2547 | .id_table = ipmi_pci_devices, | 2547 | .id_table = ipmi_pci_devices, |
2548 | .probe = ipmi_pci_probe, | 2548 | .probe = ipmi_pci_probe, |
2549 | .remove = __devexit_p(ipmi_pci_remove), | 2549 | .remove = __devexit_p(ipmi_pci_remove), |
2550 | }; | 2550 | }; |
2551 | #endif /* CONFIG_PCI */ | 2551 | #endif /* CONFIG_PCI */ |
2552 | 2552 | ||
2553 | static struct of_device_id ipmi_match[]; | 2553 | static struct of_device_id ipmi_match[]; |
2554 | static int __devinit ipmi_probe(struct platform_device *dev) | 2554 | static int ipmi_probe(struct platform_device *dev) |
2555 | { | 2555 | { |
2556 | #ifdef CONFIG_OF | 2556 | #ifdef CONFIG_OF |
2557 | const struct of_device_id *match; | 2557 | const struct of_device_id *match; |
2558 | struct smi_info *info; | 2558 | struct smi_info *info; |
2559 | struct resource resource; | 2559 | struct resource resource; |
2560 | const __be32 *regsize, *regspacing, *regshift; | 2560 | const __be32 *regsize, *regspacing, *regshift; |
2561 | struct device_node *np = dev->dev.of_node; | 2561 | struct device_node *np = dev->dev.of_node; |
2562 | int ret; | 2562 | int ret; |
2563 | int proplen; | 2563 | int proplen; |
2564 | 2564 | ||
2565 | dev_info(&dev->dev, "probing via device tree\n"); | 2565 | dev_info(&dev->dev, "probing via device tree\n"); |
2566 | 2566 | ||
2567 | match = of_match_device(ipmi_match, &dev->dev); | 2567 | match = of_match_device(ipmi_match, &dev->dev); |
2568 | if (!match) | 2568 | if (!match) |
2569 | return -EINVAL; | 2569 | return -EINVAL; |
2570 | 2570 | ||
2571 | ret = of_address_to_resource(np, 0, &resource); | 2571 | ret = of_address_to_resource(np, 0, &resource); |
2572 | if (ret) { | 2572 | if (ret) { |
2573 | dev_warn(&dev->dev, PFX "invalid address from OF\n"); | 2573 | dev_warn(&dev->dev, PFX "invalid address from OF\n"); |
2574 | return ret; | 2574 | return ret; |
2575 | } | 2575 | } |
2576 | 2576 | ||
2577 | regsize = of_get_property(np, "reg-size", &proplen); | 2577 | regsize = of_get_property(np, "reg-size", &proplen); |
2578 | if (regsize && proplen != 4) { | 2578 | if (regsize && proplen != 4) { |
2579 | dev_warn(&dev->dev, PFX "invalid regsize from OF\n"); | 2579 | dev_warn(&dev->dev, PFX "invalid regsize from OF\n"); |
2580 | return -EINVAL; | 2580 | return -EINVAL; |
2581 | } | 2581 | } |
2582 | 2582 | ||
2583 | regspacing = of_get_property(np, "reg-spacing", &proplen); | 2583 | regspacing = of_get_property(np, "reg-spacing", &proplen); |
2584 | if (regspacing && proplen != 4) { | 2584 | if (regspacing && proplen != 4) { |
2585 | dev_warn(&dev->dev, PFX "invalid regspacing from OF\n"); | 2585 | dev_warn(&dev->dev, PFX "invalid regspacing from OF\n"); |
2586 | return -EINVAL; | 2586 | return -EINVAL; |
2587 | } | 2587 | } |
2588 | 2588 | ||
2589 | regshift = of_get_property(np, "reg-shift", &proplen); | 2589 | regshift = of_get_property(np, "reg-shift", &proplen); |
2590 | if (regshift && proplen != 4) { | 2590 | if (regshift && proplen != 4) { |
2591 | dev_warn(&dev->dev, PFX "invalid regshift from OF\n"); | 2591 | dev_warn(&dev->dev, PFX "invalid regshift from OF\n"); |
2592 | return -EINVAL; | 2592 | return -EINVAL; |
2593 | } | 2593 | } |
2594 | 2594 | ||
2595 | info = smi_info_alloc(); | 2595 | info = smi_info_alloc(); |
2596 | 2596 | ||
2597 | if (!info) { | 2597 | if (!info) { |
2598 | dev_err(&dev->dev, | 2598 | dev_err(&dev->dev, |
2599 | "could not allocate memory for OF probe\n"); | 2599 | "could not allocate memory for OF probe\n"); |
2600 | return -ENOMEM; | 2600 | return -ENOMEM; |
2601 | } | 2601 | } |
2602 | 2602 | ||
2603 | info->si_type = (enum si_type) match->data; | 2603 | info->si_type = (enum si_type) match->data; |
2604 | info->addr_source = SI_DEVICETREE; | 2604 | info->addr_source = SI_DEVICETREE; |
2605 | info->irq_setup = std_irq_setup; | 2605 | info->irq_setup = std_irq_setup; |
2606 | 2606 | ||
2607 | if (resource.flags & IORESOURCE_IO) { | 2607 | if (resource.flags & IORESOURCE_IO) { |
2608 | info->io_setup = port_setup; | 2608 | info->io_setup = port_setup; |
2609 | info->io.addr_type = IPMI_IO_ADDR_SPACE; | 2609 | info->io.addr_type = IPMI_IO_ADDR_SPACE; |
2610 | } else { | 2610 | } else { |
2611 | info->io_setup = mem_setup; | 2611 | info->io_setup = mem_setup; |
2612 | info->io.addr_type = IPMI_MEM_ADDR_SPACE; | 2612 | info->io.addr_type = IPMI_MEM_ADDR_SPACE; |
2613 | } | 2613 | } |
2614 | 2614 | ||
2615 | info->io.addr_data = resource.start; | 2615 | info->io.addr_data = resource.start; |
2616 | 2616 | ||
2617 | info->io.regsize = regsize ? be32_to_cpup(regsize) : DEFAULT_REGSIZE; | 2617 | info->io.regsize = regsize ? be32_to_cpup(regsize) : DEFAULT_REGSIZE; |
2618 | info->io.regspacing = regspacing ? be32_to_cpup(regspacing) : DEFAULT_REGSPACING; | 2618 | info->io.regspacing = regspacing ? be32_to_cpup(regspacing) : DEFAULT_REGSPACING; |
2619 | info->io.regshift = regshift ? be32_to_cpup(regshift) : 0; | 2619 | info->io.regshift = regshift ? be32_to_cpup(regshift) : 0; |
2620 | 2620 | ||
2621 | info->irq = irq_of_parse_and_map(dev->dev.of_node, 0); | 2621 | info->irq = irq_of_parse_and_map(dev->dev.of_node, 0); |
2622 | info->dev = &dev->dev; | 2622 | info->dev = &dev->dev; |
2623 | 2623 | ||
2624 | dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n", | 2624 | dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n", |
2625 | info->io.addr_data, info->io.regsize, info->io.regspacing, | 2625 | info->io.addr_data, info->io.regsize, info->io.regspacing, |
2626 | info->irq); | 2626 | info->irq); |
2627 | 2627 | ||
2628 | dev_set_drvdata(&dev->dev, info); | 2628 | dev_set_drvdata(&dev->dev, info); |
2629 | 2629 | ||
2630 | if (add_smi(info)) { | 2630 | if (add_smi(info)) { |
2631 | kfree(info); | 2631 | kfree(info); |
2632 | return -EBUSY; | 2632 | return -EBUSY; |
2633 | } | 2633 | } |
2634 | #endif | 2634 | #endif |
2635 | return 0; | 2635 | return 0; |
2636 | } | 2636 | } |
2637 | 2637 | ||
2638 | static int __devexit ipmi_remove(struct platform_device *dev) | 2638 | static int __devexit ipmi_remove(struct platform_device *dev) |
2639 | { | 2639 | { |
2640 | #ifdef CONFIG_OF | 2640 | #ifdef CONFIG_OF |
2641 | cleanup_one_si(dev_get_drvdata(&dev->dev)); | 2641 | cleanup_one_si(dev_get_drvdata(&dev->dev)); |
2642 | #endif | 2642 | #endif |
2643 | return 0; | 2643 | return 0; |
2644 | } | 2644 | } |
2645 | 2645 | ||
2646 | static struct of_device_id ipmi_match[] = | 2646 | static struct of_device_id ipmi_match[] = |
2647 | { | 2647 | { |
2648 | { .type = "ipmi", .compatible = "ipmi-kcs", | 2648 | { .type = "ipmi", .compatible = "ipmi-kcs", |
2649 | .data = (void *)(unsigned long) SI_KCS }, | 2649 | .data = (void *)(unsigned long) SI_KCS }, |
2650 | { .type = "ipmi", .compatible = "ipmi-smic", | 2650 | { .type = "ipmi", .compatible = "ipmi-smic", |
2651 | .data = (void *)(unsigned long) SI_SMIC }, | 2651 | .data = (void *)(unsigned long) SI_SMIC }, |
2652 | { .type = "ipmi", .compatible = "ipmi-bt", | 2652 | { .type = "ipmi", .compatible = "ipmi-bt", |
2653 | .data = (void *)(unsigned long) SI_BT }, | 2653 | .data = (void *)(unsigned long) SI_BT }, |
2654 | {}, | 2654 | {}, |
2655 | }; | 2655 | }; |
2656 | 2656 | ||
2657 | static struct platform_driver ipmi_driver = { | 2657 | static struct platform_driver ipmi_driver = { |
2658 | .driver = { | 2658 | .driver = { |
2659 | .name = DEVICE_NAME, | 2659 | .name = DEVICE_NAME, |
2660 | .owner = THIS_MODULE, | 2660 | .owner = THIS_MODULE, |
2661 | .of_match_table = ipmi_match, | 2661 | .of_match_table = ipmi_match, |
2662 | }, | 2662 | }, |
2663 | .probe = ipmi_probe, | 2663 | .probe = ipmi_probe, |
2664 | .remove = __devexit_p(ipmi_remove), | 2664 | .remove = __devexit_p(ipmi_remove), |
2665 | }; | 2665 | }; |
2666 | 2666 | ||
2667 | static int wait_for_msg_done(struct smi_info *smi_info) | 2667 | static int wait_for_msg_done(struct smi_info *smi_info) |
2668 | { | 2668 | { |
2669 | enum si_sm_result smi_result; | 2669 | enum si_sm_result smi_result; |
2670 | 2670 | ||
2671 | smi_result = smi_info->handlers->event(smi_info->si_sm, 0); | 2671 | smi_result = smi_info->handlers->event(smi_info->si_sm, 0); |
2672 | for (;;) { | 2672 | for (;;) { |
2673 | if (smi_result == SI_SM_CALL_WITH_DELAY || | 2673 | if (smi_result == SI_SM_CALL_WITH_DELAY || |
2674 | smi_result == SI_SM_CALL_WITH_TICK_DELAY) { | 2674 | smi_result == SI_SM_CALL_WITH_TICK_DELAY) { |
2675 | schedule_timeout_uninterruptible(1); | 2675 | schedule_timeout_uninterruptible(1); |
2676 | smi_result = smi_info->handlers->event( | 2676 | smi_result = smi_info->handlers->event( |
2677 | smi_info->si_sm, 100); | 2677 | smi_info->si_sm, 100); |
2678 | } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) { | 2678 | } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) { |
2679 | smi_result = smi_info->handlers->event( | 2679 | smi_result = smi_info->handlers->event( |
2680 | smi_info->si_sm, 0); | 2680 | smi_info->si_sm, 0); |
2681 | } else | 2681 | } else |
2682 | break; | 2682 | break; |
2683 | } | 2683 | } |
2684 | if (smi_result == SI_SM_HOSED) | 2684 | if (smi_result == SI_SM_HOSED) |
2685 | /* | 2685 | /* |
2686 | * We couldn't get the state machine to run, so whatever's at | 2686 | * We couldn't get the state machine to run, so whatever's at |
2687 | * the port is probably not an IPMI SMI interface. | 2687 | * the port is probably not an IPMI SMI interface. |
2688 | */ | 2688 | */ |
2689 | return -ENODEV; | 2689 | return -ENODEV; |
2690 | 2690 | ||
2691 | return 0; | 2691 | return 0; |
2692 | } | 2692 | } |
2693 | 2693 | ||
2694 | static int try_get_dev_id(struct smi_info *smi_info) | 2694 | static int try_get_dev_id(struct smi_info *smi_info) |
2695 | { | 2695 | { |
2696 | unsigned char msg[2]; | 2696 | unsigned char msg[2]; |
2697 | unsigned char *resp; | 2697 | unsigned char *resp; |
2698 | unsigned long resp_len; | 2698 | unsigned long resp_len; |
2699 | int rv = 0; | 2699 | int rv = 0; |
2700 | 2700 | ||
2701 | resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); | 2701 | resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); |
2702 | if (!resp) | 2702 | if (!resp) |
2703 | return -ENOMEM; | 2703 | return -ENOMEM; |
2704 | 2704 | ||
2705 | /* | 2705 | /* |
2706 | * Do a Get Device ID command, since it comes back with some | 2706 | * Do a Get Device ID command, since it comes back with some |
2707 | * useful info. | 2707 | * useful info. |
2708 | */ | 2708 | */ |
2709 | msg[0] = IPMI_NETFN_APP_REQUEST << 2; | 2709 | msg[0] = IPMI_NETFN_APP_REQUEST << 2; |
2710 | msg[1] = IPMI_GET_DEVICE_ID_CMD; | 2710 | msg[1] = IPMI_GET_DEVICE_ID_CMD; |
2711 | smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); | 2711 | smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); |
2712 | 2712 | ||
2713 | rv = wait_for_msg_done(smi_info); | 2713 | rv = wait_for_msg_done(smi_info); |
2714 | if (rv) | 2714 | if (rv) |
2715 | goto out; | 2715 | goto out; |
2716 | 2716 | ||
2717 | resp_len = smi_info->handlers->get_result(smi_info->si_sm, | 2717 | resp_len = smi_info->handlers->get_result(smi_info->si_sm, |
2718 | resp, IPMI_MAX_MSG_LENGTH); | 2718 | resp, IPMI_MAX_MSG_LENGTH); |
2719 | 2719 | ||
2720 | /* Check and record info from the get device id, in case we need it. */ | 2720 | /* Check and record info from the get device id, in case we need it. */ |
2721 | rv = ipmi_demangle_device_id(resp, resp_len, &smi_info->device_id); | 2721 | rv = ipmi_demangle_device_id(resp, resp_len, &smi_info->device_id); |
2722 | 2722 | ||
2723 | out: | 2723 | out: |
2724 | kfree(resp); | 2724 | kfree(resp); |
2725 | return rv; | 2725 | return rv; |
2726 | } | 2726 | } |
2727 | 2727 | ||
2728 | static int try_enable_event_buffer(struct smi_info *smi_info) | 2728 | static int try_enable_event_buffer(struct smi_info *smi_info) |
2729 | { | 2729 | { |
2730 | unsigned char msg[3]; | 2730 | unsigned char msg[3]; |
2731 | unsigned char *resp; | 2731 | unsigned char *resp; |
2732 | unsigned long resp_len; | 2732 | unsigned long resp_len; |
2733 | int rv = 0; | 2733 | int rv = 0; |
2734 | 2734 | ||
2735 | resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); | 2735 | resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); |
2736 | if (!resp) | 2736 | if (!resp) |
2737 | return -ENOMEM; | 2737 | return -ENOMEM; |
2738 | 2738 | ||
2739 | msg[0] = IPMI_NETFN_APP_REQUEST << 2; | 2739 | msg[0] = IPMI_NETFN_APP_REQUEST << 2; |
2740 | msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; | 2740 | msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; |
2741 | smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); | 2741 | smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); |
2742 | 2742 | ||
2743 | rv = wait_for_msg_done(smi_info); | 2743 | rv = wait_for_msg_done(smi_info); |
2744 | if (rv) { | 2744 | if (rv) { |
2745 | printk(KERN_WARNING PFX "Error getting response from get" | 2745 | printk(KERN_WARNING PFX "Error getting response from get" |
2746 | " global enables command, the event buffer is not" | 2746 | " global enables command, the event buffer is not" |
2747 | " enabled.\n"); | 2747 | " enabled.\n"); |
2748 | goto out; | 2748 | goto out; |
2749 | } | 2749 | } |
2750 | 2750 | ||
2751 | resp_len = smi_info->handlers->get_result(smi_info->si_sm, | 2751 | resp_len = smi_info->handlers->get_result(smi_info->si_sm, |
2752 | resp, IPMI_MAX_MSG_LENGTH); | 2752 | resp, IPMI_MAX_MSG_LENGTH); |
2753 | 2753 | ||
2754 | if (resp_len < 4 || | 2754 | if (resp_len < 4 || |
2755 | resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || | 2755 | resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || |
2756 | resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD || | 2756 | resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD || |
2757 | resp[2] != 0) { | 2757 | resp[2] != 0) { |
2758 | printk(KERN_WARNING PFX "Invalid return from get global" | 2758 | printk(KERN_WARNING PFX "Invalid return from get global" |
2759 | " enables command, cannot enable the event buffer.\n"); | 2759 | " enables command, cannot enable the event buffer.\n"); |
2760 | rv = -EINVAL; | 2760 | rv = -EINVAL; |
2761 | goto out; | 2761 | goto out; |
2762 | } | 2762 | } |
2763 | 2763 | ||
2764 | if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) | 2764 | if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) |
2765 | /* buffer is already enabled, nothing to do. */ | 2765 | /* buffer is already enabled, nothing to do. */ |
2766 | goto out; | 2766 | goto out; |
2767 | 2767 | ||
2768 | msg[0] = IPMI_NETFN_APP_REQUEST << 2; | 2768 | msg[0] = IPMI_NETFN_APP_REQUEST << 2; |
2769 | msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; | 2769 | msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; |
2770 | msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF; | 2770 | msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF; |
2771 | smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); | 2771 | smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); |
2772 | 2772 | ||
2773 | rv = wait_for_msg_done(smi_info); | 2773 | rv = wait_for_msg_done(smi_info); |
2774 | if (rv) { | 2774 | if (rv) { |
2775 | printk(KERN_WARNING PFX "Error getting response from set" | 2775 | printk(KERN_WARNING PFX "Error getting response from set" |
2776 | " global, enables command, the event buffer is not" | 2776 | " global, enables command, the event buffer is not" |
2777 | " enabled.\n"); | 2777 | " enabled.\n"); |
2778 | goto out; | 2778 | goto out; |
2779 | } | 2779 | } |
2780 | 2780 | ||
2781 | resp_len = smi_info->handlers->get_result(smi_info->si_sm, | 2781 | resp_len = smi_info->handlers->get_result(smi_info->si_sm, |
2782 | resp, IPMI_MAX_MSG_LENGTH); | 2782 | resp, IPMI_MAX_MSG_LENGTH); |
2783 | 2783 | ||
2784 | if (resp_len < 3 || | 2784 | if (resp_len < 3 || |
2785 | resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || | 2785 | resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || |
2786 | resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) { | 2786 | resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) { |
2787 | printk(KERN_WARNING PFX "Invalid return from get global," | 2787 | printk(KERN_WARNING PFX "Invalid return from get global," |
2788 | "enables command, not enable the event buffer.\n"); | 2788 | "enables command, not enable the event buffer.\n"); |
2789 | rv = -EINVAL; | 2789 | rv = -EINVAL; |
2790 | goto out; | 2790 | goto out; |
2791 | } | 2791 | } |
2792 | 2792 | ||
2793 | if (resp[2] != 0) | 2793 | if (resp[2] != 0) |
2794 | /* | 2794 | /* |
2795 | * An error when setting the event buffer bit means | 2795 | * An error when setting the event buffer bit means |
2796 | * that the event buffer is not supported. | 2796 | * that the event buffer is not supported. |
2797 | */ | 2797 | */ |
2798 | rv = -ENOENT; | 2798 | rv = -ENOENT; |
2799 | out: | 2799 | out: |
2800 | kfree(resp); | 2800 | kfree(resp); |
2801 | return rv; | 2801 | return rv; |
2802 | } | 2802 | } |
2803 | 2803 | ||
2804 | static int smi_type_proc_show(struct seq_file *m, void *v) | 2804 | static int smi_type_proc_show(struct seq_file *m, void *v) |
2805 | { | 2805 | { |
2806 | struct smi_info *smi = m->private; | 2806 | struct smi_info *smi = m->private; |
2807 | 2807 | ||
2808 | return seq_printf(m, "%s\n", si_to_str[smi->si_type]); | 2808 | return seq_printf(m, "%s\n", si_to_str[smi->si_type]); |
2809 | } | 2809 | } |
2810 | 2810 | ||
2811 | static int smi_type_proc_open(struct inode *inode, struct file *file) | 2811 | static int smi_type_proc_open(struct inode *inode, struct file *file) |
2812 | { | 2812 | { |
2813 | return single_open(file, smi_type_proc_show, PDE(inode)->data); | 2813 | return single_open(file, smi_type_proc_show, PDE(inode)->data); |
2814 | } | 2814 | } |
2815 | 2815 | ||
2816 | static const struct file_operations smi_type_proc_ops = { | 2816 | static const struct file_operations smi_type_proc_ops = { |
2817 | .open = smi_type_proc_open, | 2817 | .open = smi_type_proc_open, |
2818 | .read = seq_read, | 2818 | .read = seq_read, |
2819 | .llseek = seq_lseek, | 2819 | .llseek = seq_lseek, |
2820 | .release = single_release, | 2820 | .release = single_release, |
2821 | }; | 2821 | }; |
2822 | 2822 | ||
2823 | static int smi_si_stats_proc_show(struct seq_file *m, void *v) | 2823 | static int smi_si_stats_proc_show(struct seq_file *m, void *v) |
2824 | { | 2824 | { |
2825 | struct smi_info *smi = m->private; | 2825 | struct smi_info *smi = m->private; |
2826 | 2826 | ||
2827 | seq_printf(m, "interrupts_enabled: %d\n", | 2827 | seq_printf(m, "interrupts_enabled: %d\n", |
2828 | smi->irq && !smi->interrupt_disabled); | 2828 | smi->irq && !smi->interrupt_disabled); |
2829 | seq_printf(m, "short_timeouts: %u\n", | 2829 | seq_printf(m, "short_timeouts: %u\n", |
2830 | smi_get_stat(smi, short_timeouts)); | 2830 | smi_get_stat(smi, short_timeouts)); |
2831 | seq_printf(m, "long_timeouts: %u\n", | 2831 | seq_printf(m, "long_timeouts: %u\n", |
2832 | smi_get_stat(smi, long_timeouts)); | 2832 | smi_get_stat(smi, long_timeouts)); |
2833 | seq_printf(m, "idles: %u\n", | 2833 | seq_printf(m, "idles: %u\n", |
2834 | smi_get_stat(smi, idles)); | 2834 | smi_get_stat(smi, idles)); |
2835 | seq_printf(m, "interrupts: %u\n", | 2835 | seq_printf(m, "interrupts: %u\n", |
2836 | smi_get_stat(smi, interrupts)); | 2836 | smi_get_stat(smi, interrupts)); |
2837 | seq_printf(m, "attentions: %u\n", | 2837 | seq_printf(m, "attentions: %u\n", |
2838 | smi_get_stat(smi, attentions)); | 2838 | smi_get_stat(smi, attentions)); |
2839 | seq_printf(m, "flag_fetches: %u\n", | 2839 | seq_printf(m, "flag_fetches: %u\n", |
2840 | smi_get_stat(smi, flag_fetches)); | 2840 | smi_get_stat(smi, flag_fetches)); |
2841 | seq_printf(m, "hosed_count: %u\n", | 2841 | seq_printf(m, "hosed_count: %u\n", |
2842 | smi_get_stat(smi, hosed_count)); | 2842 | smi_get_stat(smi, hosed_count)); |
2843 | seq_printf(m, "complete_transactions: %u\n", | 2843 | seq_printf(m, "complete_transactions: %u\n", |
2844 | smi_get_stat(smi, complete_transactions)); | 2844 | smi_get_stat(smi, complete_transactions)); |
2845 | seq_printf(m, "events: %u\n", | 2845 | seq_printf(m, "events: %u\n", |
2846 | smi_get_stat(smi, events)); | 2846 | smi_get_stat(smi, events)); |
2847 | seq_printf(m, "watchdog_pretimeouts: %u\n", | 2847 | seq_printf(m, "watchdog_pretimeouts: %u\n", |
2848 | smi_get_stat(smi, watchdog_pretimeouts)); | 2848 | smi_get_stat(smi, watchdog_pretimeouts)); |
2849 | seq_printf(m, "incoming_messages: %u\n", | 2849 | seq_printf(m, "incoming_messages: %u\n", |
2850 | smi_get_stat(smi, incoming_messages)); | 2850 | smi_get_stat(smi, incoming_messages)); |
2851 | return 0; | 2851 | return 0; |
2852 | } | 2852 | } |
2853 | 2853 | ||
2854 | static int smi_si_stats_proc_open(struct inode *inode, struct file *file) | 2854 | static int smi_si_stats_proc_open(struct inode *inode, struct file *file) |
2855 | { | 2855 | { |
2856 | return single_open(file, smi_si_stats_proc_show, PDE(inode)->data); | 2856 | return single_open(file, smi_si_stats_proc_show, PDE(inode)->data); |
2857 | } | 2857 | } |
2858 | 2858 | ||
2859 | static const struct file_operations smi_si_stats_proc_ops = { | 2859 | static const struct file_operations smi_si_stats_proc_ops = { |
2860 | .open = smi_si_stats_proc_open, | 2860 | .open = smi_si_stats_proc_open, |
2861 | .read = seq_read, | 2861 | .read = seq_read, |
2862 | .llseek = seq_lseek, | 2862 | .llseek = seq_lseek, |
2863 | .release = single_release, | 2863 | .release = single_release, |
2864 | }; | 2864 | }; |
2865 | 2865 | ||
2866 | static int smi_params_proc_show(struct seq_file *m, void *v) | 2866 | static int smi_params_proc_show(struct seq_file *m, void *v) |
2867 | { | 2867 | { |
2868 | struct smi_info *smi = m->private; | 2868 | struct smi_info *smi = m->private; |
2869 | 2869 | ||
2870 | return seq_printf(m, | 2870 | return seq_printf(m, |
2871 | "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n", | 2871 | "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n", |
2872 | si_to_str[smi->si_type], | 2872 | si_to_str[smi->si_type], |
2873 | addr_space_to_str[smi->io.addr_type], | 2873 | addr_space_to_str[smi->io.addr_type], |
2874 | smi->io.addr_data, | 2874 | smi->io.addr_data, |
2875 | smi->io.regspacing, | 2875 | smi->io.regspacing, |
2876 | smi->io.regsize, | 2876 | smi->io.regsize, |
2877 | smi->io.regshift, | 2877 | smi->io.regshift, |
2878 | smi->irq, | 2878 | smi->irq, |
2879 | smi->slave_addr); | 2879 | smi->slave_addr); |
2880 | } | 2880 | } |
2881 | 2881 | ||
2882 | static int smi_params_proc_open(struct inode *inode, struct file *file) | 2882 | static int smi_params_proc_open(struct inode *inode, struct file *file) |
2883 | { | 2883 | { |
2884 | return single_open(file, smi_params_proc_show, PDE(inode)->data); | 2884 | return single_open(file, smi_params_proc_show, PDE(inode)->data); |
2885 | } | 2885 | } |
2886 | 2886 | ||
2887 | static const struct file_operations smi_params_proc_ops = { | 2887 | static const struct file_operations smi_params_proc_ops = { |
2888 | .open = smi_params_proc_open, | 2888 | .open = smi_params_proc_open, |
2889 | .read = seq_read, | 2889 | .read = seq_read, |
2890 | .llseek = seq_lseek, | 2890 | .llseek = seq_lseek, |
2891 | .release = single_release, | 2891 | .release = single_release, |
2892 | }; | 2892 | }; |
2893 | 2893 | ||
2894 | /* | 2894 | /* |
2895 | * oem_data_avail_to_receive_msg_avail | 2895 | * oem_data_avail_to_receive_msg_avail |
2896 | * @info - smi_info structure with msg_flags set | 2896 | * @info - smi_info structure with msg_flags set |
2897 | * | 2897 | * |
2898 | * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL | 2898 | * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL |
2899 | * Returns 1 indicating need to re-run handle_flags(). | 2899 | * Returns 1 indicating need to re-run handle_flags(). |
2900 | */ | 2900 | */ |
2901 | static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info) | 2901 | static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info) |
2902 | { | 2902 | { |
2903 | smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) | | 2903 | smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) | |
2904 | RECEIVE_MSG_AVAIL); | 2904 | RECEIVE_MSG_AVAIL); |
2905 | return 1; | 2905 | return 1; |
2906 | } | 2906 | } |
2907 | 2907 | ||
2908 | /* | 2908 | /* |
2909 | * setup_dell_poweredge_oem_data_handler | 2909 | * setup_dell_poweredge_oem_data_handler |
2910 | * @info - smi_info.device_id must be populated | 2910 | * @info - smi_info.device_id must be populated |
2911 | * | 2911 | * |
2912 | * Systems that match, but have firmware version < 1.40 may assert | 2912 | * Systems that match, but have firmware version < 1.40 may assert |
2913 | * OEM0_DATA_AVAIL on their own, without being told via Set Flags that | 2913 | * OEM0_DATA_AVAIL on their own, without being told via Set Flags that |
2914 | * it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL | 2914 | * it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL |
2915 | * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags | 2915 | * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags |
2916 | * as RECEIVE_MSG_AVAIL instead. | 2916 | * as RECEIVE_MSG_AVAIL instead. |
2917 | * | 2917 | * |
2918 | * As Dell has no plans to release IPMI 1.5 firmware that *ever* | 2918 | * As Dell has no plans to release IPMI 1.5 firmware that *ever* |
2919 | * asserts the OEM[012] bits, and if it did, the driver would have to | 2919 | * asserts the OEM[012] bits, and if it did, the driver would have to |
2920 | * change to handle that properly, we don't actually check for the | 2920 | * change to handle that properly, we don't actually check for the |
2921 | * firmware version. | 2921 | * firmware version. |
2922 | * Device ID = 0x20 BMC on PowerEdge 8G servers | 2922 | * Device ID = 0x20 BMC on PowerEdge 8G servers |
2923 | * Device Revision = 0x80 | 2923 | * Device Revision = 0x80 |
2924 | * Firmware Revision1 = 0x01 BMC version 1.40 | 2924 | * Firmware Revision1 = 0x01 BMC version 1.40 |
2925 | * Firmware Revision2 = 0x40 BCD encoded | 2925 | * Firmware Revision2 = 0x40 BCD encoded |
2926 | * IPMI Version = 0x51 IPMI 1.5 | 2926 | * IPMI Version = 0x51 IPMI 1.5 |
2927 | * Manufacturer ID = A2 02 00 Dell IANA | 2927 | * Manufacturer ID = A2 02 00 Dell IANA |
2928 | * | 2928 | * |
2929 | * Additionally, PowerEdge systems with IPMI < 1.5 may also assert | 2929 | * Additionally, PowerEdge systems with IPMI < 1.5 may also assert |
2930 | * OEM0_DATA_AVAIL, which needs to be treated as RECEIVE_MSG_AVAIL. | 2930 | * OEM0_DATA_AVAIL, which needs to be treated as RECEIVE_MSG_AVAIL. |
2931 | * | 2931 | * |
2932 | */ | 2932 | */ |
2933 | #define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20 | 2933 | #define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20 |
2934 | #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80 | 2934 | #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80 |
2935 | #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51 | 2935 | #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51 |
2936 | #define DELL_IANA_MFR_ID 0x0002a2 | 2936 | #define DELL_IANA_MFR_ID 0x0002a2 |
2937 | static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info) | 2937 | static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info) |
2938 | { | 2938 | { |
2939 | struct ipmi_device_id *id = &smi_info->device_id; | 2939 | struct ipmi_device_id *id = &smi_info->device_id; |
2940 | if (id->manufacturer_id == DELL_IANA_MFR_ID) { | 2940 | if (id->manufacturer_id == DELL_IANA_MFR_ID) { |
2941 | if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID && | 2941 | if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID && |
2942 | id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV && | 2942 | id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV && |
2943 | id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) { | 2943 | id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) { |
2944 | smi_info->oem_data_avail_handler = | 2944 | smi_info->oem_data_avail_handler = |
2945 | oem_data_avail_to_receive_msg_avail; | 2945 | oem_data_avail_to_receive_msg_avail; |
2946 | } else if (ipmi_version_major(id) < 1 || | 2946 | } else if (ipmi_version_major(id) < 1 || |
2947 | (ipmi_version_major(id) == 1 && | 2947 | (ipmi_version_major(id) == 1 && |
2948 | ipmi_version_minor(id) < 5)) { | 2948 | ipmi_version_minor(id) < 5)) { |
2949 | smi_info->oem_data_avail_handler = | 2949 | smi_info->oem_data_avail_handler = |
2950 | oem_data_avail_to_receive_msg_avail; | 2950 | oem_data_avail_to_receive_msg_avail; |
2951 | } | 2951 | } |
2952 | } | 2952 | } |
2953 | } | 2953 | } |
2954 | 2954 | ||
2955 | #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA | 2955 | #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA |
2956 | static void return_hosed_msg_badsize(struct smi_info *smi_info) | 2956 | static void return_hosed_msg_badsize(struct smi_info *smi_info) |
2957 | { | 2957 | { |
2958 | struct ipmi_smi_msg *msg = smi_info->curr_msg; | 2958 | struct ipmi_smi_msg *msg = smi_info->curr_msg; |
2959 | 2959 | ||
2960 | /* Make it a response */ | 2960 | /* Make it a response */ |
2961 | msg->rsp[0] = msg->data[0] | 4; | 2961 | msg->rsp[0] = msg->data[0] | 4; |
2962 | msg->rsp[1] = msg->data[1]; | 2962 | msg->rsp[1] = msg->data[1]; |
2963 | msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH; | 2963 | msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH; |
2964 | msg->rsp_size = 3; | 2964 | msg->rsp_size = 3; |
2965 | smi_info->curr_msg = NULL; | 2965 | smi_info->curr_msg = NULL; |
2966 | deliver_recv_msg(smi_info, msg); | 2966 | deliver_recv_msg(smi_info, msg); |
2967 | } | 2967 | } |
2968 | 2968 | ||
2969 | /* | 2969 | /* |
2970 | * dell_poweredge_bt_xaction_handler | 2970 | * dell_poweredge_bt_xaction_handler |
2971 | * @info - smi_info.device_id must be populated | 2971 | * @info - smi_info.device_id must be populated |
2972 | * | 2972 | * |
2973 | * Dell PowerEdge servers with the BT interface (x6xx and 1750) will | 2973 | * Dell PowerEdge servers with the BT interface (x6xx and 1750) will |
2974 | * not respond to a Get SDR command if the length of the data | 2974 | * not respond to a Get SDR command if the length of the data |
2975 | * requested is exactly 0x3A, which leads to command timeouts and no | 2975 | * requested is exactly 0x3A, which leads to command timeouts and no |
2976 | * data returned. This intercepts such commands, and causes userspace | 2976 | * data returned. This intercepts such commands, and causes userspace |
2977 | * callers to try again with a different-sized buffer, which succeeds. | 2977 | * callers to try again with a different-sized buffer, which succeeds. |
2978 | */ | 2978 | */ |
2979 | 2979 | ||
2980 | #define STORAGE_NETFN 0x0A | 2980 | #define STORAGE_NETFN 0x0A |
2981 | #define STORAGE_CMD_GET_SDR 0x23 | 2981 | #define STORAGE_CMD_GET_SDR 0x23 |
2982 | static int dell_poweredge_bt_xaction_handler(struct notifier_block *self, | 2982 | static int dell_poweredge_bt_xaction_handler(struct notifier_block *self, |
2983 | unsigned long unused, | 2983 | unsigned long unused, |
2984 | void *in) | 2984 | void *in) |
2985 | { | 2985 | { |
2986 | struct smi_info *smi_info = in; | 2986 | struct smi_info *smi_info = in; |
2987 | unsigned char *data = smi_info->curr_msg->data; | 2987 | unsigned char *data = smi_info->curr_msg->data; |
2988 | unsigned int size = smi_info->curr_msg->data_size; | 2988 | unsigned int size = smi_info->curr_msg->data_size; |
2989 | if (size >= 8 && | 2989 | if (size >= 8 && |
2990 | (data[0]>>2) == STORAGE_NETFN && | 2990 | (data[0]>>2) == STORAGE_NETFN && |
2991 | data[1] == STORAGE_CMD_GET_SDR && | 2991 | data[1] == STORAGE_CMD_GET_SDR && |
2992 | data[7] == 0x3A) { | 2992 | data[7] == 0x3A) { |
2993 | return_hosed_msg_badsize(smi_info); | 2993 | return_hosed_msg_badsize(smi_info); |
2994 | return NOTIFY_STOP; | 2994 | return NOTIFY_STOP; |
2995 | } | 2995 | } |
2996 | return NOTIFY_DONE; | 2996 | return NOTIFY_DONE; |
2997 | } | 2997 | } |
2998 | 2998 | ||
2999 | static struct notifier_block dell_poweredge_bt_xaction_notifier = { | 2999 | static struct notifier_block dell_poweredge_bt_xaction_notifier = { |
3000 | .notifier_call = dell_poweredge_bt_xaction_handler, | 3000 | .notifier_call = dell_poweredge_bt_xaction_handler, |
3001 | }; | 3001 | }; |
3002 | 3002 | ||
3003 | /* | 3003 | /* |
3004 | * setup_dell_poweredge_bt_xaction_handler | 3004 | * setup_dell_poweredge_bt_xaction_handler |
3005 | * @info - smi_info.device_id must be filled in already | 3005 | * @info - smi_info.device_id must be filled in already |
3006 | * | 3006 | * |
3007 | * Registers the Dell PowerEdge BT transaction notifier | 3007 | * Registers the Dell PowerEdge BT transaction notifier |
3008 | * when we know this interface needs it. | 3008 | * when we know this interface needs it. |
3009 | */ | 3009 | */ |
3010 | static void | 3010 | static void |
3011 | setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info) | 3011 | setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info) |
3012 | { | 3012 | { |
3013 | struct ipmi_device_id *id = &smi_info->device_id; | 3013 | struct ipmi_device_id *id = &smi_info->device_id; |
3014 | if (id->manufacturer_id == DELL_IANA_MFR_ID && | 3014 | if (id->manufacturer_id == DELL_IANA_MFR_ID && |
3015 | smi_info->si_type == SI_BT) | 3015 | smi_info->si_type == SI_BT) |
3016 | register_xaction_notifier(&dell_poweredge_bt_xaction_notifier); | 3016 | register_xaction_notifier(&dell_poweredge_bt_xaction_notifier); |
3017 | } | 3017 | } |
3018 | 3018 | ||
3019 | /* | 3019 | /* |
3020 | * setup_oem_data_handler | 3020 | * setup_oem_data_handler |
3021 | * @info - smi_info.device_id must be filled in already | 3021 | * @info - smi_info.device_id must be filled in already |
3022 | * | 3022 | * |
3023 | * Fills in smi_info.oem_data_avail_handler | 3023 | * Fills in smi_info.oem_data_avail_handler |
3024 | * when we know what function to use there. | 3024 | * when we know what function to use there. |
3025 | */ | 3025 | */ |
3026 | 3026 | ||
3027 | static void setup_oem_data_handler(struct smi_info *smi_info) | 3027 | static void setup_oem_data_handler(struct smi_info *smi_info) |
3028 | { | 3028 | { |
3029 | setup_dell_poweredge_oem_data_handler(smi_info); | 3029 | setup_dell_poweredge_oem_data_handler(smi_info); |
3030 | } | 3030 | } |
3031 | 3031 | ||
3032 | static void setup_xaction_handlers(struct smi_info *smi_info) | 3032 | static void setup_xaction_handlers(struct smi_info *smi_info) |
3033 | { | 3033 | { |
3034 | setup_dell_poweredge_bt_xaction_handler(smi_info); | 3034 | setup_dell_poweredge_bt_xaction_handler(smi_info); |
3035 | } | 3035 | } |
3036 | 3036 | ||
3037 | static inline void wait_for_timer_and_thread(struct smi_info *smi_info) | 3037 | static inline void wait_for_timer_and_thread(struct smi_info *smi_info) |
3038 | { | 3038 | { |
3039 | if (smi_info->intf) { | 3039 | if (smi_info->intf) { |
3040 | /* | 3040 | /* |
3041 | * The timer and thread are only running if the | 3041 | * The timer and thread are only running if the |
3042 | * interface has been started up and registered. | 3042 | * interface has been started up and registered. |
3043 | */ | 3043 | */ |
3044 | if (smi_info->thread != NULL) | 3044 | if (smi_info->thread != NULL) |
3045 | kthread_stop(smi_info->thread); | 3045 | kthread_stop(smi_info->thread); |
3046 | del_timer_sync(&smi_info->si_timer); | 3046 | del_timer_sync(&smi_info->si_timer); |
3047 | } | 3047 | } |
3048 | } | 3048 | } |
3049 | 3049 | ||
3050 | static __devinitdata struct ipmi_default_vals | 3050 | static __devinitdata struct ipmi_default_vals |
3051 | { | 3051 | { |
3052 | int type; | 3052 | int type; |
3053 | int port; | 3053 | int port; |
3054 | } ipmi_defaults[] = | 3054 | } ipmi_defaults[] = |
3055 | { | 3055 | { |
3056 | { .type = SI_KCS, .port = 0xca2 }, | 3056 | { .type = SI_KCS, .port = 0xca2 }, |
3057 | { .type = SI_SMIC, .port = 0xca9 }, | 3057 | { .type = SI_SMIC, .port = 0xca9 }, |
3058 | { .type = SI_BT, .port = 0xe4 }, | 3058 | { .type = SI_BT, .port = 0xe4 }, |
3059 | { .port = 0 } | 3059 | { .port = 0 } |
3060 | }; | 3060 | }; |
3061 | 3061 | ||
3062 | static void __devinit default_find_bmc(void) | 3062 | static void default_find_bmc(void) |
3063 | { | 3063 | { |
3064 | struct smi_info *info; | 3064 | struct smi_info *info; |
3065 | int i; | 3065 | int i; |
3066 | 3066 | ||
3067 | for (i = 0; ; i++) { | 3067 | for (i = 0; ; i++) { |
3068 | if (!ipmi_defaults[i].port) | 3068 | if (!ipmi_defaults[i].port) |
3069 | break; | 3069 | break; |
3070 | #ifdef CONFIG_PPC | 3070 | #ifdef CONFIG_PPC |
3071 | if (check_legacy_ioport(ipmi_defaults[i].port)) | 3071 | if (check_legacy_ioport(ipmi_defaults[i].port)) |
3072 | continue; | 3072 | continue; |
3073 | #endif | 3073 | #endif |
3074 | info = smi_info_alloc(); | 3074 | info = smi_info_alloc(); |
3075 | if (!info) | 3075 | if (!info) |
3076 | return; | 3076 | return; |
3077 | 3077 | ||
3078 | info->addr_source = SI_DEFAULT; | 3078 | info->addr_source = SI_DEFAULT; |
3079 | 3079 | ||
3080 | info->si_type = ipmi_defaults[i].type; | 3080 | info->si_type = ipmi_defaults[i].type; |
3081 | info->io_setup = port_setup; | 3081 | info->io_setup = port_setup; |
3082 | info->io.addr_data = ipmi_defaults[i].port; | 3082 | info->io.addr_data = ipmi_defaults[i].port; |
3083 | info->io.addr_type = IPMI_IO_ADDR_SPACE; | 3083 | info->io.addr_type = IPMI_IO_ADDR_SPACE; |
3084 | 3084 | ||
3085 | info->io.addr = NULL; | 3085 | info->io.addr = NULL; |
3086 | info->io.regspacing = DEFAULT_REGSPACING; | 3086 | info->io.regspacing = DEFAULT_REGSPACING; |
3087 | info->io.regsize = DEFAULT_REGSIZE; | 3087 | info->io.regsize = DEFAULT_REGSIZE; |
3088 | info->io.regshift = 0; | 3088 | info->io.regshift = 0; |
3089 | 3089 | ||
3090 | if (add_smi(info) == 0) { | 3090 | if (add_smi(info) == 0) { |
3091 | if ((try_smi_init(info)) == 0) { | 3091 | if ((try_smi_init(info)) == 0) { |
3092 | /* Found one... */ | 3092 | /* Found one... */ |
3093 | printk(KERN_INFO PFX "Found default %s" | 3093 | printk(KERN_INFO PFX "Found default %s" |
3094 | " state machine at %s address 0x%lx\n", | 3094 | " state machine at %s address 0x%lx\n", |
3095 | si_to_str[info->si_type], | 3095 | si_to_str[info->si_type], |
3096 | addr_space_to_str[info->io.addr_type], | 3096 | addr_space_to_str[info->io.addr_type], |
3097 | info->io.addr_data); | 3097 | info->io.addr_data); |
3098 | } else | 3098 | } else |
3099 | cleanup_one_si(info); | 3099 | cleanup_one_si(info); |
3100 | } else { | 3100 | } else { |
3101 | kfree(info); | 3101 | kfree(info); |
3102 | } | 3102 | } |
3103 | } | 3103 | } |
3104 | } | 3104 | } |
3105 | 3105 | ||
3106 | static int is_new_interface(struct smi_info *info) | 3106 | static int is_new_interface(struct smi_info *info) |
3107 | { | 3107 | { |
3108 | struct smi_info *e; | 3108 | struct smi_info *e; |
3109 | 3109 | ||
3110 | list_for_each_entry(e, &smi_infos, link) { | 3110 | list_for_each_entry(e, &smi_infos, link) { |
3111 | if (e->io.addr_type != info->io.addr_type) | 3111 | if (e->io.addr_type != info->io.addr_type) |
3112 | continue; | 3112 | continue; |
3113 | if (e->io.addr_data == info->io.addr_data) | 3113 | if (e->io.addr_data == info->io.addr_data) |
3114 | return 0; | 3114 | return 0; |
3115 | } | 3115 | } |
3116 | 3116 | ||
3117 | return 1; | 3117 | return 1; |
3118 | } | 3118 | } |
3119 | 3119 | ||
3120 | static int add_smi(struct smi_info *new_smi) | 3120 | static int add_smi(struct smi_info *new_smi) |
3121 | { | 3121 | { |
3122 | int rv = 0; | 3122 | int rv = 0; |
3123 | 3123 | ||
3124 | printk(KERN_INFO PFX "Adding %s-specified %s state machine", | 3124 | printk(KERN_INFO PFX "Adding %s-specified %s state machine", |
3125 | ipmi_addr_src_to_str[new_smi->addr_source], | 3125 | ipmi_addr_src_to_str[new_smi->addr_source], |
3126 | si_to_str[new_smi->si_type]); | 3126 | si_to_str[new_smi->si_type]); |
3127 | mutex_lock(&smi_infos_lock); | 3127 | mutex_lock(&smi_infos_lock); |
3128 | if (!is_new_interface(new_smi)) { | 3128 | if (!is_new_interface(new_smi)) { |
3129 | printk(KERN_CONT " duplicate interface\n"); | 3129 | printk(KERN_CONT " duplicate interface\n"); |
3130 | rv = -EBUSY; | 3130 | rv = -EBUSY; |
3131 | goto out_err; | 3131 | goto out_err; |
3132 | } | 3132 | } |
3133 | 3133 | ||
3134 | printk(KERN_CONT "\n"); | 3134 | printk(KERN_CONT "\n"); |
3135 | 3135 | ||
3136 | /* So we know not to free it unless we have allocated one. */ | 3136 | /* So we know not to free it unless we have allocated one. */ |
3137 | new_smi->intf = NULL; | 3137 | new_smi->intf = NULL; |
3138 | new_smi->si_sm = NULL; | 3138 | new_smi->si_sm = NULL; |
3139 | new_smi->handlers = NULL; | 3139 | new_smi->handlers = NULL; |
3140 | 3140 | ||
3141 | list_add_tail(&new_smi->link, &smi_infos); | 3141 | list_add_tail(&new_smi->link, &smi_infos); |
3142 | 3142 | ||
3143 | out_err: | 3143 | out_err: |
3144 | mutex_unlock(&smi_infos_lock); | 3144 | mutex_unlock(&smi_infos_lock); |
3145 | return rv; | 3145 | return rv; |
3146 | } | 3146 | } |
3147 | 3147 | ||
3148 | static int try_smi_init(struct smi_info *new_smi) | 3148 | static int try_smi_init(struct smi_info *new_smi) |
3149 | { | 3149 | { |
3150 | int rv = 0; | 3150 | int rv = 0; |
3151 | int i; | 3151 | int i; |
3152 | 3152 | ||
3153 | printk(KERN_INFO PFX "Trying %s-specified %s state" | 3153 | printk(KERN_INFO PFX "Trying %s-specified %s state" |
3154 | " machine at %s address 0x%lx, slave address 0x%x," | 3154 | " machine at %s address 0x%lx, slave address 0x%x," |
3155 | " irq %d\n", | 3155 | " irq %d\n", |
3156 | ipmi_addr_src_to_str[new_smi->addr_source], | 3156 | ipmi_addr_src_to_str[new_smi->addr_source], |
3157 | si_to_str[new_smi->si_type], | 3157 | si_to_str[new_smi->si_type], |
3158 | addr_space_to_str[new_smi->io.addr_type], | 3158 | addr_space_to_str[new_smi->io.addr_type], |
3159 | new_smi->io.addr_data, | 3159 | new_smi->io.addr_data, |
3160 | new_smi->slave_addr, new_smi->irq); | 3160 | new_smi->slave_addr, new_smi->irq); |
3161 | 3161 | ||
3162 | switch (new_smi->si_type) { | 3162 | switch (new_smi->si_type) { |
3163 | case SI_KCS: | 3163 | case SI_KCS: |
3164 | new_smi->handlers = &kcs_smi_handlers; | 3164 | new_smi->handlers = &kcs_smi_handlers; |
3165 | break; | 3165 | break; |
3166 | 3166 | ||
3167 | case SI_SMIC: | 3167 | case SI_SMIC: |
3168 | new_smi->handlers = &smic_smi_handlers; | 3168 | new_smi->handlers = &smic_smi_handlers; |
3169 | break; | 3169 | break; |
3170 | 3170 | ||
3171 | case SI_BT: | 3171 | case SI_BT: |
3172 | new_smi->handlers = &bt_smi_handlers; | 3172 | new_smi->handlers = &bt_smi_handlers; |
3173 | break; | 3173 | break; |
3174 | 3174 | ||
3175 | default: | 3175 | default: |
3176 | /* No support for anything else yet. */ | 3176 | /* No support for anything else yet. */ |
3177 | rv = -EIO; | 3177 | rv = -EIO; |
3178 | goto out_err; | 3178 | goto out_err; |
3179 | } | 3179 | } |
3180 | 3180 | ||
3181 | /* Allocate the state machine's data and initialize it. */ | 3181 | /* Allocate the state machine's data and initialize it. */ |
3182 | new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); | 3182 | new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); |
3183 | if (!new_smi->si_sm) { | 3183 | if (!new_smi->si_sm) { |
3184 | printk(KERN_ERR PFX | 3184 | printk(KERN_ERR PFX |
3185 | "Could not allocate state machine memory\n"); | 3185 | "Could not allocate state machine memory\n"); |
3186 | rv = -ENOMEM; | 3186 | rv = -ENOMEM; |
3187 | goto out_err; | 3187 | goto out_err; |
3188 | } | 3188 | } |
3189 | new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm, | 3189 | new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm, |
3190 | &new_smi->io); | 3190 | &new_smi->io); |
3191 | 3191 | ||
3192 | /* Now that we know the I/O size, we can set up the I/O. */ | 3192 | /* Now that we know the I/O size, we can set up the I/O. */ |
3193 | rv = new_smi->io_setup(new_smi); | 3193 | rv = new_smi->io_setup(new_smi); |
3194 | if (rv) { | 3194 | if (rv) { |
3195 | printk(KERN_ERR PFX "Could not set up I/O space\n"); | 3195 | printk(KERN_ERR PFX "Could not set up I/O space\n"); |
3196 | goto out_err; | 3196 | goto out_err; |
3197 | } | 3197 | } |
3198 | 3198 | ||
3199 | /* Do low-level detection first. */ | 3199 | /* Do low-level detection first. */ |
3200 | if (new_smi->handlers->detect(new_smi->si_sm)) { | 3200 | if (new_smi->handlers->detect(new_smi->si_sm)) { |
3201 | if (new_smi->addr_source) | 3201 | if (new_smi->addr_source) |
3202 | printk(KERN_INFO PFX "Interface detection failed\n"); | 3202 | printk(KERN_INFO PFX "Interface detection failed\n"); |
3203 | rv = -ENODEV; | 3203 | rv = -ENODEV; |
3204 | goto out_err; | 3204 | goto out_err; |
3205 | } | 3205 | } |
3206 | 3206 | ||
3207 | /* | 3207 | /* |
3208 | * Attempt a get device id command. If it fails, we probably | 3208 | * Attempt a get device id command. If it fails, we probably |
3209 | * don't have a BMC here. | 3209 | * don't have a BMC here. |
3210 | */ | 3210 | */ |
3211 | rv = try_get_dev_id(new_smi); | 3211 | rv = try_get_dev_id(new_smi); |
3212 | if (rv) { | 3212 | if (rv) { |
3213 | if (new_smi->addr_source) | 3213 | if (new_smi->addr_source) |
3214 | printk(KERN_INFO PFX "There appears to be no BMC" | 3214 | printk(KERN_INFO PFX "There appears to be no BMC" |
3215 | " at this location\n"); | 3215 | " at this location\n"); |
3216 | goto out_err; | 3216 | goto out_err; |
3217 | } | 3217 | } |
3218 | 3218 | ||
3219 | setup_oem_data_handler(new_smi); | 3219 | setup_oem_data_handler(new_smi); |
3220 | setup_xaction_handlers(new_smi); | 3220 | setup_xaction_handlers(new_smi); |
3221 | 3221 | ||
3222 | INIT_LIST_HEAD(&(new_smi->xmit_msgs)); | 3222 | INIT_LIST_HEAD(&(new_smi->xmit_msgs)); |
3223 | INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs)); | 3223 | INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs)); |
3224 | new_smi->curr_msg = NULL; | 3224 | new_smi->curr_msg = NULL; |
3225 | atomic_set(&new_smi->req_events, 0); | 3225 | atomic_set(&new_smi->req_events, 0); |
3226 | new_smi->run_to_completion = 0; | 3226 | new_smi->run_to_completion = 0; |
3227 | for (i = 0; i < SI_NUM_STATS; i++) | 3227 | for (i = 0; i < SI_NUM_STATS; i++) |
3228 | atomic_set(&new_smi->stats[i], 0); | 3228 | atomic_set(&new_smi->stats[i], 0); |
3229 | 3229 | ||
3230 | new_smi->interrupt_disabled = 1; | 3230 | new_smi->interrupt_disabled = 1; |
3231 | atomic_set(&new_smi->stop_operation, 0); | 3231 | atomic_set(&new_smi->stop_operation, 0); |
3232 | new_smi->intf_num = smi_num; | 3232 | new_smi->intf_num = smi_num; |
3233 | smi_num++; | 3233 | smi_num++; |
3234 | 3234 | ||
3235 | rv = try_enable_event_buffer(new_smi); | 3235 | rv = try_enable_event_buffer(new_smi); |
3236 | if (rv == 0) | 3236 | if (rv == 0) |
3237 | new_smi->has_event_buffer = 1; | 3237 | new_smi->has_event_buffer = 1; |
3238 | 3238 | ||
3239 | /* | 3239 | /* |
3240 | * Start clearing the flags before we enable interrupts or the | 3240 | * Start clearing the flags before we enable interrupts or the |
3241 | * timer to avoid racing with the timer. | 3241 | * timer to avoid racing with the timer. |
3242 | */ | 3242 | */ |
3243 | start_clear_flags(new_smi); | 3243 | start_clear_flags(new_smi); |
3244 | /* IRQ is defined to be set when non-zero. */ | 3244 | /* IRQ is defined to be set when non-zero. */ |
3245 | if (new_smi->irq) | 3245 | if (new_smi->irq) |
3246 | new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ; | 3246 | new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ; |
3247 | 3247 | ||
3248 | if (!new_smi->dev) { | 3248 | if (!new_smi->dev) { |
3249 | /* | 3249 | /* |
3250 | * If we don't already have a device from something | 3250 | * If we don't already have a device from something |
3251 | * else (like PCI), then register a new one. | 3251 | * else (like PCI), then register a new one. |
3252 | */ | 3252 | */ |
3253 | new_smi->pdev = platform_device_alloc("ipmi_si", | 3253 | new_smi->pdev = platform_device_alloc("ipmi_si", |
3254 | new_smi->intf_num); | 3254 | new_smi->intf_num); |
3255 | if (!new_smi->pdev) { | 3255 | if (!new_smi->pdev) { |
3256 | printk(KERN_ERR PFX | 3256 | printk(KERN_ERR PFX |
3257 | "Unable to allocate platform device\n"); | 3257 | "Unable to allocate platform device\n"); |
3258 | goto out_err; | 3258 | goto out_err; |
3259 | } | 3259 | } |
3260 | new_smi->dev = &new_smi->pdev->dev; | 3260 | new_smi->dev = &new_smi->pdev->dev; |
3261 | new_smi->dev->driver = &ipmi_driver.driver; | 3261 | new_smi->dev->driver = &ipmi_driver.driver; |
3262 | 3262 | ||
3263 | rv = platform_device_add(new_smi->pdev); | 3263 | rv = platform_device_add(new_smi->pdev); |
3264 | if (rv) { | 3264 | if (rv) { |
3265 | printk(KERN_ERR PFX | 3265 | printk(KERN_ERR PFX |
3266 | "Unable to register system interface device:" | 3266 | "Unable to register system interface device:" |
3267 | " %d\n", | 3267 | " %d\n", |
3268 | rv); | 3268 | rv); |
3269 | goto out_err; | 3269 | goto out_err; |
3270 | } | 3270 | } |
3271 | new_smi->dev_registered = 1; | 3271 | new_smi->dev_registered = 1; |
3272 | } | 3272 | } |
3273 | 3273 | ||
3274 | rv = ipmi_register_smi(&handlers, | 3274 | rv = ipmi_register_smi(&handlers, |
3275 | new_smi, | 3275 | new_smi, |
3276 | &new_smi->device_id, | 3276 | &new_smi->device_id, |
3277 | new_smi->dev, | 3277 | new_smi->dev, |
3278 | "bmc", | 3278 | "bmc", |
3279 | new_smi->slave_addr); | 3279 | new_smi->slave_addr); |
3280 | if (rv) { | 3280 | if (rv) { |
3281 | dev_err(new_smi->dev, "Unable to register device: error %d\n", | 3281 | dev_err(new_smi->dev, "Unable to register device: error %d\n", |
3282 | rv); | 3282 | rv); |
3283 | goto out_err_stop_timer; | 3283 | goto out_err_stop_timer; |
3284 | } | 3284 | } |
3285 | 3285 | ||
3286 | rv = ipmi_smi_add_proc_entry(new_smi->intf, "type", | 3286 | rv = ipmi_smi_add_proc_entry(new_smi->intf, "type", |
3287 | &smi_type_proc_ops, | 3287 | &smi_type_proc_ops, |
3288 | new_smi); | 3288 | new_smi); |
3289 | if (rv) { | 3289 | if (rv) { |
3290 | dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); | 3290 | dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); |
3291 | goto out_err_stop_timer; | 3291 | goto out_err_stop_timer; |
3292 | } | 3292 | } |
3293 | 3293 | ||
3294 | rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats", | 3294 | rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats", |
3295 | &smi_si_stats_proc_ops, | 3295 | &smi_si_stats_proc_ops, |
3296 | new_smi); | 3296 | new_smi); |
3297 | if (rv) { | 3297 | if (rv) { |
3298 | dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); | 3298 | dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); |
3299 | goto out_err_stop_timer; | 3299 | goto out_err_stop_timer; |
3300 | } | 3300 | } |
3301 | 3301 | ||
3302 | rv = ipmi_smi_add_proc_entry(new_smi->intf, "params", | 3302 | rv = ipmi_smi_add_proc_entry(new_smi->intf, "params", |
3303 | &smi_params_proc_ops, | 3303 | &smi_params_proc_ops, |
3304 | new_smi); | 3304 | new_smi); |
3305 | if (rv) { | 3305 | if (rv) { |
3306 | dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); | 3306 | dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); |
3307 | goto out_err_stop_timer; | 3307 | goto out_err_stop_timer; |
3308 | } | 3308 | } |
3309 | 3309 | ||
3310 | dev_info(new_smi->dev, "IPMI %s interface initialized\n", | 3310 | dev_info(new_smi->dev, "IPMI %s interface initialized\n", |
3311 | si_to_str[new_smi->si_type]); | 3311 | si_to_str[new_smi->si_type]); |
3312 | 3312 | ||
3313 | return 0; | 3313 | return 0; |
3314 | 3314 | ||
3315 | out_err_stop_timer: | 3315 | out_err_stop_timer: |
3316 | atomic_inc(&new_smi->stop_operation); | 3316 | atomic_inc(&new_smi->stop_operation); |
3317 | wait_for_timer_and_thread(new_smi); | 3317 | wait_for_timer_and_thread(new_smi); |
3318 | 3318 | ||
3319 | out_err: | 3319 | out_err: |
3320 | new_smi->interrupt_disabled = 1; | 3320 | new_smi->interrupt_disabled = 1; |
3321 | 3321 | ||
3322 | if (new_smi->intf) { | 3322 | if (new_smi->intf) { |
3323 | ipmi_unregister_smi(new_smi->intf); | 3323 | ipmi_unregister_smi(new_smi->intf); |
3324 | new_smi->intf = NULL; | 3324 | new_smi->intf = NULL; |
3325 | } | 3325 | } |
3326 | 3326 | ||
3327 | if (new_smi->irq_cleanup) { | 3327 | if (new_smi->irq_cleanup) { |
3328 | new_smi->irq_cleanup(new_smi); | 3328 | new_smi->irq_cleanup(new_smi); |
3329 | new_smi->irq_cleanup = NULL; | 3329 | new_smi->irq_cleanup = NULL; |
3330 | } | 3330 | } |
3331 | 3331 | ||
3332 | /* | 3332 | /* |
3333 | * Wait until we know that we are out of any interrupt | 3333 | * Wait until we know that we are out of any interrupt |
3334 | * handlers that might have been running before we freed the | 3334 | * handlers that might have been running before we freed the |
3335 | * interrupt. | 3335 | * interrupt. |
3336 | */ | 3336 | */ |
3337 | synchronize_sched(); | 3337 | synchronize_sched(); |
3338 | 3338 | ||
3339 | if (new_smi->si_sm) { | 3339 | if (new_smi->si_sm) { |
3340 | if (new_smi->handlers) | 3340 | if (new_smi->handlers) |
3341 | new_smi->handlers->cleanup(new_smi->si_sm); | 3341 | new_smi->handlers->cleanup(new_smi->si_sm); |
3342 | kfree(new_smi->si_sm); | 3342 | kfree(new_smi->si_sm); |
3343 | new_smi->si_sm = NULL; | 3343 | new_smi->si_sm = NULL; |
3344 | } | 3344 | } |
3345 | if (new_smi->addr_source_cleanup) { | 3345 | if (new_smi->addr_source_cleanup) { |
3346 | new_smi->addr_source_cleanup(new_smi); | 3346 | new_smi->addr_source_cleanup(new_smi); |
3347 | new_smi->addr_source_cleanup = NULL; | 3347 | new_smi->addr_source_cleanup = NULL; |
3348 | } | 3348 | } |
3349 | if (new_smi->io_cleanup) { | 3349 | if (new_smi->io_cleanup) { |
3350 | new_smi->io_cleanup(new_smi); | 3350 | new_smi->io_cleanup(new_smi); |
3351 | new_smi->io_cleanup = NULL; | 3351 | new_smi->io_cleanup = NULL; |
3352 | } | 3352 | } |
3353 | 3353 | ||
3354 | if (new_smi->dev_registered) { | 3354 | if (new_smi->dev_registered) { |
3355 | platform_device_unregister(new_smi->pdev); | 3355 | platform_device_unregister(new_smi->pdev); |
3356 | new_smi->dev_registered = 0; | 3356 | new_smi->dev_registered = 0; |
3357 | } | 3357 | } |
3358 | 3358 | ||
3359 | return rv; | 3359 | return rv; |
3360 | } | 3360 | } |
3361 | 3361 | ||
3362 | static int __devinit init_ipmi_si(void) | 3362 | static int init_ipmi_si(void) |
3363 | { | 3363 | { |
3364 | int i; | 3364 | int i; |
3365 | char *str; | 3365 | char *str; |
3366 | int rv; | 3366 | int rv; |
3367 | struct smi_info *e; | 3367 | struct smi_info *e; |
3368 | enum ipmi_addr_src type = SI_INVALID; | 3368 | enum ipmi_addr_src type = SI_INVALID; |
3369 | 3369 | ||
3370 | if (initialized) | 3370 | if (initialized) |
3371 | return 0; | 3371 | return 0; |
3372 | initialized = 1; | 3372 | initialized = 1; |
3373 | 3373 | ||
3374 | rv = platform_driver_register(&ipmi_driver); | 3374 | rv = platform_driver_register(&ipmi_driver); |
3375 | if (rv) { | 3375 | if (rv) { |
3376 | printk(KERN_ERR PFX "Unable to register driver: %d\n", rv); | 3376 | printk(KERN_ERR PFX "Unable to register driver: %d\n", rv); |
3377 | return rv; | 3377 | return rv; |
3378 | } | 3378 | } |
3379 | 3379 | ||
3380 | 3380 | ||
3381 | /* Parse out the si_type string into its components. */ | 3381 | /* Parse out the si_type string into its components. */ |
3382 | str = si_type_str; | 3382 | str = si_type_str; |
3383 | if (*str != '\0') { | 3383 | if (*str != '\0') { |
3384 | for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) { | 3384 | for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) { |
3385 | si_type[i] = str; | 3385 | si_type[i] = str; |
3386 | str = strchr(str, ','); | 3386 | str = strchr(str, ','); |
3387 | if (str) { | 3387 | if (str) { |
3388 | *str = '\0'; | 3388 | *str = '\0'; |
3389 | str++; | 3389 | str++; |
3390 | } else { | 3390 | } else { |
3391 | break; | 3391 | break; |
3392 | } | 3392 | } |
3393 | } | 3393 | } |
3394 | } | 3394 | } |
3395 | 3395 | ||
3396 | printk(KERN_INFO "IPMI System Interface driver.\n"); | 3396 | printk(KERN_INFO "IPMI System Interface driver.\n"); |
3397 | 3397 | ||
3398 | /* If the user gave us a device, they presumably want us to use it */ | 3398 | /* If the user gave us a device, they presumably want us to use it */ |
3399 | if (!hardcode_find_bmc()) | 3399 | if (!hardcode_find_bmc()) |
3400 | return 0; | 3400 | return 0; |
3401 | 3401 | ||
3402 | #ifdef CONFIG_PCI | 3402 | #ifdef CONFIG_PCI |
3403 | rv = pci_register_driver(&ipmi_pci_driver); | 3403 | rv = pci_register_driver(&ipmi_pci_driver); |
3404 | if (rv) | 3404 | if (rv) |
3405 | printk(KERN_ERR PFX "Unable to register PCI driver: %d\n", rv); | 3405 | printk(KERN_ERR PFX "Unable to register PCI driver: %d\n", rv); |
3406 | else | 3406 | else |
3407 | pci_registered = 1; | 3407 | pci_registered = 1; |
3408 | #endif | 3408 | #endif |
3409 | 3409 | ||
3410 | #ifdef CONFIG_ACPI | 3410 | #ifdef CONFIG_ACPI |
3411 | pnp_register_driver(&ipmi_pnp_driver); | 3411 | pnp_register_driver(&ipmi_pnp_driver); |
3412 | pnp_registered = 1; | 3412 | pnp_registered = 1; |
3413 | #endif | 3413 | #endif |
3414 | 3414 | ||
3415 | #ifdef CONFIG_DMI | 3415 | #ifdef CONFIG_DMI |
3416 | dmi_find_bmc(); | 3416 | dmi_find_bmc(); |
3417 | #endif | 3417 | #endif |
3418 | 3418 | ||
3419 | #ifdef CONFIG_ACPI | 3419 | #ifdef CONFIG_ACPI |
3420 | spmi_find_bmc(); | 3420 | spmi_find_bmc(); |
3421 | #endif | 3421 | #endif |
3422 | 3422 | ||
3423 | /* We prefer devices with interrupts, but in the case of a machine | 3423 | /* We prefer devices with interrupts, but in the case of a machine |
3424 | with multiple BMCs we assume that there will be several instances | 3424 | with multiple BMCs we assume that there will be several instances |
3425 | of a given type, so if we succeed in registering one type, then | 3425 | of a given type, so if we succeed in registering one type, then |
3426 | also try to register everything else of the same type. */ | 3426 | also try to register everything else of the same type. */ |
3427 | 3427 | ||
3428 | mutex_lock(&smi_infos_lock); | 3428 | mutex_lock(&smi_infos_lock); |
3429 | list_for_each_entry(e, &smi_infos, link) { | 3429 | list_for_each_entry(e, &smi_infos, link) { |
3430 | /* Try to register a device if it has an IRQ and we either | 3430 | /* Try to register a device if it has an IRQ and we either |
3431 | haven't successfully registered a device yet or this | 3431 | haven't successfully registered a device yet or this |
3432 | device has the same type as one we successfully registered */ | 3432 | device has the same type as one we successfully registered */ |
3433 | if (e->irq && (!type || e->addr_source == type)) { | 3433 | if (e->irq && (!type || e->addr_source == type)) { |
3434 | if (!try_smi_init(e)) { | 3434 | if (!try_smi_init(e)) { |
3435 | type = e->addr_source; | 3435 | type = e->addr_source; |
3436 | } | 3436 | } |
3437 | } | 3437 | } |
3438 | } | 3438 | } |
3439 | 3439 | ||
3440 | /* type will only have been set if we successfully registered an si */ | 3440 | /* type will only have been set if we successfully registered an si */ |
3441 | if (type) { | 3441 | if (type) { |
3442 | mutex_unlock(&smi_infos_lock); | 3442 | mutex_unlock(&smi_infos_lock); |
3443 | return 0; | 3443 | return 0; |
3444 | } | 3444 | } |
3445 | 3445 | ||
3446 | /* Fall back to the preferred device */ | 3446 | /* Fall back to the preferred device */ |
3447 | 3447 | ||
3448 | list_for_each_entry(e, &smi_infos, link) { | 3448 | list_for_each_entry(e, &smi_infos, link) { |
3449 | if (!e->irq && (!type || e->addr_source == type)) { | 3449 | if (!e->irq && (!type || e->addr_source == type)) { |
3450 | if (!try_smi_init(e)) { | 3450 | if (!try_smi_init(e)) { |
3451 | type = e->addr_source; | 3451 | type = e->addr_source; |
3452 | } | 3452 | } |
3453 | } | 3453 | } |
3454 | } | 3454 | } |
3455 | mutex_unlock(&smi_infos_lock); | 3455 | mutex_unlock(&smi_infos_lock); |
3456 | 3456 | ||
3457 | if (type) | 3457 | if (type) |
3458 | return 0; | 3458 | return 0; |
3459 | 3459 | ||
3460 | if (si_trydefaults) { | 3460 | if (si_trydefaults) { |
3461 | mutex_lock(&smi_infos_lock); | 3461 | mutex_lock(&smi_infos_lock); |
3462 | if (list_empty(&smi_infos)) { | 3462 | if (list_empty(&smi_infos)) { |
3463 | /* No BMC was found, try defaults. */ | 3463 | /* No BMC was found, try defaults. */ |
3464 | mutex_unlock(&smi_infos_lock); | 3464 | mutex_unlock(&smi_infos_lock); |
3465 | default_find_bmc(); | 3465 | default_find_bmc(); |
3466 | } else | 3466 | } else |
3467 | mutex_unlock(&smi_infos_lock); | 3467 | mutex_unlock(&smi_infos_lock); |
3468 | } | 3468 | } |
3469 | 3469 | ||
3470 | mutex_lock(&smi_infos_lock); | 3470 | mutex_lock(&smi_infos_lock); |
3471 | if (unload_when_empty && list_empty(&smi_infos)) { | 3471 | if (unload_when_empty && list_empty(&smi_infos)) { |
3472 | mutex_unlock(&smi_infos_lock); | 3472 | mutex_unlock(&smi_infos_lock); |
3473 | cleanup_ipmi_si(); | 3473 | cleanup_ipmi_si(); |
3474 | printk(KERN_WARNING PFX | 3474 | printk(KERN_WARNING PFX |
3475 | "Unable to find any System Interface(s)\n"); | 3475 | "Unable to find any System Interface(s)\n"); |
3476 | return -ENODEV; | 3476 | return -ENODEV; |
3477 | } else { | 3477 | } else { |
3478 | mutex_unlock(&smi_infos_lock); | 3478 | mutex_unlock(&smi_infos_lock); |
3479 | return 0; | 3479 | return 0; |
3480 | } | 3480 | } |
3481 | } | 3481 | } |
3482 | module_init(init_ipmi_si); | 3482 | module_init(init_ipmi_si); |
3483 | 3483 | ||
3484 | static void cleanup_one_si(struct smi_info *to_clean) | 3484 | static void cleanup_one_si(struct smi_info *to_clean) |
3485 | { | 3485 | { |
3486 | int rv = 0; | 3486 | int rv = 0; |
3487 | unsigned long flags; | 3487 | unsigned long flags; |
3488 | 3488 | ||
3489 | if (!to_clean) | 3489 | if (!to_clean) |
3490 | return; | 3490 | return; |
3491 | 3491 | ||
3492 | list_del(&to_clean->link); | 3492 | list_del(&to_clean->link); |
3493 | 3493 | ||
3494 | /* Tell the driver that we are shutting down. */ | 3494 | /* Tell the driver that we are shutting down. */ |
3495 | atomic_inc(&to_clean->stop_operation); | 3495 | atomic_inc(&to_clean->stop_operation); |
3496 | 3496 | ||
3497 | /* | 3497 | /* |
3498 | * Make sure the timer and thread are stopped and will not run | 3498 | * Make sure the timer and thread are stopped and will not run |
3499 | * again. | 3499 | * again. |
3500 | */ | 3500 | */ |
3501 | wait_for_timer_and_thread(to_clean); | 3501 | wait_for_timer_and_thread(to_clean); |
3502 | 3502 | ||
3503 | /* | 3503 | /* |
3504 | * Timeouts are stopped, now make sure the interrupts are off | 3504 | * Timeouts are stopped, now make sure the interrupts are off |
3505 | * for the device. A little tricky with locks to make sure | 3505 | * for the device. A little tricky with locks to make sure |
3506 | * there are no races. | 3506 | * there are no races. |
3507 | */ | 3507 | */ |
3508 | spin_lock_irqsave(&to_clean->si_lock, flags); | 3508 | spin_lock_irqsave(&to_clean->si_lock, flags); |
3509 | while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { | 3509 | while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { |
3510 | spin_unlock_irqrestore(&to_clean->si_lock, flags); | 3510 | spin_unlock_irqrestore(&to_clean->si_lock, flags); |
3511 | poll(to_clean); | 3511 | poll(to_clean); |
3512 | schedule_timeout_uninterruptible(1); | 3512 | schedule_timeout_uninterruptible(1); |
3513 | spin_lock_irqsave(&to_clean->si_lock, flags); | 3513 | spin_lock_irqsave(&to_clean->si_lock, flags); |
3514 | } | 3514 | } |
3515 | disable_si_irq(to_clean); | 3515 | disable_si_irq(to_clean); |
3516 | spin_unlock_irqrestore(&to_clean->si_lock, flags); | 3516 | spin_unlock_irqrestore(&to_clean->si_lock, flags); |
3517 | while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { | 3517 | while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { |
3518 | poll(to_clean); | 3518 | poll(to_clean); |
3519 | schedule_timeout_uninterruptible(1); | 3519 | schedule_timeout_uninterruptible(1); |
3520 | } | 3520 | } |
3521 | 3521 | ||
3522 | /* Clean up interrupts and make sure that everything is done. */ | 3522 | /* Clean up interrupts and make sure that everything is done. */ |
3523 | if (to_clean->irq_cleanup) | 3523 | if (to_clean->irq_cleanup) |
3524 | to_clean->irq_cleanup(to_clean); | 3524 | to_clean->irq_cleanup(to_clean); |
3525 | while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { | 3525 | while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { |
3526 | poll(to_clean); | 3526 | poll(to_clean); |
3527 | schedule_timeout_uninterruptible(1); | 3527 | schedule_timeout_uninterruptible(1); |
3528 | } | 3528 | } |
3529 | 3529 | ||
3530 | if (to_clean->intf) | 3530 | if (to_clean->intf) |
3531 | rv = ipmi_unregister_smi(to_clean->intf); | 3531 | rv = ipmi_unregister_smi(to_clean->intf); |
3532 | 3532 | ||
3533 | if (rv) { | 3533 | if (rv) { |
3534 | printk(KERN_ERR PFX "Unable to unregister device: errno=%d\n", | 3534 | printk(KERN_ERR PFX "Unable to unregister device: errno=%d\n", |
3535 | rv); | 3535 | rv); |
3536 | } | 3536 | } |
3537 | 3537 | ||
3538 | if (to_clean->handlers) | 3538 | if (to_clean->handlers) |
3539 | to_clean->handlers->cleanup(to_clean->si_sm); | 3539 | to_clean->handlers->cleanup(to_clean->si_sm); |
3540 | 3540 | ||
3541 | kfree(to_clean->si_sm); | 3541 | kfree(to_clean->si_sm); |
3542 | 3542 | ||
3543 | if (to_clean->addr_source_cleanup) | 3543 | if (to_clean->addr_source_cleanup) |
3544 | to_clean->addr_source_cleanup(to_clean); | 3544 | to_clean->addr_source_cleanup(to_clean); |
3545 | if (to_clean->io_cleanup) | 3545 | if (to_clean->io_cleanup) |
3546 | to_clean->io_cleanup(to_clean); | 3546 | to_clean->io_cleanup(to_clean); |
3547 | 3547 | ||
3548 | if (to_clean->dev_registered) | 3548 | if (to_clean->dev_registered) |
3549 | platform_device_unregister(to_clean->pdev); | 3549 | platform_device_unregister(to_clean->pdev); |
3550 | 3550 | ||
3551 | kfree(to_clean); | 3551 | kfree(to_clean); |
3552 | } | 3552 | } |
3553 | 3553 | ||
3554 | static void cleanup_ipmi_si(void) | 3554 | static void cleanup_ipmi_si(void) |
3555 | { | 3555 | { |
3556 | struct smi_info *e, *tmp_e; | 3556 | struct smi_info *e, *tmp_e; |
3557 | 3557 | ||
3558 | if (!initialized) | 3558 | if (!initialized) |
3559 | return; | 3559 | return; |
3560 | 3560 | ||
3561 | #ifdef CONFIG_PCI | 3561 | #ifdef CONFIG_PCI |
3562 | if (pci_registered) | 3562 | if (pci_registered) |
3563 | pci_unregister_driver(&ipmi_pci_driver); | 3563 | pci_unregister_driver(&ipmi_pci_driver); |
3564 | #endif | 3564 | #endif |
3565 | #ifdef CONFIG_ACPI | 3565 | #ifdef CONFIG_ACPI |
3566 | if (pnp_registered) | 3566 | if (pnp_registered) |
3567 | pnp_unregister_driver(&ipmi_pnp_driver); | 3567 | pnp_unregister_driver(&ipmi_pnp_driver); |
3568 | #endif | 3568 | #endif |
3569 | 3569 | ||
3570 | platform_driver_unregister(&ipmi_driver); | 3570 | platform_driver_unregister(&ipmi_driver); |
3571 | 3571 | ||
3572 | mutex_lock(&smi_infos_lock); | 3572 | mutex_lock(&smi_infos_lock); |
3573 | list_for_each_entry_safe(e, tmp_e, &smi_infos, link) | 3573 | list_for_each_entry_safe(e, tmp_e, &smi_infos, link) |
3574 | cleanup_one_si(e); | 3574 | cleanup_one_si(e); |
3575 | mutex_unlock(&smi_infos_lock); | 3575 | mutex_unlock(&smi_infos_lock); |
3576 | } | 3576 | } |
3577 | module_exit(cleanup_ipmi_si); | 3577 | module_exit(cleanup_ipmi_si); |
3578 | 3578 | ||
3579 | MODULE_LICENSE("GPL"); | 3579 | MODULE_LICENSE("GPL"); |
3580 | MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); | 3580 | MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); |
3581 | MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT" | 3581 | MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT" |
3582 | " system interfaces."); | 3582 | " system interfaces."); |
3583 | 3583 |
drivers/char/ps3flash.c
1 | /* | 1 | /* |
2 | * PS3 FLASH ROM Storage Driver | 2 | * PS3 FLASH ROM Storage Driver |
3 | * | 3 | * |
4 | * Copyright (C) 2007 Sony Computer Entertainment Inc. | 4 | * Copyright (C) 2007 Sony Computer Entertainment Inc. |
5 | * Copyright 2007 Sony Corp. | 5 | * Copyright 2007 Sony Corp. |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify it | 7 | * This program is free software; you can redistribute it and/or modify it |
8 | * under the terms of the GNU General Public License as published | 8 | * under the terms of the GNU General Public License as published |
9 | * by the Free Software Foundation; version 2 of the License. | 9 | * by the Free Software Foundation; version 2 of the License. |
10 | * | 10 | * |
11 | * This program is distributed in the hope that it will be useful, but | 11 | * This program is distributed in the hope that it will be useful, but |
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
14 | * General Public License for more details. | 14 | * General Public License for more details. |
15 | * | 15 | * |
16 | * You should have received a copy of the GNU General Public License along | 16 | * You should have received a copy of the GNU General Public License along |
17 | * with this program; if not, write to the Free Software Foundation, Inc., | 17 | * with this program; if not, write to the Free Software Foundation, Inc., |
18 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | 18 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/fs.h> | 21 | #include <linux/fs.h> |
22 | #include <linux/miscdevice.h> | 22 | #include <linux/miscdevice.h> |
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include <linux/uaccess.h> | 24 | #include <linux/uaccess.h> |
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | 26 | ||
27 | #include <asm/lv1call.h> | 27 | #include <asm/lv1call.h> |
28 | #include <asm/ps3stor.h> | 28 | #include <asm/ps3stor.h> |
29 | 29 | ||
30 | 30 | ||
31 | #define DEVICE_NAME "ps3flash" | 31 | #define DEVICE_NAME "ps3flash" |
32 | 32 | ||
33 | #define FLASH_BLOCK_SIZE (256*1024) | 33 | #define FLASH_BLOCK_SIZE (256*1024) |
34 | 34 | ||
35 | 35 | ||
36 | struct ps3flash_private { | 36 | struct ps3flash_private { |
37 | struct mutex mutex; /* Bounce buffer mutex */ | 37 | struct mutex mutex; /* Bounce buffer mutex */ |
38 | u64 chunk_sectors; | 38 | u64 chunk_sectors; |
39 | int tag; /* Start sector of buffer, -1 if invalid */ | 39 | int tag; /* Start sector of buffer, -1 if invalid */ |
40 | bool dirty; | 40 | bool dirty; |
41 | }; | 41 | }; |
42 | 42 | ||
43 | static struct ps3_storage_device *ps3flash_dev; | 43 | static struct ps3_storage_device *ps3flash_dev; |
44 | 44 | ||
45 | static int ps3flash_read_write_sectors(struct ps3_storage_device *dev, | 45 | static int ps3flash_read_write_sectors(struct ps3_storage_device *dev, |
46 | u64 start_sector, int write) | 46 | u64 start_sector, int write) |
47 | { | 47 | { |
48 | struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); | 48 | struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); |
49 | u64 res = ps3stor_read_write_sectors(dev, dev->bounce_lpar, | 49 | u64 res = ps3stor_read_write_sectors(dev, dev->bounce_lpar, |
50 | start_sector, priv->chunk_sectors, | 50 | start_sector, priv->chunk_sectors, |
51 | write); | 51 | write); |
52 | if (res) { | 52 | if (res) { |
53 | dev_err(&dev->sbd.core, "%s:%u: %s failed 0x%llx\n", __func__, | 53 | dev_err(&dev->sbd.core, "%s:%u: %s failed 0x%llx\n", __func__, |
54 | __LINE__, write ? "write" : "read", res); | 54 | __LINE__, write ? "write" : "read", res); |
55 | return -EIO; | 55 | return -EIO; |
56 | } | 56 | } |
57 | return 0; | 57 | return 0; |
58 | } | 58 | } |
59 | 59 | ||
60 | static int ps3flash_writeback(struct ps3_storage_device *dev) | 60 | static int ps3flash_writeback(struct ps3_storage_device *dev) |
61 | { | 61 | { |
62 | struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); | 62 | struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); |
63 | int res; | 63 | int res; |
64 | 64 | ||
65 | if (!priv->dirty || priv->tag < 0) | 65 | if (!priv->dirty || priv->tag < 0) |
66 | return 0; | 66 | return 0; |
67 | 67 | ||
68 | res = ps3flash_read_write_sectors(dev, priv->tag, 1); | 68 | res = ps3flash_read_write_sectors(dev, priv->tag, 1); |
69 | if (res) | 69 | if (res) |
70 | return res; | 70 | return res; |
71 | 71 | ||
72 | priv->dirty = false; | 72 | priv->dirty = false; |
73 | return 0; | 73 | return 0; |
74 | } | 74 | } |
75 | 75 | ||
76 | static int ps3flash_fetch(struct ps3_storage_device *dev, u64 start_sector) | 76 | static int ps3flash_fetch(struct ps3_storage_device *dev, u64 start_sector) |
77 | { | 77 | { |
78 | struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); | 78 | struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); |
79 | int res; | 79 | int res; |
80 | 80 | ||
81 | if (start_sector == priv->tag) | 81 | if (start_sector == priv->tag) |
82 | return 0; | 82 | return 0; |
83 | 83 | ||
84 | res = ps3flash_writeback(dev); | 84 | res = ps3flash_writeback(dev); |
85 | if (res) | 85 | if (res) |
86 | return res; | 86 | return res; |
87 | 87 | ||
88 | priv->tag = -1; | 88 | priv->tag = -1; |
89 | 89 | ||
90 | res = ps3flash_read_write_sectors(dev, start_sector, 0); | 90 | res = ps3flash_read_write_sectors(dev, start_sector, 0); |
91 | if (res) | 91 | if (res) |
92 | return res; | 92 | return res; |
93 | 93 | ||
94 | priv->tag = start_sector; | 94 | priv->tag = start_sector; |
95 | return 0; | 95 | return 0; |
96 | } | 96 | } |
97 | 97 | ||
98 | static loff_t ps3flash_llseek(struct file *file, loff_t offset, int origin) | 98 | static loff_t ps3flash_llseek(struct file *file, loff_t offset, int origin) |
99 | { | 99 | { |
100 | struct ps3_storage_device *dev = ps3flash_dev; | 100 | struct ps3_storage_device *dev = ps3flash_dev; |
101 | loff_t res; | 101 | loff_t res; |
102 | 102 | ||
103 | mutex_lock(&file->f_mapping->host->i_mutex); | 103 | mutex_lock(&file->f_mapping->host->i_mutex); |
104 | switch (origin) { | 104 | switch (origin) { |
105 | case 0: | 105 | case 0: |
106 | break; | 106 | break; |
107 | case 1: | 107 | case 1: |
108 | offset += file->f_pos; | 108 | offset += file->f_pos; |
109 | break; | 109 | break; |
110 | case 2: | 110 | case 2: |
111 | offset += dev->regions[dev->region_idx].size*dev->blk_size; | 111 | offset += dev->regions[dev->region_idx].size*dev->blk_size; |
112 | break; | 112 | break; |
113 | default: | 113 | default: |
114 | offset = -1; | 114 | offset = -1; |
115 | } | 115 | } |
116 | if (offset < 0) { | 116 | if (offset < 0) { |
117 | res = -EINVAL; | 117 | res = -EINVAL; |
118 | goto out; | 118 | goto out; |
119 | } | 119 | } |
120 | 120 | ||
121 | file->f_pos = offset; | 121 | file->f_pos = offset; |
122 | res = file->f_pos; | 122 | res = file->f_pos; |
123 | 123 | ||
124 | out: | 124 | out: |
125 | mutex_unlock(&file->f_mapping->host->i_mutex); | 125 | mutex_unlock(&file->f_mapping->host->i_mutex); |
126 | return res; | 126 | return res; |
127 | } | 127 | } |
128 | 128 | ||
129 | static ssize_t ps3flash_read(char __user *userbuf, void *kernelbuf, | 129 | static ssize_t ps3flash_read(char __user *userbuf, void *kernelbuf, |
130 | size_t count, loff_t *pos) | 130 | size_t count, loff_t *pos) |
131 | { | 131 | { |
132 | struct ps3_storage_device *dev = ps3flash_dev; | 132 | struct ps3_storage_device *dev = ps3flash_dev; |
133 | struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); | 133 | struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); |
134 | u64 size, sector, offset; | 134 | u64 size, sector, offset; |
135 | int res; | 135 | int res; |
136 | size_t remaining, n; | 136 | size_t remaining, n; |
137 | const void *src; | 137 | const void *src; |
138 | 138 | ||
139 | dev_dbg(&dev->sbd.core, | 139 | dev_dbg(&dev->sbd.core, |
140 | "%s:%u: Reading %zu bytes at position %lld to U0x%p/K0x%p\n", | 140 | "%s:%u: Reading %zu bytes at position %lld to U0x%p/K0x%p\n", |
141 | __func__, __LINE__, count, *pos, userbuf, kernelbuf); | 141 | __func__, __LINE__, count, *pos, userbuf, kernelbuf); |
142 | 142 | ||
143 | size = dev->regions[dev->region_idx].size*dev->blk_size; | 143 | size = dev->regions[dev->region_idx].size*dev->blk_size; |
144 | if (*pos >= size || !count) | 144 | if (*pos >= size || !count) |
145 | return 0; | 145 | return 0; |
146 | 146 | ||
147 | if (*pos + count > size) { | 147 | if (*pos + count > size) { |
148 | dev_dbg(&dev->sbd.core, | 148 | dev_dbg(&dev->sbd.core, |
149 | "%s:%u Truncating count from %zu to %llu\n", __func__, | 149 | "%s:%u Truncating count from %zu to %llu\n", __func__, |
150 | __LINE__, count, size - *pos); | 150 | __LINE__, count, size - *pos); |
151 | count = size - *pos; | 151 | count = size - *pos; |
152 | } | 152 | } |
153 | 153 | ||
154 | sector = *pos / dev->bounce_size * priv->chunk_sectors; | 154 | sector = *pos / dev->bounce_size * priv->chunk_sectors; |
155 | offset = *pos % dev->bounce_size; | 155 | offset = *pos % dev->bounce_size; |
156 | 156 | ||
157 | remaining = count; | 157 | remaining = count; |
158 | do { | 158 | do { |
159 | n = min_t(u64, remaining, dev->bounce_size - offset); | 159 | n = min_t(u64, remaining, dev->bounce_size - offset); |
160 | src = dev->bounce_buf + offset; | 160 | src = dev->bounce_buf + offset; |
161 | 161 | ||
162 | mutex_lock(&priv->mutex); | 162 | mutex_lock(&priv->mutex); |
163 | 163 | ||
164 | res = ps3flash_fetch(dev, sector); | 164 | res = ps3flash_fetch(dev, sector); |
165 | if (res) | 165 | if (res) |
166 | goto fail; | 166 | goto fail; |
167 | 167 | ||
168 | dev_dbg(&dev->sbd.core, | 168 | dev_dbg(&dev->sbd.core, |
169 | "%s:%u: copy %lu bytes from 0x%p to U0x%p/K0x%p\n", | 169 | "%s:%u: copy %lu bytes from 0x%p to U0x%p/K0x%p\n", |
170 | __func__, __LINE__, n, src, userbuf, kernelbuf); | 170 | __func__, __LINE__, n, src, userbuf, kernelbuf); |
171 | if (userbuf) { | 171 | if (userbuf) { |
172 | if (copy_to_user(userbuf, src, n)) { | 172 | if (copy_to_user(userbuf, src, n)) { |
173 | res = -EFAULT; | 173 | res = -EFAULT; |
174 | goto fail; | 174 | goto fail; |
175 | } | 175 | } |
176 | userbuf += n; | 176 | userbuf += n; |
177 | } | 177 | } |
178 | if (kernelbuf) { | 178 | if (kernelbuf) { |
179 | memcpy(kernelbuf, src, n); | 179 | memcpy(kernelbuf, src, n); |
180 | kernelbuf += n; | 180 | kernelbuf += n; |
181 | } | 181 | } |
182 | 182 | ||
183 | mutex_unlock(&priv->mutex); | 183 | mutex_unlock(&priv->mutex); |
184 | 184 | ||
185 | *pos += n; | 185 | *pos += n; |
186 | remaining -= n; | 186 | remaining -= n; |
187 | sector += priv->chunk_sectors; | 187 | sector += priv->chunk_sectors; |
188 | offset = 0; | 188 | offset = 0; |
189 | } while (remaining > 0); | 189 | } while (remaining > 0); |
190 | 190 | ||
191 | return count; | 191 | return count; |
192 | 192 | ||
193 | fail: | 193 | fail: |
194 | mutex_unlock(&priv->mutex); | 194 | mutex_unlock(&priv->mutex); |
195 | return res; | 195 | return res; |
196 | } | 196 | } |
197 | 197 | ||
198 | static ssize_t ps3flash_write(const char __user *userbuf, | 198 | static ssize_t ps3flash_write(const char __user *userbuf, |
199 | const void *kernelbuf, size_t count, loff_t *pos) | 199 | const void *kernelbuf, size_t count, loff_t *pos) |
200 | { | 200 | { |
201 | struct ps3_storage_device *dev = ps3flash_dev; | 201 | struct ps3_storage_device *dev = ps3flash_dev; |
202 | struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); | 202 | struct ps3flash_private *priv = ps3_system_bus_get_drvdata(&dev->sbd); |
203 | u64 size, sector, offset; | 203 | u64 size, sector, offset; |
204 | int res = 0; | 204 | int res = 0; |
205 | size_t remaining, n; | 205 | size_t remaining, n; |
206 | void *dst; | 206 | void *dst; |
207 | 207 | ||
208 | dev_dbg(&dev->sbd.core, | 208 | dev_dbg(&dev->sbd.core, |
209 | "%s:%u: Writing %zu bytes at position %lld from U0x%p/K0x%p\n", | 209 | "%s:%u: Writing %zu bytes at position %lld from U0x%p/K0x%p\n", |
210 | __func__, __LINE__, count, *pos, userbuf, kernelbuf); | 210 | __func__, __LINE__, count, *pos, userbuf, kernelbuf); |
211 | 211 | ||
212 | size = dev->regions[dev->region_idx].size*dev->blk_size; | 212 | size = dev->regions[dev->region_idx].size*dev->blk_size; |
213 | if (*pos >= size || !count) | 213 | if (*pos >= size || !count) |
214 | return 0; | 214 | return 0; |
215 | 215 | ||
216 | if (*pos + count > size) { | 216 | if (*pos + count > size) { |
217 | dev_dbg(&dev->sbd.core, | 217 | dev_dbg(&dev->sbd.core, |
218 | "%s:%u Truncating count from %zu to %llu\n", __func__, | 218 | "%s:%u Truncating count from %zu to %llu\n", __func__, |
219 | __LINE__, count, size - *pos); | 219 | __LINE__, count, size - *pos); |
220 | count = size - *pos; | 220 | count = size - *pos; |
221 | } | 221 | } |
222 | 222 | ||
223 | sector = *pos / dev->bounce_size * priv->chunk_sectors; | 223 | sector = *pos / dev->bounce_size * priv->chunk_sectors; |
224 | offset = *pos % dev->bounce_size; | 224 | offset = *pos % dev->bounce_size; |
225 | 225 | ||
226 | remaining = count; | 226 | remaining = count; |
227 | do { | 227 | do { |
228 | n = min_t(u64, remaining, dev->bounce_size - offset); | 228 | n = min_t(u64, remaining, dev->bounce_size - offset); |
229 | dst = dev->bounce_buf + offset; | 229 | dst = dev->bounce_buf + offset; |
230 | 230 | ||
231 | mutex_lock(&priv->mutex); | 231 | mutex_lock(&priv->mutex); |
232 | 232 | ||
233 | if (n != dev->bounce_size) | 233 | if (n != dev->bounce_size) |
234 | res = ps3flash_fetch(dev, sector); | 234 | res = ps3flash_fetch(dev, sector); |
235 | else if (sector != priv->tag) | 235 | else if (sector != priv->tag) |
236 | res = ps3flash_writeback(dev); | 236 | res = ps3flash_writeback(dev); |
237 | if (res) | 237 | if (res) |
238 | goto fail; | 238 | goto fail; |
239 | 239 | ||
240 | dev_dbg(&dev->sbd.core, | 240 | dev_dbg(&dev->sbd.core, |
241 | "%s:%u: copy %lu bytes from U0x%p/K0x%p to 0x%p\n", | 241 | "%s:%u: copy %lu bytes from U0x%p/K0x%p to 0x%p\n", |
242 | __func__, __LINE__, n, userbuf, kernelbuf, dst); | 242 | __func__, __LINE__, n, userbuf, kernelbuf, dst); |
243 | if (userbuf) { | 243 | if (userbuf) { |
244 | if (copy_from_user(dst, userbuf, n)) { | 244 | if (copy_from_user(dst, userbuf, n)) { |
245 | res = -EFAULT; | 245 | res = -EFAULT; |
246 | goto fail; | 246 | goto fail; |
247 | } | 247 | } |
248 | userbuf += n; | 248 | userbuf += n; |
249 | } | 249 | } |
250 | if (kernelbuf) { | 250 | if (kernelbuf) { |
251 | memcpy(dst, kernelbuf, n); | 251 | memcpy(dst, kernelbuf, n); |
252 | kernelbuf += n; | 252 | kernelbuf += n; |
253 | } | 253 | } |
254 | 254 | ||
255 | priv->tag = sector; | 255 | priv->tag = sector; |
256 | priv->dirty = true; | 256 | priv->dirty = true; |
257 | 257 | ||
258 | mutex_unlock(&priv->mutex); | 258 | mutex_unlock(&priv->mutex); |
259 | 259 | ||
260 | *pos += n; | 260 | *pos += n; |
261 | remaining -= n; | 261 | remaining -= n; |
262 | sector += priv->chunk_sectors; | 262 | sector += priv->chunk_sectors; |
263 | offset = 0; | 263 | offset = 0; |
264 | } while (remaining > 0); | 264 | } while (remaining > 0); |
265 | 265 | ||
266 | return count; | 266 | return count; |
267 | 267 | ||
268 | fail: | 268 | fail: |
269 | mutex_unlock(&priv->mutex); | 269 | mutex_unlock(&priv->mutex); |
270 | return res; | 270 | return res; |
271 | } | 271 | } |
272 | 272 | ||
273 | static ssize_t ps3flash_user_read(struct file *file, char __user *buf, | 273 | static ssize_t ps3flash_user_read(struct file *file, char __user *buf, |
274 | size_t count, loff_t *pos) | 274 | size_t count, loff_t *pos) |
275 | { | 275 | { |
276 | return ps3flash_read(buf, NULL, count, pos); | 276 | return ps3flash_read(buf, NULL, count, pos); |
277 | } | 277 | } |
278 | 278 | ||
279 | static ssize_t ps3flash_user_write(struct file *file, const char __user *buf, | 279 | static ssize_t ps3flash_user_write(struct file *file, const char __user *buf, |
280 | size_t count, loff_t *pos) | 280 | size_t count, loff_t *pos) |
281 | { | 281 | { |
282 | return ps3flash_write(buf, NULL, count, pos); | 282 | return ps3flash_write(buf, NULL, count, pos); |
283 | } | 283 | } |
284 | 284 | ||
285 | static ssize_t ps3flash_kernel_read(void *buf, size_t count, loff_t pos) | 285 | static ssize_t ps3flash_kernel_read(void *buf, size_t count, loff_t pos) |
286 | { | 286 | { |
287 | return ps3flash_read(NULL, buf, count, &pos); | 287 | return ps3flash_read(NULL, buf, count, &pos); |
288 | } | 288 | } |
289 | 289 | ||
290 | static ssize_t ps3flash_kernel_write(const void *buf, size_t count, | 290 | static ssize_t ps3flash_kernel_write(const void *buf, size_t count, |
291 | loff_t pos) | 291 | loff_t pos) |
292 | { | 292 | { |
293 | ssize_t res; | 293 | ssize_t res; |
294 | int wb; | 294 | int wb; |
295 | 295 | ||
296 | res = ps3flash_write(NULL, buf, count, &pos); | 296 | res = ps3flash_write(NULL, buf, count, &pos); |
297 | if (res < 0) | 297 | if (res < 0) |
298 | return res; | 298 | return res; |
299 | 299 | ||
300 | /* Make kernel writes synchronous */ | 300 | /* Make kernel writes synchronous */ |
301 | wb = ps3flash_writeback(ps3flash_dev); | 301 | wb = ps3flash_writeback(ps3flash_dev); |
302 | if (wb) | 302 | if (wb) |
303 | return wb; | 303 | return wb; |
304 | 304 | ||
305 | return res; | 305 | return res; |
306 | } | 306 | } |
307 | 307 | ||
308 | static int ps3flash_flush(struct file *file, fl_owner_t id) | 308 | static int ps3flash_flush(struct file *file, fl_owner_t id) |
309 | { | 309 | { |
310 | return ps3flash_writeback(ps3flash_dev); | 310 | return ps3flash_writeback(ps3flash_dev); |
311 | } | 311 | } |
312 | 312 | ||
313 | static int ps3flash_fsync(struct file *file, loff_t start, loff_t end, int datasync) | 313 | static int ps3flash_fsync(struct file *file, loff_t start, loff_t end, int datasync) |
314 | { | 314 | { |
315 | struct inode *inode = file->f_path.dentry->d_inode; | 315 | struct inode *inode = file->f_path.dentry->d_inode; |
316 | int err; | 316 | int err; |
317 | mutex_lock(&inode->i_mutex); | 317 | mutex_lock(&inode->i_mutex); |
318 | err = ps3flash_writeback(ps3flash_dev); | 318 | err = ps3flash_writeback(ps3flash_dev); |
319 | mutex_unlock(&inode->i_mutex); | 319 | mutex_unlock(&inode->i_mutex); |
320 | return err; | 320 | return err; |
321 | } | 321 | } |
322 | 322 | ||
323 | static irqreturn_t ps3flash_interrupt(int irq, void *data) | 323 | static irqreturn_t ps3flash_interrupt(int irq, void *data) |
324 | { | 324 | { |
325 | struct ps3_storage_device *dev = data; | 325 | struct ps3_storage_device *dev = data; |
326 | int res; | 326 | int res; |
327 | u64 tag, status; | 327 | u64 tag, status; |
328 | 328 | ||
329 | res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status); | 329 | res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status); |
330 | 330 | ||
331 | if (tag != dev->tag) | 331 | if (tag != dev->tag) |
332 | dev_err(&dev->sbd.core, | 332 | dev_err(&dev->sbd.core, |
333 | "%s:%u: tag mismatch, got %llx, expected %llx\n", | 333 | "%s:%u: tag mismatch, got %llx, expected %llx\n", |
334 | __func__, __LINE__, tag, dev->tag); | 334 | __func__, __LINE__, tag, dev->tag); |
335 | 335 | ||
336 | if (res) { | 336 | if (res) { |
337 | dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%llx\n", | 337 | dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%llx\n", |
338 | __func__, __LINE__, res, status); | 338 | __func__, __LINE__, res, status); |
339 | } else { | 339 | } else { |
340 | dev->lv1_status = status; | 340 | dev->lv1_status = status; |
341 | complete(&dev->done); | 341 | complete(&dev->done); |
342 | } | 342 | } |
343 | return IRQ_HANDLED; | 343 | return IRQ_HANDLED; |
344 | } | 344 | } |
345 | 345 | ||
346 | static const struct file_operations ps3flash_fops = { | 346 | static const struct file_operations ps3flash_fops = { |
347 | .owner = THIS_MODULE, | 347 | .owner = THIS_MODULE, |
348 | .llseek = ps3flash_llseek, | 348 | .llseek = ps3flash_llseek, |
349 | .read = ps3flash_user_read, | 349 | .read = ps3flash_user_read, |
350 | .write = ps3flash_user_write, | 350 | .write = ps3flash_user_write, |
351 | .flush = ps3flash_flush, | 351 | .flush = ps3flash_flush, |
352 | .fsync = ps3flash_fsync, | 352 | .fsync = ps3flash_fsync, |
353 | }; | 353 | }; |
354 | 354 | ||
355 | static const struct ps3_os_area_flash_ops ps3flash_kernel_ops = { | 355 | static const struct ps3_os_area_flash_ops ps3flash_kernel_ops = { |
356 | .read = ps3flash_kernel_read, | 356 | .read = ps3flash_kernel_read, |
357 | .write = ps3flash_kernel_write, | 357 | .write = ps3flash_kernel_write, |
358 | }; | 358 | }; |
359 | 359 | ||
360 | static struct miscdevice ps3flash_misc = { | 360 | static struct miscdevice ps3flash_misc = { |
361 | .minor = MISC_DYNAMIC_MINOR, | 361 | .minor = MISC_DYNAMIC_MINOR, |
362 | .name = DEVICE_NAME, | 362 | .name = DEVICE_NAME, |
363 | .fops = &ps3flash_fops, | 363 | .fops = &ps3flash_fops, |
364 | }; | 364 | }; |
365 | 365 | ||
366 | static int __devinit ps3flash_probe(struct ps3_system_bus_device *_dev) | 366 | static int ps3flash_probe(struct ps3_system_bus_device *_dev) |
367 | { | 367 | { |
368 | struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core); | 368 | struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core); |
369 | struct ps3flash_private *priv; | 369 | struct ps3flash_private *priv; |
370 | int error; | 370 | int error; |
371 | unsigned long tmp; | 371 | unsigned long tmp; |
372 | 372 | ||
373 | tmp = dev->regions[dev->region_idx].start*dev->blk_size; | 373 | tmp = dev->regions[dev->region_idx].start*dev->blk_size; |
374 | if (tmp % FLASH_BLOCK_SIZE) { | 374 | if (tmp % FLASH_BLOCK_SIZE) { |
375 | dev_err(&dev->sbd.core, | 375 | dev_err(&dev->sbd.core, |
376 | "%s:%u region start %lu is not aligned\n", __func__, | 376 | "%s:%u region start %lu is not aligned\n", __func__, |
377 | __LINE__, tmp); | 377 | __LINE__, tmp); |
378 | return -EINVAL; | 378 | return -EINVAL; |
379 | } | 379 | } |
380 | tmp = dev->regions[dev->region_idx].size*dev->blk_size; | 380 | tmp = dev->regions[dev->region_idx].size*dev->blk_size; |
381 | if (tmp % FLASH_BLOCK_SIZE) { | 381 | if (tmp % FLASH_BLOCK_SIZE) { |
382 | dev_err(&dev->sbd.core, | 382 | dev_err(&dev->sbd.core, |
383 | "%s:%u region size %lu is not aligned\n", __func__, | 383 | "%s:%u region size %lu is not aligned\n", __func__, |
384 | __LINE__, tmp); | 384 | __LINE__, tmp); |
385 | return -EINVAL; | 385 | return -EINVAL; |
386 | } | 386 | } |
387 | 387 | ||
388 | /* use static buffer, kmalloc cannot allocate 256 KiB */ | 388 | /* use static buffer, kmalloc cannot allocate 256 KiB */ |
389 | if (!ps3flash_bounce_buffer.address) | 389 | if (!ps3flash_bounce_buffer.address) |
390 | return -ENODEV; | 390 | return -ENODEV; |
391 | 391 | ||
392 | if (ps3flash_dev) { | 392 | if (ps3flash_dev) { |
393 | dev_err(&dev->sbd.core, | 393 | dev_err(&dev->sbd.core, |
394 | "Only one FLASH device is supported\n"); | 394 | "Only one FLASH device is supported\n"); |
395 | return -EBUSY; | 395 | return -EBUSY; |
396 | } | 396 | } |
397 | 397 | ||
398 | ps3flash_dev = dev; | 398 | ps3flash_dev = dev; |
399 | 399 | ||
400 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | 400 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); |
401 | if (!priv) { | 401 | if (!priv) { |
402 | error = -ENOMEM; | 402 | error = -ENOMEM; |
403 | goto fail; | 403 | goto fail; |
404 | } | 404 | } |
405 | 405 | ||
406 | ps3_system_bus_set_drvdata(&dev->sbd, priv); | 406 | ps3_system_bus_set_drvdata(&dev->sbd, priv); |
407 | mutex_init(&priv->mutex); | 407 | mutex_init(&priv->mutex); |
408 | priv->tag = -1; | 408 | priv->tag = -1; |
409 | 409 | ||
410 | dev->bounce_size = ps3flash_bounce_buffer.size; | 410 | dev->bounce_size = ps3flash_bounce_buffer.size; |
411 | dev->bounce_buf = ps3flash_bounce_buffer.address; | 411 | dev->bounce_buf = ps3flash_bounce_buffer.address; |
412 | priv->chunk_sectors = dev->bounce_size / dev->blk_size; | 412 | priv->chunk_sectors = dev->bounce_size / dev->blk_size; |
413 | 413 | ||
414 | error = ps3stor_setup(dev, ps3flash_interrupt); | 414 | error = ps3stor_setup(dev, ps3flash_interrupt); |
415 | if (error) | 415 | if (error) |
416 | goto fail_free_priv; | 416 | goto fail_free_priv; |
417 | 417 | ||
418 | ps3flash_misc.parent = &dev->sbd.core; | 418 | ps3flash_misc.parent = &dev->sbd.core; |
419 | error = misc_register(&ps3flash_misc); | 419 | error = misc_register(&ps3flash_misc); |
420 | if (error) { | 420 | if (error) { |
421 | dev_err(&dev->sbd.core, "%s:%u: misc_register failed %d\n", | 421 | dev_err(&dev->sbd.core, "%s:%u: misc_register failed %d\n", |
422 | __func__, __LINE__, error); | 422 | __func__, __LINE__, error); |
423 | goto fail_teardown; | 423 | goto fail_teardown; |
424 | } | 424 | } |
425 | 425 | ||
426 | dev_info(&dev->sbd.core, "%s:%u: registered misc device %d\n", | 426 | dev_info(&dev->sbd.core, "%s:%u: registered misc device %d\n", |
427 | __func__, __LINE__, ps3flash_misc.minor); | 427 | __func__, __LINE__, ps3flash_misc.minor); |
428 | 428 | ||
429 | ps3_os_area_flash_register(&ps3flash_kernel_ops); | 429 | ps3_os_area_flash_register(&ps3flash_kernel_ops); |
430 | return 0; | 430 | return 0; |
431 | 431 | ||
432 | fail_teardown: | 432 | fail_teardown: |
433 | ps3stor_teardown(dev); | 433 | ps3stor_teardown(dev); |
434 | fail_free_priv: | 434 | fail_free_priv: |
435 | kfree(priv); | 435 | kfree(priv); |
436 | ps3_system_bus_set_drvdata(&dev->sbd, NULL); | 436 | ps3_system_bus_set_drvdata(&dev->sbd, NULL); |
437 | fail: | 437 | fail: |
438 | ps3flash_dev = NULL; | 438 | ps3flash_dev = NULL; |
439 | return error; | 439 | return error; |
440 | } | 440 | } |
441 | 441 | ||
442 | static int ps3flash_remove(struct ps3_system_bus_device *_dev) | 442 | static int ps3flash_remove(struct ps3_system_bus_device *_dev) |
443 | { | 443 | { |
444 | struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core); | 444 | struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core); |
445 | 445 | ||
446 | ps3_os_area_flash_register(NULL); | 446 | ps3_os_area_flash_register(NULL); |
447 | misc_deregister(&ps3flash_misc); | 447 | misc_deregister(&ps3flash_misc); |
448 | ps3stor_teardown(dev); | 448 | ps3stor_teardown(dev); |
449 | kfree(ps3_system_bus_get_drvdata(&dev->sbd)); | 449 | kfree(ps3_system_bus_get_drvdata(&dev->sbd)); |
450 | ps3_system_bus_set_drvdata(&dev->sbd, NULL); | 450 | ps3_system_bus_set_drvdata(&dev->sbd, NULL); |
451 | ps3flash_dev = NULL; | 451 | ps3flash_dev = NULL; |
452 | return 0; | 452 | return 0; |
453 | } | 453 | } |
454 | 454 | ||
455 | 455 | ||
456 | static struct ps3_system_bus_driver ps3flash = { | 456 | static struct ps3_system_bus_driver ps3flash = { |
457 | .match_id = PS3_MATCH_ID_STOR_FLASH, | 457 | .match_id = PS3_MATCH_ID_STOR_FLASH, |
458 | .core.name = DEVICE_NAME, | 458 | .core.name = DEVICE_NAME, |
459 | .core.owner = THIS_MODULE, | 459 | .core.owner = THIS_MODULE, |
460 | .probe = ps3flash_probe, | 460 | .probe = ps3flash_probe, |
461 | .remove = ps3flash_remove, | 461 | .remove = ps3flash_remove, |
462 | .shutdown = ps3flash_remove, | 462 | .shutdown = ps3flash_remove, |
463 | }; | 463 | }; |
464 | 464 | ||
465 | 465 | ||
466 | static int __init ps3flash_init(void) | 466 | static int __init ps3flash_init(void) |
467 | { | 467 | { |
468 | return ps3_system_bus_driver_register(&ps3flash); | 468 | return ps3_system_bus_driver_register(&ps3flash); |
469 | } | 469 | } |
470 | 470 | ||
471 | static void __exit ps3flash_exit(void) | 471 | static void __exit ps3flash_exit(void) |
472 | { | 472 | { |
473 | ps3_system_bus_driver_unregister(&ps3flash); | 473 | ps3_system_bus_driver_unregister(&ps3flash); |
474 | } | 474 | } |
475 | 475 | ||
476 | module_init(ps3flash_init); | 476 | module_init(ps3flash_init); |
477 | module_exit(ps3flash_exit); | 477 | module_exit(ps3flash_exit); |
478 | 478 | ||
479 | MODULE_LICENSE("GPL"); | 479 | MODULE_LICENSE("GPL"); |
480 | MODULE_DESCRIPTION("PS3 FLASH ROM Storage Driver"); | 480 | MODULE_DESCRIPTION("PS3 FLASH ROM Storage Driver"); |
481 | MODULE_AUTHOR("Sony Corporation"); | 481 | MODULE_AUTHOR("Sony Corporation"); |
482 | MODULE_ALIAS(PS3_MODULE_ALIAS_STOR_FLASH); | 482 | MODULE_ALIAS(PS3_MODULE_ALIAS_STOR_FLASH); |
483 | 483 |
drivers/char/sonypi.c
1 | /* | 1 | /* |
2 | * Sony Programmable I/O Control Device driver for VAIO | 2 | * Sony Programmable I/O Control Device driver for VAIO |
3 | * | 3 | * |
4 | * Copyright (C) 2007 Mattia Dongili <malattia@linux.it> | 4 | * Copyright (C) 2007 Mattia Dongili <malattia@linux.it> |
5 | * | 5 | * |
6 | * Copyright (C) 2001-2005 Stelian Pop <stelian@popies.net> | 6 | * Copyright (C) 2001-2005 Stelian Pop <stelian@popies.net> |
7 | * | 7 | * |
8 | * Copyright (C) 2005 Narayanan R S <nars@kadamba.org> | 8 | * Copyright (C) 2005 Narayanan R S <nars@kadamba.org> |
9 | * | 9 | * |
10 | * Copyright (C) 2001-2002 Alcôve <www.alcove.com> | 10 | * Copyright (C) 2001-2002 Alcôve <www.alcove.com> |
11 | * | 11 | * |
12 | * Copyright (C) 2001 Michael Ashley <m.ashley@unsw.edu.au> | 12 | * Copyright (C) 2001 Michael Ashley <m.ashley@unsw.edu.au> |
13 | * | 13 | * |
14 | * Copyright (C) 2001 Junichi Morita <jun1m@mars.dti.ne.jp> | 14 | * Copyright (C) 2001 Junichi Morita <jun1m@mars.dti.ne.jp> |
15 | * | 15 | * |
16 | * Copyright (C) 2000 Takaya Kinjo <t-kinjo@tc4.so-net.ne.jp> | 16 | * Copyright (C) 2000 Takaya Kinjo <t-kinjo@tc4.so-net.ne.jp> |
17 | * | 17 | * |
18 | * Copyright (C) 2000 Andrew Tridgell <tridge@valinux.com> | 18 | * Copyright (C) 2000 Andrew Tridgell <tridge@valinux.com> |
19 | * | 19 | * |
20 | * Earlier work by Werner Almesberger, Paul `Rusty' Russell and Paul Mackerras. | 20 | * Earlier work by Werner Almesberger, Paul `Rusty' Russell and Paul Mackerras. |
21 | * | 21 | * |
22 | * This program is free software; you can redistribute it and/or modify | 22 | * This program is free software; you can redistribute it and/or modify |
23 | * it under the terms of the GNU General Public License as published by | 23 | * it under the terms of the GNU General Public License as published by |
24 | * the Free Software Foundation; either version 2 of the License, or | 24 | * the Free Software Foundation; either version 2 of the License, or |
25 | * (at your option) any later version. | 25 | * (at your option) any later version. |
26 | * | 26 | * |
27 | * This program is distributed in the hope that it will be useful, | 27 | * This program is distributed in the hope that it will be useful, |
28 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 28 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
29 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 29 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
30 | * GNU General Public License for more details. | 30 | * GNU General Public License for more details. |
31 | * | 31 | * |
32 | * You should have received a copy of the GNU General Public License | 32 | * You should have received a copy of the GNU General Public License |
33 | * along with this program; if not, write to the Free Software | 33 | * along with this program; if not, write to the Free Software |
34 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 34 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
35 | * | 35 | * |
36 | */ | 36 | */ |
37 | 37 | ||
38 | #include <linux/module.h> | 38 | #include <linux/module.h> |
39 | #include <linux/sched.h> | 39 | #include <linux/sched.h> |
40 | #include <linux/input.h> | 40 | #include <linux/input.h> |
41 | #include <linux/pci.h> | 41 | #include <linux/pci.h> |
42 | #include <linux/init.h> | 42 | #include <linux/init.h> |
43 | #include <linux/interrupt.h> | 43 | #include <linux/interrupt.h> |
44 | #include <linux/miscdevice.h> | 44 | #include <linux/miscdevice.h> |
45 | #include <linux/poll.h> | 45 | #include <linux/poll.h> |
46 | #include <linux/delay.h> | 46 | #include <linux/delay.h> |
47 | #include <linux/wait.h> | 47 | #include <linux/wait.h> |
48 | #include <linux/acpi.h> | 48 | #include <linux/acpi.h> |
49 | #include <linux/dmi.h> | 49 | #include <linux/dmi.h> |
50 | #include <linux/err.h> | 50 | #include <linux/err.h> |
51 | #include <linux/kfifo.h> | 51 | #include <linux/kfifo.h> |
52 | #include <linux/platform_device.h> | 52 | #include <linux/platform_device.h> |
53 | #include <linux/gfp.h> | 53 | #include <linux/gfp.h> |
54 | 54 | ||
55 | #include <asm/uaccess.h> | 55 | #include <asm/uaccess.h> |
56 | #include <asm/io.h> | 56 | #include <asm/io.h> |
57 | 57 | ||
58 | #include <linux/sonypi.h> | 58 | #include <linux/sonypi.h> |
59 | 59 | ||
60 | #define SONYPI_DRIVER_VERSION "1.26" | 60 | #define SONYPI_DRIVER_VERSION "1.26" |
61 | 61 | ||
62 | MODULE_AUTHOR("Stelian Pop <stelian@popies.net>"); | 62 | MODULE_AUTHOR("Stelian Pop <stelian@popies.net>"); |
63 | MODULE_DESCRIPTION("Sony Programmable I/O Control Device driver"); | 63 | MODULE_DESCRIPTION("Sony Programmable I/O Control Device driver"); |
64 | MODULE_LICENSE("GPL"); | 64 | MODULE_LICENSE("GPL"); |
65 | MODULE_VERSION(SONYPI_DRIVER_VERSION); | 65 | MODULE_VERSION(SONYPI_DRIVER_VERSION); |
66 | 66 | ||
67 | static int minor = -1; | 67 | static int minor = -1; |
68 | module_param(minor, int, 0); | 68 | module_param(minor, int, 0); |
69 | MODULE_PARM_DESC(minor, | 69 | MODULE_PARM_DESC(minor, |
70 | "minor number of the misc device, default is -1 (automatic)"); | 70 | "minor number of the misc device, default is -1 (automatic)"); |
71 | 71 | ||
72 | static int verbose; /* = 0 */ | 72 | static int verbose; /* = 0 */ |
73 | module_param(verbose, int, 0644); | 73 | module_param(verbose, int, 0644); |
74 | MODULE_PARM_DESC(verbose, "be verbose, default is 0 (no)"); | 74 | MODULE_PARM_DESC(verbose, "be verbose, default is 0 (no)"); |
75 | 75 | ||
76 | static int fnkeyinit; /* = 0 */ | 76 | static int fnkeyinit; /* = 0 */ |
77 | module_param(fnkeyinit, int, 0444); | 77 | module_param(fnkeyinit, int, 0444); |
78 | MODULE_PARM_DESC(fnkeyinit, | 78 | MODULE_PARM_DESC(fnkeyinit, |
79 | "set this if your Fn keys do not generate any event"); | 79 | "set this if your Fn keys do not generate any event"); |
80 | 80 | ||
81 | static int camera; /* = 0 */ | 81 | static int camera; /* = 0 */ |
82 | module_param(camera, int, 0444); | 82 | module_param(camera, int, 0444); |
83 | MODULE_PARM_DESC(camera, | 83 | MODULE_PARM_DESC(camera, |
84 | "set this if you have a MotionEye camera (PictureBook series)"); | 84 | "set this if you have a MotionEye camera (PictureBook series)"); |
85 | 85 | ||
86 | static int compat; /* = 0 */ | 86 | static int compat; /* = 0 */ |
87 | module_param(compat, int, 0444); | 87 | module_param(compat, int, 0444); |
88 | MODULE_PARM_DESC(compat, | 88 | MODULE_PARM_DESC(compat, |
89 | "set this if you want to enable backward compatibility mode"); | 89 | "set this if you want to enable backward compatibility mode"); |
90 | 90 | ||
91 | static unsigned long mask = 0xffffffff; | 91 | static unsigned long mask = 0xffffffff; |
92 | module_param(mask, ulong, 0644); | 92 | module_param(mask, ulong, 0644); |
93 | MODULE_PARM_DESC(mask, | 93 | MODULE_PARM_DESC(mask, |
94 | "set this to the mask of event you want to enable (see doc)"); | 94 | "set this to the mask of event you want to enable (see doc)"); |
95 | 95 | ||
96 | static int useinput = 1; | 96 | static int useinput = 1; |
97 | module_param(useinput, int, 0444); | 97 | module_param(useinput, int, 0444); |
98 | MODULE_PARM_DESC(useinput, | 98 | MODULE_PARM_DESC(useinput, |
99 | "set this if you would like sonypi to feed events to the input subsystem"); | 99 | "set this if you would like sonypi to feed events to the input subsystem"); |
100 | 100 | ||
101 | static int check_ioport = 1; | 101 | static int check_ioport = 1; |
102 | module_param(check_ioport, int, 0444); | 102 | module_param(check_ioport, int, 0444); |
103 | MODULE_PARM_DESC(check_ioport, | 103 | MODULE_PARM_DESC(check_ioport, |
104 | "set this to 0 if you think the automatic ioport check for sony-laptop is wrong"); | 104 | "set this to 0 if you think the automatic ioport check for sony-laptop is wrong"); |
105 | 105 | ||
106 | #define SONYPI_DEVICE_MODEL_TYPE1 1 | 106 | #define SONYPI_DEVICE_MODEL_TYPE1 1 |
107 | #define SONYPI_DEVICE_MODEL_TYPE2 2 | 107 | #define SONYPI_DEVICE_MODEL_TYPE2 2 |
108 | #define SONYPI_DEVICE_MODEL_TYPE3 3 | 108 | #define SONYPI_DEVICE_MODEL_TYPE3 3 |
109 | 109 | ||
110 | /* type1 models use those */ | 110 | /* type1 models use those */ |
111 | #define SONYPI_IRQ_PORT 0x8034 | 111 | #define SONYPI_IRQ_PORT 0x8034 |
112 | #define SONYPI_IRQ_SHIFT 22 | 112 | #define SONYPI_IRQ_SHIFT 22 |
113 | #define SONYPI_TYPE1_BASE 0x50 | 113 | #define SONYPI_TYPE1_BASE 0x50 |
114 | #define SONYPI_G10A (SONYPI_TYPE1_BASE+0x14) | 114 | #define SONYPI_G10A (SONYPI_TYPE1_BASE+0x14) |
115 | #define SONYPI_TYPE1_REGION_SIZE 0x08 | 115 | #define SONYPI_TYPE1_REGION_SIZE 0x08 |
116 | #define SONYPI_TYPE1_EVTYPE_OFFSET 0x04 | 116 | #define SONYPI_TYPE1_EVTYPE_OFFSET 0x04 |
117 | 117 | ||
118 | /* type2 series specifics */ | 118 | /* type2 series specifics */ |
119 | #define SONYPI_SIRQ 0x9b | 119 | #define SONYPI_SIRQ 0x9b |
120 | #define SONYPI_SLOB 0x9c | 120 | #define SONYPI_SLOB 0x9c |
121 | #define SONYPI_SHIB 0x9d | 121 | #define SONYPI_SHIB 0x9d |
122 | #define SONYPI_TYPE2_REGION_SIZE 0x20 | 122 | #define SONYPI_TYPE2_REGION_SIZE 0x20 |
123 | #define SONYPI_TYPE2_EVTYPE_OFFSET 0x12 | 123 | #define SONYPI_TYPE2_EVTYPE_OFFSET 0x12 |
124 | 124 | ||
125 | /* type3 series specifics */ | 125 | /* type3 series specifics */ |
126 | #define SONYPI_TYPE3_BASE 0x40 | 126 | #define SONYPI_TYPE3_BASE 0x40 |
127 | #define SONYPI_TYPE3_GID2 (SONYPI_TYPE3_BASE+0x48) /* 16 bits */ | 127 | #define SONYPI_TYPE3_GID2 (SONYPI_TYPE3_BASE+0x48) /* 16 bits */ |
128 | #define SONYPI_TYPE3_MISC (SONYPI_TYPE3_BASE+0x6d) /* 8 bits */ | 128 | #define SONYPI_TYPE3_MISC (SONYPI_TYPE3_BASE+0x6d) /* 8 bits */ |
129 | #define SONYPI_TYPE3_REGION_SIZE 0x20 | 129 | #define SONYPI_TYPE3_REGION_SIZE 0x20 |
130 | #define SONYPI_TYPE3_EVTYPE_OFFSET 0x12 | 130 | #define SONYPI_TYPE3_EVTYPE_OFFSET 0x12 |
131 | 131 | ||
132 | /* battery / brightness addresses */ | 132 | /* battery / brightness addresses */ |
133 | #define SONYPI_BAT_FLAGS 0x81 | 133 | #define SONYPI_BAT_FLAGS 0x81 |
134 | #define SONYPI_LCD_LIGHT 0x96 | 134 | #define SONYPI_LCD_LIGHT 0x96 |
135 | #define SONYPI_BAT1_PCTRM 0xa0 | 135 | #define SONYPI_BAT1_PCTRM 0xa0 |
136 | #define SONYPI_BAT1_LEFT 0xa2 | 136 | #define SONYPI_BAT1_LEFT 0xa2 |
137 | #define SONYPI_BAT1_MAXRT 0xa4 | 137 | #define SONYPI_BAT1_MAXRT 0xa4 |
138 | #define SONYPI_BAT2_PCTRM 0xa8 | 138 | #define SONYPI_BAT2_PCTRM 0xa8 |
139 | #define SONYPI_BAT2_LEFT 0xaa | 139 | #define SONYPI_BAT2_LEFT 0xaa |
140 | #define SONYPI_BAT2_MAXRT 0xac | 140 | #define SONYPI_BAT2_MAXRT 0xac |
141 | #define SONYPI_BAT1_MAXTK 0xb0 | 141 | #define SONYPI_BAT1_MAXTK 0xb0 |
142 | #define SONYPI_BAT1_FULL 0xb2 | 142 | #define SONYPI_BAT1_FULL 0xb2 |
143 | #define SONYPI_BAT2_MAXTK 0xb8 | 143 | #define SONYPI_BAT2_MAXTK 0xb8 |
144 | #define SONYPI_BAT2_FULL 0xba | 144 | #define SONYPI_BAT2_FULL 0xba |
145 | 145 | ||
146 | /* FAN0 information (reverse engineered from ACPI tables) */ | 146 | /* FAN0 information (reverse engineered from ACPI tables) */ |
147 | #define SONYPI_FAN0_STATUS 0x93 | 147 | #define SONYPI_FAN0_STATUS 0x93 |
148 | #define SONYPI_TEMP_STATUS 0xC1 | 148 | #define SONYPI_TEMP_STATUS 0xC1 |
149 | 149 | ||
150 | /* ioports used for brightness and type2 events */ | 150 | /* ioports used for brightness and type2 events */ |
151 | #define SONYPI_DATA_IOPORT 0x62 | 151 | #define SONYPI_DATA_IOPORT 0x62 |
152 | #define SONYPI_CST_IOPORT 0x66 | 152 | #define SONYPI_CST_IOPORT 0x66 |
153 | 153 | ||
154 | /* The set of possible ioports */ | 154 | /* The set of possible ioports */ |
155 | struct sonypi_ioport_list { | 155 | struct sonypi_ioport_list { |
156 | u16 port1; | 156 | u16 port1; |
157 | u16 port2; | 157 | u16 port2; |
158 | }; | 158 | }; |
159 | 159 | ||
160 | static struct sonypi_ioport_list sonypi_type1_ioport_list[] = { | 160 | static struct sonypi_ioport_list sonypi_type1_ioport_list[] = { |
161 | { 0x10c0, 0x10c4 }, /* looks like the default on C1Vx */ | 161 | { 0x10c0, 0x10c4 }, /* looks like the default on C1Vx */ |
162 | { 0x1080, 0x1084 }, | 162 | { 0x1080, 0x1084 }, |
163 | { 0x1090, 0x1094 }, | 163 | { 0x1090, 0x1094 }, |
164 | { 0x10a0, 0x10a4 }, | 164 | { 0x10a0, 0x10a4 }, |
165 | { 0x10b0, 0x10b4 }, | 165 | { 0x10b0, 0x10b4 }, |
166 | { 0x0, 0x0 } | 166 | { 0x0, 0x0 } |
167 | }; | 167 | }; |
168 | 168 | ||
169 | static struct sonypi_ioport_list sonypi_type2_ioport_list[] = { | 169 | static struct sonypi_ioport_list sonypi_type2_ioport_list[] = { |
170 | { 0x1080, 0x1084 }, | 170 | { 0x1080, 0x1084 }, |
171 | { 0x10a0, 0x10a4 }, | 171 | { 0x10a0, 0x10a4 }, |
172 | { 0x10c0, 0x10c4 }, | 172 | { 0x10c0, 0x10c4 }, |
173 | { 0x10e0, 0x10e4 }, | 173 | { 0x10e0, 0x10e4 }, |
174 | { 0x0, 0x0 } | 174 | { 0x0, 0x0 } |
175 | }; | 175 | }; |
176 | 176 | ||
177 | /* same as in type 2 models */ | 177 | /* same as in type 2 models */ |
178 | static struct sonypi_ioport_list *sonypi_type3_ioport_list = | 178 | static struct sonypi_ioport_list *sonypi_type3_ioport_list = |
179 | sonypi_type2_ioport_list; | 179 | sonypi_type2_ioport_list; |
180 | 180 | ||
181 | /* The set of possible interrupts */ | 181 | /* The set of possible interrupts */ |
182 | struct sonypi_irq_list { | 182 | struct sonypi_irq_list { |
183 | u16 irq; | 183 | u16 irq; |
184 | u16 bits; | 184 | u16 bits; |
185 | }; | 185 | }; |
186 | 186 | ||
187 | static struct sonypi_irq_list sonypi_type1_irq_list[] = { | 187 | static struct sonypi_irq_list sonypi_type1_irq_list[] = { |
188 | { 11, 0x2 }, /* IRQ 11, GO22=0,GO23=1 in AML */ | 188 | { 11, 0x2 }, /* IRQ 11, GO22=0,GO23=1 in AML */ |
189 | { 10, 0x1 }, /* IRQ 10, GO22=1,GO23=0 in AML */ | 189 | { 10, 0x1 }, /* IRQ 10, GO22=1,GO23=0 in AML */ |
190 | { 5, 0x0 }, /* IRQ 5, GO22=0,GO23=0 in AML */ | 190 | { 5, 0x0 }, /* IRQ 5, GO22=0,GO23=0 in AML */ |
191 | { 0, 0x3 } /* no IRQ, GO22=1,GO23=1 in AML */ | 191 | { 0, 0x3 } /* no IRQ, GO22=1,GO23=1 in AML */ |
192 | }; | 192 | }; |
193 | 193 | ||
194 | static struct sonypi_irq_list sonypi_type2_irq_list[] = { | 194 | static struct sonypi_irq_list sonypi_type2_irq_list[] = { |
195 | { 11, 0x80 }, /* IRQ 11, 0x80 in SIRQ in AML */ | 195 | { 11, 0x80 }, /* IRQ 11, 0x80 in SIRQ in AML */ |
196 | { 10, 0x40 }, /* IRQ 10, 0x40 in SIRQ in AML */ | 196 | { 10, 0x40 }, /* IRQ 10, 0x40 in SIRQ in AML */ |
197 | { 9, 0x20 }, /* IRQ 9, 0x20 in SIRQ in AML */ | 197 | { 9, 0x20 }, /* IRQ 9, 0x20 in SIRQ in AML */ |
198 | { 6, 0x10 }, /* IRQ 6, 0x10 in SIRQ in AML */ | 198 | { 6, 0x10 }, /* IRQ 6, 0x10 in SIRQ in AML */ |
199 | { 0, 0x00 } /* no IRQ, 0x00 in SIRQ in AML */ | 199 | { 0, 0x00 } /* no IRQ, 0x00 in SIRQ in AML */ |
200 | }; | 200 | }; |
201 | 201 | ||
202 | /* same as in type2 models */ | 202 | /* same as in type2 models */ |
203 | static struct sonypi_irq_list *sonypi_type3_irq_list = sonypi_type2_irq_list; | 203 | static struct sonypi_irq_list *sonypi_type3_irq_list = sonypi_type2_irq_list; |
204 | 204 | ||
205 | #define SONYPI_CAMERA_BRIGHTNESS 0 | 205 | #define SONYPI_CAMERA_BRIGHTNESS 0 |
206 | #define SONYPI_CAMERA_CONTRAST 1 | 206 | #define SONYPI_CAMERA_CONTRAST 1 |
207 | #define SONYPI_CAMERA_HUE 2 | 207 | #define SONYPI_CAMERA_HUE 2 |
208 | #define SONYPI_CAMERA_COLOR 3 | 208 | #define SONYPI_CAMERA_COLOR 3 |
209 | #define SONYPI_CAMERA_SHARPNESS 4 | 209 | #define SONYPI_CAMERA_SHARPNESS 4 |
210 | 210 | ||
211 | #define SONYPI_CAMERA_PICTURE 5 | 211 | #define SONYPI_CAMERA_PICTURE 5 |
212 | #define SONYPI_CAMERA_EXPOSURE_MASK 0xC | 212 | #define SONYPI_CAMERA_EXPOSURE_MASK 0xC |
213 | #define SONYPI_CAMERA_WHITE_BALANCE_MASK 0x3 | 213 | #define SONYPI_CAMERA_WHITE_BALANCE_MASK 0x3 |
214 | #define SONYPI_CAMERA_PICTURE_MODE_MASK 0x30 | 214 | #define SONYPI_CAMERA_PICTURE_MODE_MASK 0x30 |
215 | #define SONYPI_CAMERA_MUTE_MASK 0x40 | 215 | #define SONYPI_CAMERA_MUTE_MASK 0x40 |
216 | 216 | ||
217 | /* the rest don't need a loop until not 0xff */ | 217 | /* the rest don't need a loop until not 0xff */ |
218 | #define SONYPI_CAMERA_AGC 6 | 218 | #define SONYPI_CAMERA_AGC 6 |
219 | #define SONYPI_CAMERA_AGC_MASK 0x30 | 219 | #define SONYPI_CAMERA_AGC_MASK 0x30 |
220 | #define SONYPI_CAMERA_SHUTTER_MASK 0x7 | 220 | #define SONYPI_CAMERA_SHUTTER_MASK 0x7 |
221 | 221 | ||
222 | #define SONYPI_CAMERA_SHUTDOWN_REQUEST 7 | 222 | #define SONYPI_CAMERA_SHUTDOWN_REQUEST 7 |
223 | #define SONYPI_CAMERA_CONTROL 0x10 | 223 | #define SONYPI_CAMERA_CONTROL 0x10 |
224 | 224 | ||
225 | #define SONYPI_CAMERA_STATUS 7 | 225 | #define SONYPI_CAMERA_STATUS 7 |
226 | #define SONYPI_CAMERA_STATUS_READY 0x2 | 226 | #define SONYPI_CAMERA_STATUS_READY 0x2 |
227 | #define SONYPI_CAMERA_STATUS_POSITION 0x4 | 227 | #define SONYPI_CAMERA_STATUS_POSITION 0x4 |
228 | 228 | ||
229 | #define SONYPI_DIRECTION_BACKWARDS 0x4 | 229 | #define SONYPI_DIRECTION_BACKWARDS 0x4 |
230 | 230 | ||
231 | #define SONYPI_CAMERA_REVISION 8 | 231 | #define SONYPI_CAMERA_REVISION 8 |
232 | #define SONYPI_CAMERA_ROMVERSION 9 | 232 | #define SONYPI_CAMERA_ROMVERSION 9 |
233 | 233 | ||
234 | /* Event masks */ | 234 | /* Event masks */ |
235 | #define SONYPI_JOGGER_MASK 0x00000001 | 235 | #define SONYPI_JOGGER_MASK 0x00000001 |
236 | #define SONYPI_CAPTURE_MASK 0x00000002 | 236 | #define SONYPI_CAPTURE_MASK 0x00000002 |
237 | #define SONYPI_FNKEY_MASK 0x00000004 | 237 | #define SONYPI_FNKEY_MASK 0x00000004 |
238 | #define SONYPI_BLUETOOTH_MASK 0x00000008 | 238 | #define SONYPI_BLUETOOTH_MASK 0x00000008 |
239 | #define SONYPI_PKEY_MASK 0x00000010 | 239 | #define SONYPI_PKEY_MASK 0x00000010 |
240 | #define SONYPI_BACK_MASK 0x00000020 | 240 | #define SONYPI_BACK_MASK 0x00000020 |
241 | #define SONYPI_HELP_MASK 0x00000040 | 241 | #define SONYPI_HELP_MASK 0x00000040 |
242 | #define SONYPI_LID_MASK 0x00000080 | 242 | #define SONYPI_LID_MASK 0x00000080 |
243 | #define SONYPI_ZOOM_MASK 0x00000100 | 243 | #define SONYPI_ZOOM_MASK 0x00000100 |
244 | #define SONYPI_THUMBPHRASE_MASK 0x00000200 | 244 | #define SONYPI_THUMBPHRASE_MASK 0x00000200 |
245 | #define SONYPI_MEYE_MASK 0x00000400 | 245 | #define SONYPI_MEYE_MASK 0x00000400 |
246 | #define SONYPI_MEMORYSTICK_MASK 0x00000800 | 246 | #define SONYPI_MEMORYSTICK_MASK 0x00000800 |
247 | #define SONYPI_BATTERY_MASK 0x00001000 | 247 | #define SONYPI_BATTERY_MASK 0x00001000 |
248 | #define SONYPI_WIRELESS_MASK 0x00002000 | 248 | #define SONYPI_WIRELESS_MASK 0x00002000 |
249 | 249 | ||
250 | struct sonypi_event { | 250 | struct sonypi_event { |
251 | u8 data; | 251 | u8 data; |
252 | u8 event; | 252 | u8 event; |
253 | }; | 253 | }; |
254 | 254 | ||
255 | /* The set of possible button release events */ | 255 | /* The set of possible button release events */ |
256 | static struct sonypi_event sonypi_releaseev[] = { | 256 | static struct sonypi_event sonypi_releaseev[] = { |
257 | { 0x00, SONYPI_EVENT_ANYBUTTON_RELEASED }, | 257 | { 0x00, SONYPI_EVENT_ANYBUTTON_RELEASED }, |
258 | { 0, 0 } | 258 | { 0, 0 } |
259 | }; | 259 | }; |
260 | 260 | ||
261 | /* The set of possible jogger events */ | 261 | /* The set of possible jogger events */ |
262 | static struct sonypi_event sonypi_joggerev[] = { | 262 | static struct sonypi_event sonypi_joggerev[] = { |
263 | { 0x1f, SONYPI_EVENT_JOGDIAL_UP }, | 263 | { 0x1f, SONYPI_EVENT_JOGDIAL_UP }, |
264 | { 0x01, SONYPI_EVENT_JOGDIAL_DOWN }, | 264 | { 0x01, SONYPI_EVENT_JOGDIAL_DOWN }, |
265 | { 0x5f, SONYPI_EVENT_JOGDIAL_UP_PRESSED }, | 265 | { 0x5f, SONYPI_EVENT_JOGDIAL_UP_PRESSED }, |
266 | { 0x41, SONYPI_EVENT_JOGDIAL_DOWN_PRESSED }, | 266 | { 0x41, SONYPI_EVENT_JOGDIAL_DOWN_PRESSED }, |
267 | { 0x1e, SONYPI_EVENT_JOGDIAL_FAST_UP }, | 267 | { 0x1e, SONYPI_EVENT_JOGDIAL_FAST_UP }, |
268 | { 0x02, SONYPI_EVENT_JOGDIAL_FAST_DOWN }, | 268 | { 0x02, SONYPI_EVENT_JOGDIAL_FAST_DOWN }, |
269 | { 0x5e, SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED }, | 269 | { 0x5e, SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED }, |
270 | { 0x42, SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED }, | 270 | { 0x42, SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED }, |
271 | { 0x1d, SONYPI_EVENT_JOGDIAL_VFAST_UP }, | 271 | { 0x1d, SONYPI_EVENT_JOGDIAL_VFAST_UP }, |
272 | { 0x03, SONYPI_EVENT_JOGDIAL_VFAST_DOWN }, | 272 | { 0x03, SONYPI_EVENT_JOGDIAL_VFAST_DOWN }, |
273 | { 0x5d, SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED }, | 273 | { 0x5d, SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED }, |
274 | { 0x43, SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED }, | 274 | { 0x43, SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED }, |
275 | { 0x40, SONYPI_EVENT_JOGDIAL_PRESSED }, | 275 | { 0x40, SONYPI_EVENT_JOGDIAL_PRESSED }, |
276 | { 0, 0 } | 276 | { 0, 0 } |
277 | }; | 277 | }; |
278 | 278 | ||
279 | /* The set of possible capture button events */ | 279 | /* The set of possible capture button events */ |
280 | static struct sonypi_event sonypi_captureev[] = { | 280 | static struct sonypi_event sonypi_captureev[] = { |
281 | { 0x05, SONYPI_EVENT_CAPTURE_PARTIALPRESSED }, | 281 | { 0x05, SONYPI_EVENT_CAPTURE_PARTIALPRESSED }, |
282 | { 0x07, SONYPI_EVENT_CAPTURE_PRESSED }, | 282 | { 0x07, SONYPI_EVENT_CAPTURE_PRESSED }, |
283 | { 0x01, SONYPI_EVENT_CAPTURE_PARTIALRELEASED }, | 283 | { 0x01, SONYPI_EVENT_CAPTURE_PARTIALRELEASED }, |
284 | { 0, 0 } | 284 | { 0, 0 } |
285 | }; | 285 | }; |
286 | 286 | ||
287 | /* The set of possible fnkeys events */ | 287 | /* The set of possible fnkeys events */ |
288 | static struct sonypi_event sonypi_fnkeyev[] = { | 288 | static struct sonypi_event sonypi_fnkeyev[] = { |
289 | { 0x10, SONYPI_EVENT_FNKEY_ESC }, | 289 | { 0x10, SONYPI_EVENT_FNKEY_ESC }, |
290 | { 0x11, SONYPI_EVENT_FNKEY_F1 }, | 290 | { 0x11, SONYPI_EVENT_FNKEY_F1 }, |
291 | { 0x12, SONYPI_EVENT_FNKEY_F2 }, | 291 | { 0x12, SONYPI_EVENT_FNKEY_F2 }, |
292 | { 0x13, SONYPI_EVENT_FNKEY_F3 }, | 292 | { 0x13, SONYPI_EVENT_FNKEY_F3 }, |
293 | { 0x14, SONYPI_EVENT_FNKEY_F4 }, | 293 | { 0x14, SONYPI_EVENT_FNKEY_F4 }, |
294 | { 0x15, SONYPI_EVENT_FNKEY_F5 }, | 294 | { 0x15, SONYPI_EVENT_FNKEY_F5 }, |
295 | { 0x16, SONYPI_EVENT_FNKEY_F6 }, | 295 | { 0x16, SONYPI_EVENT_FNKEY_F6 }, |
296 | { 0x17, SONYPI_EVENT_FNKEY_F7 }, | 296 | { 0x17, SONYPI_EVENT_FNKEY_F7 }, |
297 | { 0x18, SONYPI_EVENT_FNKEY_F8 }, | 297 | { 0x18, SONYPI_EVENT_FNKEY_F8 }, |
298 | { 0x19, SONYPI_EVENT_FNKEY_F9 }, | 298 | { 0x19, SONYPI_EVENT_FNKEY_F9 }, |
299 | { 0x1a, SONYPI_EVENT_FNKEY_F10 }, | 299 | { 0x1a, SONYPI_EVENT_FNKEY_F10 }, |
300 | { 0x1b, SONYPI_EVENT_FNKEY_F11 }, | 300 | { 0x1b, SONYPI_EVENT_FNKEY_F11 }, |
301 | { 0x1c, SONYPI_EVENT_FNKEY_F12 }, | 301 | { 0x1c, SONYPI_EVENT_FNKEY_F12 }, |
302 | { 0x1f, SONYPI_EVENT_FNKEY_RELEASED }, | 302 | { 0x1f, SONYPI_EVENT_FNKEY_RELEASED }, |
303 | { 0x21, SONYPI_EVENT_FNKEY_1 }, | 303 | { 0x21, SONYPI_EVENT_FNKEY_1 }, |
304 | { 0x22, SONYPI_EVENT_FNKEY_2 }, | 304 | { 0x22, SONYPI_EVENT_FNKEY_2 }, |
305 | { 0x31, SONYPI_EVENT_FNKEY_D }, | 305 | { 0x31, SONYPI_EVENT_FNKEY_D }, |
306 | { 0x32, SONYPI_EVENT_FNKEY_E }, | 306 | { 0x32, SONYPI_EVENT_FNKEY_E }, |
307 | { 0x33, SONYPI_EVENT_FNKEY_F }, | 307 | { 0x33, SONYPI_EVENT_FNKEY_F }, |
308 | { 0x34, SONYPI_EVENT_FNKEY_S }, | 308 | { 0x34, SONYPI_EVENT_FNKEY_S }, |
309 | { 0x35, SONYPI_EVENT_FNKEY_B }, | 309 | { 0x35, SONYPI_EVENT_FNKEY_B }, |
310 | { 0x36, SONYPI_EVENT_FNKEY_ONLY }, | 310 | { 0x36, SONYPI_EVENT_FNKEY_ONLY }, |
311 | { 0, 0 } | 311 | { 0, 0 } |
312 | }; | 312 | }; |
313 | 313 | ||
314 | /* The set of possible program key events */ | 314 | /* The set of possible program key events */ |
315 | static struct sonypi_event sonypi_pkeyev[] = { | 315 | static struct sonypi_event sonypi_pkeyev[] = { |
316 | { 0x01, SONYPI_EVENT_PKEY_P1 }, | 316 | { 0x01, SONYPI_EVENT_PKEY_P1 }, |
317 | { 0x02, SONYPI_EVENT_PKEY_P2 }, | 317 | { 0x02, SONYPI_EVENT_PKEY_P2 }, |
318 | { 0x04, SONYPI_EVENT_PKEY_P3 }, | 318 | { 0x04, SONYPI_EVENT_PKEY_P3 }, |
319 | { 0x5c, SONYPI_EVENT_PKEY_P1 }, | 319 | { 0x5c, SONYPI_EVENT_PKEY_P1 }, |
320 | { 0, 0 } | 320 | { 0, 0 } |
321 | }; | 321 | }; |
322 | 322 | ||
323 | /* The set of possible bluetooth events */ | 323 | /* The set of possible bluetooth events */ |
324 | static struct sonypi_event sonypi_blueev[] = { | 324 | static struct sonypi_event sonypi_blueev[] = { |
325 | { 0x55, SONYPI_EVENT_BLUETOOTH_PRESSED }, | 325 | { 0x55, SONYPI_EVENT_BLUETOOTH_PRESSED }, |
326 | { 0x59, SONYPI_EVENT_BLUETOOTH_ON }, | 326 | { 0x59, SONYPI_EVENT_BLUETOOTH_ON }, |
327 | { 0x5a, SONYPI_EVENT_BLUETOOTH_OFF }, | 327 | { 0x5a, SONYPI_EVENT_BLUETOOTH_OFF }, |
328 | { 0, 0 } | 328 | { 0, 0 } |
329 | }; | 329 | }; |
330 | 330 | ||
331 | /* The set of possible wireless events */ | 331 | /* The set of possible wireless events */ |
332 | static struct sonypi_event sonypi_wlessev[] = { | 332 | static struct sonypi_event sonypi_wlessev[] = { |
333 | { 0x59, SONYPI_EVENT_WIRELESS_ON }, | 333 | { 0x59, SONYPI_EVENT_WIRELESS_ON }, |
334 | { 0x5a, SONYPI_EVENT_WIRELESS_OFF }, | 334 | { 0x5a, SONYPI_EVENT_WIRELESS_OFF }, |
335 | { 0, 0 } | 335 | { 0, 0 } |
336 | }; | 336 | }; |
337 | 337 | ||
338 | /* The set of possible back button events */ | 338 | /* The set of possible back button events */ |
339 | static struct sonypi_event sonypi_backev[] = { | 339 | static struct sonypi_event sonypi_backev[] = { |
340 | { 0x20, SONYPI_EVENT_BACK_PRESSED }, | 340 | { 0x20, SONYPI_EVENT_BACK_PRESSED }, |
341 | { 0, 0 } | 341 | { 0, 0 } |
342 | }; | 342 | }; |
343 | 343 | ||
344 | /* The set of possible help button events */ | 344 | /* The set of possible help button events */ |
345 | static struct sonypi_event sonypi_helpev[] = { | 345 | static struct sonypi_event sonypi_helpev[] = { |
346 | { 0x3b, SONYPI_EVENT_HELP_PRESSED }, | 346 | { 0x3b, SONYPI_EVENT_HELP_PRESSED }, |
347 | { 0, 0 } | 347 | { 0, 0 } |
348 | }; | 348 | }; |
349 | 349 | ||
350 | 350 | ||
351 | /* The set of possible lid events */ | 351 | /* The set of possible lid events */ |
352 | static struct sonypi_event sonypi_lidev[] = { | 352 | static struct sonypi_event sonypi_lidev[] = { |
353 | { 0x51, SONYPI_EVENT_LID_CLOSED }, | 353 | { 0x51, SONYPI_EVENT_LID_CLOSED }, |
354 | { 0x50, SONYPI_EVENT_LID_OPENED }, | 354 | { 0x50, SONYPI_EVENT_LID_OPENED }, |
355 | { 0, 0 } | 355 | { 0, 0 } |
356 | }; | 356 | }; |
357 | 357 | ||
358 | /* The set of possible zoom events */ | 358 | /* The set of possible zoom events */ |
359 | static struct sonypi_event sonypi_zoomev[] = { | 359 | static struct sonypi_event sonypi_zoomev[] = { |
360 | { 0x39, SONYPI_EVENT_ZOOM_PRESSED }, | 360 | { 0x39, SONYPI_EVENT_ZOOM_PRESSED }, |
361 | { 0, 0 } | 361 | { 0, 0 } |
362 | }; | 362 | }; |
363 | 363 | ||
364 | /* The set of possible thumbphrase events */ | 364 | /* The set of possible thumbphrase events */ |
365 | static struct sonypi_event sonypi_thumbphraseev[] = { | 365 | static struct sonypi_event sonypi_thumbphraseev[] = { |
366 | { 0x3a, SONYPI_EVENT_THUMBPHRASE_PRESSED }, | 366 | { 0x3a, SONYPI_EVENT_THUMBPHRASE_PRESSED }, |
367 | { 0, 0 } | 367 | { 0, 0 } |
368 | }; | 368 | }; |
369 | 369 | ||
370 | /* The set of possible motioneye camera events */ | 370 | /* The set of possible motioneye camera events */ |
371 | static struct sonypi_event sonypi_meyeev[] = { | 371 | static struct sonypi_event sonypi_meyeev[] = { |
372 | { 0x00, SONYPI_EVENT_MEYE_FACE }, | 372 | { 0x00, SONYPI_EVENT_MEYE_FACE }, |
373 | { 0x01, SONYPI_EVENT_MEYE_OPPOSITE }, | 373 | { 0x01, SONYPI_EVENT_MEYE_OPPOSITE }, |
374 | { 0, 0 } | 374 | { 0, 0 } |
375 | }; | 375 | }; |
376 | 376 | ||
377 | /* The set of possible memorystick events */ | 377 | /* The set of possible memorystick events */ |
378 | static struct sonypi_event sonypi_memorystickev[] = { | 378 | static struct sonypi_event sonypi_memorystickev[] = { |
379 | { 0x53, SONYPI_EVENT_MEMORYSTICK_INSERT }, | 379 | { 0x53, SONYPI_EVENT_MEMORYSTICK_INSERT }, |
380 | { 0x54, SONYPI_EVENT_MEMORYSTICK_EJECT }, | 380 | { 0x54, SONYPI_EVENT_MEMORYSTICK_EJECT }, |
381 | { 0, 0 } | 381 | { 0, 0 } |
382 | }; | 382 | }; |
383 | 383 | ||
384 | /* The set of possible battery events */ | 384 | /* The set of possible battery events */ |
385 | static struct sonypi_event sonypi_batteryev[] = { | 385 | static struct sonypi_event sonypi_batteryev[] = { |
386 | { 0x20, SONYPI_EVENT_BATTERY_INSERT }, | 386 | { 0x20, SONYPI_EVENT_BATTERY_INSERT }, |
387 | { 0x30, SONYPI_EVENT_BATTERY_REMOVE }, | 387 | { 0x30, SONYPI_EVENT_BATTERY_REMOVE }, |
388 | { 0, 0 } | 388 | { 0, 0 } |
389 | }; | 389 | }; |
390 | 390 | ||
391 | static struct sonypi_eventtypes { | 391 | static struct sonypi_eventtypes { |
392 | int model; | 392 | int model; |
393 | u8 data; | 393 | u8 data; |
394 | unsigned long mask; | 394 | unsigned long mask; |
395 | struct sonypi_event * events; | 395 | struct sonypi_event * events; |
396 | } sonypi_eventtypes[] = { | 396 | } sonypi_eventtypes[] = { |
397 | { SONYPI_DEVICE_MODEL_TYPE1, 0, 0xffffffff, sonypi_releaseev }, | 397 | { SONYPI_DEVICE_MODEL_TYPE1, 0, 0xffffffff, sonypi_releaseev }, |
398 | { SONYPI_DEVICE_MODEL_TYPE1, 0x70, SONYPI_MEYE_MASK, sonypi_meyeev }, | 398 | { SONYPI_DEVICE_MODEL_TYPE1, 0x70, SONYPI_MEYE_MASK, sonypi_meyeev }, |
399 | { SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_LID_MASK, sonypi_lidev }, | 399 | { SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_LID_MASK, sonypi_lidev }, |
400 | { SONYPI_DEVICE_MODEL_TYPE1, 0x60, SONYPI_CAPTURE_MASK, sonypi_captureev }, | 400 | { SONYPI_DEVICE_MODEL_TYPE1, 0x60, SONYPI_CAPTURE_MASK, sonypi_captureev }, |
401 | { SONYPI_DEVICE_MODEL_TYPE1, 0x10, SONYPI_JOGGER_MASK, sonypi_joggerev }, | 401 | { SONYPI_DEVICE_MODEL_TYPE1, 0x10, SONYPI_JOGGER_MASK, sonypi_joggerev }, |
402 | { SONYPI_DEVICE_MODEL_TYPE1, 0x20, SONYPI_FNKEY_MASK, sonypi_fnkeyev }, | 402 | { SONYPI_DEVICE_MODEL_TYPE1, 0x20, SONYPI_FNKEY_MASK, sonypi_fnkeyev }, |
403 | { SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_BLUETOOTH_MASK, sonypi_blueev }, | 403 | { SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_BLUETOOTH_MASK, sonypi_blueev }, |
404 | { SONYPI_DEVICE_MODEL_TYPE1, 0x40, SONYPI_PKEY_MASK, sonypi_pkeyev }, | 404 | { SONYPI_DEVICE_MODEL_TYPE1, 0x40, SONYPI_PKEY_MASK, sonypi_pkeyev }, |
405 | { SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev }, | 405 | { SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev }, |
406 | { SONYPI_DEVICE_MODEL_TYPE1, 0x40, SONYPI_BATTERY_MASK, sonypi_batteryev }, | 406 | { SONYPI_DEVICE_MODEL_TYPE1, 0x40, SONYPI_BATTERY_MASK, sonypi_batteryev }, |
407 | 407 | ||
408 | { SONYPI_DEVICE_MODEL_TYPE2, 0, 0xffffffff, sonypi_releaseev }, | 408 | { SONYPI_DEVICE_MODEL_TYPE2, 0, 0xffffffff, sonypi_releaseev }, |
409 | { SONYPI_DEVICE_MODEL_TYPE2, 0x38, SONYPI_LID_MASK, sonypi_lidev }, | 409 | { SONYPI_DEVICE_MODEL_TYPE2, 0x38, SONYPI_LID_MASK, sonypi_lidev }, |
410 | { SONYPI_DEVICE_MODEL_TYPE2, 0x11, SONYPI_JOGGER_MASK, sonypi_joggerev }, | 410 | { SONYPI_DEVICE_MODEL_TYPE2, 0x11, SONYPI_JOGGER_MASK, sonypi_joggerev }, |
411 | { SONYPI_DEVICE_MODEL_TYPE2, 0x61, SONYPI_CAPTURE_MASK, sonypi_captureev }, | 411 | { SONYPI_DEVICE_MODEL_TYPE2, 0x61, SONYPI_CAPTURE_MASK, sonypi_captureev }, |
412 | { SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev }, | 412 | { SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev }, |
413 | { SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_BLUETOOTH_MASK, sonypi_blueev }, | 413 | { SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_BLUETOOTH_MASK, sonypi_blueev }, |
414 | { SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_PKEY_MASK, sonypi_pkeyev }, | 414 | { SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_PKEY_MASK, sonypi_pkeyev }, |
415 | { SONYPI_DEVICE_MODEL_TYPE2, 0x11, SONYPI_BACK_MASK, sonypi_backev }, | 415 | { SONYPI_DEVICE_MODEL_TYPE2, 0x11, SONYPI_BACK_MASK, sonypi_backev }, |
416 | { SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_HELP_MASK, sonypi_helpev }, | 416 | { SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_HELP_MASK, sonypi_helpev }, |
417 | { SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_ZOOM_MASK, sonypi_zoomev }, | 417 | { SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_ZOOM_MASK, sonypi_zoomev }, |
418 | { SONYPI_DEVICE_MODEL_TYPE2, 0x20, SONYPI_THUMBPHRASE_MASK, sonypi_thumbphraseev }, | 418 | { SONYPI_DEVICE_MODEL_TYPE2, 0x20, SONYPI_THUMBPHRASE_MASK, sonypi_thumbphraseev }, |
419 | { SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev }, | 419 | { SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev }, |
420 | { SONYPI_DEVICE_MODEL_TYPE2, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev }, | 420 | { SONYPI_DEVICE_MODEL_TYPE2, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev }, |
421 | { SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev }, | 421 | { SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev }, |
422 | 422 | ||
423 | { SONYPI_DEVICE_MODEL_TYPE3, 0, 0xffffffff, sonypi_releaseev }, | 423 | { SONYPI_DEVICE_MODEL_TYPE3, 0, 0xffffffff, sonypi_releaseev }, |
424 | { SONYPI_DEVICE_MODEL_TYPE3, 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev }, | 424 | { SONYPI_DEVICE_MODEL_TYPE3, 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev }, |
425 | { SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_WIRELESS_MASK, sonypi_wlessev }, | 425 | { SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_WIRELESS_MASK, sonypi_wlessev }, |
426 | { SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev }, | 426 | { SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev }, |
427 | { SONYPI_DEVICE_MODEL_TYPE3, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev }, | 427 | { SONYPI_DEVICE_MODEL_TYPE3, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev }, |
428 | { SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev }, | 428 | { SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev }, |
429 | { 0 } | 429 | { 0 } |
430 | }; | 430 | }; |
431 | 431 | ||
432 | #define SONYPI_BUF_SIZE 128 | 432 | #define SONYPI_BUF_SIZE 128 |
433 | 433 | ||
434 | /* Correspondence table between sonypi events and input layer events */ | 434 | /* Correspondence table between sonypi events and input layer events */ |
435 | static struct { | 435 | static struct { |
436 | int sonypiev; | 436 | int sonypiev; |
437 | int inputev; | 437 | int inputev; |
438 | } sonypi_inputkeys[] = { | 438 | } sonypi_inputkeys[] = { |
439 | { SONYPI_EVENT_CAPTURE_PRESSED, KEY_CAMERA }, | 439 | { SONYPI_EVENT_CAPTURE_PRESSED, KEY_CAMERA }, |
440 | { SONYPI_EVENT_FNKEY_ONLY, KEY_FN }, | 440 | { SONYPI_EVENT_FNKEY_ONLY, KEY_FN }, |
441 | { SONYPI_EVENT_FNKEY_ESC, KEY_FN_ESC }, | 441 | { SONYPI_EVENT_FNKEY_ESC, KEY_FN_ESC }, |
442 | { SONYPI_EVENT_FNKEY_F1, KEY_FN_F1 }, | 442 | { SONYPI_EVENT_FNKEY_F1, KEY_FN_F1 }, |
443 | { SONYPI_EVENT_FNKEY_F2, KEY_FN_F2 }, | 443 | { SONYPI_EVENT_FNKEY_F2, KEY_FN_F2 }, |
444 | { SONYPI_EVENT_FNKEY_F3, KEY_FN_F3 }, | 444 | { SONYPI_EVENT_FNKEY_F3, KEY_FN_F3 }, |
445 | { SONYPI_EVENT_FNKEY_F4, KEY_FN_F4 }, | 445 | { SONYPI_EVENT_FNKEY_F4, KEY_FN_F4 }, |
446 | { SONYPI_EVENT_FNKEY_F5, KEY_FN_F5 }, | 446 | { SONYPI_EVENT_FNKEY_F5, KEY_FN_F5 }, |
447 | { SONYPI_EVENT_FNKEY_F6, KEY_FN_F6 }, | 447 | { SONYPI_EVENT_FNKEY_F6, KEY_FN_F6 }, |
448 | { SONYPI_EVENT_FNKEY_F7, KEY_FN_F7 }, | 448 | { SONYPI_EVENT_FNKEY_F7, KEY_FN_F7 }, |
449 | { SONYPI_EVENT_FNKEY_F8, KEY_FN_F8 }, | 449 | { SONYPI_EVENT_FNKEY_F8, KEY_FN_F8 }, |
450 | { SONYPI_EVENT_FNKEY_F9, KEY_FN_F9 }, | 450 | { SONYPI_EVENT_FNKEY_F9, KEY_FN_F9 }, |
451 | { SONYPI_EVENT_FNKEY_F10, KEY_FN_F10 }, | 451 | { SONYPI_EVENT_FNKEY_F10, KEY_FN_F10 }, |
452 | { SONYPI_EVENT_FNKEY_F11, KEY_FN_F11 }, | 452 | { SONYPI_EVENT_FNKEY_F11, KEY_FN_F11 }, |
453 | { SONYPI_EVENT_FNKEY_F12, KEY_FN_F12 }, | 453 | { SONYPI_EVENT_FNKEY_F12, KEY_FN_F12 }, |
454 | { SONYPI_EVENT_FNKEY_1, KEY_FN_1 }, | 454 | { SONYPI_EVENT_FNKEY_1, KEY_FN_1 }, |
455 | { SONYPI_EVENT_FNKEY_2, KEY_FN_2 }, | 455 | { SONYPI_EVENT_FNKEY_2, KEY_FN_2 }, |
456 | { SONYPI_EVENT_FNKEY_D, KEY_FN_D }, | 456 | { SONYPI_EVENT_FNKEY_D, KEY_FN_D }, |
457 | { SONYPI_EVENT_FNKEY_E, KEY_FN_E }, | 457 | { SONYPI_EVENT_FNKEY_E, KEY_FN_E }, |
458 | { SONYPI_EVENT_FNKEY_F, KEY_FN_F }, | 458 | { SONYPI_EVENT_FNKEY_F, KEY_FN_F }, |
459 | { SONYPI_EVENT_FNKEY_S, KEY_FN_S }, | 459 | { SONYPI_EVENT_FNKEY_S, KEY_FN_S }, |
460 | { SONYPI_EVENT_FNKEY_B, KEY_FN_B }, | 460 | { SONYPI_EVENT_FNKEY_B, KEY_FN_B }, |
461 | { SONYPI_EVENT_BLUETOOTH_PRESSED, KEY_BLUE }, | 461 | { SONYPI_EVENT_BLUETOOTH_PRESSED, KEY_BLUE }, |
462 | { SONYPI_EVENT_BLUETOOTH_ON, KEY_BLUE }, | 462 | { SONYPI_EVENT_BLUETOOTH_ON, KEY_BLUE }, |
463 | { SONYPI_EVENT_PKEY_P1, KEY_PROG1 }, | 463 | { SONYPI_EVENT_PKEY_P1, KEY_PROG1 }, |
464 | { SONYPI_EVENT_PKEY_P2, KEY_PROG2 }, | 464 | { SONYPI_EVENT_PKEY_P2, KEY_PROG2 }, |
465 | { SONYPI_EVENT_PKEY_P3, KEY_PROG3 }, | 465 | { SONYPI_EVENT_PKEY_P3, KEY_PROG3 }, |
466 | { SONYPI_EVENT_BACK_PRESSED, KEY_BACK }, | 466 | { SONYPI_EVENT_BACK_PRESSED, KEY_BACK }, |
467 | { SONYPI_EVENT_HELP_PRESSED, KEY_HELP }, | 467 | { SONYPI_EVENT_HELP_PRESSED, KEY_HELP }, |
468 | { SONYPI_EVENT_ZOOM_PRESSED, KEY_ZOOM }, | 468 | { SONYPI_EVENT_ZOOM_PRESSED, KEY_ZOOM }, |
469 | { SONYPI_EVENT_THUMBPHRASE_PRESSED, BTN_THUMB }, | 469 | { SONYPI_EVENT_THUMBPHRASE_PRESSED, BTN_THUMB }, |
470 | { 0, 0 }, | 470 | { 0, 0 }, |
471 | }; | 471 | }; |
472 | 472 | ||
473 | struct sonypi_keypress { | 473 | struct sonypi_keypress { |
474 | struct input_dev *dev; | 474 | struct input_dev *dev; |
475 | int key; | 475 | int key; |
476 | }; | 476 | }; |
477 | 477 | ||
478 | static struct sonypi_device { | 478 | static struct sonypi_device { |
479 | struct pci_dev *dev; | 479 | struct pci_dev *dev; |
480 | u16 irq; | 480 | u16 irq; |
481 | u16 bits; | 481 | u16 bits; |
482 | u16 ioport1; | 482 | u16 ioport1; |
483 | u16 ioport2; | 483 | u16 ioport2; |
484 | u16 region_size; | 484 | u16 region_size; |
485 | u16 evtype_offset; | 485 | u16 evtype_offset; |
486 | int camera_power; | 486 | int camera_power; |
487 | int bluetooth_power; | 487 | int bluetooth_power; |
488 | struct mutex lock; | 488 | struct mutex lock; |
489 | struct kfifo fifo; | 489 | struct kfifo fifo; |
490 | spinlock_t fifo_lock; | 490 | spinlock_t fifo_lock; |
491 | wait_queue_head_t fifo_proc_list; | 491 | wait_queue_head_t fifo_proc_list; |
492 | struct fasync_struct *fifo_async; | 492 | struct fasync_struct *fifo_async; |
493 | int open_count; | 493 | int open_count; |
494 | int model; | 494 | int model; |
495 | struct input_dev *input_jog_dev; | 495 | struct input_dev *input_jog_dev; |
496 | struct input_dev *input_key_dev; | 496 | struct input_dev *input_key_dev; |
497 | struct work_struct input_work; | 497 | struct work_struct input_work; |
498 | struct kfifo input_fifo; | 498 | struct kfifo input_fifo; |
499 | spinlock_t input_fifo_lock; | 499 | spinlock_t input_fifo_lock; |
500 | } sonypi_device; | 500 | } sonypi_device; |
501 | 501 | ||
502 | #define ITERATIONS_LONG 10000 | 502 | #define ITERATIONS_LONG 10000 |
503 | #define ITERATIONS_SHORT 10 | 503 | #define ITERATIONS_SHORT 10 |
504 | 504 | ||
505 | #define wait_on_command(quiet, command, iterations) { \ | 505 | #define wait_on_command(quiet, command, iterations) { \ |
506 | unsigned int n = iterations; \ | 506 | unsigned int n = iterations; \ |
507 | while (--n && (command)) \ | 507 | while (--n && (command)) \ |
508 | udelay(1); \ | 508 | udelay(1); \ |
509 | if (!n && (verbose || !quiet)) \ | 509 | if (!n && (verbose || !quiet)) \ |
510 | printk(KERN_WARNING "sonypi command failed at %s : %s (line %d)\n", __FILE__, __func__, __LINE__); \ | 510 | printk(KERN_WARNING "sonypi command failed at %s : %s (line %d)\n", __FILE__, __func__, __LINE__); \ |
511 | } | 511 | } |
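The wait_on_command() macro above is a bounded busy-wait: poll a condition, give up after a fixed number of iterations, and warn on timeout. A minimal stand-alone sketch of the same control flow, with a hypothetical status_busy() stub standing in for the driver's inb_p() status reads, might look like this:

/* Illustration only: bounded poll-until-idle, mirroring wait_on_command(). */
#include <stdio.h>

static int status_busy(void)
{
	return 0;	/* pretend the embedded controller answered immediately */
}

static int wait_while_busy(unsigned int iterations)
{
	unsigned int n = iterations;

	while (--n && status_busy())
		;	/* the driver inserts udelay(1) here */
	if (!n) {
		fprintf(stderr, "command timed out\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	return wait_while_busy(10000) ? 1 : 0;
}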
512 | 512 | ||
513 | #ifdef CONFIG_ACPI | 513 | #ifdef CONFIG_ACPI |
514 | #define SONYPI_ACPI_ACTIVE (!acpi_disabled) | 514 | #define SONYPI_ACPI_ACTIVE (!acpi_disabled) |
515 | #else | 515 | #else |
516 | #define SONYPI_ACPI_ACTIVE 0 | 516 | #define SONYPI_ACPI_ACTIVE 0 |
517 | #endif /* CONFIG_ACPI */ | 517 | #endif /* CONFIG_ACPI */ |
518 | 518 | ||
519 | #ifdef CONFIG_ACPI | 519 | #ifdef CONFIG_ACPI |
520 | static struct acpi_device *sonypi_acpi_device; | 520 | static struct acpi_device *sonypi_acpi_device; |
521 | static int acpi_driver_registered; | 521 | static int acpi_driver_registered; |
522 | #endif | 522 | #endif |
523 | 523 | ||
524 | static int sonypi_ec_write(u8 addr, u8 value) | 524 | static int sonypi_ec_write(u8 addr, u8 value) |
525 | { | 525 | { |
526 | #ifdef CONFIG_ACPI | 526 | #ifdef CONFIG_ACPI |
527 | if (SONYPI_ACPI_ACTIVE) | 527 | if (SONYPI_ACPI_ACTIVE) |
528 | return ec_write(addr, value); | 528 | return ec_write(addr, value); |
529 | #endif | 529 | #endif |
530 | wait_on_command(1, inb_p(SONYPI_CST_IOPORT) & 3, ITERATIONS_LONG); | 530 | wait_on_command(1, inb_p(SONYPI_CST_IOPORT) & 3, ITERATIONS_LONG); |
531 | outb_p(0x81, SONYPI_CST_IOPORT); | 531 | outb_p(0x81, SONYPI_CST_IOPORT); |
532 | wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG); | 532 | wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG); |
533 | outb_p(addr, SONYPI_DATA_IOPORT); | 533 | outb_p(addr, SONYPI_DATA_IOPORT); |
534 | wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG); | 534 | wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG); |
535 | outb_p(value, SONYPI_DATA_IOPORT); | 535 | outb_p(value, SONYPI_DATA_IOPORT); |
536 | wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG); | 536 | wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG); |
537 | return 0; | 537 | return 0; |
538 | } | 538 | } |
539 | 539 | ||
540 | static int sonypi_ec_read(u8 addr, u8 *value) | 540 | static int sonypi_ec_read(u8 addr, u8 *value) |
541 | { | 541 | { |
542 | #ifdef CONFIG_ACPI | 542 | #ifdef CONFIG_ACPI |
543 | if (SONYPI_ACPI_ACTIVE) | 543 | if (SONYPI_ACPI_ACTIVE) |
544 | return ec_read(addr, value); | 544 | return ec_read(addr, value); |
545 | #endif | 545 | #endif |
546 | wait_on_command(1, inb_p(SONYPI_CST_IOPORT) & 3, ITERATIONS_LONG); | 546 | wait_on_command(1, inb_p(SONYPI_CST_IOPORT) & 3, ITERATIONS_LONG); |
547 | outb_p(0x80, SONYPI_CST_IOPORT); | 547 | outb_p(0x80, SONYPI_CST_IOPORT); |
548 | wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG); | 548 | wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG); |
549 | outb_p(addr, SONYPI_DATA_IOPORT); | 549 | outb_p(addr, SONYPI_DATA_IOPORT); |
550 | wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG); | 550 | wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG); |
551 | *value = inb_p(SONYPI_DATA_IOPORT); | 551 | *value = inb_p(SONYPI_DATA_IOPORT); |
552 | return 0; | 552 | return 0; |
553 | } | 553 | } |
554 | 554 | ||
555 | static int ec_read16(u8 addr, u16 *value) | 555 | static int ec_read16(u8 addr, u16 *value) |
556 | { | 556 | { |
557 | u8 val_lb, val_hb; | 557 | u8 val_lb, val_hb; |
558 | if (sonypi_ec_read(addr, &val_lb)) | 558 | if (sonypi_ec_read(addr, &val_lb)) |
559 | return -1; | 559 | return -1; |
560 | if (sonypi_ec_read(addr + 1, &val_hb)) | 560 | if (sonypi_ec_read(addr + 1, &val_hb)) |
561 | return -1; | 561 | return -1; |
562 | *value = val_lb | (val_hb << 8); | 562 | *value = val_lb | (val_hb << 8); |
563 | return 0; | 563 | return 0; |
564 | } | 564 | } |
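ec_read16() reads two consecutive EC registers and assembles them little-endian, low byte first. A tiny stand-alone check of that byte combination, using invented register values, is:

/* Illustration only: the little-endian combination used by ec_read16(). */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t val_lb = 0x34;	/* register at addr     */
	uint8_t val_hb = 0x12;	/* register at addr + 1 */
	uint16_t value = val_lb | (val_hb << 8);

	assert(value == 0x1234);
	return 0;
}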
565 | 565 | ||
566 | /* Initializes the device - this comes from the AML code in the ACPI bios */ | 566 | /* Initializes the device - this comes from the AML code in the ACPI bios */ |
567 | static void sonypi_type1_srs(void) | 567 | static void sonypi_type1_srs(void) |
568 | { | 568 | { |
569 | u32 v; | 569 | u32 v; |
570 | 570 | ||
571 | pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v); | 571 | pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v); |
572 | v = (v & 0xFFFF0000) | ((u32) sonypi_device.ioport1); | 572 | v = (v & 0xFFFF0000) | ((u32) sonypi_device.ioport1); |
573 | pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v); | 573 | pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v); |
574 | 574 | ||
575 | pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v); | 575 | pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v); |
576 | v = (v & 0xFFF0FFFF) | | 576 | v = (v & 0xFFF0FFFF) | |
577 | (((u32) sonypi_device.ioport1 ^ sonypi_device.ioport2) << 16); | 577 | (((u32) sonypi_device.ioport1 ^ sonypi_device.ioport2) << 16); |
578 | pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v); | 578 | pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v); |
579 | 579 | ||
580 | v = inl(SONYPI_IRQ_PORT); | 580 | v = inl(SONYPI_IRQ_PORT); |
581 | v &= ~(((u32) 0x3) << SONYPI_IRQ_SHIFT); | 581 | v &= ~(((u32) 0x3) << SONYPI_IRQ_SHIFT); |
582 | v |= (((u32) sonypi_device.bits) << SONYPI_IRQ_SHIFT); | 582 | v |= (((u32) sonypi_device.bits) << SONYPI_IRQ_SHIFT); |
583 | outl(v, SONYPI_IRQ_PORT); | 583 | outl(v, SONYPI_IRQ_PORT); |
584 | 584 | ||
585 | pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v); | 585 | pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v); |
586 | v = (v & 0xFF1FFFFF) | 0x00C00000; | 586 | v = (v & 0xFF1FFFFF) | 0x00C00000; |
587 | pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v); | 587 | pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v); |
588 | } | 588 | } |
589 | 589 | ||
590 | static void sonypi_type2_srs(void) | 590 | static void sonypi_type2_srs(void) |
591 | { | 591 | { |
592 | if (sonypi_ec_write(SONYPI_SHIB, (sonypi_device.ioport1 & 0xFF00) >> 8)) | 592 | if (sonypi_ec_write(SONYPI_SHIB, (sonypi_device.ioport1 & 0xFF00) >> 8)) |
593 | printk(KERN_WARNING "ec_write failed\n"); | 593 | printk(KERN_WARNING "ec_write failed\n"); |
594 | if (sonypi_ec_write(SONYPI_SLOB, sonypi_device.ioport1 & 0x00FF)) | 594 | if (sonypi_ec_write(SONYPI_SLOB, sonypi_device.ioport1 & 0x00FF)) |
595 | printk(KERN_WARNING "ec_write failed\n"); | 595 | printk(KERN_WARNING "ec_write failed\n"); |
596 | if (sonypi_ec_write(SONYPI_SIRQ, sonypi_device.bits)) | 596 | if (sonypi_ec_write(SONYPI_SIRQ, sonypi_device.bits)) |
597 | printk(KERN_WARNING "ec_write failed\n"); | 597 | printk(KERN_WARNING "ec_write failed\n"); |
598 | udelay(10); | 598 | udelay(10); |
599 | } | 599 | } |
600 | 600 | ||
601 | static void sonypi_type3_srs(void) | 601 | static void sonypi_type3_srs(void) |
602 | { | 602 | { |
603 | u16 v16; | 603 | u16 v16; |
604 | u8 v8; | 604 | u8 v8; |
605 | 605 | ||
606 | /* This model type uses the same initialization of | 606 | /* This model type uses the same initialization of |
607 | * the embedded controller as the type2 models. */ | 607 | * the embedded controller as the type2 models. */ |
608 | sonypi_type2_srs(); | 608 | sonypi_type2_srs(); |
609 | 609 | ||
610 | /* Initialization of PCI config space of the LPC interface bridge. */ | 610 | /* Initialization of PCI config space of the LPC interface bridge. */ |
611 | v16 = (sonypi_device.ioport1 & 0xFFF0) | 0x01; | 611 | v16 = (sonypi_device.ioport1 & 0xFFF0) | 0x01; |
612 | pci_write_config_word(sonypi_device.dev, SONYPI_TYPE3_GID2, v16); | 612 | pci_write_config_word(sonypi_device.dev, SONYPI_TYPE3_GID2, v16); |
613 | pci_read_config_byte(sonypi_device.dev, SONYPI_TYPE3_MISC, &v8); | 613 | pci_read_config_byte(sonypi_device.dev, SONYPI_TYPE3_MISC, &v8); |
614 | v8 = (v8 & 0xCF) | 0x10; | 614 | v8 = (v8 & 0xCF) | 0x10; |
615 | pci_write_config_byte(sonypi_device.dev, SONYPI_TYPE3_MISC, v8); | 615 | pci_write_config_byte(sonypi_device.dev, SONYPI_TYPE3_MISC, v8); |
616 | } | 616 | } |
617 | 617 | ||
618 | /* Disables the device - this comes from the AML code in the ACPI bios */ | 618 | /* Disables the device - this comes from the AML code in the ACPI bios */ |
619 | static void sonypi_type1_dis(void) | 619 | static void sonypi_type1_dis(void) |
620 | { | 620 | { |
621 | u32 v; | 621 | u32 v; |
622 | 622 | ||
623 | pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v); | 623 | pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v); |
624 | v = v & 0xFF3FFFFF; | 624 | v = v & 0xFF3FFFFF; |
625 | pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v); | 625 | pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v); |
626 | 626 | ||
627 | v = inl(SONYPI_IRQ_PORT); | 627 | v = inl(SONYPI_IRQ_PORT); |
628 | v |= (0x3 << SONYPI_IRQ_SHIFT); | 628 | v |= (0x3 << SONYPI_IRQ_SHIFT); |
629 | outl(v, SONYPI_IRQ_PORT); | 629 | outl(v, SONYPI_IRQ_PORT); |
630 | } | 630 | } |
631 | 631 | ||
632 | static void sonypi_type2_dis(void) | 632 | static void sonypi_type2_dis(void) |
633 | { | 633 | { |
634 | if (sonypi_ec_write(SONYPI_SHIB, 0)) | 634 | if (sonypi_ec_write(SONYPI_SHIB, 0)) |
635 | printk(KERN_WARNING "ec_write failed\n"); | 635 | printk(KERN_WARNING "ec_write failed\n"); |
636 | if (sonypi_ec_write(SONYPI_SLOB, 0)) | 636 | if (sonypi_ec_write(SONYPI_SLOB, 0)) |
637 | printk(KERN_WARNING "ec_write failed\n"); | 637 | printk(KERN_WARNING "ec_write failed\n"); |
638 | if (sonypi_ec_write(SONYPI_SIRQ, 0)) | 638 | if (sonypi_ec_write(SONYPI_SIRQ, 0)) |
639 | printk(KERN_WARNING "ec_write failed\n"); | 639 | printk(KERN_WARNING "ec_write failed\n"); |
640 | } | 640 | } |
641 | 641 | ||
642 | static void sonypi_type3_dis(void) | 642 | static void sonypi_type3_dis(void) |
643 | { | 643 | { |
644 | sonypi_type2_dis(); | 644 | sonypi_type2_dis(); |
645 | udelay(10); | 645 | udelay(10); |
646 | pci_write_config_word(sonypi_device.dev, SONYPI_TYPE3_GID2, 0); | 646 | pci_write_config_word(sonypi_device.dev, SONYPI_TYPE3_GID2, 0); |
647 | } | 647 | } |
648 | 648 | ||
649 | static u8 sonypi_call1(u8 dev) | 649 | static u8 sonypi_call1(u8 dev) |
650 | { | 650 | { |
651 | u8 v1, v2; | 651 | u8 v1, v2; |
652 | 652 | ||
653 | wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG); | 653 | wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG); |
654 | outb(dev, sonypi_device.ioport2); | 654 | outb(dev, sonypi_device.ioport2); |
655 | v1 = inb_p(sonypi_device.ioport2); | 655 | v1 = inb_p(sonypi_device.ioport2); |
656 | v2 = inb_p(sonypi_device.ioport1); | 656 | v2 = inb_p(sonypi_device.ioport1); |
657 | return v2; | 657 | return v2; |
658 | } | 658 | } |
659 | 659 | ||
660 | static u8 sonypi_call2(u8 dev, u8 fn) | 660 | static u8 sonypi_call2(u8 dev, u8 fn) |
661 | { | 661 | { |
662 | u8 v1; | 662 | u8 v1; |
663 | 663 | ||
664 | wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG); | 664 | wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG); |
665 | outb(dev, sonypi_device.ioport2); | 665 | outb(dev, sonypi_device.ioport2); |
666 | wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG); | 666 | wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG); |
667 | outb(fn, sonypi_device.ioport1); | 667 | outb(fn, sonypi_device.ioport1); |
668 | v1 = inb_p(sonypi_device.ioport1); | 668 | v1 = inb_p(sonypi_device.ioport1); |
669 | return v1; | 669 | return v1; |
670 | } | 670 | } |
671 | 671 | ||
672 | static u8 sonypi_call3(u8 dev, u8 fn, u8 v) | 672 | static u8 sonypi_call3(u8 dev, u8 fn, u8 v) |
673 | { | 673 | { |
674 | u8 v1; | 674 | u8 v1; |
675 | 675 | ||
676 | wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG); | 676 | wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG); |
677 | outb(dev, sonypi_device.ioport2); | 677 | outb(dev, sonypi_device.ioport2); |
678 | wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG); | 678 | wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG); |
679 | outb(fn, sonypi_device.ioport1); | 679 | outb(fn, sonypi_device.ioport1); |
680 | wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG); | 680 | wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG); |
681 | outb(v, sonypi_device.ioport1); | 681 | outb(v, sonypi_device.ioport1); |
682 | v1 = inb_p(sonypi_device.ioport1); | 682 | v1 = inb_p(sonypi_device.ioport1); |
683 | return v1; | 683 | return v1; |
684 | } | 684 | } |
685 | 685 | ||
686 | #if 0 | 686 | #if 0 |
687 | /* Get brightness, hue etc. Unreliable... */ | 687 | /* Get brightness, hue etc. Unreliable... */ |
688 | static u8 sonypi_read(u8 fn) | 688 | static u8 sonypi_read(u8 fn) |
689 | { | 689 | { |
690 | u8 v1, v2; | 690 | u8 v1, v2; |
691 | int n = 100; | 691 | int n = 100; |
692 | 692 | ||
693 | while (n--) { | 693 | while (n--) { |
694 | v1 = sonypi_call2(0x8f, fn); | 694 | v1 = sonypi_call2(0x8f, fn); |
695 | v2 = sonypi_call2(0x8f, fn); | 695 | v2 = sonypi_call2(0x8f, fn); |
696 | if (v1 == v2 && v1 != 0xff) | 696 | if (v1 == v2 && v1 != 0xff) |
697 | return v1; | 697 | return v1; |
698 | } | 698 | } |
699 | return 0xff; | 699 | return 0xff; |
700 | } | 700 | } |
701 | #endif | 701 | #endif |
702 | 702 | ||
703 | /* Set brightness, hue etc */ | 703 | /* Set brightness, hue etc */ |
704 | static void sonypi_set(u8 fn, u8 v) | 704 | static void sonypi_set(u8 fn, u8 v) |
705 | { | 705 | { |
706 | wait_on_command(0, sonypi_call3(0x90, fn, v), ITERATIONS_SHORT); | 706 | wait_on_command(0, sonypi_call3(0x90, fn, v), ITERATIONS_SHORT); |
707 | } | 707 | } |
708 | 708 | ||
709 | /* Tests if the camera is ready */ | 709 | /* Tests if the camera is ready */ |
710 | static int sonypi_camera_ready(void) | 710 | static int sonypi_camera_ready(void) |
711 | { | 711 | { |
712 | u8 v; | 712 | u8 v; |
713 | 713 | ||
714 | v = sonypi_call2(0x8f, SONYPI_CAMERA_STATUS); | 714 | v = sonypi_call2(0x8f, SONYPI_CAMERA_STATUS); |
715 | return (v != 0xff && (v & SONYPI_CAMERA_STATUS_READY)); | 715 | return (v != 0xff && (v & SONYPI_CAMERA_STATUS_READY)); |
716 | } | 716 | } |
717 | 717 | ||
718 | /* Turns the camera off */ | 718 | /* Turns the camera off */ |
719 | static void sonypi_camera_off(void) | 719 | static void sonypi_camera_off(void) |
720 | { | 720 | { |
721 | sonypi_set(SONYPI_CAMERA_PICTURE, SONYPI_CAMERA_MUTE_MASK); | 721 | sonypi_set(SONYPI_CAMERA_PICTURE, SONYPI_CAMERA_MUTE_MASK); |
722 | 722 | ||
723 | if (!sonypi_device.camera_power) | 723 | if (!sonypi_device.camera_power) |
724 | return; | 724 | return; |
725 | 725 | ||
726 | sonypi_call2(0x91, 0); | 726 | sonypi_call2(0x91, 0); |
727 | sonypi_device.camera_power = 0; | 727 | sonypi_device.camera_power = 0; |
728 | } | 728 | } |
729 | 729 | ||
730 | /* Turns the camera on */ | 730 | /* Turns the camera on */ |
731 | static void sonypi_camera_on(void) | 731 | static void sonypi_camera_on(void) |
732 | { | 732 | { |
733 | int i, j; | 733 | int i, j; |
734 | 734 | ||
735 | if (sonypi_device.camera_power) | 735 | if (sonypi_device.camera_power) |
736 | return; | 736 | return; |
737 | 737 | ||
738 | for (j = 5; j > 0; j--) { | 738 | for (j = 5; j > 0; j--) { |
739 | 739 | ||
740 | while (sonypi_call2(0x91, 0x1)) | 740 | while (sonypi_call2(0x91, 0x1)) |
741 | msleep(10); | 741 | msleep(10); |
742 | sonypi_call1(0x93); | 742 | sonypi_call1(0x93); |
743 | 743 | ||
744 | for (i = 400; i > 0; i--) { | 744 | for (i = 400; i > 0; i--) { |
745 | if (sonypi_camera_ready()) | 745 | if (sonypi_camera_ready()) |
746 | break; | 746 | break; |
747 | msleep(10); | 747 | msleep(10); |
748 | } | 748 | } |
749 | if (i) | 749 | if (i) |
750 | break; | 750 | break; |
751 | } | 751 | } |
752 | 752 | ||
753 | if (j == 0) { | 753 | if (j == 0) { |
754 | printk(KERN_WARNING "sonypi: failed to power on camera\n"); | 754 | printk(KERN_WARNING "sonypi: failed to power on camera\n"); |
755 | return; | 755 | return; |
756 | } | 756 | } |
757 | 757 | ||
758 | sonypi_set(0x10, 0x5a); | 758 | sonypi_set(0x10, 0x5a); |
759 | sonypi_device.camera_power = 1; | 759 | sonypi_device.camera_power = 1; |
760 | } | 760 | } |
761 | 761 | ||
762 | /* sets the bluetooth subsystem power state */ | 762 | /* sets the bluetooth subsystem power state */ |
763 | static void sonypi_setbluetoothpower(u8 state) | 763 | static void sonypi_setbluetoothpower(u8 state) |
764 | { | 764 | { |
765 | state = !!state; | 765 | state = !!state; |
766 | 766 | ||
767 | if (sonypi_device.bluetooth_power == state) | 767 | if (sonypi_device.bluetooth_power == state) |
768 | return; | 768 | return; |
769 | 769 | ||
770 | sonypi_call2(0x96, state); | 770 | sonypi_call2(0x96, state); |
771 | sonypi_call1(0x82); | 771 | sonypi_call1(0x82); |
772 | sonypi_device.bluetooth_power = state; | 772 | sonypi_device.bluetooth_power = state; |
773 | } | 773 | } |
774 | 774 | ||
775 | static void input_keyrelease(struct work_struct *work) | 775 | static void input_keyrelease(struct work_struct *work) |
776 | { | 776 | { |
777 | struct sonypi_keypress kp; | 777 | struct sonypi_keypress kp; |
778 | 778 | ||
779 | while (kfifo_out_locked(&sonypi_device.input_fifo, (unsigned char *)&kp, | 779 | while (kfifo_out_locked(&sonypi_device.input_fifo, (unsigned char *)&kp, |
780 | sizeof(kp), &sonypi_device.input_fifo_lock) | 780 | sizeof(kp), &sonypi_device.input_fifo_lock) |
781 | == sizeof(kp)) { | 781 | == sizeof(kp)) { |
782 | msleep(10); | 782 | msleep(10); |
783 | input_report_key(kp.dev, kp.key, 0); | 783 | input_report_key(kp.dev, kp.key, 0); |
784 | input_sync(kp.dev); | 784 | input_sync(kp.dev); |
785 | } | 785 | } |
786 | } | 786 | } |
787 | 787 | ||
788 | static void sonypi_report_input_event(u8 event) | 788 | static void sonypi_report_input_event(u8 event) |
789 | { | 789 | { |
790 | struct input_dev *jog_dev = sonypi_device.input_jog_dev; | 790 | struct input_dev *jog_dev = sonypi_device.input_jog_dev; |
791 | struct input_dev *key_dev = sonypi_device.input_key_dev; | 791 | struct input_dev *key_dev = sonypi_device.input_key_dev; |
792 | struct sonypi_keypress kp = { NULL }; | 792 | struct sonypi_keypress kp = { NULL }; |
793 | int i; | 793 | int i; |
794 | 794 | ||
795 | switch (event) { | 795 | switch (event) { |
796 | case SONYPI_EVENT_JOGDIAL_UP: | 796 | case SONYPI_EVENT_JOGDIAL_UP: |
797 | case SONYPI_EVENT_JOGDIAL_UP_PRESSED: | 797 | case SONYPI_EVENT_JOGDIAL_UP_PRESSED: |
798 | input_report_rel(jog_dev, REL_WHEEL, 1); | 798 | input_report_rel(jog_dev, REL_WHEEL, 1); |
799 | input_sync(jog_dev); | 799 | input_sync(jog_dev); |
800 | break; | 800 | break; |
801 | 801 | ||
802 | case SONYPI_EVENT_JOGDIAL_DOWN: | 802 | case SONYPI_EVENT_JOGDIAL_DOWN: |
803 | case SONYPI_EVENT_JOGDIAL_DOWN_PRESSED: | 803 | case SONYPI_EVENT_JOGDIAL_DOWN_PRESSED: |
804 | input_report_rel(jog_dev, REL_WHEEL, -1); | 804 | input_report_rel(jog_dev, REL_WHEEL, -1); |
805 | input_sync(jog_dev); | 805 | input_sync(jog_dev); |
806 | break; | 806 | break; |
807 | 807 | ||
808 | case SONYPI_EVENT_JOGDIAL_PRESSED: | 808 | case SONYPI_EVENT_JOGDIAL_PRESSED: |
809 | kp.key = BTN_MIDDLE; | 809 | kp.key = BTN_MIDDLE; |
810 | kp.dev = jog_dev; | 810 | kp.dev = jog_dev; |
811 | break; | 811 | break; |
812 | 812 | ||
813 | case SONYPI_EVENT_FNKEY_RELEASED: | 813 | case SONYPI_EVENT_FNKEY_RELEASED: |
814 | /* Nothing, not all VAIOs generate this event */ | 814 | /* Nothing, not all VAIOs generate this event */ |
815 | break; | 815 | break; |
816 | 816 | ||
817 | default: | 817 | default: |
818 | for (i = 0; sonypi_inputkeys[i].sonypiev; i++) | 818 | for (i = 0; sonypi_inputkeys[i].sonypiev; i++) |
819 | if (event == sonypi_inputkeys[i].sonypiev) { | 819 | if (event == sonypi_inputkeys[i].sonypiev) { |
820 | kp.dev = key_dev; | 820 | kp.dev = key_dev; |
821 | kp.key = sonypi_inputkeys[i].inputev; | 821 | kp.key = sonypi_inputkeys[i].inputev; |
822 | break; | 822 | break; |
823 | } | 823 | } |
824 | break; | 824 | break; |
825 | } | 825 | } |
826 | 826 | ||
827 | if (kp.dev) { | 827 | if (kp.dev) { |
828 | input_report_key(kp.dev, kp.key, 1); | 828 | input_report_key(kp.dev, kp.key, 1); |
829 | input_sync(kp.dev); | 829 | input_sync(kp.dev); |
830 | kfifo_in_locked(&sonypi_device.input_fifo, | 830 | kfifo_in_locked(&sonypi_device.input_fifo, |
831 | (unsigned char *)&kp, sizeof(kp), | 831 | (unsigned char *)&kp, sizeof(kp), |
832 | &sonypi_device.input_fifo_lock); | 832 | &sonypi_device.input_fifo_lock); |
833 | schedule_work(&sonypi_device.input_work); | 833 | schedule_work(&sonypi_device.input_work); |
834 | } | 834 | } |
835 | } | 835 | } |
836 | 836 | ||
837 | /* Interrupt handler: some event is available */ | 837 | /* Interrupt handler: some event is available */ |
838 | static irqreturn_t sonypi_irq(int irq, void *dev_id) | 838 | static irqreturn_t sonypi_irq(int irq, void *dev_id) |
839 | { | 839 | { |
840 | u8 v1, v2, event = 0; | 840 | u8 v1, v2, event = 0; |
841 | int i, j; | 841 | int i, j; |
842 | 842 | ||
843 | v1 = inb_p(sonypi_device.ioport1); | 843 | v1 = inb_p(sonypi_device.ioport1); |
844 | v2 = inb_p(sonypi_device.ioport1 + sonypi_device.evtype_offset); | 844 | v2 = inb_p(sonypi_device.ioport1 + sonypi_device.evtype_offset); |
845 | 845 | ||
846 | for (i = 0; sonypi_eventtypes[i].model; i++) { | 846 | for (i = 0; sonypi_eventtypes[i].model; i++) { |
847 | if (sonypi_device.model != sonypi_eventtypes[i].model) | 847 | if (sonypi_device.model != sonypi_eventtypes[i].model) |
848 | continue; | 848 | continue; |
849 | if ((v2 & sonypi_eventtypes[i].data) != | 849 | if ((v2 & sonypi_eventtypes[i].data) != |
850 | sonypi_eventtypes[i].data) | 850 | sonypi_eventtypes[i].data) |
851 | continue; | 851 | continue; |
852 | if (!(mask & sonypi_eventtypes[i].mask)) | 852 | if (!(mask & sonypi_eventtypes[i].mask)) |
853 | continue; | 853 | continue; |
854 | for (j = 0; sonypi_eventtypes[i].events[j].event; j++) { | 854 | for (j = 0; sonypi_eventtypes[i].events[j].event; j++) { |
855 | if (v1 == sonypi_eventtypes[i].events[j].data) { | 855 | if (v1 == sonypi_eventtypes[i].events[j].data) { |
856 | event = sonypi_eventtypes[i].events[j].event; | 856 | event = sonypi_eventtypes[i].events[j].event; |
857 | goto found; | 857 | goto found; |
858 | } | 858 | } |
859 | } | 859 | } |
860 | } | 860 | } |
861 | 861 | ||
862 | if (verbose) | 862 | if (verbose) |
863 | printk(KERN_WARNING | 863 | printk(KERN_WARNING |
864 | "sonypi: unknown event port1=0x%02x,port2=0x%02x\n", | 864 | "sonypi: unknown event port1=0x%02x,port2=0x%02x\n", |
865 | v1, v2); | 865 | v1, v2); |
866 | /* We need to return IRQ_HANDLED here because there *are* | 866 | /* We need to return IRQ_HANDLED here because there *are* |
867 | * events belonging to the sonypi device we don't know about, | 867 | * events belonging to the sonypi device we don't know about, |
868 | * but we still don't want those to pollute the logs... */ | 868 | * but we still don't want those to pollute the logs... */ |
869 | return IRQ_HANDLED; | 869 | return IRQ_HANDLED; |
870 | 870 | ||
871 | found: | 871 | found: |
872 | if (verbose > 1) | 872 | if (verbose > 1) |
873 | printk(KERN_INFO | 873 | printk(KERN_INFO |
874 | "sonypi: event port1=0x%02x,port2=0x%02x\n", v1, v2); | 874 | "sonypi: event port1=0x%02x,port2=0x%02x\n", v1, v2); |
875 | 875 | ||
876 | if (useinput) | 876 | if (useinput) |
877 | sonypi_report_input_event(event); | 877 | sonypi_report_input_event(event); |
878 | 878 | ||
879 | #ifdef CONFIG_ACPI | 879 | #ifdef CONFIG_ACPI |
880 | if (sonypi_acpi_device) | 880 | if (sonypi_acpi_device) |
881 | acpi_bus_generate_proc_event(sonypi_acpi_device, 1, event); | 881 | acpi_bus_generate_proc_event(sonypi_acpi_device, 1, event); |
882 | #endif | 882 | #endif |
883 | 883 | ||
884 | kfifo_in_locked(&sonypi_device.fifo, (unsigned char *)&event, | 884 | kfifo_in_locked(&sonypi_device.fifo, (unsigned char *)&event, |
885 | sizeof(event), &sonypi_device.fifo_lock); | 885 | sizeof(event), &sonypi_device.fifo_lock); |
886 | kill_fasync(&sonypi_device.fifo_async, SIGIO, POLL_IN); | 886 | kill_fasync(&sonypi_device.fifo_async, SIGIO, POLL_IN); |
887 | wake_up_interruptible(&sonypi_device.fifo_proc_list); | 887 | wake_up_interruptible(&sonypi_device.fifo_proc_list); |
888 | 888 | ||
889 | return IRQ_HANDLED; | 889 | return IRQ_HANDLED; |
890 | } | 890 | } |
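sonypi_irq() decodes an event in two steps: first it finds a sonypi_eventtypes entry whose model matches and whose type bits are all set in the second port value (v2), then it scans that entry's event list for the first port value (v1). The following stand-alone sketch shows the same two-level lookup with invented tables; it is simplified in that it omits the driver's additional module-parameter mask check:

/* Illustration only: two-level event decode as in sonypi_irq(). */
#include <stdio.h>

struct ev { unsigned char data; int event; };
struct evtype { int model; unsigned char data; const struct ev *events; };

static const struct ev fnkeys[] = { { 0x10, 100 }, { 0x11, 101 }, { 0, 0 } };
static const struct evtype types[] = { { 2, 0x21, fnkeys }, { 0 } };

static int decode(int model, unsigned char v1, unsigned char v2)
{
	int i, j;

	for (i = 0; types[i].model; i++) {
		if (model != types[i].model)
			continue;
		if ((v2 & types[i].data) != types[i].data)
			continue;
		for (j = 0; types[i].events[j].event; j++)
			if (v1 == types[i].events[j].data)
				return types[i].events[j].event;
	}
	return 0;	/* unknown event */
}

int main(void)
{
	printf("decoded event %d\n", decode(2, 0x10, 0x21));	/* prints 100 */
	return 0;
}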
891 | 891 | ||
892 | static int sonypi_misc_fasync(int fd, struct file *filp, int on) | 892 | static int sonypi_misc_fasync(int fd, struct file *filp, int on) |
893 | { | 893 | { |
894 | return fasync_helper(fd, filp, on, &sonypi_device.fifo_async); | 894 | return fasync_helper(fd, filp, on, &sonypi_device.fifo_async); |
895 | } | 895 | } |
896 | 896 | ||
897 | static int sonypi_misc_release(struct inode *inode, struct file *file) | 897 | static int sonypi_misc_release(struct inode *inode, struct file *file) |
898 | { | 898 | { |
899 | mutex_lock(&sonypi_device.lock); | 899 | mutex_lock(&sonypi_device.lock); |
900 | sonypi_device.open_count--; | 900 | sonypi_device.open_count--; |
901 | mutex_unlock(&sonypi_device.lock); | 901 | mutex_unlock(&sonypi_device.lock); |
902 | return 0; | 902 | return 0; |
903 | } | 903 | } |
904 | 904 | ||
905 | static int sonypi_misc_open(struct inode *inode, struct file *file) | 905 | static int sonypi_misc_open(struct inode *inode, struct file *file) |
906 | { | 906 | { |
907 | mutex_lock(&sonypi_device.lock); | 907 | mutex_lock(&sonypi_device.lock); |
908 | /* Flush input queue on first open */ | 908 | /* Flush input queue on first open */ |
909 | if (!sonypi_device.open_count) | 909 | if (!sonypi_device.open_count) |
910 | kfifo_reset(&sonypi_device.fifo); | 910 | kfifo_reset(&sonypi_device.fifo); |
911 | sonypi_device.open_count++; | 911 | sonypi_device.open_count++; |
912 | mutex_unlock(&sonypi_device.lock); | 912 | mutex_unlock(&sonypi_device.lock); |
913 | 913 | ||
914 | return 0; | 914 | return 0; |
915 | } | 915 | } |
916 | 916 | ||
917 | static ssize_t sonypi_misc_read(struct file *file, char __user *buf, | 917 | static ssize_t sonypi_misc_read(struct file *file, char __user *buf, |
918 | size_t count, loff_t *pos) | 918 | size_t count, loff_t *pos) |
919 | { | 919 | { |
920 | ssize_t ret; | 920 | ssize_t ret; |
921 | unsigned char c; | 921 | unsigned char c; |
922 | 922 | ||
923 | if ((kfifo_len(&sonypi_device.fifo) == 0) && | 923 | if ((kfifo_len(&sonypi_device.fifo) == 0) && |
924 | (file->f_flags & O_NONBLOCK)) | 924 | (file->f_flags & O_NONBLOCK)) |
925 | return -EAGAIN; | 925 | return -EAGAIN; |
926 | 926 | ||
927 | ret = wait_event_interruptible(sonypi_device.fifo_proc_list, | 927 | ret = wait_event_interruptible(sonypi_device.fifo_proc_list, |
928 | kfifo_len(&sonypi_device.fifo) != 0); | 928 | kfifo_len(&sonypi_device.fifo) != 0); |
929 | if (ret) | 929 | if (ret) |
930 | return ret; | 930 | return ret; |
931 | 931 | ||
932 | while (ret < count && | 932 | while (ret < count && |
933 | (kfifo_out_locked(&sonypi_device.fifo, &c, sizeof(c), | 933 | (kfifo_out_locked(&sonypi_device.fifo, &c, sizeof(c), |
934 | &sonypi_device.fifo_lock) == sizeof(c))) { | 934 | &sonypi_device.fifo_lock) == sizeof(c))) { |
935 | if (put_user(c, buf++)) | 935 | if (put_user(c, buf++)) |
936 | return -EFAULT; | 936 | return -EFAULT; |
937 | ret++; | 937 | ret++; |
938 | } | 938 | } |
939 | 939 | ||
940 | if (ret > 0) { | 940 | if (ret > 0) { |
941 | struct inode *inode = file->f_path.dentry->d_inode; | 941 | struct inode *inode = file->f_path.dentry->d_inode; |
942 | inode->i_atime = current_fs_time(inode->i_sb); | 942 | inode->i_atime = current_fs_time(inode->i_sb); |
943 | } | 943 | } |
944 | 944 | ||
945 | return ret; | 945 | return ret; |
946 | } | 946 | } |
947 | 947 | ||
948 | static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait) | 948 | static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait) |
949 | { | 949 | { |
950 | poll_wait(file, &sonypi_device.fifo_proc_list, wait); | 950 | poll_wait(file, &sonypi_device.fifo_proc_list, wait); |
951 | if (kfifo_len(&sonypi_device.fifo)) | 951 | if (kfifo_len(&sonypi_device.fifo)) |
952 | return POLLIN | POLLRDNORM; | 952 | return POLLIN | POLLRDNORM; |
953 | return 0; | 953 | return 0; |
954 | } | 954 | } |
955 | 955 | ||
956 | static long sonypi_misc_ioctl(struct file *fp, | 956 | static long sonypi_misc_ioctl(struct file *fp, |
957 | unsigned int cmd, unsigned long arg) | 957 | unsigned int cmd, unsigned long arg) |
958 | { | 958 | { |
959 | long ret = 0; | 959 | long ret = 0; |
960 | void __user *argp = (void __user *)arg; | 960 | void __user *argp = (void __user *)arg; |
961 | u8 val8; | 961 | u8 val8; |
962 | u16 val16; | 962 | u16 val16; |
963 | 963 | ||
964 | mutex_lock(&sonypi_device.lock); | 964 | mutex_lock(&sonypi_device.lock); |
965 | switch (cmd) { | 965 | switch (cmd) { |
966 | case SONYPI_IOCGBRT: | 966 | case SONYPI_IOCGBRT: |
967 | if (sonypi_ec_read(SONYPI_LCD_LIGHT, &val8)) { | 967 | if (sonypi_ec_read(SONYPI_LCD_LIGHT, &val8)) { |
968 | ret = -EIO; | 968 | ret = -EIO; |
969 | break; | 969 | break; |
970 | } | 970 | } |
971 | if (copy_to_user(argp, &val8, sizeof(val8))) | 971 | if (copy_to_user(argp, &val8, sizeof(val8))) |
972 | ret = -EFAULT; | 972 | ret = -EFAULT; |
973 | break; | 973 | break; |
974 | case SONYPI_IOCSBRT: | 974 | case SONYPI_IOCSBRT: |
975 | if (copy_from_user(&val8, argp, sizeof(val8))) { | 975 | if (copy_from_user(&val8, argp, sizeof(val8))) { |
976 | ret = -EFAULT; | 976 | ret = -EFAULT; |
977 | break; | 977 | break; |
978 | } | 978 | } |
979 | if (sonypi_ec_write(SONYPI_LCD_LIGHT, val8)) | 979 | if (sonypi_ec_write(SONYPI_LCD_LIGHT, val8)) |
980 | ret = -EIO; | 980 | ret = -EIO; |
981 | break; | 981 | break; |
982 | case SONYPI_IOCGBAT1CAP: | 982 | case SONYPI_IOCGBAT1CAP: |
983 | if (ec_read16(SONYPI_BAT1_FULL, &val16)) { | 983 | if (ec_read16(SONYPI_BAT1_FULL, &val16)) { |
984 | ret = -EIO; | 984 | ret = -EIO; |
985 | break; | 985 | break; |
986 | } | 986 | } |
987 | if (copy_to_user(argp, &val16, sizeof(val16))) | 987 | if (copy_to_user(argp, &val16, sizeof(val16))) |
988 | ret = -EFAULT; | 988 | ret = -EFAULT; |
989 | break; | 989 | break; |
990 | case SONYPI_IOCGBAT1REM: | 990 | case SONYPI_IOCGBAT1REM: |
991 | if (ec_read16(SONYPI_BAT1_LEFT, &val16)) { | 991 | if (ec_read16(SONYPI_BAT1_LEFT, &val16)) { |
992 | ret = -EIO; | 992 | ret = -EIO; |
993 | break; | 993 | break; |
994 | } | 994 | } |
995 | if (copy_to_user(argp, &val16, sizeof(val16))) | 995 | if (copy_to_user(argp, &val16, sizeof(val16))) |
996 | ret = -EFAULT; | 996 | ret = -EFAULT; |
997 | break; | 997 | break; |
998 | case SONYPI_IOCGBAT2CAP: | 998 | case SONYPI_IOCGBAT2CAP: |
999 | if (ec_read16(SONYPI_BAT2_FULL, &val16)) { | 999 | if (ec_read16(SONYPI_BAT2_FULL, &val16)) { |
1000 | ret = -EIO; | 1000 | ret = -EIO; |
1001 | break; | 1001 | break; |
1002 | } | 1002 | } |
1003 | if (copy_to_user(argp, &val16, sizeof(val16))) | 1003 | if (copy_to_user(argp, &val16, sizeof(val16))) |
1004 | ret = -EFAULT; | 1004 | ret = -EFAULT; |
1005 | break; | 1005 | break; |
1006 | case SONYPI_IOCGBAT2REM: | 1006 | case SONYPI_IOCGBAT2REM: |
1007 | if (ec_read16(SONYPI_BAT2_LEFT, &val16)) { | 1007 | if (ec_read16(SONYPI_BAT2_LEFT, &val16)) { |
1008 | ret = -EIO; | 1008 | ret = -EIO; |
1009 | break; | 1009 | break; |
1010 | } | 1010 | } |
1011 | if (copy_to_user(argp, &val16, sizeof(val16))) | 1011 | if (copy_to_user(argp, &val16, sizeof(val16))) |
1012 | ret = -EFAULT; | 1012 | ret = -EFAULT; |
1013 | break; | 1013 | break; |
1014 | case SONYPI_IOCGBATFLAGS: | 1014 | case SONYPI_IOCGBATFLAGS: |
1015 | if (sonypi_ec_read(SONYPI_BAT_FLAGS, &val8)) { | 1015 | if (sonypi_ec_read(SONYPI_BAT_FLAGS, &val8)) { |
1016 | ret = -EIO; | 1016 | ret = -EIO; |
1017 | break; | 1017 | break; |
1018 | } | 1018 | } |
1019 | val8 &= 0x07; | 1019 | val8 &= 0x07; |
1020 | if (copy_to_user(argp, &val8, sizeof(val8))) | 1020 | if (copy_to_user(argp, &val8, sizeof(val8))) |
1021 | ret = -EFAULT; | 1021 | ret = -EFAULT; |
1022 | break; | 1022 | break; |
1023 | case SONYPI_IOCGBLUE: | 1023 | case SONYPI_IOCGBLUE: |
1024 | val8 = sonypi_device.bluetooth_power; | 1024 | val8 = sonypi_device.bluetooth_power; |
1025 | if (copy_to_user(argp, &val8, sizeof(val8))) | 1025 | if (copy_to_user(argp, &val8, sizeof(val8))) |
1026 | ret = -EFAULT; | 1026 | ret = -EFAULT; |
1027 | break; | 1027 | break; |
1028 | case SONYPI_IOCSBLUE: | 1028 | case SONYPI_IOCSBLUE: |
1029 | if (copy_from_user(&val8, argp, sizeof(val8))) { | 1029 | if (copy_from_user(&val8, argp, sizeof(val8))) { |
1030 | ret = -EFAULT; | 1030 | ret = -EFAULT; |
1031 | break; | 1031 | break; |
1032 | } | 1032 | } |
1033 | sonypi_setbluetoothpower(val8); | 1033 | sonypi_setbluetoothpower(val8); |
1034 | break; | 1034 | break; |
1035 | /* FAN Controls */ | 1035 | /* FAN Controls */ |
1036 | case SONYPI_IOCGFAN: | 1036 | case SONYPI_IOCGFAN: |
1037 | if (sonypi_ec_read(SONYPI_FAN0_STATUS, &val8)) { | 1037 | if (sonypi_ec_read(SONYPI_FAN0_STATUS, &val8)) { |
1038 | ret = -EIO; | 1038 | ret = -EIO; |
1039 | break; | 1039 | break; |
1040 | } | 1040 | } |
1041 | if (copy_to_user(argp, &val8, sizeof(val8))) | 1041 | if (copy_to_user(argp, &val8, sizeof(val8))) |
1042 | ret = -EFAULT; | 1042 | ret = -EFAULT; |
1043 | break; | 1043 | break; |
1044 | case SONYPI_IOCSFAN: | 1044 | case SONYPI_IOCSFAN: |
1045 | if (copy_from_user(&val8, argp, sizeof(val8))) { | 1045 | if (copy_from_user(&val8, argp, sizeof(val8))) { |
1046 | ret = -EFAULT; | 1046 | ret = -EFAULT; |
1047 | break; | 1047 | break; |
1048 | } | 1048 | } |
1049 | if (sonypi_ec_write(SONYPI_FAN0_STATUS, val8)) | 1049 | if (sonypi_ec_write(SONYPI_FAN0_STATUS, val8)) |
1050 | ret = -EIO; | 1050 | ret = -EIO; |
1051 | break; | 1051 | break; |
1052 | /* GET Temperature (useful under APM) */ | 1052 | /* GET Temperature (useful under APM) */ |
1053 | case SONYPI_IOCGTEMP: | 1053 | case SONYPI_IOCGTEMP: |
1054 | if (sonypi_ec_read(SONYPI_TEMP_STATUS, &val8)) { | 1054 | if (sonypi_ec_read(SONYPI_TEMP_STATUS, &val8)) { |
1055 | ret = -EIO; | 1055 | ret = -EIO; |
1056 | break; | 1056 | break; |
1057 | } | 1057 | } |
1058 | if (copy_to_user(argp, &val8, sizeof(val8))) | 1058 | if (copy_to_user(argp, &val8, sizeof(val8))) |
1059 | ret = -EFAULT; | 1059 | ret = -EFAULT; |
1060 | break; | 1060 | break; |
1061 | default: | 1061 | default: |
1062 | ret = -EINVAL; | 1062 | ret = -EINVAL; |
1063 | } | 1063 | } |
1064 | mutex_unlock(&sonypi_device.lock); | 1064 | mutex_unlock(&sonypi_device.lock); |
1065 | return ret; | 1065 | return ret; |
1066 | } | 1066 | } |
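The ioctl handler above exposes EC state (brightness, battery, fan, temperature, bluetooth) to user space. A minimal user-space sketch querying the LCD backlight is shown below; it assumes /dev/sonypi exists and that <linux/sonypi.h> provides SONYPI_IOCGBRT, and is for illustration only:

/* Illustration only: read the LCD brightness via the sonypi ioctl API. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/sonypi.h>

int main(void)
{
	unsigned char brightness;
	int fd = open("/dev/sonypi", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/sonypi");
		return 1;
	}
	if (ioctl(fd, SONYPI_IOCGBRT, &brightness) == 0)
		printf("LCD brightness: %u\n", brightness);
	else
		perror("SONYPI_IOCGBRT");
	close(fd);
	return 0;
}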
1067 | 1067 | ||
1068 | static const struct file_operations sonypi_misc_fops = { | 1068 | static const struct file_operations sonypi_misc_fops = { |
1069 | .owner = THIS_MODULE, | 1069 | .owner = THIS_MODULE, |
1070 | .read = sonypi_misc_read, | 1070 | .read = sonypi_misc_read, |
1071 | .poll = sonypi_misc_poll, | 1071 | .poll = sonypi_misc_poll, |
1072 | .open = sonypi_misc_open, | 1072 | .open = sonypi_misc_open, |
1073 | .release = sonypi_misc_release, | 1073 | .release = sonypi_misc_release, |
1074 | .fasync = sonypi_misc_fasync, | 1074 | .fasync = sonypi_misc_fasync, |
1075 | .unlocked_ioctl = sonypi_misc_ioctl, | 1075 | .unlocked_ioctl = sonypi_misc_ioctl, |
1076 | .llseek = no_llseek, | 1076 | .llseek = no_llseek, |
1077 | }; | 1077 | }; |
1078 | 1078 | ||
1079 | static struct miscdevice sonypi_misc_device = { | 1079 | static struct miscdevice sonypi_misc_device = { |
1080 | .minor = MISC_DYNAMIC_MINOR, | 1080 | .minor = MISC_DYNAMIC_MINOR, |
1081 | .name = "sonypi", | 1081 | .name = "sonypi", |
1082 | .fops = &sonypi_misc_fops, | 1082 | .fops = &sonypi_misc_fops, |
1083 | }; | 1083 | }; |
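The miscdevice above registers the read/poll/ioctl interface under a dynamic minor with the name "sonypi". A user-space sketch that blocks until the driver queues an event and reads one event byte, assuming the node appears as /dev/sonypi, might look like this:

/* Illustration only: read one queued event byte from the misc device. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char event;
	int fd = open("/dev/sonypi", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/sonypi");
		return 1;
	}
	if (read(fd, &event, 1) == 1)	/* blocks until an event is queued */
		printf("sonypi event 0x%02x\n", event);
	close(fd);
	return 0;
}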
1084 | 1084 | ||
1085 | static void sonypi_enable(unsigned int camera_on) | 1085 | static void sonypi_enable(unsigned int camera_on) |
1086 | { | 1086 | { |
1087 | switch (sonypi_device.model) { | 1087 | switch (sonypi_device.model) { |
1088 | case SONYPI_DEVICE_MODEL_TYPE1: | 1088 | case SONYPI_DEVICE_MODEL_TYPE1: |
1089 | sonypi_type1_srs(); | 1089 | sonypi_type1_srs(); |
1090 | break; | 1090 | break; |
1091 | case SONYPI_DEVICE_MODEL_TYPE2: | 1091 | case SONYPI_DEVICE_MODEL_TYPE2: |
1092 | sonypi_type2_srs(); | 1092 | sonypi_type2_srs(); |
1093 | break; | 1093 | break; |
1094 | case SONYPI_DEVICE_MODEL_TYPE3: | 1094 | case SONYPI_DEVICE_MODEL_TYPE3: |
1095 | sonypi_type3_srs(); | 1095 | sonypi_type3_srs(); |
1096 | break; | 1096 | break; |
1097 | } | 1097 | } |
1098 | 1098 | ||
1099 | sonypi_call1(0x82); | 1099 | sonypi_call1(0x82); |
1100 | sonypi_call2(0x81, 0xff); | 1100 | sonypi_call2(0x81, 0xff); |
1101 | sonypi_call1(compat ? 0x92 : 0x82); | 1101 | sonypi_call1(compat ? 0x92 : 0x82); |
1102 | 1102 | ||
1103 | /* Enable ACPI mode to get Fn key events */ | 1103 | /* Enable ACPI mode to get Fn key events */ |
1104 | if (!SONYPI_ACPI_ACTIVE && fnkeyinit) | 1104 | if (!SONYPI_ACPI_ACTIVE && fnkeyinit) |
1105 | outb(0xf0, 0xb2); | 1105 | outb(0xf0, 0xb2); |
1106 | 1106 | ||
1107 | if (camera && camera_on) | 1107 | if (camera && camera_on) |
1108 | sonypi_camera_on(); | 1108 | sonypi_camera_on(); |
1109 | } | 1109 | } |
1110 | 1110 | ||
1111 | static int sonypi_disable(void) | 1111 | static int sonypi_disable(void) |
1112 | { | 1112 | { |
1113 | sonypi_call2(0x81, 0); /* make sure we don't get any more events */ | 1113 | sonypi_call2(0x81, 0); /* make sure we don't get any more events */ |
1114 | if (camera) | 1114 | if (camera) |
1115 | sonypi_camera_off(); | 1115 | sonypi_camera_off(); |
1116 | 1116 | ||
1117 | /* disable ACPI mode */ | 1117 | /* disable ACPI mode */ |
1118 | if (!SONYPI_ACPI_ACTIVE && fnkeyinit) | 1118 | if (!SONYPI_ACPI_ACTIVE && fnkeyinit) |
1119 | outb(0xf1, 0xb2); | 1119 | outb(0xf1, 0xb2); |
1120 | 1120 | ||
1121 | switch (sonypi_device.model) { | 1121 | switch (sonypi_device.model) { |
1122 | case SONYPI_DEVICE_MODEL_TYPE1: | 1122 | case SONYPI_DEVICE_MODEL_TYPE1: |
1123 | sonypi_type1_dis(); | 1123 | sonypi_type1_dis(); |
1124 | break; | 1124 | break; |
1125 | case SONYPI_DEVICE_MODEL_TYPE2: | 1125 | case SONYPI_DEVICE_MODEL_TYPE2: |
1126 | sonypi_type2_dis(); | 1126 | sonypi_type2_dis(); |
1127 | break; | 1127 | break; |
1128 | case SONYPI_DEVICE_MODEL_TYPE3: | 1128 | case SONYPI_DEVICE_MODEL_TYPE3: |
1129 | sonypi_type3_dis(); | 1129 | sonypi_type3_dis(); |
1130 | break; | 1130 | break; |
1131 | } | 1131 | } |
1132 | 1132 | ||
1133 | return 0; | 1133 | return 0; |
1134 | } | 1134 | } |
1135 | 1135 | ||
1136 | #ifdef CONFIG_ACPI | 1136 | #ifdef CONFIG_ACPI |
1137 | static int sonypi_acpi_add(struct acpi_device *device) | 1137 | static int sonypi_acpi_add(struct acpi_device *device) |
1138 | { | 1138 | { |
1139 | sonypi_acpi_device = device; | 1139 | sonypi_acpi_device = device; |
1140 | strcpy(acpi_device_name(device), "Sony laptop hotkeys"); | 1140 | strcpy(acpi_device_name(device), "Sony laptop hotkeys"); |
1141 | strcpy(acpi_device_class(device), "sony/hotkey"); | 1141 | strcpy(acpi_device_class(device), "sony/hotkey"); |
1142 | return 0; | 1142 | return 0; |
1143 | } | 1143 | } |
1144 | 1144 | ||
1145 | static int sonypi_acpi_remove(struct acpi_device *device, int type) | 1145 | static int sonypi_acpi_remove(struct acpi_device *device, int type) |
1146 | { | 1146 | { |
1147 | sonypi_acpi_device = NULL; | 1147 | sonypi_acpi_device = NULL; |
1148 | return 0; | 1148 | return 0; |
1149 | } | 1149 | } |
1150 | 1150 | ||
1151 | static const struct acpi_device_id sonypi_device_ids[] = { | 1151 | static const struct acpi_device_id sonypi_device_ids[] = { |
1152 | {"SNY6001", 0}, | 1152 | {"SNY6001", 0}, |
1153 | {"", 0}, | 1153 | {"", 0}, |
1154 | }; | 1154 | }; |
1155 | 1155 | ||
1156 | static struct acpi_driver sonypi_acpi_driver = { | 1156 | static struct acpi_driver sonypi_acpi_driver = { |
1157 | .name = "sonypi", | 1157 | .name = "sonypi", |
1158 | .class = "hkey", | 1158 | .class = "hkey", |
1159 | .ids = sonypi_device_ids, | 1159 | .ids = sonypi_device_ids, |
1160 | .ops = { | 1160 | .ops = { |
1161 | .add = sonypi_acpi_add, | 1161 | .add = sonypi_acpi_add, |
1162 | .remove = sonypi_acpi_remove, | 1162 | .remove = sonypi_acpi_remove, |
1163 | }, | 1163 | }, |
1164 | }; | 1164 | }; |
1165 | #endif | 1165 | #endif |
1166 | 1166 | ||
1167 | static int __devinit sonypi_create_input_devices(struct platform_device *pdev) | 1167 | static int sonypi_create_input_devices(struct platform_device *pdev) |
1168 | { | 1168 | { |
1169 | struct input_dev *jog_dev; | 1169 | struct input_dev *jog_dev; |
1170 | struct input_dev *key_dev; | 1170 | struct input_dev *key_dev; |
1171 | int i; | 1171 | int i; |
1172 | int error; | 1172 | int error; |
1173 | 1173 | ||
1174 | sonypi_device.input_jog_dev = jog_dev = input_allocate_device(); | 1174 | sonypi_device.input_jog_dev = jog_dev = input_allocate_device(); |
1175 | if (!jog_dev) | 1175 | if (!jog_dev) |
1176 | return -ENOMEM; | 1176 | return -ENOMEM; |
1177 | 1177 | ||
1178 | jog_dev->name = "Sony Vaio Jogdial"; | 1178 | jog_dev->name = "Sony Vaio Jogdial"; |
1179 | jog_dev->id.bustype = BUS_ISA; | 1179 | jog_dev->id.bustype = BUS_ISA; |
1180 | jog_dev->id.vendor = PCI_VENDOR_ID_SONY; | 1180 | jog_dev->id.vendor = PCI_VENDOR_ID_SONY; |
1181 | jog_dev->dev.parent = &pdev->dev; | 1181 | jog_dev->dev.parent = &pdev->dev; |
1182 | 1182 | ||
1183 | jog_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); | 1183 | jog_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); |
1184 | jog_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_MIDDLE); | 1184 | jog_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_MIDDLE); |
1185 | jog_dev->relbit[0] = BIT_MASK(REL_WHEEL); | 1185 | jog_dev->relbit[0] = BIT_MASK(REL_WHEEL); |
1186 | 1186 | ||
1187 | sonypi_device.input_key_dev = key_dev = input_allocate_device(); | 1187 | sonypi_device.input_key_dev = key_dev = input_allocate_device(); |
1188 | if (!key_dev) { | 1188 | if (!key_dev) { |
1189 | error = -ENOMEM; | 1189 | error = -ENOMEM; |
1190 | goto err_free_jogdev; | 1190 | goto err_free_jogdev; |
1191 | } | 1191 | } |
1192 | 1192 | ||
1193 | key_dev->name = "Sony Vaio Keys"; | 1193 | key_dev->name = "Sony Vaio Keys"; |
1194 | key_dev->id.bustype = BUS_ISA; | 1194 | key_dev->id.bustype = BUS_ISA; |
1195 | key_dev->id.vendor = PCI_VENDOR_ID_SONY; | 1195 | key_dev->id.vendor = PCI_VENDOR_ID_SONY; |
1196 | key_dev->dev.parent = &pdev->dev; | 1196 | key_dev->dev.parent = &pdev->dev; |
1197 | 1197 | ||
1198 | /* Initialize the Input Drivers: special keys */ | 1198 | /* Initialize the Input Drivers: special keys */ |
1199 | key_dev->evbit[0] = BIT_MASK(EV_KEY); | 1199 | key_dev->evbit[0] = BIT_MASK(EV_KEY); |
1200 | for (i = 0; sonypi_inputkeys[i].sonypiev; i++) | 1200 | for (i = 0; sonypi_inputkeys[i].sonypiev; i++) |
1201 | if (sonypi_inputkeys[i].inputev) | 1201 | if (sonypi_inputkeys[i].inputev) |
1202 | set_bit(sonypi_inputkeys[i].inputev, key_dev->keybit); | 1202 | set_bit(sonypi_inputkeys[i].inputev, key_dev->keybit); |
1203 | 1203 | ||
1204 | error = input_register_device(jog_dev); | 1204 | error = input_register_device(jog_dev); |
1205 | if (error) | 1205 | if (error) |
1206 | goto err_free_keydev; | 1206 | goto err_free_keydev; |
1207 | 1207 | ||
1208 | error = input_register_device(key_dev); | 1208 | error = input_register_device(key_dev); |
1209 | if (error) | 1209 | if (error) |
1210 | goto err_unregister_jogdev; | 1210 | goto err_unregister_jogdev; |
1211 | 1211 | ||
1212 | return 0; | 1212 | return 0; |
1213 | 1213 | ||
1214 | err_unregister_jogdev: | 1214 | err_unregister_jogdev: |
1215 | input_unregister_device(jog_dev); | 1215 | input_unregister_device(jog_dev); |
1216 | /* Set to NULL so we don't free it again below */ | 1216 | /* Set to NULL so we don't free it again below */ |
1217 | jog_dev = NULL; | 1217 | jog_dev = NULL; |
1218 | err_free_keydev: | 1218 | err_free_keydev: |
1219 | input_free_device(key_dev); | 1219 | input_free_device(key_dev); |
1220 | sonypi_device.input_key_dev = NULL; | 1220 | sonypi_device.input_key_dev = NULL; |
1221 | err_free_jogdev: | 1221 | err_free_jogdev: |
1222 | input_free_device(jog_dev); | 1222 | input_free_device(jog_dev); |
1223 | sonypi_device.input_jog_dev = NULL; | 1223 | sonypi_device.input_jog_dev = NULL; |
1224 | 1224 | ||
1225 | return error; | 1225 | return error; |
1226 | } | 1226 | } |
1227 | 1227 | ||
1228 | static int __devinit sonypi_setup_ioports(struct sonypi_device *dev, | 1228 | static int sonypi_setup_ioports(struct sonypi_device *dev, |
1229 | const struct sonypi_ioport_list *ioport_list) | 1229 | const struct sonypi_ioport_list *ioport_list) |
1230 | { | 1230 | { |
1231 | /* try to detect if sony-laptop is being used and thus | 1231 | /* try to detect if sony-laptop is being used and thus |
1232 | * has already requested one of the known ioports. | 1232 | * has already requested one of the known ioports. |
1233 | * As in the deprecated check_region this is racy, as we have | 1233 | * As in the deprecated check_region this is racy, as we have |
1234 | * multiple ioports available and one of them can be requested | 1234 | * multiple ioports available and one of them can be requested |
1235 | * between this check and the subsequent request. Anyway, as an | 1235 | * between this check and the subsequent request. Anyway, as an |
1236 | * attempt to be somewhat more user-friendly than we currently are, | 1236 | * attempt to be somewhat more user-friendly than we currently are, |
1237 | * this is enough. | 1237 | * this is enough. |
1238 | */ | 1238 | */ |
1239 | const struct sonypi_ioport_list *check = ioport_list; | 1239 | const struct sonypi_ioport_list *check = ioport_list; |
1240 | while (check_ioport && check->port1) { | 1240 | while (check_ioport && check->port1) { |
1241 | if (!request_region(check->port1, | 1241 | if (!request_region(check->port1, |
1242 | sonypi_device.region_size, | 1242 | sonypi_device.region_size, |
1243 | "Sony Programmable I/O Device Check")) { | 1243 | "Sony Programmable I/O Device Check")) { |
1244 | printk(KERN_ERR "sonypi: ioport 0x%.4x busy, using sony-laptop? " | 1244 | printk(KERN_ERR "sonypi: ioport 0x%.4x busy, using sony-laptop? " |
1245 | "if not use check_ioport=0\n", | 1245 | "if not use check_ioport=0\n", |
1246 | check->port1); | 1246 | check->port1); |
1247 | return -EBUSY; | 1247 | return -EBUSY; |
1248 | } | 1248 | } |
1249 | release_region(check->port1, sonypi_device.region_size); | 1249 | release_region(check->port1, sonypi_device.region_size); |
1250 | check++; | 1250 | check++; |
1251 | } | 1251 | } |
1252 | 1252 | ||
1253 | while (ioport_list->port1) { | 1253 | while (ioport_list->port1) { |
1254 | 1254 | ||
1255 | if (request_region(ioport_list->port1, | 1255 | if (request_region(ioport_list->port1, |
1256 | sonypi_device.region_size, | 1256 | sonypi_device.region_size, |
1257 | "Sony Programmable I/O Device")) { | 1257 | "Sony Programmable I/O Device")) { |
1258 | dev->ioport1 = ioport_list->port1; | 1258 | dev->ioport1 = ioport_list->port1; |
1259 | dev->ioport2 = ioport_list->port2; | 1259 | dev->ioport2 = ioport_list->port2; |
1260 | return 0; | 1260 | return 0; |
1261 | } | 1261 | } |
1262 | ioport_list++; | 1262 | ioport_list++; |
1263 | } | 1263 | } |
1264 | 1264 | ||
1265 | return -EBUSY; | 1265 | return -EBUSY; |
1266 | } | 1266 | } |
1267 | 1267 | ||
1268 | static int __devinit sonypi_setup_irq(struct sonypi_device *dev, | 1268 | static int sonypi_setup_irq(struct sonypi_device *dev, |
1269 | const struct sonypi_irq_list *irq_list) | 1269 | const struct sonypi_irq_list *irq_list) |
1270 | { | 1270 | { |
1271 | while (irq_list->irq) { | 1271 | while (irq_list->irq) { |
1272 | 1272 | ||
1273 | if (!request_irq(irq_list->irq, sonypi_irq, | 1273 | if (!request_irq(irq_list->irq, sonypi_irq, |
1274 | IRQF_SHARED, "sonypi", sonypi_irq)) { | 1274 | IRQF_SHARED, "sonypi", sonypi_irq)) { |
1275 | dev->irq = irq_list->irq; | 1275 | dev->irq = irq_list->irq; |
1276 | dev->bits = irq_list->bits; | 1276 | dev->bits = irq_list->bits; |
1277 | return 0; | 1277 | return 0; |
1278 | } | 1278 | } |
1279 | irq_list++; | 1279 | irq_list++; |
1280 | } | 1280 | } |
1281 | 1281 | ||
1282 | return -EBUSY; | 1282 | return -EBUSY; |
1283 | } | 1283 | } |
1284 | 1284 | ||
1285 | static void __devinit sonypi_display_info(void) | 1285 | static void sonypi_display_info(void) |
1286 | { | 1286 | { |
1287 | printk(KERN_INFO "sonypi: detected type%d model, " | 1287 | printk(KERN_INFO "sonypi: detected type%d model, " |
1288 | "verbose = %d, fnkeyinit = %s, camera = %s, " | 1288 | "verbose = %d, fnkeyinit = %s, camera = %s, " |
1289 | "compat = %s, mask = 0x%08lx, useinput = %s, acpi = %s\n", | 1289 | "compat = %s, mask = 0x%08lx, useinput = %s, acpi = %s\n", |
1290 | sonypi_device.model, | 1290 | sonypi_device.model, |
1291 | verbose, | 1291 | verbose, |
1292 | fnkeyinit ? "on" : "off", | 1292 | fnkeyinit ? "on" : "off", |
1293 | camera ? "on" : "off", | 1293 | camera ? "on" : "off", |
1294 | compat ? "on" : "off", | 1294 | compat ? "on" : "off", |
1295 | mask, | 1295 | mask, |
1296 | useinput ? "on" : "off", | 1296 | useinput ? "on" : "off", |
1297 | SONYPI_ACPI_ACTIVE ? "on" : "off"); | 1297 | SONYPI_ACPI_ACTIVE ? "on" : "off"); |
1298 | printk(KERN_INFO "sonypi: enabled at irq=%d, port1=0x%x, port2=0x%x\n", | 1298 | printk(KERN_INFO "sonypi: enabled at irq=%d, port1=0x%x, port2=0x%x\n", |
1299 | sonypi_device.irq, | 1299 | sonypi_device.irq, |
1300 | sonypi_device.ioport1, sonypi_device.ioport2); | 1300 | sonypi_device.ioport1, sonypi_device.ioport2); |
1301 | 1301 | ||
1302 | if (minor == -1) | 1302 | if (minor == -1) |
1303 | printk(KERN_INFO "sonypi: device allocated minor is %d\n", | 1303 | printk(KERN_INFO "sonypi: device allocated minor is %d\n", |
1304 | sonypi_misc_device.minor); | 1304 | sonypi_misc_device.minor); |
1305 | } | 1305 | } |
1306 | 1306 | ||
1307 | static int __devinit sonypi_probe(struct platform_device *dev) | 1307 | static int sonypi_probe(struct platform_device *dev) |
1308 | { | 1308 | { |
1309 | const struct sonypi_ioport_list *ioport_list; | 1309 | const struct sonypi_ioport_list *ioport_list; |
1310 | const struct sonypi_irq_list *irq_list; | 1310 | const struct sonypi_irq_list *irq_list; |
1311 | struct pci_dev *pcidev; | 1311 | struct pci_dev *pcidev; |
1312 | int error; | 1312 | int error; |
1313 | 1313 | ||
1314 | printk(KERN_WARNING "sonypi: please try the sony-laptop module instead " | 1314 | printk(KERN_WARNING "sonypi: please try the sony-laptop module instead " |
1315 | "and report failures, see also " | 1315 | "and report failures, see also " |
1316 | "http://www.linux.it/~malattia/wiki/index.php/Sony_drivers\n"); | 1316 | "http://www.linux.it/~malattia/wiki/index.php/Sony_drivers\n"); |
1317 | 1317 | ||
1318 | spin_lock_init(&sonypi_device.fifo_lock); | 1318 | spin_lock_init(&sonypi_device.fifo_lock); |
1319 | error = kfifo_alloc(&sonypi_device.fifo, SONYPI_BUF_SIZE, GFP_KERNEL); | 1319 | error = kfifo_alloc(&sonypi_device.fifo, SONYPI_BUF_SIZE, GFP_KERNEL); |
1320 | if (error) { | 1320 | if (error) { |
1321 | printk(KERN_ERR "sonypi: kfifo_alloc failed\n"); | 1321 | printk(KERN_ERR "sonypi: kfifo_alloc failed\n"); |
1322 | return error; | 1322 | return error; |
1323 | } | 1323 | } |
1324 | 1324 | ||
1325 | init_waitqueue_head(&sonypi_device.fifo_proc_list); | 1325 | init_waitqueue_head(&sonypi_device.fifo_proc_list); |
1326 | mutex_init(&sonypi_device.lock); | 1326 | mutex_init(&sonypi_device.lock); |
1327 | sonypi_device.bluetooth_power = -1; | 1327 | sonypi_device.bluetooth_power = -1; |
1328 | 1328 | ||
1329 | if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, | 1329 | if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, |
1330 | PCI_DEVICE_ID_INTEL_82371AB_3, NULL))) | 1330 | PCI_DEVICE_ID_INTEL_82371AB_3, NULL))) |
1331 | sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE1; | 1331 | sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE1; |
1332 | else if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, | 1332 | else if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, |
1333 | PCI_DEVICE_ID_INTEL_ICH6_1, NULL))) | 1333 | PCI_DEVICE_ID_INTEL_ICH6_1, NULL))) |
1334 | sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE3; | 1334 | sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE3; |
1335 | else if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, | 1335 | else if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, |
1336 | PCI_DEVICE_ID_INTEL_ICH7_1, NULL))) | 1336 | PCI_DEVICE_ID_INTEL_ICH7_1, NULL))) |
1337 | sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE3; | 1337 | sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE3; |
1338 | else | 1338 | else |
1339 | sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE2; | 1339 | sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE2; |
1340 | 1340 | ||
1341 | if (pcidev && pci_enable_device(pcidev)) { | 1341 | if (pcidev && pci_enable_device(pcidev)) { |
1342 | printk(KERN_ERR "sonypi: pci_enable_device failed\n"); | 1342 | printk(KERN_ERR "sonypi: pci_enable_device failed\n"); |
1343 | error = -EIO; | 1343 | error = -EIO; |
1344 | goto err_put_pcidev; | 1344 | goto err_put_pcidev; |
1345 | } | 1345 | } |
1346 | 1346 | ||
1347 | sonypi_device.dev = pcidev; | 1347 | sonypi_device.dev = pcidev; |
1348 | 1348 | ||
1349 | if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE1) { | 1349 | if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE1) { |
1350 | ioport_list = sonypi_type1_ioport_list; | 1350 | ioport_list = sonypi_type1_ioport_list; |
1351 | sonypi_device.region_size = SONYPI_TYPE1_REGION_SIZE; | 1351 | sonypi_device.region_size = SONYPI_TYPE1_REGION_SIZE; |
1352 | sonypi_device.evtype_offset = SONYPI_TYPE1_EVTYPE_OFFSET; | 1352 | sonypi_device.evtype_offset = SONYPI_TYPE1_EVTYPE_OFFSET; |
1353 | irq_list = sonypi_type1_irq_list; | 1353 | irq_list = sonypi_type1_irq_list; |
1354 | } else if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE2) { | 1354 | } else if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE2) { |
1355 | ioport_list = sonypi_type2_ioport_list; | 1355 | ioport_list = sonypi_type2_ioport_list; |
1356 | sonypi_device.region_size = SONYPI_TYPE2_REGION_SIZE; | 1356 | sonypi_device.region_size = SONYPI_TYPE2_REGION_SIZE; |
1357 | sonypi_device.evtype_offset = SONYPI_TYPE2_EVTYPE_OFFSET; | 1357 | sonypi_device.evtype_offset = SONYPI_TYPE2_EVTYPE_OFFSET; |
1358 | irq_list = sonypi_type2_irq_list; | 1358 | irq_list = sonypi_type2_irq_list; |
1359 | } else { | 1359 | } else { |
1360 | ioport_list = sonypi_type3_ioport_list; | 1360 | ioport_list = sonypi_type3_ioport_list; |
1361 | sonypi_device.region_size = SONYPI_TYPE3_REGION_SIZE; | 1361 | sonypi_device.region_size = SONYPI_TYPE3_REGION_SIZE; |
1362 | sonypi_device.evtype_offset = SONYPI_TYPE3_EVTYPE_OFFSET; | 1362 | sonypi_device.evtype_offset = SONYPI_TYPE3_EVTYPE_OFFSET; |
1363 | irq_list = sonypi_type3_irq_list; | 1363 | irq_list = sonypi_type3_irq_list; |
1364 | } | 1364 | } |
1365 | 1365 | ||
1366 | error = sonypi_setup_ioports(&sonypi_device, ioport_list); | 1366 | error = sonypi_setup_ioports(&sonypi_device, ioport_list); |
1367 | if (error) { | 1367 | if (error) { |
1368 | printk(KERN_ERR "sonypi: failed to request ioports\n"); | 1368 | printk(KERN_ERR "sonypi: failed to request ioports\n"); |
1369 | goto err_disable_pcidev; | 1369 | goto err_disable_pcidev; |
1370 | } | 1370 | } |
1371 | 1371 | ||
1372 | error = sonypi_setup_irq(&sonypi_device, irq_list); | 1372 | error = sonypi_setup_irq(&sonypi_device, irq_list); |
1373 | if (error) { | 1373 | if (error) { |
1374 | printk(KERN_ERR "sonypi: request_irq failed\n"); | 1374 | printk(KERN_ERR "sonypi: request_irq failed\n"); |
1375 | goto err_free_ioports; | 1375 | goto err_free_ioports; |
1376 | } | 1376 | } |
1377 | 1377 | ||
1378 | if (minor != -1) | 1378 | if (minor != -1) |
1379 | sonypi_misc_device.minor = minor; | 1379 | sonypi_misc_device.minor = minor; |
1380 | error = misc_register(&sonypi_misc_device); | 1380 | error = misc_register(&sonypi_misc_device); |
1381 | if (error) { | 1381 | if (error) { |
1382 | printk(KERN_ERR "sonypi: misc_register failed\n"); | 1382 | printk(KERN_ERR "sonypi: misc_register failed\n"); |
1383 | goto err_free_irq; | 1383 | goto err_free_irq; |
1384 | } | 1384 | } |
1385 | 1385 | ||
1386 | sonypi_display_info(); | 1386 | sonypi_display_info(); |
1387 | 1387 | ||
1388 | if (useinput) { | 1388 | if (useinput) { |
1389 | 1389 | ||
1390 | error = sonypi_create_input_devices(dev); | 1390 | error = sonypi_create_input_devices(dev); |
1391 | if (error) { | 1391 | if (error) { |
1392 | printk(KERN_ERR | 1392 | printk(KERN_ERR |
1393 | "sonypi: failed to create input devices\n"); | 1393 | "sonypi: failed to create input devices\n"); |
1394 | goto err_miscdev_unregister; | 1394 | goto err_miscdev_unregister; |
1395 | } | 1395 | } |
1396 | 1396 | ||
1397 | spin_lock_init(&sonypi_device.input_fifo_lock); | 1397 | spin_lock_init(&sonypi_device.input_fifo_lock); |
1398 | error = kfifo_alloc(&sonypi_device.input_fifo, SONYPI_BUF_SIZE, | 1398 | error = kfifo_alloc(&sonypi_device.input_fifo, SONYPI_BUF_SIZE, |
1399 | GFP_KERNEL); | 1399 | GFP_KERNEL); |
1400 | if (error) { | 1400 | if (error) { |
1401 | printk(KERN_ERR "sonypi: kfifo_alloc failed\n"); | 1401 | printk(KERN_ERR "sonypi: kfifo_alloc failed\n"); |
1402 | goto err_inpdev_unregister; | 1402 | goto err_inpdev_unregister; |
1403 | } | 1403 | } |
1404 | 1404 | ||
1405 | INIT_WORK(&sonypi_device.input_work, input_keyrelease); | 1405 | INIT_WORK(&sonypi_device.input_work, input_keyrelease); |
1406 | } | 1406 | } |
1407 | 1407 | ||
1408 | sonypi_enable(0); | 1408 | sonypi_enable(0); |
1409 | 1409 | ||
1410 | return 0; | 1410 | return 0; |
1411 | 1411 | ||
1412 | err_inpdev_unregister: | 1412 | err_inpdev_unregister: |
1413 | input_unregister_device(sonypi_device.input_key_dev); | 1413 | input_unregister_device(sonypi_device.input_key_dev); |
1414 | input_unregister_device(sonypi_device.input_jog_dev); | 1414 | input_unregister_device(sonypi_device.input_jog_dev); |
1415 | err_miscdev_unregister: | 1415 | err_miscdev_unregister: |
1416 | misc_deregister(&sonypi_misc_device); | 1416 | misc_deregister(&sonypi_misc_device); |
1417 | err_free_irq: | 1417 | err_free_irq: |
1418 | free_irq(sonypi_device.irq, sonypi_irq); | 1418 | free_irq(sonypi_device.irq, sonypi_irq); |
1419 | err_free_ioports: | 1419 | err_free_ioports: |
1420 | release_region(sonypi_device.ioport1, sonypi_device.region_size); | 1420 | release_region(sonypi_device.ioport1, sonypi_device.region_size); |
1421 | err_disable_pcidev: | 1421 | err_disable_pcidev: |
1422 | if (pcidev) | 1422 | if (pcidev) |
1423 | pci_disable_device(pcidev); | 1423 | pci_disable_device(pcidev); |
1424 | err_put_pcidev: | 1424 | err_put_pcidev: |
1425 | pci_dev_put(pcidev); | 1425 | pci_dev_put(pcidev); |
1426 | kfifo_free(&sonypi_device.fifo); | 1426 | kfifo_free(&sonypi_device.fifo); |
1427 | 1427 | ||
1428 | return error; | 1428 | return error; |
1429 | } | 1429 | } |
1430 | 1430 | ||
1431 | static int __devexit sonypi_remove(struct platform_device *dev) | 1431 | static int __devexit sonypi_remove(struct platform_device *dev) |
1432 | { | 1432 | { |
1433 | sonypi_disable(); | 1433 | sonypi_disable(); |
1434 | 1434 | ||
1435 | synchronize_irq(sonypi_device.irq); | 1435 | synchronize_irq(sonypi_device.irq); |
1436 | flush_work(&sonypi_device.input_work); | 1436 | flush_work(&sonypi_device.input_work); |
1437 | 1437 | ||
1438 | if (useinput) { | 1438 | if (useinput) { |
1439 | input_unregister_device(sonypi_device.input_key_dev); | 1439 | input_unregister_device(sonypi_device.input_key_dev); |
1440 | input_unregister_device(sonypi_device.input_jog_dev); | 1440 | input_unregister_device(sonypi_device.input_jog_dev); |
1441 | kfifo_free(&sonypi_device.input_fifo); | 1441 | kfifo_free(&sonypi_device.input_fifo); |
1442 | } | 1442 | } |
1443 | 1443 | ||
1444 | misc_deregister(&sonypi_misc_device); | 1444 | misc_deregister(&sonypi_misc_device); |
1445 | 1445 | ||
1446 | free_irq(sonypi_device.irq, sonypi_irq); | 1446 | free_irq(sonypi_device.irq, sonypi_irq); |
1447 | release_region(sonypi_device.ioport1, sonypi_device.region_size); | 1447 | release_region(sonypi_device.ioport1, sonypi_device.region_size); |
1448 | 1448 | ||
1449 | if (sonypi_device.dev) { | 1449 | if (sonypi_device.dev) { |
1450 | pci_disable_device(sonypi_device.dev); | 1450 | pci_disable_device(sonypi_device.dev); |
1451 | pci_dev_put(sonypi_device.dev); | 1451 | pci_dev_put(sonypi_device.dev); |
1452 | } | 1452 | } |
1453 | 1453 | ||
1454 | kfifo_free(&sonypi_device.fifo); | 1454 | kfifo_free(&sonypi_device.fifo); |
1455 | 1455 | ||
1456 | return 0; | 1456 | return 0; |
1457 | } | 1457 | } |
1458 | 1458 | ||
1459 | #ifdef CONFIG_PM_SLEEP | 1459 | #ifdef CONFIG_PM_SLEEP |
1460 | static int old_camera_power; | 1460 | static int old_camera_power; |
1461 | 1461 | ||
1462 | static int sonypi_suspend(struct device *dev) | 1462 | static int sonypi_suspend(struct device *dev) |
1463 | { | 1463 | { |
1464 | old_camera_power = sonypi_device.camera_power; | 1464 | old_camera_power = sonypi_device.camera_power; |
1465 | sonypi_disable(); | 1465 | sonypi_disable(); |
1466 | 1466 | ||
1467 | return 0; | 1467 | return 0; |
1468 | } | 1468 | } |
1469 | 1469 | ||
1470 | static int sonypi_resume(struct device *dev) | 1470 | static int sonypi_resume(struct device *dev) |
1471 | { | 1471 | { |
1472 | sonypi_enable(old_camera_power); | 1472 | sonypi_enable(old_camera_power); |
1473 | return 0; | 1473 | return 0; |
1474 | } | 1474 | } |
1475 | 1475 | ||
1476 | static SIMPLE_DEV_PM_OPS(sonypi_pm, sonypi_suspend, sonypi_resume); | 1476 | static SIMPLE_DEV_PM_OPS(sonypi_pm, sonypi_suspend, sonypi_resume); |
1477 | #define SONYPI_PM (&sonypi_pm) | 1477 | #define SONYPI_PM (&sonypi_pm) |
1478 | #else | 1478 | #else |
1479 | #define SONYPI_PM NULL | 1479 | #define SONYPI_PM NULL |
1480 | #endif | 1480 | #endif |
1481 | 1481 | ||
1482 | static void sonypi_shutdown(struct platform_device *dev) | 1482 | static void sonypi_shutdown(struct platform_device *dev) |
1483 | { | 1483 | { |
1484 | sonypi_disable(); | 1484 | sonypi_disable(); |
1485 | } | 1485 | } |
1486 | 1486 | ||
1487 | static struct platform_driver sonypi_driver = { | 1487 | static struct platform_driver sonypi_driver = { |
1488 | .driver = { | 1488 | .driver = { |
1489 | .name = "sonypi", | 1489 | .name = "sonypi", |
1490 | .owner = THIS_MODULE, | 1490 | .owner = THIS_MODULE, |
1491 | .pm = SONYPI_PM, | 1491 | .pm = SONYPI_PM, |
1492 | }, | 1492 | }, |
1493 | .probe = sonypi_probe, | 1493 | .probe = sonypi_probe, |
1494 | .remove = sonypi_remove, | 1494 | .remove = sonypi_remove, |
1495 | .shutdown = sonypi_shutdown, | 1495 | .shutdown = sonypi_shutdown, |
1496 | }; | 1496 | }; |
1497 | 1497 | ||
1498 | static struct platform_device *sonypi_platform_device; | 1498 | static struct platform_device *sonypi_platform_device; |
1499 | 1499 | ||
1500 | static struct dmi_system_id __initdata sonypi_dmi_table[] = { | 1500 | static struct dmi_system_id __initdata sonypi_dmi_table[] = { |
1501 | { | 1501 | { |
1502 | .ident = "Sony Vaio", | 1502 | .ident = "Sony Vaio", |
1503 | .matches = { | 1503 | .matches = { |
1504 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | 1504 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), |
1505 | DMI_MATCH(DMI_PRODUCT_NAME, "PCG-"), | 1505 | DMI_MATCH(DMI_PRODUCT_NAME, "PCG-"), |
1506 | }, | 1506 | }, |
1507 | }, | 1507 | }, |
1508 | { | 1508 | { |
1509 | .ident = "Sony Vaio", | 1509 | .ident = "Sony Vaio", |
1510 | .matches = { | 1510 | .matches = { |
1511 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | 1511 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), |
1512 | DMI_MATCH(DMI_PRODUCT_NAME, "VGN-"), | 1512 | DMI_MATCH(DMI_PRODUCT_NAME, "VGN-"), |
1513 | }, | 1513 | }, |
1514 | }, | 1514 | }, |
1515 | { } | 1515 | { } |
1516 | }; | 1516 | }; |
1517 | 1517 | ||
1518 | static int __init sonypi_init(void) | 1518 | static int __init sonypi_init(void) |
1519 | { | 1519 | { |
1520 | int error; | 1520 | int error; |
1521 | 1521 | ||
1522 | printk(KERN_INFO | 1522 | printk(KERN_INFO |
1523 | "sonypi: Sony Programmable I/O Controller Driver v%s.\n", | 1523 | "sonypi: Sony Programmable I/O Controller Driver v%s.\n", |
1524 | SONYPI_DRIVER_VERSION); | 1524 | SONYPI_DRIVER_VERSION); |
1525 | 1525 | ||
1526 | if (!dmi_check_system(sonypi_dmi_table)) | 1526 | if (!dmi_check_system(sonypi_dmi_table)) |
1527 | return -ENODEV; | 1527 | return -ENODEV; |
1528 | 1528 | ||
1529 | error = platform_driver_register(&sonypi_driver); | 1529 | error = platform_driver_register(&sonypi_driver); |
1530 | if (error) | 1530 | if (error) |
1531 | return error; | 1531 | return error; |
1532 | 1532 | ||
1533 | sonypi_platform_device = platform_device_alloc("sonypi", -1); | 1533 | sonypi_platform_device = platform_device_alloc("sonypi", -1); |
1534 | if (!sonypi_platform_device) { | 1534 | if (!sonypi_platform_device) { |
1535 | error = -ENOMEM; | 1535 | error = -ENOMEM; |
1536 | goto err_driver_unregister; | 1536 | goto err_driver_unregister; |
1537 | } | 1537 | } |
1538 | 1538 | ||
1539 | error = platform_device_add(sonypi_platform_device); | 1539 | error = platform_device_add(sonypi_platform_device); |
1540 | if (error) | 1540 | if (error) |
1541 | goto err_free_device; | 1541 | goto err_free_device; |
1542 | 1542 | ||
1543 | #ifdef CONFIG_ACPI | 1543 | #ifdef CONFIG_ACPI |
1544 | if (acpi_bus_register_driver(&sonypi_acpi_driver) >= 0) | 1544 | if (acpi_bus_register_driver(&sonypi_acpi_driver) >= 0) |
1545 | acpi_driver_registered = 1; | 1545 | acpi_driver_registered = 1; |
1546 | #endif | 1546 | #endif |
1547 | 1547 | ||
1548 | return 0; | 1548 | return 0; |
1549 | 1549 | ||
1550 | err_free_device: | 1550 | err_free_device: |
1551 | platform_device_put(sonypi_platform_device); | 1551 | platform_device_put(sonypi_platform_device); |
1552 | err_driver_unregister: | 1552 | err_driver_unregister: |
1553 | platform_driver_unregister(&sonypi_driver); | 1553 | platform_driver_unregister(&sonypi_driver); |
1554 | return error; | 1554 | return error; |
1555 | } | 1555 | } |
1556 | 1556 | ||
1557 | static void __exit sonypi_exit(void) | 1557 | static void __exit sonypi_exit(void) |
1558 | { | 1558 | { |
1559 | #ifdef CONFIG_ACPI | 1559 | #ifdef CONFIG_ACPI |
1560 | if (acpi_driver_registered) | 1560 | if (acpi_driver_registered) |
1561 | acpi_bus_unregister_driver(&sonypi_acpi_driver); | 1561 | acpi_bus_unregister_driver(&sonypi_acpi_driver); |
1562 | #endif | 1562 | #endif |
1563 | platform_device_unregister(sonypi_platform_device); | 1563 | platform_device_unregister(sonypi_platform_device); |
1564 | platform_driver_unregister(&sonypi_driver); | 1564 | platform_driver_unregister(&sonypi_driver); |
1565 | printk(KERN_INFO "sonypi: removed.\n"); | 1565 | printk(KERN_INFO "sonypi: removed.\n"); |
1566 | } | 1566 | } |
1567 | 1567 | ||
1568 | module_init(sonypi_init); | 1568 | module_init(sonypi_init); |
1569 | module_exit(sonypi_exit); | 1569 | module_exit(sonypi_exit); |
1570 | 1570 |
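The hunks above drop __devinit from sonypi_create_input_devices(), sonypi_setup_ioports(), sonypi_setup_irq(), sonypi_display_info() and sonypi_probe(), while sonypi_remove() keeps its __devexit annotation. A minimal sketch of the resulting pattern, using a hypothetical "foo" platform driver that is not code from this commit:

#include <linux/module.h>
#include <linux/platform_device.h>

/* probe is now a plain function; no section annotation needed */
static int foo_probe(struct platform_device *pdev)
{
	return 0;
}

/* remove still carries __devexit here, mirroring the hunks above */
static int __devexit foo_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver foo_driver = {
	.driver = {
		.name  = "foo",
		.owner = THIS_MODULE,
	},
	.probe  = foo_probe,
	.remove = foo_remove,
};

module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");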
drivers/char/tb0219.c
1 | /* | 1 | /* |
2 | * Driver for TANBAC TB0219 base board. | 2 | * Driver for TANBAC TB0219 base board. |
3 | * | 3 | * |
4 | * Copyright (C) 2005 Yoichi Yuasa <yuasa@linux-mips.org> | 4 | * Copyright (C) 2005 Yoichi Yuasa <yuasa@linux-mips.org> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
8 | * the Free Software Foundation; either version 2 of the License, or | 8 | * the Free Software Foundation; either version 2 of the License, or |
9 | * (at your option) any later version. | 9 | * (at your option) any later version. |
10 | * | 10 | * |
11 | * This program is distributed in the hope that it will be useful, | 11 | * This program is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | * | 15 | * |
16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU General Public License |
17 | * along with this program; if not, write to the Free Software | 17 | * along with this program; if not, write to the Free Software |
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
19 | */ | 19 | */ |
20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
21 | #include <linux/fs.h> | 21 | #include <linux/fs.h> |
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | 24 | ||
25 | #include <asm/io.h> | 25 | #include <asm/io.h> |
26 | #include <asm/reboot.h> | 26 | #include <asm/reboot.h> |
27 | #include <asm/vr41xx/giu.h> | 27 | #include <asm/vr41xx/giu.h> |
28 | #include <asm/vr41xx/tb0219.h> | 28 | #include <asm/vr41xx/tb0219.h> |
29 | 29 | ||
30 | MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>"); | 30 | MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>"); |
31 | MODULE_DESCRIPTION("TANBAC TB0219 base board driver"); | 31 | MODULE_DESCRIPTION("TANBAC TB0219 base board driver"); |
32 | MODULE_LICENSE("GPL"); | 32 | MODULE_LICENSE("GPL"); |
33 | 33 | ||
34 | static int major; /* default is dynamic major device number */ | 34 | static int major; /* default is dynamic major device number */ |
35 | module_param(major, int, 0); | 35 | module_param(major, int, 0); |
36 | MODULE_PARM_DESC(major, "Major device number"); | 36 | MODULE_PARM_DESC(major, "Major device number"); |
37 | 37 | ||
38 | static void (*old_machine_restart)(char *command); | 38 | static void (*old_machine_restart)(char *command); |
39 | static void __iomem *tb0219_base; | 39 | static void __iomem *tb0219_base; |
40 | static DEFINE_SPINLOCK(tb0219_lock); | 40 | static DEFINE_SPINLOCK(tb0219_lock); |
41 | 41 | ||
42 | #define tb0219_read(offset) readw(tb0219_base + (offset)) | 42 | #define tb0219_read(offset) readw(tb0219_base + (offset)) |
43 | #define tb0219_write(offset, value) writew((value), tb0219_base + (offset)) | 43 | #define tb0219_write(offset, value) writew((value), tb0219_base + (offset)) |
44 | 44 | ||
45 | #define TB0219_START 0x0a000000UL | 45 | #define TB0219_START 0x0a000000UL |
46 | #define TB0219_SIZE 0x20UL | 46 | #define TB0219_SIZE 0x20UL |
47 | 47 | ||
48 | #define TB0219_LED 0x00 | 48 | #define TB0219_LED 0x00 |
49 | #define TB0219_GPIO_INPUT 0x02 | 49 | #define TB0219_GPIO_INPUT 0x02 |
50 | #define TB0219_GPIO_OUTPUT 0x04 | 50 | #define TB0219_GPIO_OUTPUT 0x04 |
51 | #define TB0219_DIP_SWITCH 0x06 | 51 | #define TB0219_DIP_SWITCH 0x06 |
52 | #define TB0219_MISC 0x08 | 52 | #define TB0219_MISC 0x08 |
53 | #define TB0219_RESET 0x0e | 53 | #define TB0219_RESET 0x0e |
54 | #define TB0219_PCI_SLOT1_IRQ_STATUS 0x10 | 54 | #define TB0219_PCI_SLOT1_IRQ_STATUS 0x10 |
55 | #define TB0219_PCI_SLOT2_IRQ_STATUS 0x12 | 55 | #define TB0219_PCI_SLOT2_IRQ_STATUS 0x12 |
56 | #define TB0219_PCI_SLOT3_IRQ_STATUS 0x14 | 56 | #define TB0219_PCI_SLOT3_IRQ_STATUS 0x14 |
57 | 57 | ||
58 | typedef enum { | 58 | typedef enum { |
59 | TYPE_LED, | 59 | TYPE_LED, |
60 | TYPE_GPIO_OUTPUT, | 60 | TYPE_GPIO_OUTPUT, |
61 | } tb0219_type_t; | 61 | } tb0219_type_t; |
62 | 62 | ||
63 | /* | 63 | /* |
64 | * Minor device number | 64 | * Minor device number |
65 | * 0 = 7 segment LED | 65 | * 0 = 7 segment LED |
66 | * | 66 | * |
67 | * 16 = GPIO IN 0 | 67 | * 16 = GPIO IN 0 |
68 | * 17 = GPIO IN 1 | 68 | * 17 = GPIO IN 1 |
69 | * 18 = GPIO IN 2 | 69 | * 18 = GPIO IN 2 |
70 | * 19 = GPIO IN 3 | 70 | * 19 = GPIO IN 3 |
71 | * 20 = GPIO IN 4 | 71 | * 20 = GPIO IN 4 |
72 | * 21 = GPIO IN 5 | 72 | * 21 = GPIO IN 5 |
73 | * 22 = GPIO IN 6 | 73 | * 22 = GPIO IN 6 |
74 | * 23 = GPIO IN 7 | 74 | * 23 = GPIO IN 7 |
75 | * | 75 | * |
76 | * 32 = GPIO OUT 0 | 76 | * 32 = GPIO OUT 0 |
77 | * 33 = GPIO OUT 1 | 77 | * 33 = GPIO OUT 1 |
78 | * 34 = GPIO OUT 2 | 78 | * 34 = GPIO OUT 2 |
79 | * 35 = GPIO OUT 3 | 79 | * 35 = GPIO OUT 3 |
80 | * 36 = GPIO OUT 4 | 80 | * 36 = GPIO OUT 4 |
81 | * 37 = GPIO OUT 5 | 81 | * 37 = GPIO OUT 5 |
82 | * 38 = GPIO OUT 6 | 82 | * 38 = GPIO OUT 6 |
83 | * 39 = GPIO OUT 7 | 83 | * 39 = GPIO OUT 7 |
84 | * | 84 | * |
85 | * 48 = DIP switch 1 | 85 | * 48 = DIP switch 1 |
86 | * 49 = DIP switch 2 | 86 | * 49 = DIP switch 2 |
87 | * 50 = DIP switch 3 | 87 | * 50 = DIP switch 3 |
88 | * 51 = DIP switch 4 | 88 | * 51 = DIP switch 4 |
89 | * 52 = DIP switch 5 | 89 | * 52 = DIP switch 5 |
90 | * 53 = DIP switch 6 | 90 | * 53 = DIP switch 6 |
91 | * 54 = DIP switch 7 | 91 | * 54 = DIP switch 7 |
92 | * 55 = DIP switch 8 | 92 | * 55 = DIP switch 8 |
93 | */ | 93 | */ |
94 | 94 | ||
95 | static inline char get_led(void) | 95 | static inline char get_led(void) |
96 | { | 96 | { |
97 | return (char)tb0219_read(TB0219_LED); | 97 | return (char)tb0219_read(TB0219_LED); |
98 | } | 98 | } |
99 | 99 | ||
100 | static inline char get_gpio_input_pin(unsigned int pin) | 100 | static inline char get_gpio_input_pin(unsigned int pin) |
101 | { | 101 | { |
102 | uint16_t values; | 102 | uint16_t values; |
103 | 103 | ||
104 | values = tb0219_read(TB0219_GPIO_INPUT); | 104 | values = tb0219_read(TB0219_GPIO_INPUT); |
105 | if (values & (1 << pin)) | 105 | if (values & (1 << pin)) |
106 | return '1'; | 106 | return '1'; |
107 | 107 | ||
108 | return '0'; | 108 | return '0'; |
109 | } | 109 | } |
110 | 110 | ||
111 | static inline char get_gpio_output_pin(unsigned int pin) | 111 | static inline char get_gpio_output_pin(unsigned int pin) |
112 | { | 112 | { |
113 | uint16_t values; | 113 | uint16_t values; |
114 | 114 | ||
115 | values = tb0219_read(TB0219_GPIO_OUTPUT); | 115 | values = tb0219_read(TB0219_GPIO_OUTPUT); |
116 | if (values & (1 << pin)) | 116 | if (values & (1 << pin)) |
117 | return '1'; | 117 | return '1'; |
118 | 118 | ||
119 | return '0'; | 119 | return '0'; |
120 | } | 120 | } |
121 | 121 | ||
122 | static inline char get_dip_switch(unsigned int pin) | 122 | static inline char get_dip_switch(unsigned int pin) |
123 | { | 123 | { |
124 | uint16_t values; | 124 | uint16_t values; |
125 | 125 | ||
126 | values = tb0219_read(TB0219_DIP_SWITCH); | 126 | values = tb0219_read(TB0219_DIP_SWITCH); |
127 | if (values & (1 << pin)) | 127 | if (values & (1 << pin)) |
128 | return '1'; | 128 | return '1'; |
129 | 129 | ||
130 | return '0'; | 130 | return '0'; |
131 | } | 131 | } |
132 | 132 | ||
133 | static inline int set_led(char command) | 133 | static inline int set_led(char command) |
134 | { | 134 | { |
135 | tb0219_write(TB0219_LED, command); | 135 | tb0219_write(TB0219_LED, command); |
136 | 136 | ||
137 | return 0; | 137 | return 0; |
138 | } | 138 | } |
139 | 139 | ||
140 | static inline int set_gpio_output_pin(unsigned int pin, char command) | 140 | static inline int set_gpio_output_pin(unsigned int pin, char command) |
141 | { | 141 | { |
142 | unsigned long flags; | 142 | unsigned long flags; |
143 | uint16_t value; | 143 | uint16_t value; |
144 | 144 | ||
145 | if (command != '0' && command != '1') | 145 | if (command != '0' && command != '1') |
146 | return -EINVAL; | 146 | return -EINVAL; |
147 | 147 | ||
148 | spin_lock_irqsave(&tb0219_lock, flags); | 148 | spin_lock_irqsave(&tb0219_lock, flags); |
149 | value = tb0219_read(TB0219_GPIO_OUTPUT); | 149 | value = tb0219_read(TB0219_GPIO_OUTPUT); |
150 | if (command == '0') | 150 | if (command == '0') |
151 | value &= ~(1 << pin); | 151 | value &= ~(1 << pin); |
152 | else | 152 | else |
153 | value |= 1 << pin; | 153 | value |= 1 << pin; |
154 | tb0219_write(TB0219_GPIO_OUTPUT, value); | 154 | tb0219_write(TB0219_GPIO_OUTPUT, value); |
155 | spin_unlock_irqrestore(&tb0219_lock, flags); | 155 | spin_unlock_irqrestore(&tb0219_lock, flags); |
156 | 156 | ||
157 | return 0; | 157 | return 0; |
158 | 158 | ||
159 | } | 159 | } |
160 | 160 | ||
161 | static ssize_t tanbac_tb0219_read(struct file *file, char __user *buf, size_t len, | 161 | static ssize_t tanbac_tb0219_read(struct file *file, char __user *buf, size_t len, |
162 | loff_t *ppos) | 162 | loff_t *ppos) |
163 | { | 163 | { |
164 | unsigned int minor; | 164 | unsigned int minor; |
165 | char value; | 165 | char value; |
166 | 166 | ||
167 | minor = iminor(file->f_path.dentry->d_inode); | 167 | minor = iminor(file->f_path.dentry->d_inode); |
168 | switch (minor) { | 168 | switch (minor) { |
169 | case 0: | 169 | case 0: |
170 | value = get_led(); | 170 | value = get_led(); |
171 | break; | 171 | break; |
172 | case 16 ... 23: | 172 | case 16 ... 23: |
173 | value = get_gpio_input_pin(minor - 16); | 173 | value = get_gpio_input_pin(minor - 16); |
174 | break; | 174 | break; |
175 | case 32 ... 39: | 175 | case 32 ... 39: |
176 | value = get_gpio_output_pin(minor - 32); | 176 | value = get_gpio_output_pin(minor - 32); |
177 | break; | 177 | break; |
178 | case 48 ... 55: | 178 | case 48 ... 55: |
179 | value = get_dip_switch(minor - 48); | 179 | value = get_dip_switch(minor - 48); |
180 | break; | 180 | break; |
181 | default: | 181 | default: |
182 | return -EBADF; | 182 | return -EBADF; |
183 | } | 183 | } |
184 | 184 | ||
185 | if (len <= 0) | 185 | if (len <= 0) |
186 | return -EFAULT; | 186 | return -EFAULT; |
187 | 187 | ||
188 | if (put_user(value, buf)) | 188 | if (put_user(value, buf)) |
189 | return -EFAULT; | 189 | return -EFAULT; |
190 | 190 | ||
191 | return 1; | 191 | return 1; |
192 | } | 192 | } |
193 | 193 | ||
194 | static ssize_t tanbac_tb0219_write(struct file *file, const char __user *data, | 194 | static ssize_t tanbac_tb0219_write(struct file *file, const char __user *data, |
195 | size_t len, loff_t *ppos) | 195 | size_t len, loff_t *ppos) |
196 | { | 196 | { |
197 | unsigned int minor; | 197 | unsigned int minor; |
198 | tb0219_type_t type; | 198 | tb0219_type_t type; |
199 | size_t i; | 199 | size_t i; |
200 | int retval = 0; | 200 | int retval = 0; |
201 | char c; | 201 | char c; |
202 | 202 | ||
203 | minor = iminor(file->f_path.dentry->d_inode); | 203 | minor = iminor(file->f_path.dentry->d_inode); |
204 | switch (minor) { | 204 | switch (minor) { |
205 | case 0: | 205 | case 0: |
206 | type = TYPE_LED; | 206 | type = TYPE_LED; |
207 | break; | 207 | break; |
208 | case 32 ... 39: | 208 | case 32 ... 39: |
209 | type = TYPE_GPIO_OUTPUT; | 209 | type = TYPE_GPIO_OUTPUT; |
210 | break; | 210 | break; |
211 | default: | 211 | default: |
212 | return -EBADF; | 212 | return -EBADF; |
213 | } | 213 | } |
214 | 214 | ||
215 | for (i = 0; i < len; i++) { | 215 | for (i = 0; i < len; i++) { |
216 | if (get_user(c, data + i)) | 216 | if (get_user(c, data + i)) |
217 | return -EFAULT; | 217 | return -EFAULT; |
218 | 218 | ||
219 | switch (type) { | 219 | switch (type) { |
220 | case TYPE_LED: | 220 | case TYPE_LED: |
221 | retval = set_led(c); | 221 | retval = set_led(c); |
222 | break; | 222 | break; |
223 | case TYPE_GPIO_OUTPUT: | 223 | case TYPE_GPIO_OUTPUT: |
224 | retval = set_gpio_output_pin(minor - 32, c); | 224 | retval = set_gpio_output_pin(minor - 32, c); |
225 | break; | 225 | break; |
226 | } | 226 | } |
227 | 227 | ||
228 | if (retval < 0) | 228 | if (retval < 0) |
229 | break; | 229 | break; |
230 | } | 230 | } |
231 | 231 | ||
232 | return i; | 232 | return i; |
233 | } | 233 | } |
234 | 234 | ||
235 | static int tanbac_tb0219_open(struct inode *inode, struct file *file) | 235 | static int tanbac_tb0219_open(struct inode *inode, struct file *file) |
236 | { | 236 | { |
237 | unsigned int minor; | 237 | unsigned int minor; |
238 | 238 | ||
239 | minor = iminor(inode); | 239 | minor = iminor(inode); |
240 | switch (minor) { | 240 | switch (minor) { |
241 | case 0: | 241 | case 0: |
242 | case 16 ... 23: | 242 | case 16 ... 23: |
243 | case 32 ... 39: | 243 | case 32 ... 39: |
244 | case 48 ... 55: | 244 | case 48 ... 55: |
245 | return nonseekable_open(inode, file); | 245 | return nonseekable_open(inode, file); |
246 | default: | 246 | default: |
247 | break; | 247 | break; |
248 | } | 248 | } |
249 | 249 | ||
250 | return -EBADF; | 250 | return -EBADF; |
251 | } | 251 | } |
252 | 252 | ||
253 | static int tanbac_tb0219_release(struct inode *inode, struct file *file) | 253 | static int tanbac_tb0219_release(struct inode *inode, struct file *file) |
254 | { | 254 | { |
255 | return 0; | 255 | return 0; |
256 | } | 256 | } |
257 | 257 | ||
258 | static const struct file_operations tb0219_fops = { | 258 | static const struct file_operations tb0219_fops = { |
259 | .owner = THIS_MODULE, | 259 | .owner = THIS_MODULE, |
260 | .read = tanbac_tb0219_read, | 260 | .read = tanbac_tb0219_read, |
261 | .write = tanbac_tb0219_write, | 261 | .write = tanbac_tb0219_write, |
262 | .open = tanbac_tb0219_open, | 262 | .open = tanbac_tb0219_open, |
263 | .release = tanbac_tb0219_release, | 263 | .release = tanbac_tb0219_release, |
264 | .llseek = no_llseek, | 264 | .llseek = no_llseek, |
265 | }; | 265 | }; |
266 | 266 | ||
267 | static void tb0219_restart(char *command) | 267 | static void tb0219_restart(char *command) |
268 | { | 268 | { |
269 | tb0219_write(TB0219_RESET, 0); | 269 | tb0219_write(TB0219_RESET, 0); |
270 | } | 270 | } |
271 | 271 | ||
272 | static void tb0219_pci_irq_init(void) | 272 | static void tb0219_pci_irq_init(void) |
273 | { | 273 | { |
274 | /* PCI Slot 1 */ | 274 | /* PCI Slot 1 */ |
275 | vr41xx_set_irq_trigger(TB0219_PCI_SLOT1_PIN, IRQ_TRIGGER_LEVEL, IRQ_SIGNAL_THROUGH); | 275 | vr41xx_set_irq_trigger(TB0219_PCI_SLOT1_PIN, IRQ_TRIGGER_LEVEL, IRQ_SIGNAL_THROUGH); |
276 | vr41xx_set_irq_level(TB0219_PCI_SLOT1_PIN, IRQ_LEVEL_LOW); | 276 | vr41xx_set_irq_level(TB0219_PCI_SLOT1_PIN, IRQ_LEVEL_LOW); |
277 | 277 | ||
278 | /* PCI Slot 2 */ | 278 | /* PCI Slot 2 */ |
279 | vr41xx_set_irq_trigger(TB0219_PCI_SLOT2_PIN, IRQ_TRIGGER_LEVEL, IRQ_SIGNAL_THROUGH); | 279 | vr41xx_set_irq_trigger(TB0219_PCI_SLOT2_PIN, IRQ_TRIGGER_LEVEL, IRQ_SIGNAL_THROUGH); |
280 | vr41xx_set_irq_level(TB0219_PCI_SLOT2_PIN, IRQ_LEVEL_LOW); | 280 | vr41xx_set_irq_level(TB0219_PCI_SLOT2_PIN, IRQ_LEVEL_LOW); |
281 | 281 | ||
282 | /* PCI Slot 3 */ | 282 | /* PCI Slot 3 */ |
283 | vr41xx_set_irq_trigger(TB0219_PCI_SLOT3_PIN, IRQ_TRIGGER_LEVEL, IRQ_SIGNAL_THROUGH); | 283 | vr41xx_set_irq_trigger(TB0219_PCI_SLOT3_PIN, IRQ_TRIGGER_LEVEL, IRQ_SIGNAL_THROUGH); |
284 | vr41xx_set_irq_level(TB0219_PCI_SLOT3_PIN, IRQ_LEVEL_LOW); | 284 | vr41xx_set_irq_level(TB0219_PCI_SLOT3_PIN, IRQ_LEVEL_LOW); |
285 | } | 285 | } |
286 | 286 | ||
287 | static int __devinit tb0219_probe(struct platform_device *dev) | 287 | static int tb0219_probe(struct platform_device *dev) |
288 | { | 288 | { |
289 | int retval; | 289 | int retval; |
290 | 290 | ||
291 | if (request_mem_region(TB0219_START, TB0219_SIZE, "TB0219") == NULL) | 291 | if (request_mem_region(TB0219_START, TB0219_SIZE, "TB0219") == NULL) |
292 | return -EBUSY; | 292 | return -EBUSY; |
293 | 293 | ||
294 | tb0219_base = ioremap(TB0219_START, TB0219_SIZE); | 294 | tb0219_base = ioremap(TB0219_START, TB0219_SIZE); |
295 | if (tb0219_base == NULL) { | 295 | if (tb0219_base == NULL) { |
296 | release_mem_region(TB0219_START, TB0219_SIZE); | 296 | release_mem_region(TB0219_START, TB0219_SIZE); |
297 | return -ENOMEM; | 297 | return -ENOMEM; |
298 | } | 298 | } |
299 | 299 | ||
300 | retval = register_chrdev(major, "TB0219", &tb0219_fops); | 300 | retval = register_chrdev(major, "TB0219", &tb0219_fops); |
301 | if (retval < 0) { | 301 | if (retval < 0) { |
302 | iounmap(tb0219_base); | 302 | iounmap(tb0219_base); |
303 | tb0219_base = NULL; | 303 | tb0219_base = NULL; |
304 | release_mem_region(TB0219_START, TB0219_SIZE); | 304 | release_mem_region(TB0219_START, TB0219_SIZE); |
305 | return retval; | 305 | return retval; |
306 | } | 306 | } |
307 | 307 | ||
308 | old_machine_restart = _machine_restart; | 308 | old_machine_restart = _machine_restart; |
309 | _machine_restart = tb0219_restart; | 309 | _machine_restart = tb0219_restart; |
310 | 310 | ||
311 | tb0219_pci_irq_init(); | 311 | tb0219_pci_irq_init(); |
312 | 312 | ||
313 | if (major == 0) { | 313 | if (major == 0) { |
314 | major = retval; | 314 | major = retval; |
315 | printk(KERN_INFO "TB0219: major number %d\n", major); | 315 | printk(KERN_INFO "TB0219: major number %d\n", major); |
316 | } | 316 | } |
317 | 317 | ||
318 | return 0; | 318 | return 0; |
319 | } | 319 | } |
320 | 320 | ||
321 | static int __devexit tb0219_remove(struct platform_device *dev) | 321 | static int __devexit tb0219_remove(struct platform_device *dev) |
322 | { | 322 | { |
323 | _machine_restart = old_machine_restart; | 323 | _machine_restart = old_machine_restart; |
324 | 324 | ||
325 | iounmap(tb0219_base); | 325 | iounmap(tb0219_base); |
326 | tb0219_base = NULL; | 326 | tb0219_base = NULL; |
327 | 327 | ||
328 | release_mem_region(TB0219_START, TB0219_SIZE); | 328 | release_mem_region(TB0219_START, TB0219_SIZE); |
329 | 329 | ||
330 | return 0; | 330 | return 0; |
331 | } | 331 | } |
332 | 332 | ||
333 | static struct platform_device *tb0219_platform_device; | 333 | static struct platform_device *tb0219_platform_device; |
334 | 334 | ||
335 | static struct platform_driver tb0219_device_driver = { | 335 | static struct platform_driver tb0219_device_driver = { |
336 | .probe = tb0219_probe, | 336 | .probe = tb0219_probe, |
337 | .remove = tb0219_remove, | 337 | .remove = tb0219_remove, |
338 | .driver = { | 338 | .driver = { |
339 | .name = "TB0219", | 339 | .name = "TB0219", |
340 | .owner = THIS_MODULE, | 340 | .owner = THIS_MODULE, |
341 | }, | 341 | }, |
342 | }; | 342 | }; |
343 | 343 | ||
344 | static int __init tanbac_tb0219_init(void) | 344 | static int __init tanbac_tb0219_init(void) |
345 | { | 345 | { |
346 | int retval; | 346 | int retval; |
347 | 347 | ||
348 | tb0219_platform_device = platform_device_alloc("TB0219", -1); | 348 | tb0219_platform_device = platform_device_alloc("TB0219", -1); |
349 | if (!tb0219_platform_device) | 349 | if (!tb0219_platform_device) |
350 | return -ENOMEM; | 350 | return -ENOMEM; |
351 | 351 | ||
352 | retval = platform_device_add(tb0219_platform_device); | 352 | retval = platform_device_add(tb0219_platform_device); |
353 | if (retval < 0) { | 353 | if (retval < 0) { |
354 | platform_device_put(tb0219_platform_device); | 354 | platform_device_put(tb0219_platform_device); |
355 | return retval; | 355 | return retval; |
356 | } | 356 | } |
357 | 357 | ||
358 | retval = platform_driver_register(&tb0219_device_driver); | 358 | retval = platform_driver_register(&tb0219_device_driver); |
359 | if (retval < 0) | 359 | if (retval < 0) |
360 | platform_device_unregister(tb0219_platform_device); | 360 | platform_device_unregister(tb0219_platform_device); |
361 | 361 | ||
362 | return retval; | 362 | return retval; |
363 | } | 363 | } |
364 | 364 | ||
365 | static void __exit tanbac_tb0219_exit(void) | 365 | static void __exit tanbac_tb0219_exit(void) |
366 | { | 366 | { |
367 | platform_driver_unregister(&tb0219_device_driver); | 367 | platform_driver_unregister(&tb0219_device_driver); |
368 | platform_device_unregister(tb0219_platform_device); | 368 | platform_device_unregister(tb0219_platform_device); |
369 | } | 369 | } |
370 | 370 | ||
371 | module_init(tanbac_tb0219_init); | 371 | module_init(tanbac_tb0219_init); |
372 | module_exit(tanbac_tb0219_exit); | 372 | module_exit(tanbac_tb0219_exit); |
373 | 373 |
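The minor-number table above (0 = LED, 16-23 = GPIO IN, 32-39 = GPIO OUT, 48-55 = DIP switch) is what tanbac_tb0219_read() and tanbac_tb0219_write() dispatch on, and the write path only accepts the ASCII characters '0' and '1'. A hedged user-space sketch: the device node path and the mknod step are assumptions for illustration, built from the major number the driver prints at probe time, and are not part of this commit.

/*
 * User-space illustration (assumed setup, not part of the driver):
 * set GPIO OUT 0 high through the TB0219 character device.
 * Minor 32 maps to GPIO OUT 0 per the table in the driver source;
 * the node is created by hand, e.g. "mknod /dev/tb0219-gpo0 c <major> 32".
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/tb0219-gpo0", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* the driver accepts only the ASCII characters '0' and '1' */
	if (write(fd, "1", 1) != 1) {
		perror("write");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}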
drivers/char/virtio_console.c
1 | /* | 1 | /* |
2 | * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation | 2 | * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation |
3 | * Copyright (C) 2009, 2010, 2011 Red Hat, Inc. | 3 | * Copyright (C) 2009, 2010, 2011 Red Hat, Inc. |
4 | * Copyright (C) 2009, 2010, 2011 Amit Shah <amit.shah@redhat.com> | 4 | * Copyright (C) 2009, 2010, 2011 Amit Shah <amit.shah@redhat.com> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
8 | * the Free Software Foundation; either version 2 of the License, or | 8 | * the Free Software Foundation; either version 2 of the License, or |
9 | * (at your option) any later version. | 9 | * (at your option) any later version. |
10 | * | 10 | * |
11 | * This program is distributed in the hope that it will be useful, | 11 | * This program is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | * | 15 | * |
16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU General Public License |
17 | * along with this program; if not, write to the Free Software | 17 | * along with this program; if not, write to the Free Software |
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
19 | */ | 19 | */ |
20 | #include <linux/cdev.h> | 20 | #include <linux/cdev.h> |
21 | #include <linux/debugfs.h> | 21 | #include <linux/debugfs.h> |
22 | #include <linux/completion.h> | 22 | #include <linux/completion.h> |
23 | #include <linux/device.h> | 23 | #include <linux/device.h> |
24 | #include <linux/err.h> | 24 | #include <linux/err.h> |
25 | #include <linux/freezer.h> | 25 | #include <linux/freezer.h> |
26 | #include <linux/fs.h> | 26 | #include <linux/fs.h> |
27 | #include <linux/splice.h> | 27 | #include <linux/splice.h> |
28 | #include <linux/pagemap.h> | 28 | #include <linux/pagemap.h> |
29 | #include <linux/init.h> | 29 | #include <linux/init.h> |
30 | #include <linux/list.h> | 30 | #include <linux/list.h> |
31 | #include <linux/poll.h> | 31 | #include <linux/poll.h> |
32 | #include <linux/sched.h> | 32 | #include <linux/sched.h> |
33 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
34 | #include <linux/spinlock.h> | 34 | #include <linux/spinlock.h> |
35 | #include <linux/virtio.h> | 35 | #include <linux/virtio.h> |
36 | #include <linux/virtio_console.h> | 36 | #include <linux/virtio_console.h> |
37 | #include <linux/wait.h> | 37 | #include <linux/wait.h> |
38 | #include <linux/workqueue.h> | 38 | #include <linux/workqueue.h> |
39 | #include <linux/module.h> | 39 | #include <linux/module.h> |
40 | #include "../tty/hvc/hvc_console.h" | 40 | #include "../tty/hvc/hvc_console.h" |
41 | 41 | ||
42 | /* | 42 | /* |
43 | * This is a global struct for storing common data for all the devices | 43 | * This is a global struct for storing common data for all the devices |
44 | * this driver handles. | 44 | * this driver handles. |
45 | * | 45 | * |
46 | * Mainly, it has a linked list for all the consoles in one place so | 46 | * Mainly, it has a linked list for all the consoles in one place so |
47 | * that callbacks from hvc for get_chars(), put_chars() work properly | 47 | * that callbacks from hvc for get_chars(), put_chars() work properly |
48 | * across multiple devices and multiple ports per device. | 48 | * across multiple devices and multiple ports per device. |
49 | */ | 49 | */ |
50 | struct ports_driver_data { | 50 | struct ports_driver_data { |
51 | /* Used for registering chardevs */ | 51 | /* Used for registering chardevs */ |
52 | struct class *class; | 52 | struct class *class; |
53 | 53 | ||
54 | /* Used for exporting per-port information to debugfs */ | 54 | /* Used for exporting per-port information to debugfs */ |
55 | struct dentry *debugfs_dir; | 55 | struct dentry *debugfs_dir; |
56 | 56 | ||
57 | /* List of all the devices we're handling */ | 57 | /* List of all the devices we're handling */ |
58 | struct list_head portdevs; | 58 | struct list_head portdevs; |
59 | 59 | ||
60 | /* Number of devices this driver is handling */ | 60 | /* Number of devices this driver is handling */ |
61 | unsigned int index; | 61 | unsigned int index; |
62 | 62 | ||
63 | /* | 63 | /* |
64 | * This is used to keep track of the number of hvc consoles | 64 | * This is used to keep track of the number of hvc consoles |
65 | * spawned by this driver. This number is given as the first | 65 | * spawned by this driver. This number is given as the first |
66 | * argument to hvc_alloc(). To correctly map an initial | 66 | * argument to hvc_alloc(). To correctly map an initial |
67 | * console spawned via hvc_instantiate to the console being | 67 | * console spawned via hvc_instantiate to the console being |
68 | * hooked up via hvc_alloc, we need to pass the same vtermno. | 68 | * hooked up via hvc_alloc, we need to pass the same vtermno. |
69 | * | 69 | * |
70 | * We also just assume the first console being initialised was | 70 | * We also just assume the first console being initialised was |
71 | * the first one that got used as the initial console. | 71 | * the first one that got used as the initial console. |
72 | */ | 72 | */ |
73 | unsigned int next_vtermno; | 73 | unsigned int next_vtermno; |
74 | 74 | ||
75 | /* All the console devices handled by this driver */ | 75 | /* All the console devices handled by this driver */ |
76 | struct list_head consoles; | 76 | struct list_head consoles; |
77 | }; | 77 | }; |
78 | static struct ports_driver_data pdrvdata; | 78 | static struct ports_driver_data pdrvdata; |
79 | 79 | ||
80 | DEFINE_SPINLOCK(pdrvdata_lock); | 80 | DEFINE_SPINLOCK(pdrvdata_lock); |
81 | DECLARE_COMPLETION(early_console_added); | 81 | DECLARE_COMPLETION(early_console_added); |
82 | 82 | ||
83 | /* This struct holds information that's relevant only for console ports */ | 83 | /* This struct holds information that's relevant only for console ports */ |
84 | struct console { | 84 | struct console { |
85 | /* We'll place all consoles in a list in the pdrvdata struct */ | 85 | /* We'll place all consoles in a list in the pdrvdata struct */ |
86 | struct list_head list; | 86 | struct list_head list; |
87 | 87 | ||
88 | /* The hvc device associated with this console port */ | 88 | /* The hvc device associated with this console port */ |
89 | struct hvc_struct *hvc; | 89 | struct hvc_struct *hvc; |
90 | 90 | ||
91 | /* The size of the console */ | 91 | /* The size of the console */ |
92 | struct winsize ws; | 92 | struct winsize ws; |
93 | 93 | ||
94 | /* | 94 | /* |
95 | * This is the number that we used to register | 95 | * This is the number that we used to register |
96 | * with hvc in hvc_instantiate() and hvc_alloc(); this is the | 96 | * with hvc in hvc_instantiate() and hvc_alloc(); this is the |
97 | * number passed on by the hvc callbacks to us to | 97 | * number passed on by the hvc callbacks to us to |
98 | * differentiate between the other console ports handled by | 98 | * differentiate between the other console ports handled by |
99 | * this driver | 99 | * this driver |
100 | */ | 100 | */ |
101 | u32 vtermno; | 101 | u32 vtermno; |
102 | }; | 102 | }; |
103 | 103 | ||
104 | struct port_buffer { | 104 | struct port_buffer { |
105 | char *buf; | 105 | char *buf; |
106 | 106 | ||
107 | /* size of the buffer in *buf above */ | 107 | /* size of the buffer in *buf above */ |
108 | size_t size; | 108 | size_t size; |
109 | 109 | ||
110 | /* used length of the buffer */ | 110 | /* used length of the buffer */ |
111 | size_t len; | 111 | size_t len; |
112 | /* offset in the buf from which to consume data */ | 112 | /* offset in the buf from which to consume data */ |
113 | size_t offset; | 113 | size_t offset; |
114 | }; | 114 | }; |
115 | 115 | ||
116 | /* | 116 | /* |
117 | * This is a per-device struct that stores data common to all the | 117 | * This is a per-device struct that stores data common to all the |
118 | * ports for that device (vdev->priv). | 118 | * ports for that device (vdev->priv). |
119 | */ | 119 | */ |
120 | struct ports_device { | 120 | struct ports_device { |
121 | /* Next portdev in the list, head is in the pdrvdata struct */ | 121 | /* Next portdev in the list, head is in the pdrvdata struct */ |
122 | struct list_head list; | 122 | struct list_head list; |
123 | 123 | ||
124 | /* | 124 | /* |
125 | * Workqueue handlers where we process deferred work after | 125 | * Workqueue handlers where we process deferred work after |
126 | * notification | 126 | * notification |
127 | */ | 127 | */ |
128 | struct work_struct control_work; | 128 | struct work_struct control_work; |
129 | 129 | ||
130 | struct list_head ports; | 130 | struct list_head ports; |
131 | 131 | ||
132 | /* To protect the list of ports */ | 132 | /* To protect the list of ports */ |
133 | spinlock_t ports_lock; | 133 | spinlock_t ports_lock; |
134 | 134 | ||
135 | /* To protect the vq operations for the control channel */ | 135 | /* To protect the vq operations for the control channel */ |
136 | spinlock_t cvq_lock; | 136 | spinlock_t cvq_lock; |
137 | 137 | ||
138 | /* The current config space is stored here */ | 138 | /* The current config space is stored here */ |
139 | struct virtio_console_config config; | 139 | struct virtio_console_config config; |
140 | 140 | ||
141 | /* The virtio device we're associated with */ | 141 | /* The virtio device we're associated with */ |
142 | struct virtio_device *vdev; | 142 | struct virtio_device *vdev; |
143 | 143 | ||
144 | /* | 144 | /* |
145 | * A couple of virtqueues for the control channel: one for | 145 | * A couple of virtqueues for the control channel: one for |
146 | * guest->host transfers, one for host->guest transfers | 146 | * guest->host transfers, one for host->guest transfers |
147 | */ | 147 | */ |
148 | struct virtqueue *c_ivq, *c_ovq; | 148 | struct virtqueue *c_ivq, *c_ovq; |
149 | 149 | ||
150 | /* Array of per-port IO virtqueues */ | 150 | /* Array of per-port IO virtqueues */ |
151 | struct virtqueue **in_vqs, **out_vqs; | 151 | struct virtqueue **in_vqs, **out_vqs; |
152 | 152 | ||
153 | /* Used for numbering devices for sysfs and debugfs */ | 153 | /* Used for numbering devices for sysfs and debugfs */ |
154 | unsigned int drv_index; | 154 | unsigned int drv_index; |
155 | 155 | ||
156 | /* Major number for this device. Ports will be created as minors. */ | 156 | /* Major number for this device. Ports will be created as minors. */ |
157 | int chr_major; | 157 | int chr_major; |
158 | }; | 158 | }; |
159 | 159 | ||
160 | struct port_stats { | 160 | struct port_stats { |
161 | unsigned long bytes_sent, bytes_received, bytes_discarded; | 161 | unsigned long bytes_sent, bytes_received, bytes_discarded; |
162 | }; | 162 | }; |
163 | 163 | ||
164 | /* This struct holds the per-port data */ | 164 | /* This struct holds the per-port data */ |
165 | struct port { | 165 | struct port { |
166 | /* Next port in the list, head is in the ports_device */ | 166 | /* Next port in the list, head is in the ports_device */ |
167 | struct list_head list; | 167 | struct list_head list; |
168 | 168 | ||
169 | /* Pointer to the parent virtio_console device */ | 169 | /* Pointer to the parent virtio_console device */ |
170 | struct ports_device *portdev; | 170 | struct ports_device *portdev; |
171 | 171 | ||
172 | /* The current buffer from which data has to be fed to readers */ | 172 | /* The current buffer from which data has to be fed to readers */ |
173 | struct port_buffer *inbuf; | 173 | struct port_buffer *inbuf; |
174 | 174 | ||
175 | /* | 175 | /* |
176 | * To protect the operations on the in_vq associated with this | 176 | * To protect the operations on the in_vq associated with this |
177 | * port. Has to be a spinlock because it can be called from | 177 | * port. Has to be a spinlock because it can be called from |
178 | * interrupt context (get_char()). | 178 | * interrupt context (get_char()). |
179 | */ | 179 | */ |
180 | spinlock_t inbuf_lock; | 180 | spinlock_t inbuf_lock; |
181 | 181 | ||
182 | /* Protect the operations on the out_vq. */ | 182 | /* Protect the operations on the out_vq. */ |
183 | spinlock_t outvq_lock; | 183 | spinlock_t outvq_lock; |
184 | 184 | ||
185 | /* The IO vqs for this port */ | 185 | /* The IO vqs for this port */ |
186 | struct virtqueue *in_vq, *out_vq; | 186 | struct virtqueue *in_vq, *out_vq; |
187 | 187 | ||
188 | /* File in the debugfs directory that exposes this port's information */ | 188 | /* File in the debugfs directory that exposes this port's information */ |
189 | struct dentry *debugfs_file; | 189 | struct dentry *debugfs_file; |
190 | 190 | ||
191 | /* | 191 | /* |
192 | * Keep count of the bytes sent, received and discarded for | 192 | * Keep count of the bytes sent, received and discarded for |
193 | * this port for accounting and debugging purposes. These | 193 | * this port for accounting and debugging purposes. These |
194 | * counts are not reset across port open / close events. | 194 | * counts are not reset across port open / close events. |
195 | */ | 195 | */ |
196 | struct port_stats stats; | 196 | struct port_stats stats; |
197 | 197 | ||
198 | /* | 198 | /* |
199 | * The entries in this struct will be valid if this port is | 199 | * The entries in this struct will be valid if this port is |
200 | * hooked up to an hvc console | 200 | * hooked up to an hvc console |
201 | */ | 201 | */ |
202 | struct console cons; | 202 | struct console cons; |
203 | 203 | ||
204 | /* Each port is associated with a separate char device */ | 204 | /* Each port is associated with a separate char device */ |
205 | struct cdev *cdev; | 205 | struct cdev *cdev; |
206 | struct device *dev; | 206 | struct device *dev; |
207 | 207 | ||
208 | /* Reference-counting to handle port hot-unplugs and file operations */ | 208 | /* Reference-counting to handle port hot-unplugs and file operations */ |
209 | struct kref kref; | 209 | struct kref kref; |
210 | 210 | ||
211 | /* A waitqueue for poll() or blocking read operations */ | 211 | /* A waitqueue for poll() or blocking read operations */ |
212 | wait_queue_head_t waitqueue; | 212 | wait_queue_head_t waitqueue; |
213 | 213 | ||
214 | /* The 'name' of the port that we expose via sysfs properties */ | 214 | /* The 'name' of the port that we expose via sysfs properties */ |
215 | char *name; | 215 | char *name; |
216 | 216 | ||
217 | /* We can notify apps of host connect / disconnect events via SIGIO */ | 217 | /* We can notify apps of host connect / disconnect events via SIGIO */ |
218 | struct fasync_struct *async_queue; | 218 | struct fasync_struct *async_queue; |
219 | 219 | ||
220 | /* The 'id' to identify the port with the Host */ | 220 | /* The 'id' to identify the port with the Host */ |
221 | u32 id; | 221 | u32 id; |
222 | 222 | ||
223 | bool outvq_full; | 223 | bool outvq_full; |
224 | 224 | ||
225 | /* Is the host device open */ | 225 | /* Is the host device open */ |
226 | bool host_connected; | 226 | bool host_connected; |
227 | 227 | ||
228 | /* We should allow only one process to open a port */ | 228 | /* We should allow only one process to open a port */ |
229 | bool guest_connected; | 229 | bool guest_connected; |
230 | }; | 230 | }; |
231 | 231 | ||
232 | /* This is the very early arch-specified put chars function. */ | 232 | /* This is the very early arch-specified put chars function. */ |
233 | static int (*early_put_chars)(u32, const char *, int); | 233 | static int (*early_put_chars)(u32, const char *, int); |
234 | 234 | ||
235 | static struct port *find_port_by_vtermno(u32 vtermno) | 235 | static struct port *find_port_by_vtermno(u32 vtermno) |
236 | { | 236 | { |
237 | struct port *port; | 237 | struct port *port; |
238 | struct console *cons; | 238 | struct console *cons; |
239 | unsigned long flags; | 239 | unsigned long flags; |
240 | 240 | ||
241 | spin_lock_irqsave(&pdrvdata_lock, flags); | 241 | spin_lock_irqsave(&pdrvdata_lock, flags); |
242 | list_for_each_entry(cons, &pdrvdata.consoles, list) { | 242 | list_for_each_entry(cons, &pdrvdata.consoles, list) { |
243 | if (cons->vtermno == vtermno) { | 243 | if (cons->vtermno == vtermno) { |
244 | port = container_of(cons, struct port, cons); | 244 | port = container_of(cons, struct port, cons); |
245 | goto out; | 245 | goto out; |
246 | } | 246 | } |
247 | } | 247 | } |
248 | port = NULL; | 248 | port = NULL; |
249 | out: | 249 | out: |
250 | spin_unlock_irqrestore(&pdrvdata_lock, flags); | 250 | spin_unlock_irqrestore(&pdrvdata_lock, flags); |
251 | return port; | 251 | return port; |
252 | } | 252 | } |
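
find_port_by_vtermno() walks the registered consoles and maps a matching struct console back to its enclosing struct port with container_of(). A rough userspace sketch of that idiom follows; the struct names are stand-ins for illustration, not the driver's real types.

/* Illustrative only: the container_of idiom, recovering the enclosing
 * structure from a pointer to one of its embedded members. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct console_like { int vtermno; };
struct port_like {
        int id;
        struct console_like cons;       /* embedded member */
};

int main(void)
{
        struct port_like p = { .id = 42, .cons = { .vtermno = 7 } };
        struct console_like *c = &p.cons;

        /* Recover the enclosing struct from the member pointer */
        struct port_like *back = container_of(c, struct port_like, cons);
        printf("id=%d vtermno=%d\n", back->id, back->cons.vtermno);
        return 0;
}
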
253 | 253 | ||
254 | static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev, | 254 | static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev, |
255 | dev_t dev) | 255 | dev_t dev) |
256 | { | 256 | { |
257 | struct port *port; | 257 | struct port *port; |
258 | unsigned long flags; | 258 | unsigned long flags; |
259 | 259 | ||
260 | spin_lock_irqsave(&portdev->ports_lock, flags); | 260 | spin_lock_irqsave(&portdev->ports_lock, flags); |
261 | list_for_each_entry(port, &portdev->ports, list) | 261 | list_for_each_entry(port, &portdev->ports, list) |
262 | if (port->cdev->dev == dev) | 262 | if (port->cdev->dev == dev) |
263 | goto out; | 263 | goto out; |
264 | port = NULL; | 264 | port = NULL; |
265 | out: | 265 | out: |
266 | spin_unlock_irqrestore(&portdev->ports_lock, flags); | 266 | spin_unlock_irqrestore(&portdev->ports_lock, flags); |
267 | 267 | ||
268 | return port; | 268 | return port; |
269 | } | 269 | } |
270 | 270 | ||
271 | static struct port *find_port_by_devt(dev_t dev) | 271 | static struct port *find_port_by_devt(dev_t dev) |
272 | { | 272 | { |
273 | struct ports_device *portdev; | 273 | struct ports_device *portdev; |
274 | struct port *port; | 274 | struct port *port; |
275 | unsigned long flags; | 275 | unsigned long flags; |
276 | 276 | ||
277 | spin_lock_irqsave(&pdrvdata_lock, flags); | 277 | spin_lock_irqsave(&pdrvdata_lock, flags); |
278 | list_for_each_entry(portdev, &pdrvdata.portdevs, list) { | 278 | list_for_each_entry(portdev, &pdrvdata.portdevs, list) { |
279 | port = find_port_by_devt_in_portdev(portdev, dev); | 279 | port = find_port_by_devt_in_portdev(portdev, dev); |
280 | if (port) | 280 | if (port) |
281 | goto out; | 281 | goto out; |
282 | } | 282 | } |
283 | port = NULL; | 283 | port = NULL; |
284 | out: | 284 | out: |
285 | spin_unlock_irqrestore(&pdrvdata_lock, flags); | 285 | spin_unlock_irqrestore(&pdrvdata_lock, flags); |
286 | return port; | 286 | return port; |
287 | } | 287 | } |
288 | 288 | ||
289 | static struct port *find_port_by_id(struct ports_device *portdev, u32 id) | 289 | static struct port *find_port_by_id(struct ports_device *portdev, u32 id) |
290 | { | 290 | { |
291 | struct port *port; | 291 | struct port *port; |
292 | unsigned long flags; | 292 | unsigned long flags; |
293 | 293 | ||
294 | spin_lock_irqsave(&portdev->ports_lock, flags); | 294 | spin_lock_irqsave(&portdev->ports_lock, flags); |
295 | list_for_each_entry(port, &portdev->ports, list) | 295 | list_for_each_entry(port, &portdev->ports, list) |
296 | if (port->id == id) | 296 | if (port->id == id) |
297 | goto out; | 297 | goto out; |
298 | port = NULL; | 298 | port = NULL; |
299 | out: | 299 | out: |
300 | spin_unlock_irqrestore(&portdev->ports_lock, flags); | 300 | spin_unlock_irqrestore(&portdev->ports_lock, flags); |
301 | 301 | ||
302 | return port; | 302 | return port; |
303 | } | 303 | } |
304 | 304 | ||
305 | static struct port *find_port_by_vq(struct ports_device *portdev, | 305 | static struct port *find_port_by_vq(struct ports_device *portdev, |
306 | struct virtqueue *vq) | 306 | struct virtqueue *vq) |
307 | { | 307 | { |
308 | struct port *port; | 308 | struct port *port; |
309 | unsigned long flags; | 309 | unsigned long flags; |
310 | 310 | ||
311 | spin_lock_irqsave(&portdev->ports_lock, flags); | 311 | spin_lock_irqsave(&portdev->ports_lock, flags); |
312 | list_for_each_entry(port, &portdev->ports, list) | 312 | list_for_each_entry(port, &portdev->ports, list) |
313 | if (port->in_vq == vq || port->out_vq == vq) | 313 | if (port->in_vq == vq || port->out_vq == vq) |
314 | goto out; | 314 | goto out; |
315 | port = NULL; | 315 | port = NULL; |
316 | out: | 316 | out: |
317 | spin_unlock_irqrestore(&portdev->ports_lock, flags); | 317 | spin_unlock_irqrestore(&portdev->ports_lock, flags); |
318 | return port; | 318 | return port; |
319 | } | 319 | } |
320 | 320 | ||
321 | static bool is_console_port(struct port *port) | 321 | static bool is_console_port(struct port *port) |
322 | { | 322 | { |
323 | if (port->cons.hvc) | 323 | if (port->cons.hvc) |
324 | return true; | 324 | return true; |
325 | return false; | 325 | return false; |
326 | } | 326 | } |
327 | 327 | ||
328 | static inline bool use_multiport(struct ports_device *portdev) | 328 | static inline bool use_multiport(struct ports_device *portdev) |
329 | { | 329 | { |
330 | /* | 330 | /* |
331 | * This condition can be true when put_chars is called from | 331 | * This condition can be true when put_chars is called from |
332 | * early_init | 332 | * early_init |
333 | */ | 333 | */ |
334 | if (!portdev->vdev) | 334 | if (!portdev->vdev) |
335 | return 0; | 335 | return 0; |
336 | return portdev->vdev->features[0] & (1 << VIRTIO_CONSOLE_F_MULTIPORT); | 336 | return portdev->vdev->features[0] & (1 << VIRTIO_CONSOLE_F_MULTIPORT); |
337 | } | 337 | } |
338 | 338 | ||
339 | static void free_buf(struct port_buffer *buf) | 339 | static void free_buf(struct port_buffer *buf) |
340 | { | 340 | { |
341 | kfree(buf->buf); | 341 | kfree(buf->buf); |
342 | kfree(buf); | 342 | kfree(buf); |
343 | } | 343 | } |
344 | 344 | ||
345 | static struct port_buffer *alloc_buf(size_t buf_size) | 345 | static struct port_buffer *alloc_buf(size_t buf_size) |
346 | { | 346 | { |
347 | struct port_buffer *buf; | 347 | struct port_buffer *buf; |
348 | 348 | ||
349 | buf = kmalloc(sizeof(*buf), GFP_KERNEL); | 349 | buf = kmalloc(sizeof(*buf), GFP_KERNEL); |
350 | if (!buf) | 350 | if (!buf) |
351 | goto fail; | 351 | goto fail; |
352 | buf->buf = kzalloc(buf_size, GFP_KERNEL); | 352 | buf->buf = kzalloc(buf_size, GFP_KERNEL); |
353 | if (!buf->buf) | 353 | if (!buf->buf) |
354 | goto free_buf; | 354 | goto free_buf; |
355 | buf->len = 0; | 355 | buf->len = 0; |
356 | buf->offset = 0; | 356 | buf->offset = 0; |
357 | buf->size = buf_size; | 357 | buf->size = buf_size; |
358 | return buf; | 358 | return buf; |
359 | 359 | ||
360 | free_buf: | 360 | free_buf: |
361 | kfree(buf); | 361 | kfree(buf); |
362 | fail: | 362 | fail: |
363 | return NULL; | 363 | return NULL; |
364 | } | 364 | } |
365 | 365 | ||
366 | /* Callers should take appropriate locks */ | 366 | /* Callers should take appropriate locks */ |
367 | static struct port_buffer *get_inbuf(struct port *port) | 367 | static struct port_buffer *get_inbuf(struct port *port) |
368 | { | 368 | { |
369 | struct port_buffer *buf; | 369 | struct port_buffer *buf; |
370 | unsigned int len; | 370 | unsigned int len; |
371 | 371 | ||
372 | if (port->inbuf) | 372 | if (port->inbuf) |
373 | return port->inbuf; | 373 | return port->inbuf; |
374 | 374 | ||
375 | buf = virtqueue_get_buf(port->in_vq, &len); | 375 | buf = virtqueue_get_buf(port->in_vq, &len); |
376 | if (buf) { | 376 | if (buf) { |
377 | buf->len = len; | 377 | buf->len = len; |
378 | buf->offset = 0; | 378 | buf->offset = 0; |
379 | port->stats.bytes_received += len; | 379 | port->stats.bytes_received += len; |
380 | } | 380 | } |
381 | return buf; | 381 | return buf; |
382 | } | 382 | } |
383 | 383 | ||
384 | /* | 384 | /* |
385 | * Create a scatter-gather list representing our input buffer and put | 385 | * Create a scatter-gather list representing our input buffer and put |
386 | * it in the queue. | 386 | * it in the queue. |
387 | * | 387 | * |
388 | * Callers should take appropriate locks. | 388 | * Callers should take appropriate locks. |
389 | */ | 389 | */ |
390 | static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf) | 390 | static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf) |
391 | { | 391 | { |
392 | struct scatterlist sg[1]; | 392 | struct scatterlist sg[1]; |
393 | int ret; | 393 | int ret; |
394 | 394 | ||
395 | sg_init_one(sg, buf->buf, buf->size); | 395 | sg_init_one(sg, buf->buf, buf->size); |
396 | 396 | ||
397 | ret = virtqueue_add_buf(vq, sg, 0, 1, buf, GFP_ATOMIC); | 397 | ret = virtqueue_add_buf(vq, sg, 0, 1, buf, GFP_ATOMIC); |
398 | virtqueue_kick(vq); | 398 | virtqueue_kick(vq); |
399 | return ret; | 399 | return ret; |
400 | } | 400 | } |
401 | 401 | ||
402 | /* Discard any unread data this port has. Callers should take appropriate locks. */ | 402 | /* Discard any unread data this port has. Callers should take appropriate locks. */ |
403 | static void discard_port_data(struct port *port) | 403 | static void discard_port_data(struct port *port) |
404 | { | 404 | { |
405 | struct port_buffer *buf; | 405 | struct port_buffer *buf; |
406 | unsigned int err; | 406 | unsigned int err; |
407 | 407 | ||
408 | if (!port->portdev) { | 408 | if (!port->portdev) { |
409 | /* Device has been unplugged. vqs are already gone. */ | 409 | /* Device has been unplugged. vqs are already gone. */ |
410 | return; | 410 | return; |
411 | } | 411 | } |
412 | buf = get_inbuf(port); | 412 | buf = get_inbuf(port); |
413 | 413 | ||
414 | err = 0; | 414 | err = 0; |
415 | while (buf) { | 415 | while (buf) { |
416 | port->stats.bytes_discarded += buf->len - buf->offset; | 416 | port->stats.bytes_discarded += buf->len - buf->offset; |
417 | if (add_inbuf(port->in_vq, buf) < 0) { | 417 | if (add_inbuf(port->in_vq, buf) < 0) { |
418 | err++; | 418 | err++; |
419 | free_buf(buf); | 419 | free_buf(buf); |
420 | } | 420 | } |
421 | port->inbuf = NULL; | 421 | port->inbuf = NULL; |
422 | buf = get_inbuf(port); | 422 | buf = get_inbuf(port); |
423 | } | 423 | } |
424 | if (err) | 424 | if (err) |
425 | dev_warn(port->dev, "Errors adding %d buffers back to vq\n", | 425 | dev_warn(port->dev, "Errors adding %d buffers back to vq\n", |
426 | err); | 426 | err); |
427 | } | 427 | } |
428 | 428 | ||
429 | static bool port_has_data(struct port *port) | 429 | static bool port_has_data(struct port *port) |
430 | { | 430 | { |
431 | unsigned long flags; | 431 | unsigned long flags; |
432 | bool ret; | 432 | bool ret; |
433 | 433 | ||
434 | ret = false; | 434 | ret = false; |
435 | spin_lock_irqsave(&port->inbuf_lock, flags); | 435 | spin_lock_irqsave(&port->inbuf_lock, flags); |
436 | port->inbuf = get_inbuf(port); | 436 | port->inbuf = get_inbuf(port); |
437 | if (port->inbuf) | 437 | if (port->inbuf) |
438 | ret = true; | 438 | ret = true; |
439 | 439 | ||
440 | spin_unlock_irqrestore(&port->inbuf_lock, flags); | 440 | spin_unlock_irqrestore(&port->inbuf_lock, flags); |
441 | return ret; | 441 | return ret; |
442 | } | 442 | } |
443 | 443 | ||
444 | static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id, | 444 | static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id, |
445 | unsigned int event, unsigned int value) | 445 | unsigned int event, unsigned int value) |
446 | { | 446 | { |
447 | struct scatterlist sg[1]; | 447 | struct scatterlist sg[1]; |
448 | struct virtio_console_control cpkt; | 448 | struct virtio_console_control cpkt; |
449 | struct virtqueue *vq; | 449 | struct virtqueue *vq; |
450 | unsigned int len; | 450 | unsigned int len; |
451 | 451 | ||
452 | if (!use_multiport(portdev)) | 452 | if (!use_multiport(portdev)) |
453 | return 0; | 453 | return 0; |
454 | 454 | ||
455 | cpkt.id = port_id; | 455 | cpkt.id = port_id; |
456 | cpkt.event = event; | 456 | cpkt.event = event; |
457 | cpkt.value = value; | 457 | cpkt.value = value; |
458 | 458 | ||
459 | vq = portdev->c_ovq; | 459 | vq = portdev->c_ovq; |
460 | 460 | ||
461 | sg_init_one(sg, &cpkt, sizeof(cpkt)); | 461 | sg_init_one(sg, &cpkt, sizeof(cpkt)); |
462 | if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) >= 0) { | 462 | if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) >= 0) { |
463 | virtqueue_kick(vq); | 463 | virtqueue_kick(vq); |
464 | while (!virtqueue_get_buf(vq, &len)) | 464 | while (!virtqueue_get_buf(vq, &len)) |
465 | cpu_relax(); | 465 | cpu_relax(); |
466 | } | 466 | } |
467 | return 0; | 467 | return 0; |
468 | } | 468 | } |
469 | 469 | ||
470 | static ssize_t send_control_msg(struct port *port, unsigned int event, | 470 | static ssize_t send_control_msg(struct port *port, unsigned int event, |
471 | unsigned int value) | 471 | unsigned int value) |
472 | { | 472 | { |
473 | /* Did the port get unplugged before userspace closed it? */ | 473 | /* Did the port get unplugged before userspace closed it? */ |
474 | if (port->portdev) | 474 | if (port->portdev) |
475 | return __send_control_msg(port->portdev, port->id, event, value); | 475 | return __send_control_msg(port->portdev, port->id, event, value); |
476 | return 0; | 476 | return 0; |
477 | } | 477 | } |
478 | 478 | ||
479 | struct buffer_token { | 479 | struct buffer_token { |
480 | union { | 480 | union { |
481 | void *buf; | 481 | void *buf; |
482 | struct scatterlist *sg; | 482 | struct scatterlist *sg; |
483 | } u; | 483 | } u; |
484 | /* If sgpages == 0 then buf is used, else sg is used */ | 484 | /* If sgpages == 0 then buf is used, else sg is used */ |
485 | unsigned int sgpages; | 485 | unsigned int sgpages; |
486 | }; | 486 | }; |
487 | 487 | ||
488 | static void reclaim_sg_pages(struct scatterlist *sg, unsigned int nrpages) | 488 | static void reclaim_sg_pages(struct scatterlist *sg, unsigned int nrpages) |
489 | { | 489 | { |
490 | int i; | 490 | int i; |
491 | struct page *page; | 491 | struct page *page; |
492 | 492 | ||
493 | for (i = 0; i < nrpages; i++) { | 493 | for (i = 0; i < nrpages; i++) { |
494 | page = sg_page(&sg[i]); | 494 | page = sg_page(&sg[i]); |
495 | if (!page) | 495 | if (!page) |
496 | break; | 496 | break; |
497 | put_page(page); | 497 | put_page(page); |
498 | } | 498 | } |
499 | kfree(sg); | 499 | kfree(sg); |
500 | } | 500 | } |
501 | 501 | ||
502 | /* Callers must take the port->outvq_lock */ | 502 | /* Callers must take the port->outvq_lock */ |
503 | static void reclaim_consumed_buffers(struct port *port) | 503 | static void reclaim_consumed_buffers(struct port *port) |
504 | { | 504 | { |
505 | struct buffer_token *tok; | 505 | struct buffer_token *tok; |
506 | unsigned int len; | 506 | unsigned int len; |
507 | 507 | ||
508 | if (!port->portdev) { | 508 | if (!port->portdev) { |
509 | /* Device has been unplugged. vqs are already gone. */ | 509 | /* Device has been unplugged. vqs are already gone. */ |
510 | return; | 510 | return; |
511 | } | 511 | } |
512 | while ((tok = virtqueue_get_buf(port->out_vq, &len))) { | 512 | while ((tok = virtqueue_get_buf(port->out_vq, &len))) { |
513 | if (tok->sgpages) | 513 | if (tok->sgpages) |
514 | reclaim_sg_pages(tok->u.sg, tok->sgpages); | 514 | reclaim_sg_pages(tok->u.sg, tok->sgpages); |
515 | else | 515 | else |
516 | kfree(tok->u.buf); | 516 | kfree(tok->u.buf); |
517 | kfree(tok); | 517 | kfree(tok); |
518 | port->outvq_full = false; | 518 | port->outvq_full = false; |
519 | } | 519 | } |
520 | } | 520 | } |
521 | 521 | ||
522 | static ssize_t __send_to_port(struct port *port, struct scatterlist *sg, | 522 | static ssize_t __send_to_port(struct port *port, struct scatterlist *sg, |
523 | int nents, size_t in_count, | 523 | int nents, size_t in_count, |
524 | struct buffer_token *tok, bool nonblock) | 524 | struct buffer_token *tok, bool nonblock) |
525 | { | 525 | { |
526 | struct virtqueue *out_vq; | 526 | struct virtqueue *out_vq; |
527 | ssize_t ret; | 527 | ssize_t ret; |
528 | unsigned long flags; | 528 | unsigned long flags; |
529 | unsigned int len; | 529 | unsigned int len; |
530 | 530 | ||
531 | out_vq = port->out_vq; | 531 | out_vq = port->out_vq; |
532 | 532 | ||
533 | spin_lock_irqsave(&port->outvq_lock, flags); | 533 | spin_lock_irqsave(&port->outvq_lock, flags); |
534 | 534 | ||
535 | reclaim_consumed_buffers(port); | 535 | reclaim_consumed_buffers(port); |
536 | 536 | ||
537 | ret = virtqueue_add_buf(out_vq, sg, nents, 0, tok, GFP_ATOMIC); | 537 | ret = virtqueue_add_buf(out_vq, sg, nents, 0, tok, GFP_ATOMIC); |
538 | 538 | ||
539 | /* Tell Host to go! */ | 539 | /* Tell Host to go! */ |
540 | virtqueue_kick(out_vq); | 540 | virtqueue_kick(out_vq); |
541 | 541 | ||
542 | if (ret < 0) { | 542 | if (ret < 0) { |
543 | in_count = 0; | 543 | in_count = 0; |
544 | goto done; | 544 | goto done; |
545 | } | 545 | } |
546 | 546 | ||
547 | if (ret == 0) | 547 | if (ret == 0) |
548 | port->outvq_full = true; | 548 | port->outvq_full = true; |
549 | 549 | ||
550 | if (nonblock) | 550 | if (nonblock) |
551 | goto done; | 551 | goto done; |
552 | 552 | ||
553 | /* | 553 | /* |
554 | * Wait till the host acknowledges it pushed out the data we | 554 | * Wait till the host acknowledges it pushed out the data we |
555 | * sent. This is done for data from the hvc_console; the tty | 555 | * sent. This is done for data from the hvc_console; the tty |
556 | * operations are performed with spinlocks held so we can't | 556 | * operations are performed with spinlocks held so we can't |
557 | * sleep here. An alternative would be to copy the data to a | 557 | * sleep here. An alternative would be to copy the data to a |
558 | * buffer and relax the spinning requirement. The downside is | 558 | * buffer and relax the spinning requirement. The downside is |
559 | * we need to kmalloc a GFP_ATOMIC buffer each time the | 559 | * we need to kmalloc a GFP_ATOMIC buffer each time the |
560 | * console driver writes something out. | 560 | * console driver writes something out. |
561 | */ | 561 | */ |
562 | while (!virtqueue_get_buf(out_vq, &len)) | 562 | while (!virtqueue_get_buf(out_vq, &len)) |
563 | cpu_relax(); | 563 | cpu_relax(); |
564 | done: | 564 | done: |
565 | spin_unlock_irqrestore(&port->outvq_lock, flags); | 565 | spin_unlock_irqrestore(&port->outvq_lock, flags); |
566 | 566 | ||
567 | port->stats.bytes_sent += in_count; | 567 | port->stats.bytes_sent += in_count; |
568 | /* | 568 | /* |
569 | * We're expected to return the amount of data we wrote -- all | 569 | * We're expected to return the amount of data we wrote -- all |
570 | * of it | 570 | * of it |
571 | */ | 571 | */ |
572 | return in_count; | 572 | return in_count; |
573 | } | 573 | } |
574 | 574 | ||
575 | static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count, | 575 | static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count, |
576 | bool nonblock) | 576 | bool nonblock) |
577 | { | 577 | { |
578 | struct scatterlist sg[1]; | 578 | struct scatterlist sg[1]; |
579 | struct buffer_token *tok; | 579 | struct buffer_token *tok; |
580 | 580 | ||
581 | tok = kmalloc(sizeof(*tok), GFP_ATOMIC); | 581 | tok = kmalloc(sizeof(*tok), GFP_ATOMIC); |
582 | if (!tok) | 582 | if (!tok) |
583 | return -ENOMEM; | 583 | return -ENOMEM; |
584 | tok->sgpages = 0; | 584 | tok->sgpages = 0; |
585 | tok->u.buf = in_buf; | 585 | tok->u.buf = in_buf; |
586 | 586 | ||
587 | sg_init_one(sg, in_buf, in_count); | 587 | sg_init_one(sg, in_buf, in_count); |
588 | 588 | ||
589 | return __send_to_port(port, sg, 1, in_count, tok, nonblock); | 589 | return __send_to_port(port, sg, 1, in_count, tok, nonblock); |
590 | } | 590 | } |
591 | 591 | ||
592 | static ssize_t send_pages(struct port *port, struct scatterlist *sg, int nents, | 592 | static ssize_t send_pages(struct port *port, struct scatterlist *sg, int nents, |
593 | size_t in_count, bool nonblock) | 593 | size_t in_count, bool nonblock) |
594 | { | 594 | { |
595 | struct buffer_token *tok; | 595 | struct buffer_token *tok; |
596 | 596 | ||
597 | tok = kmalloc(sizeof(*tok), GFP_ATOMIC); | 597 | tok = kmalloc(sizeof(*tok), GFP_ATOMIC); |
598 | if (!tok) | 598 | if (!tok) |
599 | return -ENOMEM; | 599 | return -ENOMEM; |
600 | tok->sgpages = nents; | 600 | tok->sgpages = nents; |
601 | tok->u.sg = sg; | 601 | tok->u.sg = sg; |
602 | 602 | ||
603 | return __send_to_port(port, sg, nents, in_count, tok, nonblock); | 603 | return __send_to_port(port, sg, nents, in_count, tok, nonblock); |
604 | } | 604 | } |
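
Both send_buf() and send_pages() funnel into __send_to_port(), which hands the host a scatter-gather list rather than one flat buffer. The nearest userspace analogue is vectored I/O with writev(); a minimal sketch under that analogy (the output path below is just an example):

/* Illustrative only: gathering several buffers into one write, the
 * userspace analogue of queueing a scatter-gather list. */
#include <sys/uio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/tmp/example.out", O_WRONLY | O_CREAT | O_TRUNC, 0644);
        if (fd < 0)
                return 1;

        struct iovec iov[2] = {
                { .iov_base = "hello ", .iov_len = 6 },
                { .iov_base = "world\n", .iov_len = 6 },
        };

        /* One call consumes both buffers, in order */
        writev(fd, iov, 2);
        close(fd);
        return 0;
}
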
605 | 605 | ||
606 | /* | 606 | /* |
607 | * Give out the data that's requested from the buffer that we have | 607 | * Give out the data that's requested from the buffer that we have |
608 | * queued up. | 608 | * queued up. |
609 | */ | 609 | */ |
610 | static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count, | 610 | static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count, |
611 | bool to_user) | 611 | bool to_user) |
612 | { | 612 | { |
613 | struct port_buffer *buf; | 613 | struct port_buffer *buf; |
614 | unsigned long flags; | 614 | unsigned long flags; |
615 | 615 | ||
616 | if (!out_count || !port_has_data(port)) | 616 | if (!out_count || !port_has_data(port)) |
617 | return 0; | 617 | return 0; |
618 | 618 | ||
619 | buf = port->inbuf; | 619 | buf = port->inbuf; |
620 | out_count = min(out_count, buf->len - buf->offset); | 620 | out_count = min(out_count, buf->len - buf->offset); |
621 | 621 | ||
622 | if (to_user) { | 622 | if (to_user) { |
623 | ssize_t ret; | 623 | ssize_t ret; |
624 | 624 | ||
625 | ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count); | 625 | ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count); |
626 | if (ret) | 626 | if (ret) |
627 | return -EFAULT; | 627 | return -EFAULT; |
628 | } else { | 628 | } else { |
629 | memcpy(out_buf, buf->buf + buf->offset, out_count); | 629 | memcpy(out_buf, buf->buf + buf->offset, out_count); |
630 | } | 630 | } |
631 | 631 | ||
632 | buf->offset += out_count; | 632 | buf->offset += out_count; |
633 | 633 | ||
634 | if (buf->offset == buf->len) { | 634 | if (buf->offset == buf->len) { |
635 | /* | 635 | /* |
636 | * We're done using all the data in this buffer. | 636 | * We're done using all the data in this buffer. |
637 | * Re-queue so that the Host can send us more data. | 637 | * Re-queue so that the Host can send us more data. |
638 | */ | 638 | */ |
639 | spin_lock_irqsave(&port->inbuf_lock, flags); | 639 | spin_lock_irqsave(&port->inbuf_lock, flags); |
640 | port->inbuf = NULL; | 640 | port->inbuf = NULL; |
641 | 641 | ||
642 | if (add_inbuf(port->in_vq, buf) < 0) | 642 | if (add_inbuf(port->in_vq, buf) < 0) |
643 | dev_warn(port->dev, "failed add_buf\n"); | 643 | dev_warn(port->dev, "failed add_buf\n"); |
644 | 644 | ||
645 | spin_unlock_irqrestore(&port->inbuf_lock, flags); | 645 | spin_unlock_irqrestore(&port->inbuf_lock, flags); |
646 | } | 646 | } |
647 | /* Return the number of bytes actually copied */ | 647 | /* Return the number of bytes actually copied */ |
648 | return out_count; | 648 | return out_count; |
649 | } | 649 | } |
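
fill_readbuf() hands out data from the current inbuf and advances 'offset' until it reaches 'len', at which point the buffer is requeued to the host. A small userspace sketch of that offset/len bookkeeping, with stand-in names:

/* Illustrative only: consuming a partially read buffer until offset == len. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct port_buffer_like {
        char   *buf;
        size_t  size;           /* allocation size */
        size_t  len;            /* bytes actually filled */
        size_t  offset;         /* next byte to hand to a reader */
};

static size_t consume(struct port_buffer_like *b, char *out, size_t want)
{
        size_t n = b->len - b->offset;
        if (want < n)
                n = want;
        memcpy(out, b->buf + b->offset, n);
        b->offset += n;
        return n;               /* 0 once the buffer is drained */
}

int main(void)
{
        char data[] = "virtio", out[4];
        struct port_buffer_like b = { data, sizeof(data), 6, 0 };
        size_t n;

        while ((n = consume(&b, out, sizeof(out))) > 0)
                fwrite(out, 1, n, stdout);
        putchar('\n');
        return 0;
}
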
650 | 650 | ||
651 | /* The condition that must be true for polling to end */ | 651 | /* The condition that must be true for polling to end */ |
652 | static bool will_read_block(struct port *port) | 652 | static bool will_read_block(struct port *port) |
653 | { | 653 | { |
654 | if (!port->guest_connected) { | 654 | if (!port->guest_connected) { |
655 | /* Port got hot-unplugged. Let's exit. */ | 655 | /* Port got hot-unplugged. Let's exit. */ |
656 | return false; | 656 | return false; |
657 | } | 657 | } |
658 | return !port_has_data(port) && port->host_connected; | 658 | return !port_has_data(port) && port->host_connected; |
659 | } | 659 | } |
660 | 660 | ||
661 | static bool will_write_block(struct port *port) | 661 | static bool will_write_block(struct port *port) |
662 | { | 662 | { |
663 | bool ret; | 663 | bool ret; |
664 | 664 | ||
665 | if (!port->guest_connected) { | 665 | if (!port->guest_connected) { |
666 | /* Port got hot-unplugged. Let's exit. */ | 666 | /* Port got hot-unplugged. Let's exit. */ |
667 | return false; | 667 | return false; |
668 | } | 668 | } |
669 | if (!port->host_connected) | 669 | if (!port->host_connected) |
670 | return true; | 670 | return true; |
671 | 671 | ||
672 | spin_lock_irq(&port->outvq_lock); | 672 | spin_lock_irq(&port->outvq_lock); |
673 | /* | 673 | /* |
674 | * Check if the Host has consumed any buffers since we last | 674 | * Check if the Host has consumed any buffers since we last |
675 | * sent data (this is only applicable for nonblocking ports). | 675 | * sent data (this is only applicable for nonblocking ports). |
676 | */ | 676 | */ |
677 | reclaim_consumed_buffers(port); | 677 | reclaim_consumed_buffers(port); |
678 | ret = port->outvq_full; | 678 | ret = port->outvq_full; |
679 | spin_unlock_irq(&port->outvq_lock); | 679 | spin_unlock_irq(&port->outvq_lock); |
680 | 680 | ||
681 | return ret; | 681 | return ret; |
682 | } | 682 | } |
683 | 683 | ||
684 | static ssize_t port_fops_read(struct file *filp, char __user *ubuf, | 684 | static ssize_t port_fops_read(struct file *filp, char __user *ubuf, |
685 | size_t count, loff_t *offp) | 685 | size_t count, loff_t *offp) |
686 | { | 686 | { |
687 | struct port *port; | 687 | struct port *port; |
688 | ssize_t ret; | 688 | ssize_t ret; |
689 | 689 | ||
690 | port = filp->private_data; | 690 | port = filp->private_data; |
691 | 691 | ||
692 | if (!port_has_data(port)) { | 692 | if (!port_has_data(port)) { |
693 | /* | 693 | /* |
694 | * If nothing's connected on the host just return 0 in | 694 | * If nothing's connected on the host just return 0 in |
695 | * case of list_empty; this tells the userspace app | 695 | * case of list_empty; this tells the userspace app |
696 | * that there's no connection | 696 | * that there's no connection |
697 | */ | 697 | */ |
698 | if (!port->host_connected) | 698 | if (!port->host_connected) |
699 | return 0; | 699 | return 0; |
700 | if (filp->f_flags & O_NONBLOCK) | 700 | if (filp->f_flags & O_NONBLOCK) |
701 | return -EAGAIN; | 701 | return -EAGAIN; |
702 | 702 | ||
703 | ret = wait_event_freezable(port->waitqueue, | 703 | ret = wait_event_freezable(port->waitqueue, |
704 | !will_read_block(port)); | 704 | !will_read_block(port)); |
705 | if (ret < 0) | 705 | if (ret < 0) |
706 | return ret; | 706 | return ret; |
707 | } | 707 | } |
708 | /* Port got hot-unplugged. */ | 708 | /* Port got hot-unplugged. */ |
709 | if (!port->guest_connected) | 709 | if (!port->guest_connected) |
710 | return -ENODEV; | 710 | return -ENODEV; |
711 | /* | 711 | /* |
712 | * We could've received a disconnection message while we were | 712 | * We could've received a disconnection message while we were |
713 | * waiting for more data. | 713 | * waiting for more data. |
714 | * | 714 | * |
715 | * This check is not clubbed in the if() statement above as we | 715 | * This check is not clubbed in the if() statement above as we |
716 | * might receive some data as well as the host could get | 716 | * might receive some data as well as the host could get |
717 | * disconnected after we got woken up from our wait. So we | 717 | * disconnected after we got woken up from our wait. So we |
718 | * really want to give off whatever data we have and only then | 718 | * really want to give off whatever data we have and only then |
719 | * check for host_connected. | 719 | * check for host_connected. |
720 | */ | 720 | */ |
721 | if (!port_has_data(port) && !port->host_connected) | 721 | if (!port_has_data(port) && !port->host_connected) |
722 | return 0; | 722 | return 0; |
723 | 723 | ||
724 | return fill_readbuf(port, ubuf, count, true); | 724 | return fill_readbuf(port, ubuf, count, true); |
725 | } | 725 | } |
726 | 726 | ||
727 | static int wait_port_writable(struct port *port, bool nonblock) | 727 | static int wait_port_writable(struct port *port, bool nonblock) |
728 | { | 728 | { |
729 | int ret; | 729 | int ret; |
730 | 730 | ||
731 | if (will_write_block(port)) { | 731 | if (will_write_block(port)) { |
732 | if (nonblock) | 732 | if (nonblock) |
733 | return -EAGAIN; | 733 | return -EAGAIN; |
734 | 734 | ||
735 | ret = wait_event_freezable(port->waitqueue, | 735 | ret = wait_event_freezable(port->waitqueue, |
736 | !will_write_block(port)); | 736 | !will_write_block(port)); |
737 | if (ret < 0) | 737 | if (ret < 0) |
738 | return ret; | 738 | return ret; |
739 | } | 739 | } |
740 | /* Port got hot-unplugged. */ | 740 | /* Port got hot-unplugged. */ |
741 | if (!port->guest_connected) | 741 | if (!port->guest_connected) |
742 | return -ENODEV; | 742 | return -ENODEV; |
743 | 743 | ||
744 | return 0; | 744 | return 0; |
745 | } | 745 | } |
746 | 746 | ||
747 | static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, | 747 | static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, |
748 | size_t count, loff_t *offp) | 748 | size_t count, loff_t *offp) |
749 | { | 749 | { |
750 | struct port *port; | 750 | struct port *port; |
751 | char *buf; | 751 | char *buf; |
752 | ssize_t ret; | 752 | ssize_t ret; |
753 | bool nonblock; | 753 | bool nonblock; |
754 | 754 | ||
755 | /* Userspace could be out to fool us */ | 755 | /* Userspace could be out to fool us */ |
756 | if (!count) | 756 | if (!count) |
757 | return 0; | 757 | return 0; |
758 | 758 | ||
759 | port = filp->private_data; | 759 | port = filp->private_data; |
760 | 760 | ||
761 | nonblock = filp->f_flags & O_NONBLOCK; | 761 | nonblock = filp->f_flags & O_NONBLOCK; |
762 | 762 | ||
763 | ret = wait_port_writable(port, nonblock); | 763 | ret = wait_port_writable(port, nonblock); |
764 | if (ret < 0) | 764 | if (ret < 0) |
765 | return ret; | 765 | return ret; |
766 | 766 | ||
767 | count = min((size_t)(32 * 1024), count); | 767 | count = min((size_t)(32 * 1024), count); |
768 | 768 | ||
769 | buf = kmalloc(count, GFP_KERNEL); | 769 | buf = kmalloc(count, GFP_KERNEL); |
770 | if (!buf) | 770 | if (!buf) |
771 | return -ENOMEM; | 771 | return -ENOMEM; |
772 | 772 | ||
773 | ret = copy_from_user(buf, ubuf, count); | 773 | ret = copy_from_user(buf, ubuf, count); |
774 | if (ret) { | 774 | if (ret) { |
775 | ret = -EFAULT; | 775 | ret = -EFAULT; |
776 | goto free_buf; | 776 | goto free_buf; |
777 | } | 777 | } |
778 | 778 | ||
779 | /* | 779 | /* |
780 | * We now ask send_buf() to not spin for generic ports -- we | 780 | * We now ask send_buf() to not spin for generic ports -- we |
781 | * can re-use the same code path that non-blocking file | 781 | * can re-use the same code path that non-blocking file |
782 | * descriptors take for blocking file descriptors since the | 782 | * descriptors take for blocking file descriptors since the |
783 | * wait is already done and we're certain the write will go | 783 | * wait is already done and we're certain the write will go |
784 | * through to the host. | 784 | * through to the host. |
785 | */ | 785 | */ |
786 | nonblock = true; | 786 | nonblock = true; |
787 | ret = send_buf(port, buf, count, nonblock); | 787 | ret = send_buf(port, buf, count, nonblock); |
788 | 788 | ||
789 | if (nonblock && ret > 0) | 789 | if (nonblock && ret > 0) |
790 | goto out; | 790 | goto out; |
791 | 791 | ||
792 | free_buf: | 792 | free_buf: |
793 | kfree(buf); | 793 | kfree(buf); |
794 | out: | 794 | out: |
795 | return ret; | 795 | return ret; |
796 | } | 796 | } |
797 | 797 | ||
798 | struct sg_list { | 798 | struct sg_list { |
799 | unsigned int n; | 799 | unsigned int n; |
800 | unsigned int size; | 800 | unsigned int size; |
801 | size_t len; | 801 | size_t len; |
802 | struct scatterlist *sg; | 802 | struct scatterlist *sg; |
803 | }; | 803 | }; |
804 | 804 | ||
805 | static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf, | 805 | static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf, |
806 | struct splice_desc *sd) | 806 | struct splice_desc *sd) |
807 | { | 807 | { |
808 | struct sg_list *sgl = sd->u.data; | 808 | struct sg_list *sgl = sd->u.data; |
809 | unsigned int offset, len; | 809 | unsigned int offset, len; |
810 | 810 | ||
811 | if (sgl->n == sgl->size) | 811 | if (sgl->n == sgl->size) |
812 | return 0; | 812 | return 0; |
813 | 813 | ||
814 | /* Try to lock this page */ | 814 | /* Try to lock this page */ |
815 | if (buf->ops->steal(pipe, buf) == 0) { | 815 | if (buf->ops->steal(pipe, buf) == 0) { |
816 | /* Get reference and unlock page for moving */ | 816 | /* Get reference and unlock page for moving */ |
817 | get_page(buf->page); | 817 | get_page(buf->page); |
818 | unlock_page(buf->page); | 818 | unlock_page(buf->page); |
819 | 819 | ||
820 | len = min(buf->len, sd->len); | 820 | len = min(buf->len, sd->len); |
821 | sg_set_page(&(sgl->sg[sgl->n]), buf->page, len, buf->offset); | 821 | sg_set_page(&(sgl->sg[sgl->n]), buf->page, len, buf->offset); |
822 | } else { | 822 | } else { |
823 | /* Fall back to copying a page */ | 823 | /* Fall back to copying a page */ |
824 | struct page *page = alloc_page(GFP_KERNEL); | 824 | struct page *page = alloc_page(GFP_KERNEL); |
825 | char *src = buf->ops->map(pipe, buf, 1); | 825 | char *src = buf->ops->map(pipe, buf, 1); |
826 | char *dst; | 826 | char *dst; |
827 | 827 | ||
828 | if (!page) | 828 | if (!page) |
829 | return -ENOMEM; | 829 | return -ENOMEM; |
830 | dst = kmap(page); | 830 | dst = kmap(page); |
831 | 831 | ||
832 | offset = sd->pos & ~PAGE_MASK; | 832 | offset = sd->pos & ~PAGE_MASK; |
833 | 833 | ||
834 | len = sd->len; | 834 | len = sd->len; |
835 | if (len + offset > PAGE_SIZE) | 835 | if (len + offset > PAGE_SIZE) |
836 | len = PAGE_SIZE - offset; | 836 | len = PAGE_SIZE - offset; |
837 | 837 | ||
838 | memcpy(dst + offset, src + buf->offset, len); | 838 | memcpy(dst + offset, src + buf->offset, len); |
839 | 839 | ||
840 | kunmap(page); | 840 | kunmap(page); |
841 | buf->ops->unmap(pipe, buf, src); | 841 | buf->ops->unmap(pipe, buf, src); |
842 | 842 | ||
843 | sg_set_page(&(sgl->sg[sgl->n]), page, len, offset); | 843 | sg_set_page(&(sgl->sg[sgl->n]), page, len, offset); |
844 | } | 844 | } |
845 | sgl->n++; | 845 | sgl->n++; |
846 | sgl->len += len; | 846 | sgl->len += len; |
847 | 847 | ||
848 | return len; | 848 | return len; |
849 | } | 849 | } |
850 | 850 | ||
851 | /* Faster zero-copy write by splicing */ | 851 | /* Faster zero-copy write by splicing */ |
852 | static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe, | 852 | static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe, |
853 | struct file *filp, loff_t *ppos, | 853 | struct file *filp, loff_t *ppos, |
854 | size_t len, unsigned int flags) | 854 | size_t len, unsigned int flags) |
855 | { | 855 | { |
856 | struct port *port = filp->private_data; | 856 | struct port *port = filp->private_data; |
857 | struct sg_list sgl; | 857 | struct sg_list sgl; |
858 | ssize_t ret; | 858 | ssize_t ret; |
859 | struct splice_desc sd = { | 859 | struct splice_desc sd = { |
860 | .total_len = len, | 860 | .total_len = len, |
861 | .flags = flags, | 861 | .flags = flags, |
862 | .pos = *ppos, | 862 | .pos = *ppos, |
863 | .u.data = &sgl, | 863 | .u.data = &sgl, |
864 | }; | 864 | }; |
865 | 865 | ||
866 | ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK); | 866 | ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK); |
867 | if (ret < 0) | 867 | if (ret < 0) |
868 | return ret; | 868 | return ret; |
869 | 869 | ||
870 | sgl.n = 0; | 870 | sgl.n = 0; |
871 | sgl.len = 0; | 871 | sgl.len = 0; |
872 | sgl.size = pipe->nrbufs; | 872 | sgl.size = pipe->nrbufs; |
873 | sgl.sg = kmalloc(sizeof(struct scatterlist) * sgl.size, GFP_KERNEL); | 873 | sgl.sg = kmalloc(sizeof(struct scatterlist) * sgl.size, GFP_KERNEL); |
874 | if (unlikely(!sgl.sg)) | 874 | if (unlikely(!sgl.sg)) |
875 | return -ENOMEM; | 875 | return -ENOMEM; |
876 | 876 | ||
877 | sg_init_table(sgl.sg, sgl.size); | 877 | sg_init_table(sgl.sg, sgl.size); |
878 | ret = __splice_from_pipe(pipe, &sd, pipe_to_sg); | 878 | ret = __splice_from_pipe(pipe, &sd, pipe_to_sg); |
879 | if (likely(ret > 0)) | 879 | if (likely(ret > 0)) |
880 | ret = send_pages(port, sgl.sg, sgl.n, sgl.len, true); | 880 | ret = send_pages(port, sgl.sg, sgl.n, sgl.len, true); |
881 | 881 | ||
882 | return ret; | 882 | return ret; |
883 | } | 883 | } |
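
The splice_write path lets a guest process push pipe pages to the host without an extra copy. A minimal userspace sketch of driving it; the /dev/vport0p1 node is an example, substitute the node for your port.

/* Illustrative only: writing through the zero-copy splice path. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        int port = open("/dev/vport0p1", O_WRONLY);
        int pfd[2];
        const char msg[] = "hello host\n";

        if (port < 0 || pipe(pfd) < 0)
                return 1;

        /* Fill the pipe, then splice its pages straight to the port */
        write(pfd[1], msg, strlen(msg));
        splice(pfd[0], NULL, port, NULL, strlen(msg), 0);

        close(pfd[0]);
        close(pfd[1]);
        close(port);
        return 0;
}
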
884 | 884 | ||
885 | static unsigned int port_fops_poll(struct file *filp, poll_table *wait) | 885 | static unsigned int port_fops_poll(struct file *filp, poll_table *wait) |
886 | { | 886 | { |
887 | struct port *port; | 887 | struct port *port; |
888 | unsigned int ret; | 888 | unsigned int ret; |
889 | 889 | ||
890 | port = filp->private_data; | 890 | port = filp->private_data; |
891 | poll_wait(filp, &port->waitqueue, wait); | 891 | poll_wait(filp, &port->waitqueue, wait); |
892 | 892 | ||
893 | if (!port->guest_connected) { | 893 | if (!port->guest_connected) { |
894 | /* Port got unplugged */ | 894 | /* Port got unplugged */ |
895 | return POLLHUP; | 895 | return POLLHUP; |
896 | } | 896 | } |
897 | ret = 0; | 897 | ret = 0; |
898 | if (!will_read_block(port)) | 898 | if (!will_read_block(port)) |
899 | ret |= POLLIN | POLLRDNORM; | 899 | ret |= POLLIN | POLLRDNORM; |
900 | if (!will_write_block(port)) | 900 | if (!will_write_block(port)) |
901 | ret |= POLLOUT; | 901 | ret |= POLLOUT; |
902 | if (!port->host_connected) | 902 | if (!port->host_connected) |
903 | ret |= POLLHUP; | 903 | ret |= POLLHUP; |
904 | 904 | ||
905 | return ret; | 905 | return ret; |
906 | } | 906 | } |
907 | 907 | ||
908 | static void remove_port(struct kref *kref); | 908 | static void remove_port(struct kref *kref); |
909 | 909 | ||
910 | static int port_fops_release(struct inode *inode, struct file *filp) | 910 | static int port_fops_release(struct inode *inode, struct file *filp) |
911 | { | 911 | { |
912 | struct port *port; | 912 | struct port *port; |
913 | 913 | ||
914 | port = filp->private_data; | 914 | port = filp->private_data; |
915 | 915 | ||
916 | /* Notify host of port being closed */ | 916 | /* Notify host of port being closed */ |
917 | send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0); | 917 | send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0); |
918 | 918 | ||
919 | spin_lock_irq(&port->inbuf_lock); | 919 | spin_lock_irq(&port->inbuf_lock); |
920 | port->guest_connected = false; | 920 | port->guest_connected = false; |
921 | 921 | ||
922 | discard_port_data(port); | 922 | discard_port_data(port); |
923 | 923 | ||
924 | spin_unlock_irq(&port->inbuf_lock); | 924 | spin_unlock_irq(&port->inbuf_lock); |
925 | 925 | ||
926 | spin_lock_irq(&port->outvq_lock); | 926 | spin_lock_irq(&port->outvq_lock); |
927 | reclaim_consumed_buffers(port); | 927 | reclaim_consumed_buffers(port); |
928 | spin_unlock_irq(&port->outvq_lock); | 928 | spin_unlock_irq(&port->outvq_lock); |
929 | 929 | ||
930 | /* | 930 | /* |
931 | * Locks aren't necessary here as a port can't be opened after | 931 | * Locks aren't necessary here as a port can't be opened after |
932 | * unplug, and if a port isn't unplugged, a kref would already | 932 | * unplug, and if a port isn't unplugged, a kref would already |
933 | * exist for the port. Plus, taking ports_lock here would | 933 | * exist for the port. Plus, taking ports_lock here would |
934 | * create a dependency on other locks taken by functions | 934 | * create a dependency on other locks taken by functions |
935 | * inside remove_port if we're the last holder of the port, | 935 | * inside remove_port if we're the last holder of the port, |
936 | * creating many problems. | 936 | * creating many problems. |
937 | */ | 937 | */ |
938 | kref_put(&port->kref, remove_port); | 938 | kref_put(&port->kref, remove_port); |
939 | 939 | ||
940 | return 0; | 940 | return 0; |
941 | } | 941 | } |
942 | 942 | ||
943 | static int port_fops_open(struct inode *inode, struct file *filp) | 943 | static int port_fops_open(struct inode *inode, struct file *filp) |
944 | { | 944 | { |
945 | struct cdev *cdev = inode->i_cdev; | 945 | struct cdev *cdev = inode->i_cdev; |
946 | struct port *port; | 946 | struct port *port; |
947 | int ret; | 947 | int ret; |
948 | 948 | ||
949 | port = find_port_by_devt(cdev->dev); | 949 | port = find_port_by_devt(cdev->dev); |
950 | filp->private_data = port; | 950 | filp->private_data = port; |
951 | 951 | ||
952 | /* Guard against the port getting hot-unplugged at the same time */ | 952 | /* Guard against the port getting hot-unplugged at the same time */ |
953 | spin_lock_irq(&port->portdev->ports_lock); | 953 | spin_lock_irq(&port->portdev->ports_lock); |
954 | kref_get(&port->kref); | 954 | kref_get(&port->kref); |
955 | spin_unlock_irq(&port->portdev->ports_lock); | 955 | spin_unlock_irq(&port->portdev->ports_lock); |
956 | 956 | ||
957 | /* | 957 | /* |
958 | * Don't allow opening of console port devices -- that's done | 958 | * Don't allow opening of console port devices -- that's done |
959 | * via /dev/hvc | 959 | * via /dev/hvc |
960 | */ | 960 | */ |
961 | if (is_console_port(port)) { | 961 | if (is_console_port(port)) { |
962 | ret = -ENXIO; | 962 | ret = -ENXIO; |
963 | goto out; | 963 | goto out; |
964 | } | 964 | } |
965 | 965 | ||
966 | /* Allow only one process to open a particular port at a time */ | 966 | /* Allow only one process to open a particular port at a time */ |
967 | spin_lock_irq(&port->inbuf_lock); | 967 | spin_lock_irq(&port->inbuf_lock); |
968 | if (port->guest_connected) { | 968 | if (port->guest_connected) { |
969 | spin_unlock_irq(&port->inbuf_lock); | 969 | spin_unlock_irq(&port->inbuf_lock); |
970 | ret = -EMFILE; | 970 | ret = -EMFILE; |
971 | goto out; | 971 | goto out; |
972 | } | 972 | } |
973 | 973 | ||
974 | port->guest_connected = true; | 974 | port->guest_connected = true; |
975 | spin_unlock_irq(&port->inbuf_lock); | 975 | spin_unlock_irq(&port->inbuf_lock); |
976 | 976 | ||
977 | spin_lock_irq(&port->outvq_lock); | 977 | spin_lock_irq(&port->outvq_lock); |
978 | /* | 978 | /* |
979 | * There might be a chance that we missed reclaiming a few | 979 | * There might be a chance that we missed reclaiming a few |
980 | * buffers in the window of the port getting previously closed | 980 | * buffers in the window of the port getting previously closed |
981 | * and opening now. | 981 | * and opening now. |
982 | */ | 982 | */ |
983 | reclaim_consumed_buffers(port); | 983 | reclaim_consumed_buffers(port); |
984 | spin_unlock_irq(&port->outvq_lock); | 984 | spin_unlock_irq(&port->outvq_lock); |
985 | 985 | ||
986 | nonseekable_open(inode, filp); | 986 | nonseekable_open(inode, filp); |
987 | 987 | ||
988 | /* Notify host of port being opened */ | 988 | /* Notify host of port being opened */ |
989 | send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1); | 989 | send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1); |
990 | 990 | ||
991 | return 0; | 991 | return 0; |
992 | out: | 992 | out: |
993 | kref_put(&port->kref, remove_port); | 993 | kref_put(&port->kref, remove_port); |
994 | return ret; | 994 | return ret; |
995 | } | 995 | } |
996 | 996 | ||
997 | static int port_fops_fasync(int fd, struct file *filp, int mode) | 997 | static int port_fops_fasync(int fd, struct file *filp, int mode) |
998 | { | 998 | { |
999 | struct port *port; | 999 | struct port *port; |
1000 | 1000 | ||
1001 | port = filp->private_data; | 1001 | port = filp->private_data; |
1002 | return fasync_helper(fd, filp, mode, &port->async_queue); | 1002 | return fasync_helper(fd, filp, mode, &port->async_queue); |
1003 | } | 1003 | } |
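
fasync_helper() is what lets a guest process ask for SIGIO when the host side connects or disconnects. A rough userspace sketch of opting in; the device path is an example only.

/* Illustrative only: requesting SIGIO delivery for a port's events. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static void on_sigio(int sig)
{
        (void)sig;
        /* Re-check port state with poll()/read() from the main loop */
}

int main(void)
{
        int fd = open("/dev/vport0p1", O_RDWR);
        if (fd < 0)
                return 1;

        signal(SIGIO, on_sigio);
        fcntl(fd, F_SETOWN, getpid());                          /* route SIGIO to us */
        fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);       /* enable async notification */

        pause();        /* wait for a signal */
        close(fd);
        return 0;
}
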
1004 | 1004 | ||
1005 | /* | 1005 | /* |
1006 | * The file operations that we support: programs in the guest can open | 1006 | * The file operations that we support: programs in the guest can open |
1007 | * a console device, read from it, write to it, poll for data and | 1007 | * a console device, read from it, write to it, poll for data and |
1008 | * close it. The devices are at | 1008 | * close it. The devices are at |
1009 | * /dev/vport<device number>p<port number> | 1009 | * /dev/vport<device number>p<port number> |
1010 | */ | 1010 | */ |
1011 | static const struct file_operations port_fops = { | 1011 | static const struct file_operations port_fops = { |
1012 | .owner = THIS_MODULE, | 1012 | .owner = THIS_MODULE, |
1013 | .open = port_fops_open, | 1013 | .open = port_fops_open, |
1014 | .read = port_fops_read, | 1014 | .read = port_fops_read, |
1015 | .write = port_fops_write, | 1015 | .write = port_fops_write, |
1016 | .splice_write = port_fops_splice_write, | 1016 | .splice_write = port_fops_splice_write, |
1017 | .poll = port_fops_poll, | 1017 | .poll = port_fops_poll, |
1018 | .release = port_fops_release, | 1018 | .release = port_fops_release, |
1019 | .fasync = port_fops_fasync, | 1019 | .fasync = port_fops_fasync, |
1020 | .llseek = no_llseek, | 1020 | .llseek = no_llseek, |
1021 | }; | 1021 | }; |
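
From inside the guest these ports look like ordinary character devices, so a consumer is plain POSIX I/O. A minimal blocking reader, again assuming an example node of /dev/vport0p1:

/* Illustrative only: open, poll and read a port device. */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[256];
        int fd = open("/dev/vport0p1", O_RDWR);
        if (fd < 0)
                return 1;

        struct pollfd pfd = { .fd = fd, .events = POLLIN };

        /* POLLHUP means the host side is not connected or the port is gone */
        while (poll(&pfd, 1, -1) > 0 && !(pfd.revents & POLLHUP)) {
                ssize_t n = read(fd, buf, sizeof(buf));
                if (n <= 0)
                        break;
                fwrite(buf, 1, (size_t)n, stdout);
        }
        close(fd);
        return 0;
}
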
1022 | 1022 | ||
1023 | /* | 1023 | /* |
1024 | * The put_chars() callback is pretty straightforward. | 1024 | * The put_chars() callback is pretty straightforward. |
1025 | * | 1025 | * |
1026 | * We turn the characters into a scatter-gather list, add it to the | 1026 | * We turn the characters into a scatter-gather list, add it to the |
1027 | * output queue and then kick the Host. Then we sit here waiting for | 1027 | * output queue and then kick the Host. Then we sit here waiting for |
1028 | * it to finish: inefficient in theory, but in practice | 1028 | * it to finish: inefficient in theory, but in practice |
1029 | * implementations will do it immediately (lguest's Launcher does). | 1029 | * implementations will do it immediately (lguest's Launcher does). |
1030 | */ | 1030 | */ |
1031 | static int put_chars(u32 vtermno, const char *buf, int count) | 1031 | static int put_chars(u32 vtermno, const char *buf, int count) |
1032 | { | 1032 | { |
1033 | struct port *port; | 1033 | struct port *port; |
1034 | 1034 | ||
1035 | if (unlikely(early_put_chars)) | 1035 | if (unlikely(early_put_chars)) |
1036 | return early_put_chars(vtermno, buf, count); | 1036 | return early_put_chars(vtermno, buf, count); |
1037 | 1037 | ||
1038 | port = find_port_by_vtermno(vtermno); | 1038 | port = find_port_by_vtermno(vtermno); |
1039 | if (!port) | 1039 | if (!port) |
1040 | return -EPIPE; | 1040 | return -EPIPE; |
1041 | 1041 | ||
1042 | return send_buf(port, (void *)buf, count, false); | 1042 | return send_buf(port, (void *)buf, count, false); |
1043 | } | 1043 | } |
1044 | 1044 | ||
1045 | /* | 1045 | /* |
1046 | * get_chars() is the callback from the hvc_console infrastructure | 1046 | * get_chars() is the callback from the hvc_console infrastructure |
1047 | * when an interrupt is received. | 1047 | * when an interrupt is received. |
1048 | * | 1048 | * |
1049 | * We call out to fill_readbuf that gets us the required data from the | 1049 | * We call out to fill_readbuf that gets us the required data from the |
1050 | * buffers that are queued up. | 1050 | * buffers that are queued up. |
1051 | */ | 1051 | */ |
1052 | static int get_chars(u32 vtermno, char *buf, int count) | 1052 | static int get_chars(u32 vtermno, char *buf, int count) |
1053 | { | 1053 | { |
1054 | struct port *port; | 1054 | struct port *port; |
1055 | 1055 | ||
1056 | /* If we've not set up the port yet, we have no input to give. */ | 1056 | /* If we've not set up the port yet, we have no input to give. */ |
1057 | if (unlikely(early_put_chars)) | 1057 | if (unlikely(early_put_chars)) |
1058 | return 0; | 1058 | return 0; |
1059 | 1059 | ||
1060 | port = find_port_by_vtermno(vtermno); | 1060 | port = find_port_by_vtermno(vtermno); |
1061 | if (!port) | 1061 | if (!port) |
1062 | return -EPIPE; | 1062 | return -EPIPE; |
1063 | 1063 | ||
1064 | /* If we don't have an input queue yet, we can't get input. */ | 1064 | /* If we don't have an input queue yet, we can't get input. */ |
1065 | BUG_ON(!port->in_vq); | 1065 | BUG_ON(!port->in_vq); |
1066 | 1066 | ||
1067 | return fill_readbuf(port, buf, count, false); | 1067 | return fill_readbuf(port, buf, count, false); |
1068 | } | 1068 | } |
1069 | 1069 | ||
1070 | static void resize_console(struct port *port) | 1070 | static void resize_console(struct port *port) |
1071 | { | 1071 | { |
1072 | struct virtio_device *vdev; | 1072 | struct virtio_device *vdev; |
1073 | 1073 | ||
1074 | /* The port could have been hot-unplugged */ | 1074 | /* The port could have been hot-unplugged */ |
1075 | if (!port || !is_console_port(port)) | 1075 | if (!port || !is_console_port(port)) |
1076 | return; | 1076 | return; |
1077 | 1077 | ||
1078 | vdev = port->portdev->vdev; | 1078 | vdev = port->portdev->vdev; |
1079 | if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) | 1079 | if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) |
1080 | hvc_resize(port->cons.hvc, port->cons.ws); | 1080 | hvc_resize(port->cons.hvc, port->cons.ws); |
1081 | } | 1081 | } |
1082 | 1082 | ||
1083 | /* We set the configuration at this point, since we now have a tty */ | 1083 | /* We set the configuration at this point, since we now have a tty */ |
1084 | static int notifier_add_vio(struct hvc_struct *hp, int data) | 1084 | static int notifier_add_vio(struct hvc_struct *hp, int data) |
1085 | { | 1085 | { |
1086 | struct port *port; | 1086 | struct port *port; |
1087 | 1087 | ||
1088 | port = find_port_by_vtermno(hp->vtermno); | 1088 | port = find_port_by_vtermno(hp->vtermno); |
1089 | if (!port) | 1089 | if (!port) |
1090 | return -EINVAL; | 1090 | return -EINVAL; |
1091 | 1091 | ||
1092 | hp->irq_requested = 1; | 1092 | hp->irq_requested = 1; |
1093 | resize_console(port); | 1093 | resize_console(port); |
1094 | 1094 | ||
1095 | return 0; | 1095 | return 0; |
1096 | } | 1096 | } |
1097 | 1097 | ||
1098 | static void notifier_del_vio(struct hvc_struct *hp, int data) | 1098 | static void notifier_del_vio(struct hvc_struct *hp, int data) |
1099 | { | 1099 | { |
1100 | hp->irq_requested = 0; | 1100 | hp->irq_requested = 0; |
1101 | } | 1101 | } |
1102 | 1102 | ||
1103 | /* The operations for console ports. */ | 1103 | /* The operations for console ports. */ |
1104 | static const struct hv_ops hv_ops = { | 1104 | static const struct hv_ops hv_ops = { |
1105 | .get_chars = get_chars, | 1105 | .get_chars = get_chars, |
1106 | .put_chars = put_chars, | 1106 | .put_chars = put_chars, |
1107 | .notifier_add = notifier_add_vio, | 1107 | .notifier_add = notifier_add_vio, |
1108 | .notifier_del = notifier_del_vio, | 1108 | .notifier_del = notifier_del_vio, |
1109 | .notifier_hangup = notifier_del_vio, | 1109 | .notifier_hangup = notifier_del_vio, |
1110 | }; | 1110 | }; |
1111 | 1111 | ||
1112 | /* | 1112 | /* |
1113 | * Console drivers are initialized very early so boot messages can go | 1113 | * Console drivers are initialized very early so boot messages can go |
1114 | * out, so we do things slightly differently from the generic virtio | 1114 | * out, so we do things slightly differently from the generic virtio |
1115 | * initialization of the net and block drivers. | 1115 | * initialization of the net and block drivers. |
1116 | * | 1116 | * |
1117 | * At this stage, the console is output-only. It's too early to set | 1117 | * At this stage, the console is output-only. It's too early to set |
1118 | * up a virtqueue, so we let the drivers do some boutique early-output | 1118 | * up a virtqueue, so we let the drivers do some boutique early-output |
1119 | * thing. | 1119 | * thing. |
1120 | */ | 1120 | */ |
1121 | int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)) | 1121 | int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)) |
1122 | { | 1122 | { |
1123 | early_put_chars = put_chars; | 1123 | early_put_chars = put_chars; |
1124 | return hvc_instantiate(0, 0, &hv_ops); | 1124 | return hvc_instantiate(0, 0, &hv_ops); |
1125 | } | 1125 | } |
1126 | 1126 | ||
1127 | int init_port_console(struct port *port) | 1127 | int init_port_console(struct port *port) |
1128 | { | 1128 | { |
1129 | int ret; | 1129 | int ret; |
1130 | 1130 | ||
1131 | /* | 1131 | /* |
1132 | * The Host's telling us this port is a console port. Hook it | 1132 | * The Host's telling us this port is a console port. Hook it |
1133 | * up with an hvc console. | 1133 | * up with an hvc console. |
1134 | * | 1134 | * |
1135 | * To set up and manage our virtual console, we call | 1135 | * To set up and manage our virtual console, we call |
1136 | * hvc_alloc(). | 1136 | * hvc_alloc(). |
1137 | * | 1137 | * |
1138 | * The first argument of hvc_alloc() is the virtual console | 1138 | * The first argument of hvc_alloc() is the virtual console |
1139 | * number. The second argument is the parameter for the | 1139 | * number. The second argument is the parameter for the |
1140 | * notification mechanism (like irq number). We currently | 1140 | * notification mechanism (like irq number). We currently |
1141 | * leave this as zero; virtqueues have implicit notifications. | 1141 | * leave this as zero; virtqueues have implicit notifications. |
1142 | * | 1142 | * |
1143 | * The third argument is a "struct hv_ops" containing the | 1143 | * The third argument is a "struct hv_ops" containing the |
1144 | * put_chars(), get_chars(), notifier_add() and notifier_del() | 1144 | * put_chars(), get_chars(), notifier_add() and notifier_del() |
1145 | * pointers. The final argument is the output buffer size: we | 1145 | * pointers. The final argument is the output buffer size: we |
1146 | * can do any size, so we put PAGE_SIZE here. | 1146 | * can do any size, so we put PAGE_SIZE here. |
1147 | */ | 1147 | */ |
1148 | port->cons.vtermno = pdrvdata.next_vtermno; | 1148 | port->cons.vtermno = pdrvdata.next_vtermno; |
1149 | 1149 | ||
1150 | port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE); | 1150 | port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE); |
1151 | if (IS_ERR(port->cons.hvc)) { | 1151 | if (IS_ERR(port->cons.hvc)) { |
1152 | ret = PTR_ERR(port->cons.hvc); | 1152 | ret = PTR_ERR(port->cons.hvc); |
1153 | dev_err(port->dev, | 1153 | dev_err(port->dev, |
1154 | "error %d allocating hvc for port\n", ret); | 1154 | "error %d allocating hvc for port\n", ret); |
1155 | port->cons.hvc = NULL; | 1155 | port->cons.hvc = NULL; |
1156 | return ret; | 1156 | return ret; |
1157 | } | 1157 | } |
1158 | spin_lock_irq(&pdrvdata_lock); | 1158 | spin_lock_irq(&pdrvdata_lock); |
1159 | pdrvdata.next_vtermno++; | 1159 | pdrvdata.next_vtermno++; |
1160 | list_add_tail(&port->cons.list, &pdrvdata.consoles); | 1160 | list_add_tail(&port->cons.list, &pdrvdata.consoles); |
1161 | spin_unlock_irq(&pdrvdata_lock); | 1161 | spin_unlock_irq(&pdrvdata_lock); |
1162 | port->guest_connected = true; | 1162 | port->guest_connected = true; |
1163 | 1163 | ||
1164 | /* | 1164 | /* |
1165 | * Start using the new console output if this is the first | 1165 | * Start using the new console output if this is the first |
1166 | * console to come up. | 1166 | * console to come up. |
1167 | */ | 1167 | */ |
1168 | if (early_put_chars) | 1168 | if (early_put_chars) |
1169 | early_put_chars = NULL; | 1169 | early_put_chars = NULL; |
1170 | 1170 | ||
1171 | /* Notify host of port being opened */ | 1171 | /* Notify host of port being opened */ |
1172 | send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1); | 1172 | send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1); |
1173 | 1173 | ||
1174 | return 0; | 1174 | return 0; |
1175 | } | 1175 | } |
1176 | 1176 | ||
1177 | static ssize_t show_port_name(struct device *dev, | 1177 | static ssize_t show_port_name(struct device *dev, |
1178 | struct device_attribute *attr, char *buffer) | 1178 | struct device_attribute *attr, char *buffer) |
1179 | { | 1179 | { |
1180 | struct port *port; | 1180 | struct port *port; |
1181 | 1181 | ||
1182 | port = dev_get_drvdata(dev); | 1182 | port = dev_get_drvdata(dev); |
1183 | 1183 | ||
1184 | return sprintf(buffer, "%s\n", port->name); | 1184 | return sprintf(buffer, "%s\n", port->name); |
1185 | } | 1185 | } |
1186 | 1186 | ||
1187 | static DEVICE_ATTR(name, S_IRUGO, show_port_name, NULL); | 1187 | static DEVICE_ATTR(name, S_IRUGO, show_port_name, NULL); |
1188 | 1188 | ||
1189 | static struct attribute *port_sysfs_entries[] = { | 1189 | static struct attribute *port_sysfs_entries[] = { |
1190 | &dev_attr_name.attr, | 1190 | &dev_attr_name.attr, |
1191 | NULL | 1191 | NULL |
1192 | }; | 1192 | }; |
1193 | 1193 | ||
1194 | static struct attribute_group port_attribute_group = { | 1194 | static struct attribute_group port_attribute_group = { |
1195 | .name = NULL, /* put in device directory */ | 1195 | .name = NULL, /* put in device directory */ |
1196 | .attrs = port_sysfs_entries, | 1196 | .attrs = port_sysfs_entries, |
1197 | }; | 1197 | }; |
1198 | 1198 | ||
1199 | static ssize_t debugfs_read(struct file *filp, char __user *ubuf, | 1199 | static ssize_t debugfs_read(struct file *filp, char __user *ubuf, |
1200 | size_t count, loff_t *offp) | 1200 | size_t count, loff_t *offp) |
1201 | { | 1201 | { |
1202 | struct port *port; | 1202 | struct port *port; |
1203 | char *buf; | 1203 | char *buf; |
1204 | ssize_t ret, out_offset, out_count; | 1204 | ssize_t ret, out_offset, out_count; |
1205 | 1205 | ||
1206 | out_count = 1024; | 1206 | out_count = 1024; |
1207 | buf = kmalloc(out_count, GFP_KERNEL); | 1207 | buf = kmalloc(out_count, GFP_KERNEL); |
1208 | if (!buf) | 1208 | if (!buf) |
1209 | return -ENOMEM; | 1209 | return -ENOMEM; |
1210 | 1210 | ||
1211 | port = filp->private_data; | 1211 | port = filp->private_data; |
1212 | out_offset = 0; | 1212 | out_offset = 0; |
1213 | out_offset += snprintf(buf + out_offset, out_count, | 1213 | out_offset += snprintf(buf + out_offset, out_count, |
1214 | "name: %s\n", port->name ? port->name : ""); | 1214 | "name: %s\n", port->name ? port->name : ""); |
1215 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | 1215 | out_offset += snprintf(buf + out_offset, out_count - out_offset, |
1216 | "guest_connected: %d\n", port->guest_connected); | 1216 | "guest_connected: %d\n", port->guest_connected); |
1217 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | 1217 | out_offset += snprintf(buf + out_offset, out_count - out_offset, |
1218 | "host_connected: %d\n", port->host_connected); | 1218 | "host_connected: %d\n", port->host_connected); |
1219 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | 1219 | out_offset += snprintf(buf + out_offset, out_count - out_offset, |
1220 | "outvq_full: %d\n", port->outvq_full); | 1220 | "outvq_full: %d\n", port->outvq_full); |
1221 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | 1221 | out_offset += snprintf(buf + out_offset, out_count - out_offset, |
1222 | "bytes_sent: %lu\n", port->stats.bytes_sent); | 1222 | "bytes_sent: %lu\n", port->stats.bytes_sent); |
1223 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | 1223 | out_offset += snprintf(buf + out_offset, out_count - out_offset, |
1224 | "bytes_received: %lu\n", | 1224 | "bytes_received: %lu\n", |
1225 | port->stats.bytes_received); | 1225 | port->stats.bytes_received); |
1226 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | 1226 | out_offset += snprintf(buf + out_offset, out_count - out_offset, |
1227 | "bytes_discarded: %lu\n", | 1227 | "bytes_discarded: %lu\n", |
1228 | port->stats.bytes_discarded); | 1228 | port->stats.bytes_discarded); |
1229 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | 1229 | out_offset += snprintf(buf + out_offset, out_count - out_offset, |
1230 | "is_console: %s\n", | 1230 | "is_console: %s\n", |
1231 | is_console_port(port) ? "yes" : "no"); | 1231 | is_console_port(port) ? "yes" : "no"); |
1232 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | 1232 | out_offset += snprintf(buf + out_offset, out_count - out_offset, |
1233 | "console_vtermno: %u\n", port->cons.vtermno); | 1233 | "console_vtermno: %u\n", port->cons.vtermno); |
1234 | 1234 | ||
1235 | ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset); | 1235 | ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset); |
1236 | kfree(buf); | 1236 | kfree(buf); |
1237 | return ret; | 1237 | return ret; |
1238 | } | 1238 | } |
1239 | 1239 | ||
1240 | static const struct file_operations port_debugfs_ops = { | 1240 | static const struct file_operations port_debugfs_ops = { |
1241 | .owner = THIS_MODULE, | 1241 | .owner = THIS_MODULE, |
1242 | .open = simple_open, | 1242 | .open = simple_open, |
1243 | .read = debugfs_read, | 1243 | .read = debugfs_read, |
1244 | }; | 1244 | }; |
1245 | 1245 | ||
1246 | static void set_console_size(struct port *port, u16 rows, u16 cols) | 1246 | static void set_console_size(struct port *port, u16 rows, u16 cols) |
1247 | { | 1247 | { |
1248 | if (!port || !is_console_port(port)) | 1248 | if (!port || !is_console_port(port)) |
1249 | return; | 1249 | return; |
1250 | 1250 | ||
1251 | port->cons.ws.ws_row = rows; | 1251 | port->cons.ws.ws_row = rows; |
1252 | port->cons.ws.ws_col = cols; | 1252 | port->cons.ws.ws_col = cols; |
1253 | } | 1253 | } |
1254 | 1254 | ||
1255 | static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock) | 1255 | static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock) |
1256 | { | 1256 | { |
1257 | struct port_buffer *buf; | 1257 | struct port_buffer *buf; |
1258 | unsigned int nr_added_bufs; | 1258 | unsigned int nr_added_bufs; |
1259 | int ret; | 1259 | int ret; |
1260 | 1260 | ||
1261 | nr_added_bufs = 0; | 1261 | nr_added_bufs = 0; |
1262 | do { | 1262 | do { |
1263 | buf = alloc_buf(PAGE_SIZE); | 1263 | buf = alloc_buf(PAGE_SIZE); |
1264 | if (!buf) | 1264 | if (!buf) |
1265 | break; | 1265 | break; |
1266 | 1266 | ||
1267 | spin_lock_irq(lock); | 1267 | spin_lock_irq(lock); |
1268 | ret = add_inbuf(vq, buf); | 1268 | ret = add_inbuf(vq, buf); |
1269 | if (ret < 0) { | 1269 | if (ret < 0) { |
1270 | spin_unlock_irq(lock); | 1270 | spin_unlock_irq(lock); |
1271 | free_buf(buf); | 1271 | free_buf(buf); |
1272 | break; | 1272 | break; |
1273 | } | 1273 | } |
1274 | nr_added_bufs++; | 1274 | nr_added_bufs++; |
1275 | spin_unlock_irq(lock); | 1275 | spin_unlock_irq(lock); |
1276 | } while (ret > 0); | 1276 | } while (ret > 0); |
1277 | 1277 | ||
1278 | return nr_added_bufs; | 1278 | return nr_added_bufs; |
1279 | } | 1279 | } |
1280 | 1280 | ||
1281 | static void send_sigio_to_port(struct port *port) | 1281 | static void send_sigio_to_port(struct port *port) |
1282 | { | 1282 | { |
1283 | if (port->async_queue && port->guest_connected) | 1283 | if (port->async_queue && port->guest_connected) |
1284 | kill_fasync(&port->async_queue, SIGIO, POLL_OUT); | 1284 | kill_fasync(&port->async_queue, SIGIO, POLL_OUT); |
1285 | } | 1285 | } |
1286 | 1286 | ||
1287 | static int add_port(struct ports_device *portdev, u32 id) | 1287 | static int add_port(struct ports_device *portdev, u32 id) |
1288 | { | 1288 | { |
1289 | char debugfs_name[16]; | 1289 | char debugfs_name[16]; |
1290 | struct port *port; | 1290 | struct port *port; |
1291 | struct port_buffer *buf; | 1291 | struct port_buffer *buf; |
1292 | dev_t devt; | 1292 | dev_t devt; |
1293 | unsigned int nr_added_bufs; | 1293 | unsigned int nr_added_bufs; |
1294 | int err; | 1294 | int err; |
1295 | 1295 | ||
1296 | port = kmalloc(sizeof(*port), GFP_KERNEL); | 1296 | port = kmalloc(sizeof(*port), GFP_KERNEL); |
1297 | if (!port) { | 1297 | if (!port) { |
1298 | err = -ENOMEM; | 1298 | err = -ENOMEM; |
1299 | goto fail; | 1299 | goto fail; |
1300 | } | 1300 | } |
1301 | kref_init(&port->kref); | 1301 | kref_init(&port->kref); |
1302 | 1302 | ||
1303 | port->portdev = portdev; | 1303 | port->portdev = portdev; |
1304 | port->id = id; | 1304 | port->id = id; |
1305 | 1305 | ||
1306 | port->name = NULL; | 1306 | port->name = NULL; |
1307 | port->inbuf = NULL; | 1307 | port->inbuf = NULL; |
1308 | port->cons.hvc = NULL; | 1308 | port->cons.hvc = NULL; |
1309 | port->async_queue = NULL; | 1309 | port->async_queue = NULL; |
1310 | 1310 | ||
1311 | port->cons.ws.ws_row = port->cons.ws.ws_col = 0; | 1311 | port->cons.ws.ws_row = port->cons.ws.ws_col = 0; |
1312 | 1312 | ||
1313 | port->host_connected = port->guest_connected = false; | 1313 | port->host_connected = port->guest_connected = false; |
1314 | port->stats = (struct port_stats) { 0 }; | 1314 | port->stats = (struct port_stats) { 0 }; |
1315 | 1315 | ||
1316 | port->outvq_full = false; | 1316 | port->outvq_full = false; |
1317 | 1317 | ||
1318 | port->in_vq = portdev->in_vqs[port->id]; | 1318 | port->in_vq = portdev->in_vqs[port->id]; |
1319 | port->out_vq = portdev->out_vqs[port->id]; | 1319 | port->out_vq = portdev->out_vqs[port->id]; |
1320 | 1320 | ||
1321 | port->cdev = cdev_alloc(); | 1321 | port->cdev = cdev_alloc(); |
1322 | if (!port->cdev) { | 1322 | if (!port->cdev) { |
1323 | dev_err(&port->portdev->vdev->dev, "Error allocating cdev\n"); | 1323 | dev_err(&port->portdev->vdev->dev, "Error allocating cdev\n"); |
1324 | err = -ENOMEM; | 1324 | err = -ENOMEM; |
1325 | goto free_port; | 1325 | goto free_port; |
1326 | } | 1326 | } |
1327 | port->cdev->ops = &port_fops; | 1327 | port->cdev->ops = &port_fops; |
1328 | 1328 | ||
1329 | devt = MKDEV(portdev->chr_major, id); | 1329 | devt = MKDEV(portdev->chr_major, id); |
1330 | err = cdev_add(port->cdev, devt, 1); | 1330 | err = cdev_add(port->cdev, devt, 1); |
1331 | if (err < 0) { | 1331 | if (err < 0) { |
1332 | dev_err(&port->portdev->vdev->dev, | 1332 | dev_err(&port->portdev->vdev->dev, |
1333 | "Error %d adding cdev for port %u\n", err, id); | 1333 | "Error %d adding cdev for port %u\n", err, id); |
1334 | goto free_cdev; | 1334 | goto free_cdev; |
1335 | } | 1335 | } |
1336 | port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev, | 1336 | port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev, |
1337 | devt, port, "vport%up%u", | 1337 | devt, port, "vport%up%u", |
1338 | port->portdev->drv_index, id); | 1338 | port->portdev->drv_index, id); |
1339 | if (IS_ERR(port->dev)) { | 1339 | if (IS_ERR(port->dev)) { |
1340 | err = PTR_ERR(port->dev); | 1340 | err = PTR_ERR(port->dev); |
1341 | dev_err(&port->portdev->vdev->dev, | 1341 | dev_err(&port->portdev->vdev->dev, |
1342 | "Error %d creating device for port %u\n", | 1342 | "Error %d creating device for port %u\n", |
1343 | err, id); | 1343 | err, id); |
1344 | goto free_cdev; | 1344 | goto free_cdev; |
1345 | } | 1345 | } |
1346 | 1346 | ||
1347 | spin_lock_init(&port->inbuf_lock); | 1347 | spin_lock_init(&port->inbuf_lock); |
1348 | spin_lock_init(&port->outvq_lock); | 1348 | spin_lock_init(&port->outvq_lock); |
1349 | init_waitqueue_head(&port->waitqueue); | 1349 | init_waitqueue_head(&port->waitqueue); |
1350 | 1350 | ||
1351 | /* Fill the in_vq with buffers so the host can send us data. */ | 1351 | /* Fill the in_vq with buffers so the host can send us data. */ |
1352 | nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock); | 1352 | nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock); |
1353 | if (!nr_added_bufs) { | 1353 | if (!nr_added_bufs) { |
1354 | dev_err(port->dev, "Error allocating inbufs\n"); | 1354 | dev_err(port->dev, "Error allocating inbufs\n"); |
1355 | err = -ENOMEM; | 1355 | err = -ENOMEM; |
1356 | goto free_device; | 1356 | goto free_device; |
1357 | } | 1357 | } |
1358 | 1358 | ||
1359 | /* | 1359 | /* |
1360 | * If we're not using multiport support, this has to be a console port | 1360 | * If we're not using multiport support, this has to be a console port |
1361 | */ | 1361 | */ |
1362 | if (!use_multiport(port->portdev)) { | 1362 | if (!use_multiport(port->portdev)) { |
1363 | err = init_port_console(port); | 1363 | err = init_port_console(port); |
1364 | if (err) | 1364 | if (err) |
1365 | goto free_inbufs; | 1365 | goto free_inbufs; |
1366 | } | 1366 | } |
1367 | 1367 | ||
1368 | spin_lock_irq(&portdev->ports_lock); | 1368 | spin_lock_irq(&portdev->ports_lock); |
1369 | list_add_tail(&port->list, &port->portdev->ports); | 1369 | list_add_tail(&port->list, &port->portdev->ports); |
1370 | spin_unlock_irq(&portdev->ports_lock); | 1370 | spin_unlock_irq(&portdev->ports_lock); |
1371 | 1371 | ||
1372 | /* | 1372 | /* |
1373 | * Tell the Host we're set so that it can send us various | 1373 | * Tell the Host we're set so that it can send us various |
1374 | * configuration parameters for this port (e.g., port name, | 1374 | * configuration parameters for this port (e.g., port name, |
1375 | * caching, whether this is a console port, etc.) | 1375 | * caching, whether this is a console port, etc.) |
1376 | */ | 1376 | */ |
1377 | send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); | 1377 | send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); |
1378 | 1378 | ||
1379 | if (pdrvdata.debugfs_dir) { | 1379 | if (pdrvdata.debugfs_dir) { |
1380 | /* | 1380 | /* |
1381 | * Finally, create the debugfs file that we can use to | 1381 | * Finally, create the debugfs file that we can use to |
1382 | * inspect a port's state at any time | 1382 | * inspect a port's state at any time |
1383 | */ | 1383 | */ |
1384 | sprintf(debugfs_name, "vport%up%u", | 1384 | sprintf(debugfs_name, "vport%up%u", |
1385 | port->portdev->drv_index, id); | 1385 | port->portdev->drv_index, id); |
1386 | port->debugfs_file = debugfs_create_file(debugfs_name, 0444, | 1386 | port->debugfs_file = debugfs_create_file(debugfs_name, 0444, |
1387 | pdrvdata.debugfs_dir, | 1387 | pdrvdata.debugfs_dir, |
1388 | port, | 1388 | port, |
1389 | &port_debugfs_ops); | 1389 | &port_debugfs_ops); |
1390 | } | 1390 | } |
1391 | return 0; | 1391 | return 0; |
1392 | 1392 | ||
1393 | free_inbufs: | 1393 | free_inbufs: |
1394 | while ((buf = virtqueue_detach_unused_buf(port->in_vq))) | 1394 | while ((buf = virtqueue_detach_unused_buf(port->in_vq))) |
1395 | free_buf(buf); | 1395 | free_buf(buf); |
1396 | free_device: | 1396 | free_device: |
1397 | device_destroy(pdrvdata.class, port->dev->devt); | 1397 | device_destroy(pdrvdata.class, port->dev->devt); |
1398 | free_cdev: | 1398 | free_cdev: |
1399 | cdev_del(port->cdev); | 1399 | cdev_del(port->cdev); |
1400 | free_port: | 1400 | free_port: |
1401 | kfree(port); | 1401 | kfree(port); |
1402 | fail: | 1402 | fail: |
1403 | /* The host might want to notify management sw about port add failure */ | 1403 | /* The host might want to notify management sw about port add failure */ |
1404 | __send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0); | 1404 | __send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0); |
1405 | return err; | 1405 | return err; |
1406 | } | 1406 | } |
1407 | 1407 | ||
1408 | /* No users remain, remove all port-specific data. */ | 1408 | /* No users remain, remove all port-specific data. */ |
1409 | static void remove_port(struct kref *kref) | 1409 | static void remove_port(struct kref *kref) |
1410 | { | 1410 | { |
1411 | struct port *port; | 1411 | struct port *port; |
1412 | 1412 | ||
1413 | port = container_of(kref, struct port, kref); | 1413 | port = container_of(kref, struct port, kref); |
1414 | 1414 | ||
1415 | sysfs_remove_group(&port->dev->kobj, &port_attribute_group); | 1415 | sysfs_remove_group(&port->dev->kobj, &port_attribute_group); |
1416 | device_destroy(pdrvdata.class, port->dev->devt); | 1416 | device_destroy(pdrvdata.class, port->dev->devt); |
1417 | cdev_del(port->cdev); | 1417 | cdev_del(port->cdev); |
1418 | 1418 | ||
1419 | kfree(port->name); | 1419 | kfree(port->name); |
1420 | 1420 | ||
1421 | debugfs_remove(port->debugfs_file); | 1421 | debugfs_remove(port->debugfs_file); |
1422 | 1422 | ||
1423 | kfree(port); | 1423 | kfree(port); |
1424 | } | 1424 | } |
1425 | 1425 | ||
1426 | static void remove_port_data(struct port *port) | 1426 | static void remove_port_data(struct port *port) |
1427 | { | 1427 | { |
1428 | struct port_buffer *buf; | 1428 | struct port_buffer *buf; |
1429 | 1429 | ||
1430 | /* Remove unused data this port might have received. */ | 1430 | /* Remove unused data this port might have received. */ |
1431 | discard_port_data(port); | 1431 | discard_port_data(port); |
1432 | 1432 | ||
1433 | reclaim_consumed_buffers(port); | 1433 | reclaim_consumed_buffers(port); |
1434 | 1434 | ||
1435 | /* Remove buffers we queued up for the Host to send us data in. */ | 1435 | /* Remove buffers we queued up for the Host to send us data in. */ |
1436 | while ((buf = virtqueue_detach_unused_buf(port->in_vq))) | 1436 | while ((buf = virtqueue_detach_unused_buf(port->in_vq))) |
1437 | free_buf(buf); | 1437 | free_buf(buf); |
1438 | } | 1438 | } |
1439 | 1439 | ||
1440 | /* | 1440 | /* |
1441 | * Port got unplugged. Remove port from portdev's list and drop the | 1441 | * Port got unplugged. Remove port from portdev's list and drop the |
1442 | * kref reference. If no userspace has this port opened, it will | 1442 | * kref reference. If no userspace has this port opened, it will |
1443 | * result in immediate removal of the port. | 1443 | * result in immediate removal of the port. |
1444 | */ | 1444 | */ |
1445 | static void unplug_port(struct port *port) | 1445 | static void unplug_port(struct port *port) |
1446 | { | 1446 | { |
1447 | spin_lock_irq(&port->portdev->ports_lock); | 1447 | spin_lock_irq(&port->portdev->ports_lock); |
1448 | list_del(&port->list); | 1448 | list_del(&port->list); |
1449 | spin_unlock_irq(&port->portdev->ports_lock); | 1449 | spin_unlock_irq(&port->portdev->ports_lock); |
1450 | 1450 | ||
1451 | if (port->guest_connected) { | 1451 | if (port->guest_connected) { |
1452 | port->guest_connected = false; | 1452 | port->guest_connected = false; |
1453 | port->host_connected = false; | 1453 | port->host_connected = false; |
1454 | wake_up_interruptible(&port->waitqueue); | 1454 | wake_up_interruptible(&port->waitqueue); |
1455 | 1455 | ||
1456 | /* Let the app know the port is going down. */ | 1456 | /* Let the app know the port is going down. */ |
1457 | send_sigio_to_port(port); | 1457 | send_sigio_to_port(port); |
1458 | } | 1458 | } |
1459 | 1459 | ||
1460 | if (is_console_port(port)) { | 1460 | if (is_console_port(port)) { |
1461 | spin_lock_irq(&pdrvdata_lock); | 1461 | spin_lock_irq(&pdrvdata_lock); |
1462 | list_del(&port->cons.list); | 1462 | list_del(&port->cons.list); |
1463 | spin_unlock_irq(&pdrvdata_lock); | 1463 | spin_unlock_irq(&pdrvdata_lock); |
1464 | hvc_remove(port->cons.hvc); | 1464 | hvc_remove(port->cons.hvc); |
1465 | } | 1465 | } |
1466 | 1466 | ||
1467 | remove_port_data(port); | 1467 | remove_port_data(port); |
1468 | 1468 | ||
1469 | /* | 1469 | /* |
1470 | * We should just assume the device itself has gone off -- | 1470 | * We should just assume the device itself has gone off -- |
1471 | * else a close on an open port later will try to send out a | 1471 | * else a close on an open port later will try to send out a |
1472 | * control message. | 1472 | * control message. |
1473 | */ | 1473 | */ |
1474 | port->portdev = NULL; | 1474 | port->portdev = NULL; |
1475 | 1475 | ||
1476 | /* | 1476 | /* |
1477 | * Locks around here are not necessary - a port can't be | 1477 | * Locks around here are not necessary - a port can't be |
1478 | * opened after we removed the port struct from ports_list | 1478 | * opened after we removed the port struct from ports_list |
1479 | * above. | 1479 | * above. |
1480 | */ | 1480 | */ |
1481 | kref_put(&port->kref, remove_port); | 1481 | kref_put(&port->kref, remove_port); |
1482 | } | 1482 | } |
1483 | 1483 | ||
1484 | /* Any private messages that the Host and Guest want to share */ | 1484 | /* Any private messages that the Host and Guest want to share */ |
1485 | static void handle_control_message(struct ports_device *portdev, | 1485 | static void handle_control_message(struct ports_device *portdev, |
1486 | struct port_buffer *buf) | 1486 | struct port_buffer *buf) |
1487 | { | 1487 | { |
1488 | struct virtio_console_control *cpkt; | 1488 | struct virtio_console_control *cpkt; |
1489 | struct port *port; | 1489 | struct port *port; |
1490 | size_t name_size; | 1490 | size_t name_size; |
1491 | int err; | 1491 | int err; |
1492 | 1492 | ||
1493 | cpkt = (struct virtio_console_control *)(buf->buf + buf->offset); | 1493 | cpkt = (struct virtio_console_control *)(buf->buf + buf->offset); |
1494 | 1494 | ||
1495 | port = find_port_by_id(portdev, cpkt->id); | 1495 | port = find_port_by_id(portdev, cpkt->id); |
1496 | if (!port && cpkt->event != VIRTIO_CONSOLE_PORT_ADD) { | 1496 | if (!port && cpkt->event != VIRTIO_CONSOLE_PORT_ADD) { |
1497 | /* No valid header at start of buffer. Drop it. */ | 1497 | /* No valid header at start of buffer. Drop it. */ |
1498 | dev_dbg(&portdev->vdev->dev, | 1498 | dev_dbg(&portdev->vdev->dev, |
1499 | "Invalid index %u in control packet\n", cpkt->id); | 1499 | "Invalid index %u in control packet\n", cpkt->id); |
1500 | return; | 1500 | return; |
1501 | } | 1501 | } |
1502 | 1502 | ||
1503 | switch (cpkt->event) { | 1503 | switch (cpkt->event) { |
1504 | case VIRTIO_CONSOLE_PORT_ADD: | 1504 | case VIRTIO_CONSOLE_PORT_ADD: |
1505 | if (port) { | 1505 | if (port) { |
1506 | dev_dbg(&portdev->vdev->dev, | 1506 | dev_dbg(&portdev->vdev->dev, |
1507 | "Port %u already added\n", port->id); | 1507 | "Port %u already added\n", port->id); |
1508 | send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); | 1508 | send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); |
1509 | break; | 1509 | break; |
1510 | } | 1510 | } |
1511 | if (cpkt->id >= portdev->config.max_nr_ports) { | 1511 | if (cpkt->id >= portdev->config.max_nr_ports) { |
1512 | dev_warn(&portdev->vdev->dev, | 1512 | dev_warn(&portdev->vdev->dev, |
1513 | "Request for adding port with out-of-bound id %u, max. supported id: %u\n", | 1513 | "Request for adding port with out-of-bound id %u, max. supported id: %u\n", |
1514 | cpkt->id, portdev->config.max_nr_ports - 1); | 1514 | cpkt->id, portdev->config.max_nr_ports - 1); |
1515 | break; | 1515 | break; |
1516 | } | 1516 | } |
1517 | add_port(portdev, cpkt->id); | 1517 | add_port(portdev, cpkt->id); |
1518 | break; | 1518 | break; |
1519 | case VIRTIO_CONSOLE_PORT_REMOVE: | 1519 | case VIRTIO_CONSOLE_PORT_REMOVE: |
1520 | unplug_port(port); | 1520 | unplug_port(port); |
1521 | break; | 1521 | break; |
1522 | case VIRTIO_CONSOLE_CONSOLE_PORT: | 1522 | case VIRTIO_CONSOLE_CONSOLE_PORT: |
1523 | if (!cpkt->value) | 1523 | if (!cpkt->value) |
1524 | break; | 1524 | break; |
1525 | if (is_console_port(port)) | 1525 | if (is_console_port(port)) |
1526 | break; | 1526 | break; |
1527 | 1527 | ||
1528 | init_port_console(port); | 1528 | init_port_console(port); |
1529 | complete(&early_console_added); | 1529 | complete(&early_console_added); |
1530 | /* | 1530 | /* |
1531 | * Could remove the port here in case init fails - but | 1531 | * Could remove the port here in case init fails - but |
1532 | * have to notify the host first. | 1532 | * have to notify the host first. |
1533 | */ | 1533 | */ |
1534 | break; | 1534 | break; |
1535 | case VIRTIO_CONSOLE_RESIZE: { | 1535 | case VIRTIO_CONSOLE_RESIZE: { |
1536 | struct { | 1536 | struct { |
1537 | __u16 rows; | 1537 | __u16 rows; |
1538 | __u16 cols; | 1538 | __u16 cols; |
1539 | } size; | 1539 | } size; |
1540 | 1540 | ||
1541 | if (!is_console_port(port)) | 1541 | if (!is_console_port(port)) |
1542 | break; | 1542 | break; |
1543 | 1543 | ||
1544 | memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt), | 1544 | memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt), |
1545 | sizeof(size)); | 1545 | sizeof(size)); |
1546 | set_console_size(port, size.rows, size.cols); | 1546 | set_console_size(port, size.rows, size.cols); |
1547 | 1547 | ||
1548 | port->cons.hvc->irq_requested = 1; | 1548 | port->cons.hvc->irq_requested = 1; |
1549 | resize_console(port); | 1549 | resize_console(port); |
1550 | break; | 1550 | break; |
1551 | } | 1551 | } |
1552 | case VIRTIO_CONSOLE_PORT_OPEN: | 1552 | case VIRTIO_CONSOLE_PORT_OPEN: |
1553 | port->host_connected = cpkt->value; | 1553 | port->host_connected = cpkt->value; |
1554 | wake_up_interruptible(&port->waitqueue); | 1554 | wake_up_interruptible(&port->waitqueue); |
1555 | /* | 1555 | /* |
1556 | * If the host port got closed and the host had any | 1556 | * If the host port got closed and the host had any |
1557 | * unconsumed buffers, we'll be able to reclaim them | 1557 | * unconsumed buffers, we'll be able to reclaim them |
1558 | * now. | 1558 | * now. |
1559 | */ | 1559 | */ |
1560 | spin_lock_irq(&port->outvq_lock); | 1560 | spin_lock_irq(&port->outvq_lock); |
1561 | reclaim_consumed_buffers(port); | 1561 | reclaim_consumed_buffers(port); |
1562 | spin_unlock_irq(&port->outvq_lock); | 1562 | spin_unlock_irq(&port->outvq_lock); |
1563 | 1563 | ||
1564 | /* | 1564 | /* |
1565 | * If the guest is connected, it'll be interested in | 1565 | * If the guest is connected, it'll be interested in |
1566 | * knowing the host connection state changed. | 1566 | * knowing the host connection state changed. |
1567 | */ | 1567 | */ |
1568 | send_sigio_to_port(port); | 1568 | send_sigio_to_port(port); |
1569 | break; | 1569 | break; |
1570 | case VIRTIO_CONSOLE_PORT_NAME: | 1570 | case VIRTIO_CONSOLE_PORT_NAME: |
1571 | /* | 1571 | /* |
1572 | * If we woke up after hibernation, we can get this | 1572 | * If we woke up after hibernation, we can get this |
1573 | * again. Skip it in that case. | 1573 | * again. Skip it in that case. |
1574 | */ | 1574 | */ |
1575 | if (port->name) | 1575 | if (port->name) |
1576 | break; | 1576 | break; |
1577 | 1577 | ||
1578 | /* | 1578 | /* |
1579 | * Skip the size of the header and the cpkt to get the size | 1579 | * Skip the size of the header and the cpkt to get the size |
1580 | * of the name that was sent | 1580 | * of the name that was sent |
1581 | */ | 1581 | */ |
1582 | name_size = buf->len - buf->offset - sizeof(*cpkt) + 1; | 1582 | name_size = buf->len - buf->offset - sizeof(*cpkt) + 1; |
1583 | 1583 | ||
1584 | port->name = kmalloc(name_size, GFP_KERNEL); | 1584 | port->name = kmalloc(name_size, GFP_KERNEL); |
1585 | if (!port->name) { | 1585 | if (!port->name) { |
1586 | dev_err(port->dev, | 1586 | dev_err(port->dev, |
1587 | "Not enough space to store port name\n"); | 1587 | "Not enough space to store port name\n"); |
1588 | break; | 1588 | break; |
1589 | } | 1589 | } |
1590 | strncpy(port->name, buf->buf + buf->offset + sizeof(*cpkt), | 1590 | strncpy(port->name, buf->buf + buf->offset + sizeof(*cpkt), |
1591 | name_size - 1); | 1591 | name_size - 1); |
1592 | port->name[name_size - 1] = 0; | 1592 | port->name[name_size - 1] = 0; |
1593 | 1593 | ||
1594 | /* | 1594 | /* |
1595 | * Since we only have one sysfs attribute, 'name', | 1595 | * Since we only have one sysfs attribute, 'name', |
1596 | * create it only if we have a name for the port. | 1596 | * create it only if we have a name for the port. |
1597 | */ | 1597 | */ |
1598 | err = sysfs_create_group(&port->dev->kobj, | 1598 | err = sysfs_create_group(&port->dev->kobj, |
1599 | &port_attribute_group); | 1599 | &port_attribute_group); |
1600 | if (err) { | 1600 | if (err) { |
1601 | dev_err(port->dev, | 1601 | dev_err(port->dev, |
1602 | "Error %d creating sysfs device attributes\n", | 1602 | "Error %d creating sysfs device attributes\n", |
1603 | err); | 1603 | err); |
1604 | } else { | 1604 | } else { |
1605 | /* | 1605 | /* |
1606 | * Generate a udev event so that appropriate | 1606 | * Generate a udev event so that appropriate |
1607 | * symlinks can be created based on udev | 1607 | * symlinks can be created based on udev |
1608 | * rules. | 1608 | * rules. |
1609 | */ | 1609 | */ |
1610 | kobject_uevent(&port->dev->kobj, KOBJ_CHANGE); | 1610 | kobject_uevent(&port->dev->kobj, KOBJ_CHANGE); |
1611 | } | 1611 | } |
1612 | break; | 1612 | break; |
1613 | } | 1613 | } |
1614 | } | 1614 | } |
1615 | 1615 | ||
1616 | static void control_work_handler(struct work_struct *work) | 1616 | static void control_work_handler(struct work_struct *work) |
1617 | { | 1617 | { |
1618 | struct ports_device *portdev; | 1618 | struct ports_device *portdev; |
1619 | struct virtqueue *vq; | 1619 | struct virtqueue *vq; |
1620 | struct port_buffer *buf; | 1620 | struct port_buffer *buf; |
1621 | unsigned int len; | 1621 | unsigned int len; |
1622 | 1622 | ||
1623 | portdev = container_of(work, struct ports_device, control_work); | 1623 | portdev = container_of(work, struct ports_device, control_work); |
1624 | vq = portdev->c_ivq; | 1624 | vq = portdev->c_ivq; |
1625 | 1625 | ||
1626 | spin_lock(&portdev->cvq_lock); | 1626 | spin_lock(&portdev->cvq_lock); |
1627 | while ((buf = virtqueue_get_buf(vq, &len))) { | 1627 | while ((buf = virtqueue_get_buf(vq, &len))) { |
1628 | spin_unlock(&portdev->cvq_lock); | 1628 | spin_unlock(&portdev->cvq_lock); |
1629 | 1629 | ||
1630 | buf->len = len; | 1630 | buf->len = len; |
1631 | buf->offset = 0; | 1631 | buf->offset = 0; |
1632 | 1632 | ||
1633 | handle_control_message(portdev, buf); | 1633 | handle_control_message(portdev, buf); |
1634 | 1634 | ||
1635 | spin_lock(&portdev->cvq_lock); | 1635 | spin_lock(&portdev->cvq_lock); |
1636 | if (add_inbuf(portdev->c_ivq, buf) < 0) { | 1636 | if (add_inbuf(portdev->c_ivq, buf) < 0) { |
1637 | dev_warn(&portdev->vdev->dev, | 1637 | dev_warn(&portdev->vdev->dev, |
1638 | "Error adding buffer to queue\n"); | 1638 | "Error adding buffer to queue\n"); |
1639 | free_buf(buf); | 1639 | free_buf(buf); |
1640 | } | 1640 | } |
1641 | } | 1641 | } |
1642 | spin_unlock(&portdev->cvq_lock); | 1642 | spin_unlock(&portdev->cvq_lock); |
1643 | } | 1643 | } |
1644 | 1644 | ||
1645 | static void out_intr(struct virtqueue *vq) | 1645 | static void out_intr(struct virtqueue *vq) |
1646 | { | 1646 | { |
1647 | struct port *port; | 1647 | struct port *port; |
1648 | 1648 | ||
1649 | port = find_port_by_vq(vq->vdev->priv, vq); | 1649 | port = find_port_by_vq(vq->vdev->priv, vq); |
1650 | if (!port) | 1650 | if (!port) |
1651 | return; | 1651 | return; |
1652 | 1652 | ||
1653 | wake_up_interruptible(&port->waitqueue); | 1653 | wake_up_interruptible(&port->waitqueue); |
1654 | } | 1654 | } |
1655 | 1655 | ||
1656 | static void in_intr(struct virtqueue *vq) | 1656 | static void in_intr(struct virtqueue *vq) |
1657 | { | 1657 | { |
1658 | struct port *port; | 1658 | struct port *port; |
1659 | unsigned long flags; | 1659 | unsigned long flags; |
1660 | 1660 | ||
1661 | port = find_port_by_vq(vq->vdev->priv, vq); | 1661 | port = find_port_by_vq(vq->vdev->priv, vq); |
1662 | if (!port) | 1662 | if (!port) |
1663 | return; | 1663 | return; |
1664 | 1664 | ||
1665 | spin_lock_irqsave(&port->inbuf_lock, flags); | 1665 | spin_lock_irqsave(&port->inbuf_lock, flags); |
1666 | port->inbuf = get_inbuf(port); | 1666 | port->inbuf = get_inbuf(port); |
1667 | 1667 | ||
1668 | /* | 1668 | /* |
1669 | * Don't queue up data when port is closed. This condition | 1669 | * Don't queue up data when port is closed. This condition |
1670 | * can be reached when a console port is not yet connected (no | 1670 | * can be reached when a console port is not yet connected (no |
1671 | * tty is spawned) and the host sends out data to console | 1671 | * tty is spawned) and the host sends out data to console |
1672 | * ports. For generic serial ports, the host won't | 1672 | * ports. For generic serial ports, the host won't |
1673 | * (shouldn't) send data till the guest is connected. | 1673 | * (shouldn't) send data till the guest is connected. |
1674 | */ | 1674 | */ |
1675 | if (!port->guest_connected) | 1675 | if (!port->guest_connected) |
1676 | discard_port_data(port); | 1676 | discard_port_data(port); |
1677 | 1677 | ||
1678 | spin_unlock_irqrestore(&port->inbuf_lock, flags); | 1678 | spin_unlock_irqrestore(&port->inbuf_lock, flags); |
1679 | 1679 | ||
1680 | wake_up_interruptible(&port->waitqueue); | 1680 | wake_up_interruptible(&port->waitqueue); |
1681 | 1681 | ||
1682 | /* Send a SIGIO indicating new data in case the process asked for it */ | 1682 | /* Send a SIGIO indicating new data in case the process asked for it */ |
1683 | send_sigio_to_port(port); | 1683 | send_sigio_to_port(port); |
1684 | 1684 | ||
1685 | if (is_console_port(port) && hvc_poll(port->cons.hvc)) | 1685 | if (is_console_port(port) && hvc_poll(port->cons.hvc)) |
1686 | hvc_kick(); | 1686 | hvc_kick(); |
1687 | } | 1687 | } |
1688 | 1688 | ||
1689 | static void control_intr(struct virtqueue *vq) | 1689 | static void control_intr(struct virtqueue *vq) |
1690 | { | 1690 | { |
1691 | struct ports_device *portdev; | 1691 | struct ports_device *portdev; |
1692 | 1692 | ||
1693 | portdev = vq->vdev->priv; | 1693 | portdev = vq->vdev->priv; |
1694 | schedule_work(&portdev->control_work); | 1694 | schedule_work(&portdev->control_work); |
1695 | } | 1695 | } |
1696 | 1696 | ||
1697 | static void config_intr(struct virtio_device *vdev) | 1697 | static void config_intr(struct virtio_device *vdev) |
1698 | { | 1698 | { |
1699 | struct ports_device *portdev; | 1699 | struct ports_device *portdev; |
1700 | 1700 | ||
1701 | portdev = vdev->priv; | 1701 | portdev = vdev->priv; |
1702 | 1702 | ||
1703 | if (!use_multiport(portdev)) { | 1703 | if (!use_multiport(portdev)) { |
1704 | struct port *port; | 1704 | struct port *port; |
1705 | u16 rows, cols; | 1705 | u16 rows, cols; |
1706 | 1706 | ||
1707 | vdev->config->get(vdev, | 1707 | vdev->config->get(vdev, |
1708 | offsetof(struct virtio_console_config, cols), | 1708 | offsetof(struct virtio_console_config, cols), |
1709 | &cols, sizeof(u16)); | 1709 | &cols, sizeof(u16)); |
1710 | vdev->config->get(vdev, | 1710 | vdev->config->get(vdev, |
1711 | offsetof(struct virtio_console_config, rows), | 1711 | offsetof(struct virtio_console_config, rows), |
1712 | &rows, sizeof(u16)); | 1712 | &rows, sizeof(u16)); |
1713 | 1713 | ||
1714 | port = find_port_by_id(portdev, 0); | 1714 | port = find_port_by_id(portdev, 0); |
1715 | set_console_size(port, rows, cols); | 1715 | set_console_size(port, rows, cols); |
1716 | 1716 | ||
1717 | /* | 1717 | /* |
1718 | * We'll use this way of resizing only for legacy | 1718 | * We'll use this way of resizing only for legacy |
1719 | * support. For newer userspace | 1719 | * support. For newer userspace |
1720 | * (VIRTIO_CONSOLE_F_MULTIPORT+), use control messages | 1720 | * (VIRTIO_CONSOLE_F_MULTIPORT+), use control messages |
1721 | * to indicate console size changes so that it can be | 1721 | * to indicate console size changes so that it can be |
1722 | * done per-port. | 1722 | * done per-port. |
1723 | */ | 1723 | */ |
1724 | resize_console(port); | 1724 | resize_console(port); |
1725 | } | 1725 | } |
1726 | } | 1726 | } |
1727 | 1727 | ||
1728 | static int init_vqs(struct ports_device *portdev) | 1728 | static int init_vqs(struct ports_device *portdev) |
1729 | { | 1729 | { |
1730 | vq_callback_t **io_callbacks; | 1730 | vq_callback_t **io_callbacks; |
1731 | char **io_names; | 1731 | char **io_names; |
1732 | struct virtqueue **vqs; | 1732 | struct virtqueue **vqs; |
1733 | u32 i, j, nr_ports, nr_queues; | 1733 | u32 i, j, nr_ports, nr_queues; |
1734 | int err; | 1734 | int err; |
1735 | 1735 | ||
1736 | nr_ports = portdev->config.max_nr_ports; | 1736 | nr_ports = portdev->config.max_nr_ports; |
1737 | nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2; | 1737 | nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2; |
1738 | 1738 | ||
1739 | vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL); | 1739 | vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL); |
1740 | io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL); | 1740 | io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL); |
1741 | io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL); | 1741 | io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL); |
1742 | portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), | 1742 | portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), |
1743 | GFP_KERNEL); | 1743 | GFP_KERNEL); |
1744 | portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), | 1744 | portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), |
1745 | GFP_KERNEL); | 1745 | GFP_KERNEL); |
1746 | if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs || | 1746 | if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs || |
1747 | !portdev->out_vqs) { | 1747 | !portdev->out_vqs) { |
1748 | err = -ENOMEM; | 1748 | err = -ENOMEM; |
1749 | goto free; | 1749 | goto free; |
1750 | } | 1750 | } |
1751 | 1751 | ||
1752 | /* | 1752 | /* |
1753 | * For backward compat (newer host but older guest), the host | 1753 | * For backward compat (newer host but older guest), the host |
1754 | * spawns a console port first and also inits the vqs for port | 1754 | * spawns a console port first and also inits the vqs for port |
1755 | * 0 before others. | 1755 | * 0 before others. |
1756 | */ | 1756 | */ |
1757 | j = 0; | 1757 | j = 0; |
1758 | io_callbacks[j] = in_intr; | 1758 | io_callbacks[j] = in_intr; |
1759 | io_callbacks[j + 1] = out_intr; | 1759 | io_callbacks[j + 1] = out_intr; |
1760 | io_names[j] = "input"; | 1760 | io_names[j] = "input"; |
1761 | io_names[j + 1] = "output"; | 1761 | io_names[j + 1] = "output"; |
1762 | j += 2; | 1762 | j += 2; |
1763 | 1763 | ||
1764 | if (use_multiport(portdev)) { | 1764 | if (use_multiport(portdev)) { |
1765 | io_callbacks[j] = control_intr; | 1765 | io_callbacks[j] = control_intr; |
1766 | io_callbacks[j + 1] = NULL; | 1766 | io_callbacks[j + 1] = NULL; |
1767 | io_names[j] = "control-i"; | 1767 | io_names[j] = "control-i"; |
1768 | io_names[j + 1] = "control-o"; | 1768 | io_names[j + 1] = "control-o"; |
1769 | 1769 | ||
1770 | for (i = 1; i < nr_ports; i++) { | 1770 | for (i = 1; i < nr_ports; i++) { |
1771 | j += 2; | 1771 | j += 2; |
1772 | io_callbacks[j] = in_intr; | 1772 | io_callbacks[j] = in_intr; |
1773 | io_callbacks[j + 1] = out_intr; | 1773 | io_callbacks[j + 1] = out_intr; |
1774 | io_names[j] = "input"; | 1774 | io_names[j] = "input"; |
1775 | io_names[j + 1] = "output"; | 1775 | io_names[j + 1] = "output"; |
1776 | } | 1776 | } |
1777 | } | 1777 | } |
1778 | /* Find the queues. */ | 1778 | /* Find the queues. */ |
1779 | err = portdev->vdev->config->find_vqs(portdev->vdev, nr_queues, vqs, | 1779 | err = portdev->vdev->config->find_vqs(portdev->vdev, nr_queues, vqs, |
1780 | io_callbacks, | 1780 | io_callbacks, |
1781 | (const char **)io_names); | 1781 | (const char **)io_names); |
1782 | if (err) | 1782 | if (err) |
1783 | goto free; | 1783 | goto free; |
1784 | 1784 | ||
1785 | j = 0; | 1785 | j = 0; |
1786 | portdev->in_vqs[0] = vqs[0]; | 1786 | portdev->in_vqs[0] = vqs[0]; |
1787 | portdev->out_vqs[0] = vqs[1]; | 1787 | portdev->out_vqs[0] = vqs[1]; |
1788 | j += 2; | 1788 | j += 2; |
1789 | if (use_multiport(portdev)) { | 1789 | if (use_multiport(portdev)) { |
1790 | portdev->c_ivq = vqs[j]; | 1790 | portdev->c_ivq = vqs[j]; |
1791 | portdev->c_ovq = vqs[j + 1]; | 1791 | portdev->c_ovq = vqs[j + 1]; |
1792 | 1792 | ||
1793 | for (i = 1; i < nr_ports; i++) { | 1793 | for (i = 1; i < nr_ports; i++) { |
1794 | j += 2; | 1794 | j += 2; |
1795 | portdev->in_vqs[i] = vqs[j]; | 1795 | portdev->in_vqs[i] = vqs[j]; |
1796 | portdev->out_vqs[i] = vqs[j + 1]; | 1796 | portdev->out_vqs[i] = vqs[j + 1]; |
1797 | } | 1797 | } |
1798 | } | 1798 | } |
1799 | kfree(io_names); | 1799 | kfree(io_names); |
1800 | kfree(io_callbacks); | 1800 | kfree(io_callbacks); |
1801 | kfree(vqs); | 1801 | kfree(vqs); |
1802 | 1802 | ||
1803 | return 0; | 1803 | return 0; |
1804 | 1804 | ||
1805 | free: | 1805 | free: |
1806 | kfree(portdev->out_vqs); | 1806 | kfree(portdev->out_vqs); |
1807 | kfree(portdev->in_vqs); | 1807 | kfree(portdev->in_vqs); |
1808 | kfree(io_names); | 1808 | kfree(io_names); |
1809 | kfree(io_callbacks); | 1809 | kfree(io_callbacks); |
1810 | kfree(vqs); | 1810 | kfree(vqs); |
1811 | 1811 | ||
1812 | return err; | 1812 | return err; |
1813 | } | 1813 | } |
1814 | 1814 | ||
1815 | static const struct file_operations portdev_fops = { | 1815 | static const struct file_operations portdev_fops = { |
1816 | .owner = THIS_MODULE, | 1816 | .owner = THIS_MODULE, |
1817 | }; | 1817 | }; |
1818 | 1818 | ||
1819 | static void remove_vqs(struct ports_device *portdev) | 1819 | static void remove_vqs(struct ports_device *portdev) |
1820 | { | 1820 | { |
1821 | portdev->vdev->config->del_vqs(portdev->vdev); | 1821 | portdev->vdev->config->del_vqs(portdev->vdev); |
1822 | kfree(portdev->in_vqs); | 1822 | kfree(portdev->in_vqs); |
1823 | kfree(portdev->out_vqs); | 1823 | kfree(portdev->out_vqs); |
1824 | } | 1824 | } |
1825 | 1825 | ||
1826 | static void remove_controlq_data(struct ports_device *portdev) | 1826 | static void remove_controlq_data(struct ports_device *portdev) |
1827 | { | 1827 | { |
1828 | struct port_buffer *buf; | 1828 | struct port_buffer *buf; |
1829 | unsigned int len; | 1829 | unsigned int len; |
1830 | 1830 | ||
1831 | if (!use_multiport(portdev)) | 1831 | if (!use_multiport(portdev)) |
1832 | return; | 1832 | return; |
1833 | 1833 | ||
1834 | while ((buf = virtqueue_get_buf(portdev->c_ivq, &len))) | 1834 | while ((buf = virtqueue_get_buf(portdev->c_ivq, &len))) |
1835 | free_buf(buf); | 1835 | free_buf(buf); |
1836 | 1836 | ||
1837 | while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq))) | 1837 | while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq))) |
1838 | free_buf(buf); | 1838 | free_buf(buf); |
1839 | } | 1839 | } |
1840 | 1840 | ||
1841 | /* | 1841 | /* |
1842 | * Once we're further in boot, we get probed like any other virtio | 1842 | * Once we're further in boot, we get probed like any other virtio |
1843 | * device. | 1843 | * device. |
1844 | * | 1844 | * |
1845 | * If the host also supports multiple console ports, we check the | 1845 | * If the host also supports multiple console ports, we check the |
1846 | * config space to see how many ports the host has spawned. We | 1846 | * config space to see how many ports the host has spawned. We |
1847 | * initialize each port found. | 1847 | * initialize each port found. |
1848 | */ | 1848 | */ |
1849 | static int __devinit virtcons_probe(struct virtio_device *vdev) | 1849 | static int virtcons_probe(struct virtio_device *vdev) |
1850 | { | 1850 | { |
1851 | struct ports_device *portdev; | 1851 | struct ports_device *portdev; |
1852 | int err; | 1852 | int err; |
1853 | bool multiport; | 1853 | bool multiport; |
1854 | bool early = early_put_chars != NULL; | 1854 | bool early = early_put_chars != NULL; |
1855 | 1855 | ||
1856 | /* Ensure we read early_put_chars now */ | 1856 | /* Ensure we read early_put_chars now */ |
1857 | barrier(); | 1857 | barrier(); |
1858 | 1858 | ||
1859 | portdev = kmalloc(sizeof(*portdev), GFP_KERNEL); | 1859 | portdev = kmalloc(sizeof(*portdev), GFP_KERNEL); |
1860 | if (!portdev) { | 1860 | if (!portdev) { |
1861 | err = -ENOMEM; | 1861 | err = -ENOMEM; |
1862 | goto fail; | 1862 | goto fail; |
1863 | } | 1863 | } |
1864 | 1864 | ||
1865 | /* Attach this portdev to this virtio_device, and vice-versa. */ | 1865 | /* Attach this portdev to this virtio_device, and vice-versa. */ |
1866 | portdev->vdev = vdev; | 1866 | portdev->vdev = vdev; |
1867 | vdev->priv = portdev; | 1867 | vdev->priv = portdev; |
1868 | 1868 | ||
1869 | spin_lock_irq(&pdrvdata_lock); | 1869 | spin_lock_irq(&pdrvdata_lock); |
1870 | portdev->drv_index = pdrvdata.index++; | 1870 | portdev->drv_index = pdrvdata.index++; |
1871 | spin_unlock_irq(&pdrvdata_lock); | 1871 | spin_unlock_irq(&pdrvdata_lock); |
1872 | 1872 | ||
1873 | portdev->chr_major = register_chrdev(0, "virtio-portsdev", | 1873 | portdev->chr_major = register_chrdev(0, "virtio-portsdev", |
1874 | &portdev_fops); | 1874 | &portdev_fops); |
1875 | if (portdev->chr_major < 0) { | 1875 | if (portdev->chr_major < 0) { |
1876 | dev_err(&vdev->dev, | 1876 | dev_err(&vdev->dev, |
1877 | "Error %d registering chrdev for device %u\n", | 1877 | "Error %d registering chrdev for device %u\n", |
1878 | portdev->chr_major, portdev->drv_index); | 1878 | portdev->chr_major, portdev->drv_index); |
1879 | err = portdev->chr_major; | 1879 | err = portdev->chr_major; |
1880 | goto free; | 1880 | goto free; |
1881 | } | 1881 | } |
1882 | 1882 | ||
1883 | multiport = false; | 1883 | multiport = false; |
1884 | portdev->config.max_nr_ports = 1; | 1884 | portdev->config.max_nr_ports = 1; |
1885 | if (virtio_config_val(vdev, VIRTIO_CONSOLE_F_MULTIPORT, | 1885 | if (virtio_config_val(vdev, VIRTIO_CONSOLE_F_MULTIPORT, |
1886 | offsetof(struct virtio_console_config, | 1886 | offsetof(struct virtio_console_config, |
1887 | max_nr_ports), | 1887 | max_nr_ports), |
1888 | &portdev->config.max_nr_ports) == 0) | 1888 | &portdev->config.max_nr_ports) == 0) |
1889 | multiport = true; | 1889 | multiport = true; |
1890 | 1890 | ||
1891 | err = init_vqs(portdev); | 1891 | err = init_vqs(portdev); |
1892 | if (err < 0) { | 1892 | if (err < 0) { |
1893 | dev_err(&vdev->dev, "Error %d initializing vqs\n", err); | 1893 | dev_err(&vdev->dev, "Error %d initializing vqs\n", err); |
1894 | goto free_chrdev; | 1894 | goto free_chrdev; |
1895 | } | 1895 | } |
1896 | 1896 | ||
1897 | spin_lock_init(&portdev->ports_lock); | 1897 | spin_lock_init(&portdev->ports_lock); |
1898 | INIT_LIST_HEAD(&portdev->ports); | 1898 | INIT_LIST_HEAD(&portdev->ports); |
1899 | 1899 | ||
1900 | if (multiport) { | 1900 | if (multiport) { |
1901 | unsigned int nr_added_bufs; | 1901 | unsigned int nr_added_bufs; |
1902 | 1902 | ||
1903 | spin_lock_init(&portdev->cvq_lock); | 1903 | spin_lock_init(&portdev->cvq_lock); |
1904 | INIT_WORK(&portdev->control_work, &control_work_handler); | 1904 | INIT_WORK(&portdev->control_work, &control_work_handler); |
1905 | 1905 | ||
1906 | nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock); | 1906 | nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock); |
1907 | if (!nr_added_bufs) { | 1907 | if (!nr_added_bufs) { |
1908 | dev_err(&vdev->dev, | 1908 | dev_err(&vdev->dev, |
1909 | "Error allocating buffers for control queue\n"); | 1909 | "Error allocating buffers for control queue\n"); |
1910 | err = -ENOMEM; | 1910 | err = -ENOMEM; |
1911 | goto free_vqs; | 1911 | goto free_vqs; |
1912 | } | 1912 | } |
1913 | } else { | 1913 | } else { |
1914 | /* | 1914 | /* |
1915 | * For backward compatibility: Create a console port | 1915 | * For backward compatibility: Create a console port |
1916 | * if we're running on an older host. | 1916 | * if we're running on an older host. |
1917 | */ | 1917 | */ |
1918 | add_port(portdev, 0); | 1918 | add_port(portdev, 0); |
1919 | } | 1919 | } |
1920 | 1920 | ||
1921 | spin_lock_irq(&pdrvdata_lock); | 1921 | spin_lock_irq(&pdrvdata_lock); |
1922 | list_add_tail(&portdev->list, &pdrvdata.portdevs); | 1922 | list_add_tail(&portdev->list, &pdrvdata.portdevs); |
1923 | spin_unlock_irq(&pdrvdata_lock); | 1923 | spin_unlock_irq(&pdrvdata_lock); |
1924 | 1924 | ||
1925 | __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, | 1925 | __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, |
1926 | VIRTIO_CONSOLE_DEVICE_READY, 1); | 1926 | VIRTIO_CONSOLE_DEVICE_READY, 1); |
1927 | 1927 | ||
1928 | /* | 1928 | /* |
1929 | * If there was an early virtio console, assume that there are no | 1929 | * If there was an early virtio console, assume that there are no |
1930 | * other consoles. We need to wait until the hvc_alloc matches the | 1930 | * other consoles. We need to wait until the hvc_alloc matches the |
1931 | * hvc_instantiate, otherwise tty_open will complain, resulting in | 1931 | * hvc_instantiate, otherwise tty_open will complain, resulting in |
1932 | * a "Warning: unable to open an initial console" boot failure. | 1932 | * a "Warning: unable to open an initial console" boot failure. |
1933 | * Without multiport this is done in add_port above. With multiport | 1933 | * Without multiport this is done in add_port above. With multiport |
1934 | * this might take some host<->guest communication - thus we have to | 1934 | * this might take some host<->guest communication - thus we have to |
1935 | * wait. | 1935 | * wait. |
1936 | */ | 1936 | */ |
1937 | if (multiport && early) | 1937 | if (multiport && early) |
1938 | wait_for_completion(&early_console_added); | 1938 | wait_for_completion(&early_console_added); |
1939 | 1939 | ||
1940 | return 0; | 1940 | return 0; |
1941 | 1941 | ||
1942 | free_vqs: | 1942 | free_vqs: |
1943 | /* The host might want to notify mgmt sw about device add failure */ | 1943 | /* The host might want to notify mgmt sw about device add failure */ |
1944 | __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, | 1944 | __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, |
1945 | VIRTIO_CONSOLE_DEVICE_READY, 0); | 1945 | VIRTIO_CONSOLE_DEVICE_READY, 0); |
1946 | remove_vqs(portdev); | 1946 | remove_vqs(portdev); |
1947 | free_chrdev: | 1947 | free_chrdev: |
1948 | unregister_chrdev(portdev->chr_major, "virtio-portsdev"); | 1948 | unregister_chrdev(portdev->chr_major, "virtio-portsdev"); |
1949 | free: | 1949 | free: |
1950 | kfree(portdev); | 1950 | kfree(portdev); |
1951 | fail: | 1951 | fail: |
1952 | return err; | 1952 | return err; |
1953 | } | 1953 | } |
1954 | 1954 | ||
1955 | static void virtcons_remove(struct virtio_device *vdev) | 1955 | static void virtcons_remove(struct virtio_device *vdev) |
1956 | { | 1956 | { |
1957 | struct ports_device *portdev; | 1957 | struct ports_device *portdev; |
1958 | struct port *port, *port2; | 1958 | struct port *port, *port2; |
1959 | 1959 | ||
1960 | portdev = vdev->priv; | 1960 | portdev = vdev->priv; |
1961 | 1961 | ||
1962 | spin_lock_irq(&pdrvdata_lock); | 1962 | spin_lock_irq(&pdrvdata_lock); |
1963 | list_del(&portdev->list); | 1963 | list_del(&portdev->list); |
1964 | spin_unlock_irq(&pdrvdata_lock); | 1964 | spin_unlock_irq(&pdrvdata_lock); |
1965 | 1965 | ||
1966 | /* Disable interrupts for vqs */ | 1966 | /* Disable interrupts for vqs */ |
1967 | vdev->config->reset(vdev); | 1967 | vdev->config->reset(vdev); |
1968 | /* Finish up work that's lined up */ | 1968 | /* Finish up work that's lined up */ |
1969 | cancel_work_sync(&portdev->control_work); | 1969 | cancel_work_sync(&portdev->control_work); |
1970 | 1970 | ||
1971 | list_for_each_entry_safe(port, port2, &portdev->ports, list) | 1971 | list_for_each_entry_safe(port, port2, &portdev->ports, list) |
1972 | unplug_port(port); | 1972 | unplug_port(port); |
1973 | 1973 | ||
1974 | unregister_chrdev(portdev->chr_major, "virtio-portsdev"); | 1974 | unregister_chrdev(portdev->chr_major, "virtio-portsdev"); |
1975 | 1975 | ||
1976 | /* | 1976 | /* |
1977 | * When yanking out a device, we immediately lose the | 1977 | * When yanking out a device, we immediately lose the |
1978 | * (device-side) queues. So there's no point in keeping the | 1978 | * (device-side) queues. So there's no point in keeping the |
1979 | * guest side around till we drop our final reference. This | 1979 | * guest side around till we drop our final reference. This |
1980 | * also means that any ports which are in an open state will | 1980 | * also means that any ports which are in an open state will |
1981 | * have to just stop using the port, as the vqs are going | 1981 | * have to just stop using the port, as the vqs are going |
1982 | * away. | 1982 | * away. |
1983 | */ | 1983 | */ |
1984 | remove_controlq_data(portdev); | 1984 | remove_controlq_data(portdev); |
1985 | remove_vqs(portdev); | 1985 | remove_vqs(portdev); |
1986 | kfree(portdev); | 1986 | kfree(portdev); |
1987 | } | 1987 | } |
1988 | 1988 | ||
1989 | static struct virtio_device_id id_table[] = { | 1989 | static struct virtio_device_id id_table[] = { |
1990 | { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID }, | 1990 | { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID }, |
1991 | { 0 }, | 1991 | { 0 }, |
1992 | }; | 1992 | }; |
1993 | 1993 | ||
1994 | static unsigned int features[] = { | 1994 | static unsigned int features[] = { |
1995 | VIRTIO_CONSOLE_F_SIZE, | 1995 | VIRTIO_CONSOLE_F_SIZE, |
1996 | VIRTIO_CONSOLE_F_MULTIPORT, | 1996 | VIRTIO_CONSOLE_F_MULTIPORT, |
1997 | }; | 1997 | }; |
1998 | 1998 | ||
1999 | #ifdef CONFIG_PM | 1999 | #ifdef CONFIG_PM |
2000 | static int virtcons_freeze(struct virtio_device *vdev) | 2000 | static int virtcons_freeze(struct virtio_device *vdev) |
2001 | { | 2001 | { |
2002 | struct ports_device *portdev; | 2002 | struct ports_device *portdev; |
2003 | struct port *port; | 2003 | struct port *port; |
2004 | 2004 | ||
2005 | portdev = vdev->priv; | 2005 | portdev = vdev->priv; |
2006 | 2006 | ||
2007 | vdev->config->reset(vdev); | 2007 | vdev->config->reset(vdev); |
2008 | 2008 | ||
2009 | virtqueue_disable_cb(portdev->c_ivq); | 2009 | virtqueue_disable_cb(portdev->c_ivq); |
2010 | cancel_work_sync(&portdev->control_work); | 2010 | cancel_work_sync(&portdev->control_work); |
2011 | /* | 2011 | /* |
2012 | * Once more: if control_work_handler() was running, it would | 2012 | * Once more: if control_work_handler() was running, it would |
2013 | * enable the cb as the last step. | 2013 | * enable the cb as the last step. |
2014 | */ | 2014 | */ |
2015 | virtqueue_disable_cb(portdev->c_ivq); | 2015 | virtqueue_disable_cb(portdev->c_ivq); |
2016 | remove_controlq_data(portdev); | 2016 | remove_controlq_data(portdev); |
2017 | 2017 | ||
2018 | list_for_each_entry(port, &portdev->ports, list) { | 2018 | list_for_each_entry(port, &portdev->ports, list) { |
2019 | virtqueue_disable_cb(port->in_vq); | 2019 | virtqueue_disable_cb(port->in_vq); |
2020 | virtqueue_disable_cb(port->out_vq); | 2020 | virtqueue_disable_cb(port->out_vq); |
2021 | /* | 2021 | /* |
2022 | * We'll ask the host later if the new invocation has | 2022 | * We'll ask the host later if the new invocation has |
2023 | * the port opened or closed. | 2023 | * the port opened or closed. |
2024 | */ | 2024 | */ |
2025 | port->host_connected = false; | 2025 | port->host_connected = false; |
2026 | remove_port_data(port); | 2026 | remove_port_data(port); |
2027 | } | 2027 | } |
2028 | remove_vqs(portdev); | 2028 | remove_vqs(portdev); |
2029 | 2029 | ||
2030 | return 0; | 2030 | return 0; |
2031 | } | 2031 | } |
2032 | 2032 | ||
2033 | static int virtcons_restore(struct virtio_device *vdev) | 2033 | static int virtcons_restore(struct virtio_device *vdev) |
2034 | { | 2034 | { |
2035 | struct ports_device *portdev; | 2035 | struct ports_device *portdev; |
2036 | struct port *port; | 2036 | struct port *port; |
2037 | int ret; | 2037 | int ret; |
2038 | 2038 | ||
2039 | portdev = vdev->priv; | 2039 | portdev = vdev->priv; |
2040 | 2040 | ||
2041 | ret = init_vqs(portdev); | 2041 | ret = init_vqs(portdev); |
2042 | if (ret) | 2042 | if (ret) |
2043 | return ret; | 2043 | return ret; |
2044 | 2044 | ||
2045 | if (use_multiport(portdev)) | 2045 | if (use_multiport(portdev)) |
2046 | fill_queue(portdev->c_ivq, &portdev->cvq_lock); | 2046 | fill_queue(portdev->c_ivq, &portdev->cvq_lock); |
2047 | 2047 | ||
2048 | list_for_each_entry(port, &portdev->ports, list) { | 2048 | list_for_each_entry(port, &portdev->ports, list) { |
2049 | port->in_vq = portdev->in_vqs[port->id]; | 2049 | port->in_vq = portdev->in_vqs[port->id]; |
2050 | port->out_vq = portdev->out_vqs[port->id]; | 2050 | port->out_vq = portdev->out_vqs[port->id]; |
2051 | 2051 | ||
2052 | fill_queue(port->in_vq, &port->inbuf_lock); | 2052 | fill_queue(port->in_vq, &port->inbuf_lock); |
2053 | 2053 | ||
2054 | /* Get port open/close status on the host */ | 2054 | /* Get port open/close status on the host */ |
2055 | send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); | 2055 | send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1); |
2056 | 2056 | ||
2057 | /* | 2057 | /* |
2058 | * If a port was open at the time of suspending, we | 2058 | * If a port was open at the time of suspending, we |
2059 | * have to let the host know that it's still open. | 2059 | * have to let the host know that it's still open. |
2060 | */ | 2060 | */ |
2061 | if (port->guest_connected) | 2061 | if (port->guest_connected) |
2062 | send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1); | 2062 | send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1); |
2063 | } | 2063 | } |
2064 | return 0; | 2064 | return 0; |
2065 | } | 2065 | } |
2066 | #endif | 2066 | #endif |
2067 | 2067 | ||
2068 | static struct virtio_driver virtio_console = { | 2068 | static struct virtio_driver virtio_console = { |
2069 | .feature_table = features, | 2069 | .feature_table = features, |
2070 | .feature_table_size = ARRAY_SIZE(features), | 2070 | .feature_table_size = ARRAY_SIZE(features), |
2071 | .driver.name = KBUILD_MODNAME, | 2071 | .driver.name = KBUILD_MODNAME, |
2072 | .driver.owner = THIS_MODULE, | 2072 | .driver.owner = THIS_MODULE, |
2073 | .id_table = id_table, | 2073 | .id_table = id_table, |
2074 | .probe = virtcons_probe, | 2074 | .probe = virtcons_probe, |
2075 | .remove = virtcons_remove, | 2075 | .remove = virtcons_remove, |
2076 | .config_changed = config_intr, | 2076 | .config_changed = config_intr, |
2077 | #ifdef CONFIG_PM | 2077 | #ifdef CONFIG_PM |
2078 | .freeze = virtcons_freeze, | 2078 | .freeze = virtcons_freeze, |
2079 | .restore = virtcons_restore, | 2079 | .restore = virtcons_restore, |
2080 | #endif | 2080 | #endif |
2081 | }; | 2081 | }; |
2082 | 2082 | ||
2083 | static int __init init(void) | 2083 | static int __init init(void) |
2084 | { | 2084 | { |
2085 | int err; | 2085 | int err; |
2086 | 2086 | ||
2087 | pdrvdata.class = class_create(THIS_MODULE, "virtio-ports"); | 2087 | pdrvdata.class = class_create(THIS_MODULE, "virtio-ports"); |
2088 | if (IS_ERR(pdrvdata.class)) { | 2088 | if (IS_ERR(pdrvdata.class)) { |
2089 | err = PTR_ERR(pdrvdata.class); | 2089 | err = PTR_ERR(pdrvdata.class); |
2090 | pr_err("Error %d creating virtio-ports class\n", err); | 2090 | pr_err("Error %d creating virtio-ports class\n", err); |
2091 | return err; | 2091 | return err; |
2092 | } | 2092 | } |
2093 | 2093 | ||
2094 | pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL); | 2094 | pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL); |
2095 | if (!pdrvdata.debugfs_dir) { | 2095 | if (!pdrvdata.debugfs_dir) { |
2096 | pr_warning("Error %ld creating debugfs dir for virtio-ports\n", | 2096 | pr_warning("Error %ld creating debugfs dir for virtio-ports\n", |
2097 | PTR_ERR(pdrvdata.debugfs_dir)); | 2097 | PTR_ERR(pdrvdata.debugfs_dir)); |
2098 | } | 2098 | } |
2099 | INIT_LIST_HEAD(&pdrvdata.consoles); | 2099 | INIT_LIST_HEAD(&pdrvdata.consoles); |
2100 | INIT_LIST_HEAD(&pdrvdata.portdevs); | 2100 | INIT_LIST_HEAD(&pdrvdata.portdevs); |
2101 | 2101 | ||
2102 | err = register_virtio_driver(&virtio_console); | 2102 | err = register_virtio_driver(&virtio_console); |
2103 | if (err < 0) { | 2103 | if (err < 0) { |
2104 | pr_err("Error %d registering virtio driver\n", err); | 2104 | pr_err("Error %d registering virtio driver\n", err); |
2105 | goto free; | 2105 | goto free; |
2106 | } | 2106 | } |
2107 | return 0; | 2107 | return 0; |
2108 | free: | 2108 | free: |
2109 | if (pdrvdata.debugfs_dir) | 2109 | if (pdrvdata.debugfs_dir) |
2110 | debugfs_remove_recursive(pdrvdata.debugfs_dir); | 2110 | debugfs_remove_recursive(pdrvdata.debugfs_dir); |
2111 | class_destroy(pdrvdata.class); | 2111 | class_destroy(pdrvdata.class); |
2112 | return err; | 2112 | return err; |
2113 | } | 2113 | } |
2114 | 2114 | ||
2115 | static void __exit fini(void) | 2115 | static void __exit fini(void) |
2116 | { | 2116 | { |
2117 | unregister_virtio_driver(&virtio_console); | 2117 | unregister_virtio_driver(&virtio_console); |
2118 | 2118 | ||
2119 | class_destroy(pdrvdata.class); | 2119 | class_destroy(pdrvdata.class); |
2120 | if (pdrvdata.debugfs_dir) | 2120 | if (pdrvdata.debugfs_dir) |
2121 | debugfs_remove_recursive(pdrvdata.debugfs_dir); | 2121 | debugfs_remove_recursive(pdrvdata.debugfs_dir); |
2122 | } | 2122 | } |
2123 | module_init(init); | 2123 | module_init(init); |
2124 | module_exit(fini); | 2124 | module_exit(fini); |
2125 | 2125 | ||
2126 | MODULE_DEVICE_TABLE(virtio, id_table); | 2126 | MODULE_DEVICE_TABLE(virtio, id_table); |
2127 | MODULE_DESCRIPTION("Virtio console driver"); | 2127 | MODULE_DESCRIPTION("Virtio console driver"); |
2128 | MODULE_LICENSE("GPL"); | 2128 | MODULE_LICENSE("GPL"); |
2129 | 2129 |
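For illustration only (not part of this commit): a minimal guest-side sketch of how a port registered by this driver is consumed from user space. The device node name assumes the conventional vport<dev>p<port> naming used for nodes created under the "virtio-ports" class; error handling is reduced to the essentials.

/* Hypothetical user-space example: exchange data over a virtio serial port. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char reply[256];
	ssize_t n;
	int fd = open("/dev/vport0p1", O_RDWR);	/* assumed node name */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Opening the node lets the driver tell the host the port is open. */
	if (write(fd, "ping\n", 5) != 5)
		perror("write");

	n = read(fd, reply, sizeof(reply) - 1);	/* blocks until the host writes back */
	if (n > 0) {
		reply[n] = '\0';
		printf("host said: %s", reply);
	}

	close(fd);
	return 0;
}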
drivers/char/xilinx_hwicap/xilinx_hwicap.c
1 | /***************************************************************************** | 1 | /***************************************************************************** |
2 | * | 2 | * |
3 | * Author: Xilinx, Inc. | 3 | * Author: Xilinx, Inc. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify it | 5 | * This program is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License as published by the | 6 | * under the terms of the GNU General Public License as published by the |
7 | * Free Software Foundation; either version 2 of the License, or (at your | 7 | * Free Software Foundation; either version 2 of the License, or (at your |
8 | * option) any later version. | 8 | * option) any later version. |
9 | * | 9 | * |
10 | * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS" | 10 | * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS" |
11 | * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND | 11 | * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND |
12 | * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE, | 12 | * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE, |
13 | * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE, | 13 | * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE, |
14 | * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION | 14 | * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION |
15 | * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT, | 15 | * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT, |
16 | * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE | 16 | * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE |
17 | * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY | 17 | * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY |
18 | * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE | 18 | * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE |
19 | * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR | 19 | * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR |
20 | * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF | 20 | * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF |
21 | * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | 21 | * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS |
22 | * FOR A PARTICULAR PURPOSE. | 22 | * FOR A PARTICULAR PURPOSE. |
23 | * | 23 | * |
24 | * (c) Copyright 2002 Xilinx Inc., Systems Engineering Group | 24 | * (c) Copyright 2002 Xilinx Inc., Systems Engineering Group |
25 | * (c) Copyright 2004 Xilinx Inc., Systems Engineering Group | 25 | * (c) Copyright 2004 Xilinx Inc., Systems Engineering Group |
26 | * (c) Copyright 2007-2008 Xilinx Inc. | 26 | * (c) Copyright 2007-2008 Xilinx Inc. |
27 | * All rights reserved. | 27 | * All rights reserved. |
28 | * | 28 | * |
29 | * You should have received a copy of the GNU General Public License along | 29 | * You should have received a copy of the GNU General Public License along |
30 | * with this program; if not, write to the Free Software Foundation, Inc., | 30 | * with this program; if not, write to the Free Software Foundation, Inc., |
31 | * 675 Mass Ave, Cambridge, MA 02139, USA. | 31 | * 675 Mass Ave, Cambridge, MA 02139, USA. |
32 | * | 32 | * |
33 | *****************************************************************************/ | 33 | *****************************************************************************/ |
34 | 34 | ||
35 | /* | 35 | /* |
36 | * This is the code behind /dev/icap* -- it allows a user-space | 36 | * This is the code behind /dev/icap* -- it allows a user-space |
37 | * application to use the Xilinx ICAP subsystem. | 37 | * application to use the Xilinx ICAP subsystem. |
38 | * | 38 | * |
39 | * The following operations are possible: | 39 | * The following operations are possible: |
40 | * | 40 | * |
41 | * open open the port and initialize for access. | 41 | * open open the port and initialize for access. |
42 | * release release port | 42 | * release release port |
43 | * write Write a bitstream to the configuration processor. | 43 | * write Write a bitstream to the configuration processor. |
44 | * read Read a data stream from the configuration processor. | 44 | * read Read a data stream from the configuration processor. |
45 | * | 45 | * |
46 | * After being opened, the port is initialized and accessed to avoid a | 46 | * After being opened, the port is initialized and accessed to avoid a |
47 | * corrupted first read which may occur with some hardware. The port | 47 | * corrupted first read which may occur with some hardware. The port |
48 | * is left in a desynched state, requiring that a synch sequence be | 48 | * is left in a desynched state, requiring that a synch sequence be |
49 | * transmitted before any valid configuration data. A user will have | 49 | * transmitted before any valid configuration data. A user will have |
50 | * exclusive access to the device while it remains open, and the state | 50 | * exclusive access to the device while it remains open, and the state |
51 | * of the ICAP cannot be guaranteed after the device is closed. Note | 51 | * of the ICAP cannot be guaranteed after the device is closed. Note |
52 | * that a complete reset of the core and the state of the ICAP cannot | 52 | * that a complete reset of the core and the state of the ICAP cannot |
53 | * be performed on many versions of the cores, hence users of this | 53 | * be performed on many versions of the cores, hence users of this |
54 | * device should avoid making inconsistent accesses to the device. In | 54 | * device should avoid making inconsistent accesses to the device. In |
55 | * particular, accessing the read interface, without first generating | 55 | * particular, accessing the read interface, without first generating |
56 | * a write containing a readback packet can leave the ICAP in an | 56 | * a write containing a readback packet can leave the ICAP in an |
57 | * inaccessible state. | 57 | * inaccessible state. |
58 | * | 58 | * |
59 | * Note that in order to use the read interface, it is first necessary | 59 | * Note that in order to use the read interface, it is first necessary |
60 | * to write a request packet to the write interface. i.e., it is not | 60 | * to write a request packet to the write interface. i.e., it is not |
61 | * possible to simply readback the bitstream (or any configuration | 61 | * possible to simply readback the bitstream (or any configuration |
62 | * bits) from a device without specifically requesting them first. | 62 | * bits) from a device without specifically requesting them first. |
63 | * The code to craft such packets is intended to be part of the | 63 | * The code to craft such packets is intended to be part of the |
64 | * user-space application code that uses this device. The simplest | 64 | * user-space application code that uses this device. The simplest |
65 | * way to use this interface is simply: | 65 | * way to use this interface is simply: |
66 | * | 66 | * |
67 | * cp foo.bit /dev/icap0 | 67 | * cp foo.bit /dev/icap0 |
68 | * | 68 | * |
69 | * Note that unless foo.bit is an appropriately constructed partial | 69 | * Note that unless foo.bit is an appropriately constructed partial |
70 | * bitstream, this has a high likelihood of overwriting the design | 70 | * bitstream, this has a high likelihood of overwriting the design |
71 | * currently programmed in the FPGA. | 71 | * currently programmed in the FPGA. |
72 | */ | 72 | */ |
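For illustration only (not part of this commit): the comment above boils usage down to "cp foo.bit /dev/icap0". A minimal user-space sketch of that same write path is shown below, assuming a pre-built partial bitstream file and the /dev/icap0 node created by this driver; error handling is reduced to the essentials.

/* Hypothetical user-space example: stream a bitstream file into /dev/icap0. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char buf[4096];
	ssize_t n;
	int in, icap;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <bitstream.bit>\n", argv[0]);
		return 1;
	}

	in = open(argv[1], O_RDONLY);
	icap = open("/dev/icap0", O_WRONLY);	/* node created by this driver */
	if (in < 0 || icap < 0) {
		perror("open");
		return 1;
	}

	/* The driver consumes whole 32-bit words; trailing bytes are buffered. */
	while ((n = read(in, buf, sizeof(buf))) > 0) {
		if (write(icap, buf, n) != n) {
			perror("write");
			return 1;
		}
	}

	close(icap);
	close(in);
	return 0;
}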
73 | 73 | ||
74 | #include <linux/module.h> | 74 | #include <linux/module.h> |
75 | #include <linux/kernel.h> | 75 | #include <linux/kernel.h> |
76 | #include <linux/types.h> | 76 | #include <linux/types.h> |
77 | #include <linux/ioport.h> | 77 | #include <linux/ioport.h> |
78 | #include <linux/interrupt.h> | 78 | #include <linux/interrupt.h> |
79 | #include <linux/fcntl.h> | 79 | #include <linux/fcntl.h> |
80 | #include <linux/init.h> | 80 | #include <linux/init.h> |
81 | #include <linux/poll.h> | 81 | #include <linux/poll.h> |
82 | #include <linux/proc_fs.h> | 82 | #include <linux/proc_fs.h> |
83 | #include <linux/mutex.h> | 83 | #include <linux/mutex.h> |
84 | #include <linux/sysctl.h> | 84 | #include <linux/sysctl.h> |
85 | #include <linux/fs.h> | 85 | #include <linux/fs.h> |
86 | #include <linux/cdev.h> | 86 | #include <linux/cdev.h> |
87 | #include <linux/platform_device.h> | 87 | #include <linux/platform_device.h> |
88 | #include <linux/slab.h> | 88 | #include <linux/slab.h> |
89 | 89 | ||
90 | #include <asm/io.h> | 90 | #include <asm/io.h> |
91 | #include <asm/uaccess.h> | 91 | #include <asm/uaccess.h> |
92 | 92 | ||
93 | #ifdef CONFIG_OF | 93 | #ifdef CONFIG_OF |
94 | /* For open firmware. */ | 94 | /* For open firmware. */ |
95 | #include <linux/of_address.h> | 95 | #include <linux/of_address.h> |
96 | #include <linux/of_device.h> | 96 | #include <linux/of_device.h> |
97 | #include <linux/of_platform.h> | 97 | #include <linux/of_platform.h> |
98 | #endif | 98 | #endif |
99 | 99 | ||
100 | #include "xilinx_hwicap.h" | 100 | #include "xilinx_hwicap.h" |
101 | #include "buffer_icap.h" | 101 | #include "buffer_icap.h" |
102 | #include "fifo_icap.h" | 102 | #include "fifo_icap.h" |
103 | 103 | ||
104 | #define DRIVER_NAME "icap" | 104 | #define DRIVER_NAME "icap" |
105 | 105 | ||
106 | #define HWICAP_REGS (0x10000) | 106 | #define HWICAP_REGS (0x10000) |
107 | 107 | ||
108 | #define XHWICAP_MAJOR 259 | 108 | #define XHWICAP_MAJOR 259 |
109 | #define XHWICAP_MINOR 0 | 109 | #define XHWICAP_MINOR 0 |
110 | #define HWICAP_DEVICES 1 | 110 | #define HWICAP_DEVICES 1 |
111 | 111 | ||
112 | /* An array entry is set to true when the corresponding device is registered. */ | 112 | /* An array entry is set to true when the corresponding device is registered. */ |
113 | static DEFINE_MUTEX(hwicap_mutex); | 113 | static DEFINE_MUTEX(hwicap_mutex); |
114 | static bool probed_devices[HWICAP_DEVICES]; | 114 | static bool probed_devices[HWICAP_DEVICES]; |
115 | static struct mutex icap_sem; | 115 | static struct mutex icap_sem; |
116 | 116 | ||
117 | static struct class *icap_class; | 117 | static struct class *icap_class; |
118 | 118 | ||
119 | #define UNIMPLEMENTED 0xFFFF | 119 | #define UNIMPLEMENTED 0xFFFF |
120 | 120 | ||
121 | static const struct config_registers v2_config_registers = { | 121 | static const struct config_registers v2_config_registers = { |
122 | .CRC = 0, | 122 | .CRC = 0, |
123 | .FAR = 1, | 123 | .FAR = 1, |
124 | .FDRI = 2, | 124 | .FDRI = 2, |
125 | .FDRO = 3, | 125 | .FDRO = 3, |
126 | .CMD = 4, | 126 | .CMD = 4, |
127 | .CTL = 5, | 127 | .CTL = 5, |
128 | .MASK = 6, | 128 | .MASK = 6, |
129 | .STAT = 7, | 129 | .STAT = 7, |
130 | .LOUT = 8, | 130 | .LOUT = 8, |
131 | .COR = 9, | 131 | .COR = 9, |
132 | .MFWR = 10, | 132 | .MFWR = 10, |
133 | .FLR = 11, | 133 | .FLR = 11, |
134 | .KEY = 12, | 134 | .KEY = 12, |
135 | .CBC = 13, | 135 | .CBC = 13, |
136 | .IDCODE = 14, | 136 | .IDCODE = 14, |
137 | .AXSS = UNIMPLEMENTED, | 137 | .AXSS = UNIMPLEMENTED, |
138 | .C0R_1 = UNIMPLEMENTED, | 138 | .C0R_1 = UNIMPLEMENTED, |
139 | .CSOB = UNIMPLEMENTED, | 139 | .CSOB = UNIMPLEMENTED, |
140 | .WBSTAR = UNIMPLEMENTED, | 140 | .WBSTAR = UNIMPLEMENTED, |
141 | .TIMER = UNIMPLEMENTED, | 141 | .TIMER = UNIMPLEMENTED, |
142 | .BOOTSTS = UNIMPLEMENTED, | 142 | .BOOTSTS = UNIMPLEMENTED, |
143 | .CTL_1 = UNIMPLEMENTED, | 143 | .CTL_1 = UNIMPLEMENTED, |
144 | }; | 144 | }; |
145 | 145 | ||
146 | static const struct config_registers v4_config_registers = { | 146 | static const struct config_registers v4_config_registers = { |
147 | .CRC = 0, | 147 | .CRC = 0, |
148 | .FAR = 1, | 148 | .FAR = 1, |
149 | .FDRI = 2, | 149 | .FDRI = 2, |
150 | .FDRO = 3, | 150 | .FDRO = 3, |
151 | .CMD = 4, | 151 | .CMD = 4, |
152 | .CTL = 5, | 152 | .CTL = 5, |
153 | .MASK = 6, | 153 | .MASK = 6, |
154 | .STAT = 7, | 154 | .STAT = 7, |
155 | .LOUT = 8, | 155 | .LOUT = 8, |
156 | .COR = 9, | 156 | .COR = 9, |
157 | .MFWR = 10, | 157 | .MFWR = 10, |
158 | .FLR = UNIMPLEMENTED, | 158 | .FLR = UNIMPLEMENTED, |
159 | .KEY = UNIMPLEMENTED, | 159 | .KEY = UNIMPLEMENTED, |
160 | .CBC = 11, | 160 | .CBC = 11, |
161 | .IDCODE = 12, | 161 | .IDCODE = 12, |
162 | .AXSS = 13, | 162 | .AXSS = 13, |
163 | .C0R_1 = UNIMPLEMENTED, | 163 | .C0R_1 = UNIMPLEMENTED, |
164 | .CSOB = UNIMPLEMENTED, | 164 | .CSOB = UNIMPLEMENTED, |
165 | .WBSTAR = UNIMPLEMENTED, | 165 | .WBSTAR = UNIMPLEMENTED, |
166 | .TIMER = UNIMPLEMENTED, | 166 | .TIMER = UNIMPLEMENTED, |
167 | .BOOTSTS = UNIMPLEMENTED, | 167 | .BOOTSTS = UNIMPLEMENTED, |
168 | .CTL_1 = UNIMPLEMENTED, | 168 | .CTL_1 = UNIMPLEMENTED, |
169 | }; | 169 | }; |
170 | 170 | ||
171 | static const struct config_registers v5_config_registers = { | 171 | static const struct config_registers v5_config_registers = { |
172 | .CRC = 0, | 172 | .CRC = 0, |
173 | .FAR = 1, | 173 | .FAR = 1, |
174 | .FDRI = 2, | 174 | .FDRI = 2, |
175 | .FDRO = 3, | 175 | .FDRO = 3, |
176 | .CMD = 4, | 176 | .CMD = 4, |
177 | .CTL = 5, | 177 | .CTL = 5, |
178 | .MASK = 6, | 178 | .MASK = 6, |
179 | .STAT = 7, | 179 | .STAT = 7, |
180 | .LOUT = 8, | 180 | .LOUT = 8, |
181 | .COR = 9, | 181 | .COR = 9, |
182 | .MFWR = 10, | 182 | .MFWR = 10, |
183 | .FLR = UNIMPLEMENTED, | 183 | .FLR = UNIMPLEMENTED, |
184 | .KEY = UNIMPLEMENTED, | 184 | .KEY = UNIMPLEMENTED, |
185 | .CBC = 11, | 185 | .CBC = 11, |
186 | .IDCODE = 12, | 186 | .IDCODE = 12, |
187 | .AXSS = 13, | 187 | .AXSS = 13, |
188 | .C0R_1 = 14, | 188 | .C0R_1 = 14, |
189 | .CSOB = 15, | 189 | .CSOB = 15, |
190 | .WBSTAR = 16, | 190 | .WBSTAR = 16, |
191 | .TIMER = 17, | 191 | .TIMER = 17, |
192 | .BOOTSTS = 18, | 192 | .BOOTSTS = 18, |
193 | .CTL_1 = 19, | 193 | .CTL_1 = 19, |
194 | }; | 194 | }; |
195 | 195 | ||
196 | static const struct config_registers v6_config_registers = { | 196 | static const struct config_registers v6_config_registers = { |
197 | .CRC = 0, | 197 | .CRC = 0, |
198 | .FAR = 1, | 198 | .FAR = 1, |
199 | .FDRI = 2, | 199 | .FDRI = 2, |
200 | .FDRO = 3, | 200 | .FDRO = 3, |
201 | .CMD = 4, | 201 | .CMD = 4, |
202 | .CTL = 5, | 202 | .CTL = 5, |
203 | .MASK = 6, | 203 | .MASK = 6, |
204 | .STAT = 7, | 204 | .STAT = 7, |
205 | .LOUT = 8, | 205 | .LOUT = 8, |
206 | .COR = 9, | 206 | .COR = 9, |
207 | .MFWR = 10, | 207 | .MFWR = 10, |
208 | .FLR = UNIMPLEMENTED, | 208 | .FLR = UNIMPLEMENTED, |
209 | .KEY = UNIMPLEMENTED, | 209 | .KEY = UNIMPLEMENTED, |
210 | .CBC = 11, | 210 | .CBC = 11, |
211 | .IDCODE = 12, | 211 | .IDCODE = 12, |
212 | .AXSS = 13, | 212 | .AXSS = 13, |
213 | .C0R_1 = 14, | 213 | .C0R_1 = 14, |
214 | .CSOB = 15, | 214 | .CSOB = 15, |
215 | .WBSTAR = 16, | 215 | .WBSTAR = 16, |
216 | .TIMER = 17, | 216 | .TIMER = 17, |
217 | .BOOTSTS = 22, | 217 | .BOOTSTS = 22, |
218 | .CTL_1 = 24, | 218 | .CTL_1 = 24, |
219 | }; | 219 | }; |
220 | 220 | ||
221 | /** | 221 | /** |
222 | * hwicap_command_desync - Send a DESYNC command to the ICAP port. | 222 | * hwicap_command_desync - Send a DESYNC command to the ICAP port. |
223 | * @drvdata: a pointer to the drvdata. | 223 | * @drvdata: a pointer to the drvdata. |
224 | * | 224 | * |
225 | * This command desynchronizes the ICAP. After this command, a | 225 | * This command desynchronizes the ICAP. After this command, a |
226 | * bitstream containing a NULL packet, followed by a SYNCH packet, is | 226 | * bitstream containing a NULL packet, followed by a SYNCH packet, is |
227 | * required before the ICAP will recognize commands. | 227 | * required before the ICAP will recognize commands. |
228 | */ | 228 | */ |
229 | static int hwicap_command_desync(struct hwicap_drvdata *drvdata) | 229 | static int hwicap_command_desync(struct hwicap_drvdata *drvdata) |
230 | { | 230 | { |
231 | u32 buffer[4]; | 231 | u32 buffer[4]; |
232 | u32 index = 0; | 232 | u32 index = 0; |
233 | 233 | ||
234 | /* | 234 | /* |
235 | * Create the data to be written to the ICAP. | 235 | * Create the data to be written to the ICAP. |
236 | */ | 236 | */ |
237 | buffer[index++] = hwicap_type_1_write(drvdata->config_regs->CMD) | 1; | 237 | buffer[index++] = hwicap_type_1_write(drvdata->config_regs->CMD) | 1; |
238 | buffer[index++] = XHI_CMD_DESYNCH; | 238 | buffer[index++] = XHI_CMD_DESYNCH; |
239 | buffer[index++] = XHI_NOOP_PACKET; | 239 | buffer[index++] = XHI_NOOP_PACKET; |
240 | buffer[index++] = XHI_NOOP_PACKET; | 240 | buffer[index++] = XHI_NOOP_PACKET; |
241 | 241 | ||
242 | /* | 242 | /* |
243 | * Write the data to the FIFO and initiate the transfer of data present | 243 | * Write the data to the FIFO and initiate the transfer of data present |
244 | * in the FIFO to the ICAP device. | 244 | * in the FIFO to the ICAP device. |
245 | */ | 245 | */ |
246 | return drvdata->config->set_configuration(drvdata, | 246 | return drvdata->config->set_configuration(drvdata, |
247 | &buffer[0], index); | 247 | &buffer[0], index); |
248 | } | 248 | } |
249 | 249 | ||
250 | /** | 250 | /** |
251 | * hwicap_get_configuration_register - Query a configuration register. | 251 | * hwicap_get_configuration_register - Query a configuration register. |
252 | * @drvdata: a pointer to the drvdata. | 252 | * @drvdata: a pointer to the drvdata. |
253 | * @reg: a constant which represents the configuration | 253 | * @reg: a constant which represents the configuration |
254 | * register value to be returned. | 254 | * register value to be returned. |
255 | * Examples: XHI_IDCODE, XHI_FLR. | 255 | * Examples: XHI_IDCODE, XHI_FLR. |
256 | * @reg_data: returns the value of the register. | 256 | * @reg_data: returns the value of the register. |
257 | * | 257 | * |
258 | * Sends a query packet to the ICAP and then receives the response. | 258 | * Sends a query packet to the ICAP and then receives the response. |
259 | * The ICAP is left in a synched state. | 259 | * The ICAP is left in a synched state. |
260 | */ | 260 | */ |
261 | static int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata, | 261 | static int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata, |
262 | u32 reg, u32 *reg_data) | 262 | u32 reg, u32 *reg_data) |
263 | { | 263 | { |
264 | int status; | 264 | int status; |
265 | u32 buffer[6]; | 265 | u32 buffer[6]; |
266 | u32 index = 0; | 266 | u32 index = 0; |
267 | 267 | ||
268 | /* | 268 | /* |
269 | * Create the data to be written to the ICAP. | 269 | * Create the data to be written to the ICAP. |
270 | */ | 270 | */ |
271 | buffer[index++] = XHI_DUMMY_PACKET; | 271 | buffer[index++] = XHI_DUMMY_PACKET; |
272 | buffer[index++] = XHI_NOOP_PACKET; | 272 | buffer[index++] = XHI_NOOP_PACKET; |
273 | buffer[index++] = XHI_SYNC_PACKET; | 273 | buffer[index++] = XHI_SYNC_PACKET; |
274 | buffer[index++] = XHI_NOOP_PACKET; | 274 | buffer[index++] = XHI_NOOP_PACKET; |
275 | buffer[index++] = XHI_NOOP_PACKET; | 275 | buffer[index++] = XHI_NOOP_PACKET; |
276 | 276 | ||
277 | /* | 277 | /* |
278 | * Write the data to the FIFO and initiate the transfer of data present | 278 | * Write the data to the FIFO and initiate the transfer of data present |
279 | * in the FIFO to the ICAP device. | 279 | * in the FIFO to the ICAP device. |
280 | */ | 280 | */ |
281 | status = drvdata->config->set_configuration(drvdata, | 281 | status = drvdata->config->set_configuration(drvdata, |
282 | &buffer[0], index); | 282 | &buffer[0], index); |
283 | if (status) | 283 | if (status) |
284 | return status; | 284 | return status; |
285 | 285 | ||
286 | /* If the syncword was not found, then we need to start over. */ | 286 | /* If the syncword was not found, then we need to start over. */ |
287 | status = drvdata->config->get_status(drvdata); | 287 | status = drvdata->config->get_status(drvdata); |
288 | if ((status & XHI_SR_DALIGN_MASK) != XHI_SR_DALIGN_MASK) | 288 | if ((status & XHI_SR_DALIGN_MASK) != XHI_SR_DALIGN_MASK) |
289 | return -EIO; | 289 | return -EIO; |
290 | 290 | ||
291 | index = 0; | 291 | index = 0; |
292 | buffer[index++] = hwicap_type_1_read(reg) | 1; | 292 | buffer[index++] = hwicap_type_1_read(reg) | 1; |
293 | buffer[index++] = XHI_NOOP_PACKET; | 293 | buffer[index++] = XHI_NOOP_PACKET; |
294 | buffer[index++] = XHI_NOOP_PACKET; | 294 | buffer[index++] = XHI_NOOP_PACKET; |
295 | 295 | ||
296 | /* | 296 | /* |
297 | * Write the data to the FIFO and initiate the transfer of data present | 297 | * Write the data to the FIFO and initiate the transfer of data present |
298 | * in the FIFO to the ICAP device. | 298 | * in the FIFO to the ICAP device. |
299 | */ | 299 | */ |
300 | status = drvdata->config->set_configuration(drvdata, | 300 | status = drvdata->config->set_configuration(drvdata, |
301 | &buffer[0], index); | 301 | &buffer[0], index); |
302 | if (status) | 302 | if (status) |
303 | return status; | 303 | return status; |
304 | 304 | ||
305 | /* | 305 | /* |
306 | * Read the configuration register | 306 | * Read the configuration register |
307 | */ | 307 | */ |
308 | status = drvdata->config->get_configuration(drvdata, reg_data, 1); | 308 | status = drvdata->config->get_configuration(drvdata, reg_data, 1); |
309 | if (status) | 309 | if (status) |
310 | return status; | 310 | return status; |
311 | 311 | ||
312 | return 0; | 312 | return 0; |
313 | } | 313 | } |
314 | 314 | ||
315 | static int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata) | 315 | static int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata) |
316 | { | 316 | { |
317 | int status; | 317 | int status; |
318 | u32 idcode; | 318 | u32 idcode; |
319 | 319 | ||
320 | dev_dbg(drvdata->dev, "initializing\n"); | 320 | dev_dbg(drvdata->dev, "initializing\n"); |
321 | 321 | ||
322 | /* Abort any current transaction, to make sure we have the | 322 | /* Abort any current transaction, to make sure we have the |
323 | * ICAP in a good state. */ | 323 | * ICAP in a good state. */ |
324 | dev_dbg(drvdata->dev, "Reset...\n"); | 324 | dev_dbg(drvdata->dev, "Reset...\n"); |
325 | drvdata->config->reset(drvdata); | 325 | drvdata->config->reset(drvdata); |
326 | 326 | ||
327 | dev_dbg(drvdata->dev, "Desync...\n"); | 327 | dev_dbg(drvdata->dev, "Desync...\n"); |
328 | status = hwicap_command_desync(drvdata); | 328 | status = hwicap_command_desync(drvdata); |
329 | if (status) | 329 | if (status) |
330 | return status; | 330 | return status; |
331 | 331 | ||
332 | /* Attempt to read the IDCODE from ICAP. This | 332 | /* Attempt to read the IDCODE from ICAP. This |
333 | * may not be returned correctly, due to the design of the | 333 | * may not be returned correctly, due to the design of the |
334 | * hardware. | 334 | * hardware. |
335 | */ | 335 | */ |
336 | dev_dbg(drvdata->dev, "Reading IDCODE...\n"); | 336 | dev_dbg(drvdata->dev, "Reading IDCODE...\n"); |
337 | status = hwicap_get_configuration_register( | 337 | status = hwicap_get_configuration_register( |
338 | drvdata, drvdata->config_regs->IDCODE, &idcode); | 338 | drvdata, drvdata->config_regs->IDCODE, &idcode); |
339 | dev_dbg(drvdata->dev, "IDCODE = %x\n", idcode); | 339 | dev_dbg(drvdata->dev, "IDCODE = %x\n", idcode); |
340 | if (status) | 340 | if (status) |
341 | return status; | 341 | return status; |
342 | 342 | ||
343 | dev_dbg(drvdata->dev, "Desync...\n"); | 343 | dev_dbg(drvdata->dev, "Desync...\n"); |
344 | status = hwicap_command_desync(drvdata); | 344 | status = hwicap_command_desync(drvdata); |
345 | if (status) | 345 | if (status) |
346 | return status; | 346 | return status; |
347 | 347 | ||
348 | return 0; | 348 | return 0; |
349 | } | 349 | } |
350 | 350 | ||
351 | static ssize_t | 351 | static ssize_t |
352 | hwicap_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) | 352 | hwicap_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) |
353 | { | 353 | { |
354 | struct hwicap_drvdata *drvdata = file->private_data; | 354 | struct hwicap_drvdata *drvdata = file->private_data; |
355 | ssize_t bytes_to_read = 0; | 355 | ssize_t bytes_to_read = 0; |
356 | u32 *kbuf; | 356 | u32 *kbuf; |
357 | u32 words; | 357 | u32 words; |
358 | u32 bytes_remaining; | 358 | u32 bytes_remaining; |
359 | int status; | 359 | int status; |
360 | 360 | ||
361 | status = mutex_lock_interruptible(&drvdata->sem); | 361 | status = mutex_lock_interruptible(&drvdata->sem); |
362 | if (status) | 362 | if (status) |
363 | return status; | 363 | return status; |
364 | 364 | ||
365 | if (drvdata->read_buffer_in_use) { | 365 | if (drvdata->read_buffer_in_use) { |
366 | /* If there are leftover bytes in the buffer, just */ | 366 | /* If there are leftover bytes in the buffer, just */ |
367 | /* return them and don't try to read more from the */ | 367 | /* return them and don't try to read more from the */ |
368 | /* ICAP device. */ | 368 | /* ICAP device. */ |
369 | bytes_to_read = | 369 | bytes_to_read = |
370 | (count < drvdata->read_buffer_in_use) ? count : | 370 | (count < drvdata->read_buffer_in_use) ? count : |
371 | drvdata->read_buffer_in_use; | 371 | drvdata->read_buffer_in_use; |
372 | 372 | ||
373 | /* Return the data currently in the read buffer. */ | 373 | /* Return the data currently in the read buffer. */ |
374 | if (copy_to_user(buf, drvdata->read_buffer, bytes_to_read)) { | 374 | if (copy_to_user(buf, drvdata->read_buffer, bytes_to_read)) { |
375 | status = -EFAULT; | 375 | status = -EFAULT; |
376 | goto error; | 376 | goto error; |
377 | } | 377 | } |
378 | drvdata->read_buffer_in_use -= bytes_to_read; | 378 | drvdata->read_buffer_in_use -= bytes_to_read; |
379 | memmove(drvdata->read_buffer, | 379 | memmove(drvdata->read_buffer, |
380 | drvdata->read_buffer + bytes_to_read, | 380 | drvdata->read_buffer + bytes_to_read, |
381 | 4 - bytes_to_read); | 381 | 4 - bytes_to_read); |
382 | } else { | 382 | } else { |
383 | /* Get new data from the ICAP, and return what was requested. */ | 383 | /* Get new data from the ICAP, and return what was requested. */ |
384 | kbuf = (u32 *) get_zeroed_page(GFP_KERNEL); | 384 | kbuf = (u32 *) get_zeroed_page(GFP_KERNEL); |
385 | if (!kbuf) { | 385 | if (!kbuf) { |
386 | status = -ENOMEM; | 386 | status = -ENOMEM; |
387 | goto error; | 387 | goto error; |
388 | } | 388 | } |
389 | 389 | ||
390 | /* The ICAP device is only able to read complete */ | 390 | /* The ICAP device is only able to read complete */ |
391 | /* words. If a number of bytes that do not correspond */ | 391 | /* words. If a number of bytes that do not correspond */ |
392 | /* to complete words is requested, then we read enough */ | 392 | /* to complete words is requested, then we read enough */ |
393 | /* words to get the required number of bytes, and then */ | 393 | /* words to get the required number of bytes, and then */ |
394 | /* save the remaining bytes for the next read. */ | 394 | /* save the remaining bytes for the next read. */ |
395 | 395 | ||
396 | /* Determine the number of words to read, rounding up */ | 396 | /* Determine the number of words to read, rounding up */ |
397 | /* if necessary. */ | 397 | /* if necessary. */ |
398 | words = ((count + 3) >> 2); | 398 | words = ((count + 3) >> 2); |
399 | bytes_to_read = words << 2; | 399 | bytes_to_read = words << 2; |
400 | 400 | ||
401 | if (bytes_to_read > PAGE_SIZE) | 401 | if (bytes_to_read > PAGE_SIZE) |
402 | bytes_to_read = PAGE_SIZE; | 402 | bytes_to_read = PAGE_SIZE; |
403 | 403 | ||
404 | /* Ensure we only read a complete number of words. */ | 404 | /* Ensure we only read a complete number of words. */ |
405 | bytes_remaining = bytes_to_read & 3; | 405 | bytes_remaining = bytes_to_read & 3; |
406 | bytes_to_read &= ~3; | 406 | bytes_to_read &= ~3; |
407 | words = bytes_to_read >> 2; | 407 | words = bytes_to_read >> 2; |
408 | 408 | ||
409 | status = drvdata->config->get_configuration(drvdata, | 409 | status = drvdata->config->get_configuration(drvdata, |
410 | kbuf, words); | 410 | kbuf, words); |
411 | 411 | ||
412 | /* If we didn't read correctly, then bail out. */ | 412 | /* If we didn't read correctly, then bail out. */ |
413 | if (status) { | 413 | if (status) { |
414 | free_page((unsigned long)kbuf); | 414 | free_page((unsigned long)kbuf); |
415 | goto error; | 415 | goto error; |
416 | } | 416 | } |
417 | 417 | ||
418 | /* If we fail to return the data to the user, then bail out. */ | 418 | /* If we fail to return the data to the user, then bail out. */ |
419 | if (copy_to_user(buf, kbuf, bytes_to_read)) { | 419 | if (copy_to_user(buf, kbuf, bytes_to_read)) { |
420 | free_page((unsigned long)kbuf); | 420 | free_page((unsigned long)kbuf); |
421 | status = -EFAULT; | 421 | status = -EFAULT; |
422 | goto error; | 422 | goto error; |
423 | } | 423 | } |
424 | memcpy(drvdata->read_buffer, | 424 | memcpy(drvdata->read_buffer, |
425 | kbuf, | 425 | kbuf, |
426 | bytes_remaining); | 426 | bytes_remaining); |
427 | drvdata->read_buffer_in_use = bytes_remaining; | 427 | drvdata->read_buffer_in_use = bytes_remaining; |
428 | free_page((unsigned long)kbuf); | 428 | free_page((unsigned long)kbuf); |
429 | } | 429 | } |
430 | status = bytes_to_read; | 430 | status = bytes_to_read; |
431 | error: | 431 | error: |
432 | mutex_unlock(&drvdata->sem); | 432 | mutex_unlock(&drvdata->sem); |
433 | return status; | 433 | return status; |
434 | } | 434 | } |
435 | 435 | ||
436 | static ssize_t | 436 | static ssize_t |
437 | hwicap_write(struct file *file, const char __user *buf, | 437 | hwicap_write(struct file *file, const char __user *buf, |
438 | size_t count, loff_t *ppos) | 438 | size_t count, loff_t *ppos) |
439 | { | 439 | { |
440 | struct hwicap_drvdata *drvdata = file->private_data; | 440 | struct hwicap_drvdata *drvdata = file->private_data; |
441 | ssize_t written = 0; | 441 | ssize_t written = 0; |
442 | ssize_t left = count; | 442 | ssize_t left = count; |
443 | u32 *kbuf; | 443 | u32 *kbuf; |
444 | ssize_t len; | 444 | ssize_t len; |
445 | ssize_t status; | 445 | ssize_t status; |
446 | 446 | ||
447 | status = mutex_lock_interruptible(&drvdata->sem); | 447 | status = mutex_lock_interruptible(&drvdata->sem); |
448 | if (status) | 448 | if (status) |
449 | return status; | 449 | return status; |
450 | 450 | ||
451 | left += drvdata->write_buffer_in_use; | 451 | left += drvdata->write_buffer_in_use; |
452 | 452 | ||
453 | /* Only write multiples of 4 bytes. */ | 453 | /* Only write multiples of 4 bytes. */ |
454 | if (left < 4) { | 454 | if (left < 4) { |
455 | status = 0; | 455 | status = 0; |
456 | goto error; | 456 | goto error; |
457 | } | 457 | } |
458 | 458 | ||
459 | kbuf = (u32 *) __get_free_page(GFP_KERNEL); | 459 | kbuf = (u32 *) __get_free_page(GFP_KERNEL); |
460 | if (!kbuf) { | 460 | if (!kbuf) { |
461 | status = -ENOMEM; | 461 | status = -ENOMEM; |
462 | goto error; | 462 | goto error; |
463 | } | 463 | } |
464 | 464 | ||
465 | while (left > 3) { | 465 | while (left > 3) { |
466 | /* only write multiples of 4 bytes, so there might */ | 466 | /* only write multiples of 4 bytes, so there might */ |
467 | /* be as many as 3 bytes left (at the end). */ | 467 | /* be as many as 3 bytes left (at the end). */ |
468 | len = left; | 468 | len = left; |
469 | 469 | ||
470 | if (len > PAGE_SIZE) | 470 | if (len > PAGE_SIZE) |
471 | len = PAGE_SIZE; | 471 | len = PAGE_SIZE; |
472 | len &= ~3; | 472 | len &= ~3; |
473 | 473 | ||
474 | if (drvdata->write_buffer_in_use) { | 474 | if (drvdata->write_buffer_in_use) { |
475 | memcpy(kbuf, drvdata->write_buffer, | 475 | memcpy(kbuf, drvdata->write_buffer, |
476 | drvdata->write_buffer_in_use); | 476 | drvdata->write_buffer_in_use); |
477 | if (copy_from_user( | 477 | if (copy_from_user( |
478 | (((char *)kbuf) + drvdata->write_buffer_in_use), | 478 | (((char *)kbuf) + drvdata->write_buffer_in_use), |
479 | buf + written, | 479 | buf + written, |
480 | len - (drvdata->write_buffer_in_use))) { | 480 | len - (drvdata->write_buffer_in_use))) { |
481 | free_page((unsigned long)kbuf); | 481 | free_page((unsigned long)kbuf); |
482 | status = -EFAULT; | 482 | status = -EFAULT; |
483 | goto error; | 483 | goto error; |
484 | } | 484 | } |
485 | } else { | 485 | } else { |
486 | if (copy_from_user(kbuf, buf + written, len)) { | 486 | if (copy_from_user(kbuf, buf + written, len)) { |
487 | free_page((unsigned long)kbuf); | 487 | free_page((unsigned long)kbuf); |
488 | status = -EFAULT; | 488 | status = -EFAULT; |
489 | goto error; | 489 | goto error; |
490 | } | 490 | } |
491 | } | 491 | } |
492 | 492 | ||
493 | status = drvdata->config->set_configuration(drvdata, | 493 | status = drvdata->config->set_configuration(drvdata, |
494 | kbuf, len >> 2); | 494 | kbuf, len >> 2); |
495 | 495 | ||
496 | if (status) { | 496 | if (status) { |
497 | free_page((unsigned long)kbuf); | 497 | free_page((unsigned long)kbuf); |
498 | status = -EFAULT; | 498 | status = -EFAULT; |
499 | goto error; | 499 | goto error; |
500 | } | 500 | } |
501 | if (drvdata->write_buffer_in_use) { | 501 | if (drvdata->write_buffer_in_use) { |
502 | len -= drvdata->write_buffer_in_use; | 502 | len -= drvdata->write_buffer_in_use; |
503 | left -= drvdata->write_buffer_in_use; | 503 | left -= drvdata->write_buffer_in_use; |
504 | drvdata->write_buffer_in_use = 0; | 504 | drvdata->write_buffer_in_use = 0; |
505 | } | 505 | } |
506 | written += len; | 506 | written += len; |
507 | left -= len; | 507 | left -= len; |
508 | } | 508 | } |
509 | if ((left > 0) && (left < 4)) { | 509 | if ((left > 0) && (left < 4)) { |
510 | if (!copy_from_user(drvdata->write_buffer, | 510 | if (!copy_from_user(drvdata->write_buffer, |
511 | buf + written, left)) { | 511 | buf + written, left)) { |
512 | drvdata->write_buffer_in_use = left; | 512 | drvdata->write_buffer_in_use = left; |
513 | written += left; | 513 | written += left; |
514 | left = 0; | 514 | left = 0; |
515 | } | 515 | } |
516 | } | 516 | } |
517 | 517 | ||
518 | free_page((unsigned long)kbuf); | 518 | free_page((unsigned long)kbuf); |
519 | status = written; | 519 | status = written; |
520 | error: | 520 | error: |
521 | mutex_unlock(&drvdata->sem); | 521 | mutex_unlock(&drvdata->sem); |
522 | return status; | 522 | return status; |
523 | } | 523 | } |
524 | 524 | ||
525 | static int hwicap_open(struct inode *inode, struct file *file) | 525 | static int hwicap_open(struct inode *inode, struct file *file) |
526 | { | 526 | { |
527 | struct hwicap_drvdata *drvdata; | 527 | struct hwicap_drvdata *drvdata; |
528 | int status; | 528 | int status; |
529 | 529 | ||
530 | mutex_lock(&hwicap_mutex); | 530 | mutex_lock(&hwicap_mutex); |
531 | drvdata = container_of(inode->i_cdev, struct hwicap_drvdata, cdev); | 531 | drvdata = container_of(inode->i_cdev, struct hwicap_drvdata, cdev); |
532 | 532 | ||
533 | status = mutex_lock_interruptible(&drvdata->sem); | 533 | status = mutex_lock_interruptible(&drvdata->sem); |
534 | if (status) | 534 | if (status) |
535 | goto out; | 535 | goto out; |
536 | 536 | ||
537 | if (drvdata->is_open) { | 537 | if (drvdata->is_open) { |
538 | status = -EBUSY; | 538 | status = -EBUSY; |
539 | goto error; | 539 | goto error; |
540 | } | 540 | } |
541 | 541 | ||
542 | status = hwicap_initialize_hwicap(drvdata); | 542 | status = hwicap_initialize_hwicap(drvdata); |
543 | if (status) { | 543 | if (status) { |
544 | dev_err(drvdata->dev, "Failed to open file"); | 544 | dev_err(drvdata->dev, "Failed to open file"); |
545 | goto error; | 545 | goto error; |
546 | } | 546 | } |
547 | 547 | ||
548 | file->private_data = drvdata; | 548 | file->private_data = drvdata; |
549 | drvdata->write_buffer_in_use = 0; | 549 | drvdata->write_buffer_in_use = 0; |
550 | drvdata->read_buffer_in_use = 0; | 550 | drvdata->read_buffer_in_use = 0; |
551 | drvdata->is_open = 1; | 551 | drvdata->is_open = 1; |
552 | 552 | ||
553 | error: | 553 | error: |
554 | mutex_unlock(&drvdata->sem); | 554 | mutex_unlock(&drvdata->sem); |
555 | out: | 555 | out: |
556 | mutex_unlock(&hwicap_mutex); | 556 | mutex_unlock(&hwicap_mutex); |
557 | return status; | 557 | return status; |
558 | } | 558 | } |
559 | 559 | ||
560 | static int hwicap_release(struct inode *inode, struct file *file) | 560 | static int hwicap_release(struct inode *inode, struct file *file) |
561 | { | 561 | { |
562 | struct hwicap_drvdata *drvdata = file->private_data; | 562 | struct hwicap_drvdata *drvdata = file->private_data; |
563 | int i; | 563 | int i; |
564 | int status = 0; | 564 | int status = 0; |
565 | 565 | ||
566 | mutex_lock(&drvdata->sem); | 566 | mutex_lock(&drvdata->sem); |
567 | 567 | ||
568 | if (drvdata->write_buffer_in_use) { | 568 | if (drvdata->write_buffer_in_use) { |
569 | /* Flush write buffer. */ | 569 | /* Flush write buffer. */ |
570 | for (i = drvdata->write_buffer_in_use; i < 4; i++) | 570 | for (i = drvdata->write_buffer_in_use; i < 4; i++) |
571 | drvdata->write_buffer[i] = 0; | 571 | drvdata->write_buffer[i] = 0; |
572 | 572 | ||
573 | status = drvdata->config->set_configuration(drvdata, | 573 | status = drvdata->config->set_configuration(drvdata, |
574 | (u32 *) drvdata->write_buffer, 1); | 574 | (u32 *) drvdata->write_buffer, 1); |
575 | if (status) | 575 | if (status) |
576 | goto error; | 576 | goto error; |
577 | } | 577 | } |
578 | 578 | ||
579 | status = hwicap_command_desync(drvdata); | 579 | status = hwicap_command_desync(drvdata); |
580 | if (status) | 580 | if (status) |
581 | goto error; | 581 | goto error; |
582 | 582 | ||
583 | error: | 583 | error: |
584 | drvdata->is_open = 0; | 584 | drvdata->is_open = 0; |
585 | mutex_unlock(&drvdata->sem); | 585 | mutex_unlock(&drvdata->sem); |
586 | return status; | 586 | return status; |
587 | } | 587 | } |
588 | 588 | ||
589 | static const struct file_operations hwicap_fops = { | 589 | static const struct file_operations hwicap_fops = { |
590 | .owner = THIS_MODULE, | 590 | .owner = THIS_MODULE, |
591 | .write = hwicap_write, | 591 | .write = hwicap_write, |
592 | .read = hwicap_read, | 592 | .read = hwicap_read, |
593 | .open = hwicap_open, | 593 | .open = hwicap_open, |
594 | .release = hwicap_release, | 594 | .release = hwicap_release, |
595 | .llseek = noop_llseek, | 595 | .llseek = noop_llseek, |
596 | }; | 596 | }; |
597 | 597 | ||
598 | static int __devinit hwicap_setup(struct device *dev, int id, | 598 | static int hwicap_setup(struct device *dev, int id, |
599 | const struct resource *regs_res, | 599 | const struct resource *regs_res, |
600 | const struct hwicap_driver_config *config, | 600 | const struct hwicap_driver_config *config, |
601 | const struct config_registers *config_regs) | 601 | const struct config_registers *config_regs) |
602 | { | 602 | { |
603 | dev_t devt; | 603 | dev_t devt; |
604 | struct hwicap_drvdata *drvdata = NULL; | 604 | struct hwicap_drvdata *drvdata = NULL; |
605 | int retval = 0; | 605 | int retval = 0; |
606 | 606 | ||
607 | dev_info(dev, "Xilinx icap port driver\n"); | 607 | dev_info(dev, "Xilinx icap port driver\n"); |
608 | 608 | ||
609 | mutex_lock(&icap_sem); | 609 | mutex_lock(&icap_sem); |
610 | 610 | ||
611 | if (id < 0) { | 611 | if (id < 0) { |
612 | for (id = 0; id < HWICAP_DEVICES; id++) | 612 | for (id = 0; id < HWICAP_DEVICES; id++) |
613 | if (!probed_devices[id]) | 613 | if (!probed_devices[id]) |
614 | break; | 614 | break; |
615 | } | 615 | } |
616 | if (id < 0 || id >= HWICAP_DEVICES) { | 616 | if (id < 0 || id >= HWICAP_DEVICES) { |
617 | mutex_unlock(&icap_sem); | 617 | mutex_unlock(&icap_sem); |
618 | dev_err(dev, "%s%i too large\n", DRIVER_NAME, id); | 618 | dev_err(dev, "%s%i too large\n", DRIVER_NAME, id); |
619 | return -EINVAL; | 619 | return -EINVAL; |
620 | } | 620 | } |
621 | if (probed_devices[id]) { | 621 | if (probed_devices[id]) { |
622 | mutex_unlock(&icap_sem); | 622 | mutex_unlock(&icap_sem); |
623 | dev_err(dev, "cannot assign to %s%i; it is already in use\n", | 623 | dev_err(dev, "cannot assign to %s%i; it is already in use\n", |
624 | DRIVER_NAME, id); | 624 | DRIVER_NAME, id); |
625 | return -EBUSY; | 625 | return -EBUSY; |
626 | } | 626 | } |
627 | 627 | ||
628 | probed_devices[id] = 1; | 628 | probed_devices[id] = 1; |
629 | mutex_unlock(&icap_sem); | 629 | mutex_unlock(&icap_sem); |
630 | 630 | ||
631 | devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR + id); | 631 | devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR + id); |
632 | 632 | ||
633 | drvdata = kzalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL); | 633 | drvdata = kzalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL); |
634 | if (!drvdata) { | 634 | if (!drvdata) { |
635 | dev_err(dev, "Couldn't allocate device private record\n"); | 635 | dev_err(dev, "Couldn't allocate device private record\n"); |
636 | retval = -ENOMEM; | 636 | retval = -ENOMEM; |
637 | goto failed0; | 637 | goto failed0; |
638 | } | 638 | } |
639 | dev_set_drvdata(dev, (void *)drvdata); | 639 | dev_set_drvdata(dev, (void *)drvdata); |
640 | 640 | ||
641 | if (!regs_res) { | 641 | if (!regs_res) { |
642 | dev_err(dev, "Couldn't get registers resource\n"); | 642 | dev_err(dev, "Couldn't get registers resource\n"); |
643 | retval = -EFAULT; | 643 | retval = -EFAULT; |
644 | goto failed1; | 644 | goto failed1; |
645 | } | 645 | } |
646 | 646 | ||
647 | drvdata->mem_start = regs_res->start; | 647 | drvdata->mem_start = regs_res->start; |
648 | drvdata->mem_end = regs_res->end; | 648 | drvdata->mem_end = regs_res->end; |
649 | drvdata->mem_size = resource_size(regs_res); | 649 | drvdata->mem_size = resource_size(regs_res); |
650 | 650 | ||
651 | if (!request_mem_region(drvdata->mem_start, | 651 | if (!request_mem_region(drvdata->mem_start, |
652 | drvdata->mem_size, DRIVER_NAME)) { | 652 | drvdata->mem_size, DRIVER_NAME)) { |
653 | dev_err(dev, "Couldn't lock memory region at %Lx\n", | 653 | dev_err(dev, "Couldn't lock memory region at %Lx\n", |
654 | (unsigned long long) regs_res->start); | 654 | (unsigned long long) regs_res->start); |
655 | retval = -EBUSY; | 655 | retval = -EBUSY; |
656 | goto failed1; | 656 | goto failed1; |
657 | } | 657 | } |
658 | 658 | ||
659 | drvdata->devt = devt; | 659 | drvdata->devt = devt; |
660 | drvdata->dev = dev; | 660 | drvdata->dev = dev; |
661 | drvdata->base_address = ioremap(drvdata->mem_start, drvdata->mem_size); | 661 | drvdata->base_address = ioremap(drvdata->mem_start, drvdata->mem_size); |
662 | if (!drvdata->base_address) { | 662 | if (!drvdata->base_address) { |
663 | dev_err(dev, "ioremap() failed\n"); | 663 | dev_err(dev, "ioremap() failed\n"); |
664 | goto failed2; | 664 | goto failed2; |
665 | } | 665 | } |
666 | 666 | ||
667 | drvdata->config = config; | 667 | drvdata->config = config; |
668 | drvdata->config_regs = config_regs; | 668 | drvdata->config_regs = config_regs; |
669 | 669 | ||
670 | mutex_init(&drvdata->sem); | 670 | mutex_init(&drvdata->sem); |
671 | drvdata->is_open = 0; | 671 | drvdata->is_open = 0; |
672 | 672 | ||
673 | dev_info(dev, "ioremap %llx to %p with size %llx\n", | 673 | dev_info(dev, "ioremap %llx to %p with size %llx\n", |
674 | (unsigned long long) drvdata->mem_start, | 674 | (unsigned long long) drvdata->mem_start, |
675 | drvdata->base_address, | 675 | drvdata->base_address, |
676 | (unsigned long long) drvdata->mem_size); | 676 | (unsigned long long) drvdata->mem_size); |
677 | 677 | ||
678 | cdev_init(&drvdata->cdev, &hwicap_fops); | 678 | cdev_init(&drvdata->cdev, &hwicap_fops); |
679 | drvdata->cdev.owner = THIS_MODULE; | 679 | drvdata->cdev.owner = THIS_MODULE; |
680 | retval = cdev_add(&drvdata->cdev, devt, 1); | 680 | retval = cdev_add(&drvdata->cdev, devt, 1); |
681 | if (retval) { | 681 | if (retval) { |
682 | dev_err(dev, "cdev_add() failed\n"); | 682 | dev_err(dev, "cdev_add() failed\n"); |
683 | goto failed3; | 683 | goto failed3; |
684 | } | 684 | } |
685 | 685 | ||
686 | device_create(icap_class, dev, devt, NULL, "%s%d", DRIVER_NAME, id); | 686 | device_create(icap_class, dev, devt, NULL, "%s%d", DRIVER_NAME, id); |
687 | return 0; /* success */ | 687 | return 0; /* success */ |
688 | 688 | ||
689 | failed3: | 689 | failed3: |
690 | iounmap(drvdata->base_address); | 690 | iounmap(drvdata->base_address); |
691 | 691 | ||
692 | failed2: | 692 | failed2: |
693 | release_mem_region(regs_res->start, drvdata->mem_size); | 693 | release_mem_region(regs_res->start, drvdata->mem_size); |
694 | 694 | ||
695 | failed1: | 695 | failed1: |
696 | kfree(drvdata); | 696 | kfree(drvdata); |
697 | 697 | ||
698 | failed0: | 698 | failed0: |
699 | mutex_lock(&icap_sem); | 699 | mutex_lock(&icap_sem); |
700 | probed_devices[id] = 0; | 700 | probed_devices[id] = 0; |
701 | mutex_unlock(&icap_sem); | 701 | mutex_unlock(&icap_sem); |
702 | 702 | ||
703 | return retval; | 703 | return retval; |
704 | } | 704 | } |
705 | 705 | ||
706 | static struct hwicap_driver_config buffer_icap_config = { | 706 | static struct hwicap_driver_config buffer_icap_config = { |
707 | .get_configuration = buffer_icap_get_configuration, | 707 | .get_configuration = buffer_icap_get_configuration, |
708 | .set_configuration = buffer_icap_set_configuration, | 708 | .set_configuration = buffer_icap_set_configuration, |
709 | .get_status = buffer_icap_get_status, | 709 | .get_status = buffer_icap_get_status, |
710 | .reset = buffer_icap_reset, | 710 | .reset = buffer_icap_reset, |
711 | }; | 711 | }; |
712 | 712 | ||
713 | static struct hwicap_driver_config fifo_icap_config = { | 713 | static struct hwicap_driver_config fifo_icap_config = { |
714 | .get_configuration = fifo_icap_get_configuration, | 714 | .get_configuration = fifo_icap_get_configuration, |
715 | .set_configuration = fifo_icap_set_configuration, | 715 | .set_configuration = fifo_icap_set_configuration, |
716 | .get_status = fifo_icap_get_status, | 716 | .get_status = fifo_icap_get_status, |
717 | .reset = fifo_icap_reset, | 717 | .reset = fifo_icap_reset, |
718 | }; | 718 | }; |
719 | 719 | ||
720 | static int __devexit hwicap_remove(struct device *dev) | 720 | static int __devexit hwicap_remove(struct device *dev) |
721 | { | 721 | { |
722 | struct hwicap_drvdata *drvdata; | 722 | struct hwicap_drvdata *drvdata; |
723 | 723 | ||
724 | drvdata = (struct hwicap_drvdata *)dev_get_drvdata(dev); | 724 | drvdata = (struct hwicap_drvdata *)dev_get_drvdata(dev); |
725 | 725 | ||
726 | if (!drvdata) | 726 | if (!drvdata) |
727 | return 0; | 727 | return 0; |
728 | 728 | ||
729 | device_destroy(icap_class, drvdata->devt); | 729 | device_destroy(icap_class, drvdata->devt); |
730 | cdev_del(&drvdata->cdev); | 730 | cdev_del(&drvdata->cdev); |
731 | iounmap(drvdata->base_address); | 731 | iounmap(drvdata->base_address); |
732 | release_mem_region(drvdata->mem_start, drvdata->mem_size); | 732 | release_mem_region(drvdata->mem_start, drvdata->mem_size); |
733 | kfree(drvdata); | 733 | kfree(drvdata); |
734 | dev_set_drvdata(dev, NULL); | 734 | dev_set_drvdata(dev, NULL); |
735 | 735 | ||
736 | mutex_lock(&icap_sem); | 736 | mutex_lock(&icap_sem); |
737 | probed_devices[MINOR(dev->devt)-XHWICAP_MINOR] = 0; | 737 | probed_devices[MINOR(dev->devt)-XHWICAP_MINOR] = 0; |
738 | mutex_unlock(&icap_sem); | 738 | mutex_unlock(&icap_sem); |
739 | return 0; /* success */ | 739 | return 0; /* success */ |
740 | } | 740 | } |
741 | 741 | ||
742 | #ifdef CONFIG_OF | 742 | #ifdef CONFIG_OF |
743 | static int __devinit hwicap_of_probe(struct platform_device *op, | 743 | static int hwicap_of_probe(struct platform_device *op, |
744 | const struct hwicap_driver_config *config) | 744 | const struct hwicap_driver_config *config) |
745 | { | 745 | { |
746 | struct resource res; | 746 | struct resource res; |
747 | const unsigned int *id; | 747 | const unsigned int *id; |
748 | const char *family; | 748 | const char *family; |
749 | int rc; | 749 | int rc; |
750 | const struct config_registers *regs; | 750 | const struct config_registers *regs; |
751 | 751 | ||
752 | 752 | ||
753 | rc = of_address_to_resource(op->dev.of_node, 0, &res); | 753 | rc = of_address_to_resource(op->dev.of_node, 0, &res); |
754 | if (rc) { | 754 | if (rc) { |
755 | dev_err(&op->dev, "invalid address\n"); | 755 | dev_err(&op->dev, "invalid address\n"); |
756 | return rc; | 756 | return rc; |
757 | } | 757 | } |
758 | 758 | ||
759 | id = of_get_property(op->dev.of_node, "port-number", NULL); | 759 | id = of_get_property(op->dev.of_node, "port-number", NULL); |
760 | 760 | ||
761 | /* If the family is not specified, it's most likely that we're | 761 | /* If the family is not specified, it's most likely that we're |
762 | using V4 */ | 762 | using V4 */ |
763 | regs = &v4_config_registers; | 763 | regs = &v4_config_registers; |
764 | family = of_get_property(op->dev.of_node, "xlnx,family", NULL); | 764 | family = of_get_property(op->dev.of_node, "xlnx,family", NULL); |
765 | 765 | ||
766 | if (family) { | 766 | if (family) { |
767 | if (!strcmp(family, "virtex2p")) { | 767 | if (!strcmp(family, "virtex2p")) { |
768 | regs = &v2_config_registers; | 768 | regs = &v2_config_registers; |
769 | } else if (!strcmp(family, "virtex4")) { | 769 | } else if (!strcmp(family, "virtex4")) { |
770 | regs = &v4_config_registers; | 770 | regs = &v4_config_registers; |
771 | } else if (!strcmp(family, "virtex5")) { | 771 | } else if (!strcmp(family, "virtex5")) { |
772 | regs = &v5_config_registers; | 772 | regs = &v5_config_registers; |
773 | } else if (!strcmp(family, "virtex6")) { | 773 | } else if (!strcmp(family, "virtex6")) { |
774 | regs = &v6_config_registers; | 774 | regs = &v6_config_registers; |
775 | } | 775 | } |
776 | } | 776 | } |
777 | return hwicap_setup(&op->dev, id ? *id : -1, &res, config, | 777 | return hwicap_setup(&op->dev, id ? *id : -1, &res, config, |
778 | regs); | 778 | regs); |
779 | } | 779 | } |
780 | #else | 780 | #else |
781 | static inline int hwicap_of_probe(struct platform_device *op, | 781 | static inline int hwicap_of_probe(struct platform_device *op, |
782 | const struct hwicap_driver_config *config) | 782 | const struct hwicap_driver_config *config) |
783 | { | 783 | { |
784 | return -EINVAL; | 784 | return -EINVAL; |
785 | } | 785 | } |
786 | #endif /* CONFIG_OF */ | 786 | #endif /* CONFIG_OF */ |
787 | 787 | ||
788 | static const struct of_device_id __devinitconst hwicap_of_match[]; | 788 | static const struct of_device_id __devinitconst hwicap_of_match[]; |
789 | static int __devinit hwicap_drv_probe(struct platform_device *pdev) | 789 | static int hwicap_drv_probe(struct platform_device *pdev) |
790 | { | 790 | { |
791 | const struct of_device_id *match; | 791 | const struct of_device_id *match; |
792 | struct resource *res; | 792 | struct resource *res; |
793 | const struct config_registers *regs; | 793 | const struct config_registers *regs; |
794 | const char *family; | 794 | const char *family; |
795 | 795 | ||
796 | match = of_match_device(hwicap_of_match, &pdev->dev); | 796 | match = of_match_device(hwicap_of_match, &pdev->dev); |
797 | if (match) | 797 | if (match) |
798 | return hwicap_of_probe(pdev, match->data); | 798 | return hwicap_of_probe(pdev, match->data); |
799 | 799 | ||
800 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 800 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
801 | if (!res) | 801 | if (!res) |
802 | return -ENODEV; | 802 | return -ENODEV; |
803 | 803 | ||
804 | /* It's most likely that we're using V4, if the family is not | 804 | /* It's most likely that we're using V4, if the family is not |
805 | specified */ | 805 | specified */ |
806 | regs = &v4_config_registers; | 806 | regs = &v4_config_registers; |
807 | family = pdev->dev.platform_data; | 807 | family = pdev->dev.platform_data; |
808 | 808 | ||
809 | if (family) { | 809 | if (family) { |
810 | if (!strcmp(family, "virtex2p")) { | 810 | if (!strcmp(family, "virtex2p")) { |
811 | regs = &v2_config_registers; | 811 | regs = &v2_config_registers; |
812 | } else if (!strcmp(family, "virtex4")) { | 812 | } else if (!strcmp(family, "virtex4")) { |
813 | regs = &v4_config_registers; | 813 | regs = &v4_config_registers; |
814 | } else if (!strcmp(family, "virtex5")) { | 814 | } else if (!strcmp(family, "virtex5")) { |
815 | regs = &v5_config_registers; | 815 | regs = &v5_config_registers; |
816 | } else if (!strcmp(family, "virtex6")) { | 816 | } else if (!strcmp(family, "virtex6")) { |
817 | regs = &v6_config_registers; | 817 | regs = &v6_config_registers; |
818 | } | 818 | } |
819 | } | 819 | } |
820 | 820 | ||
821 | return hwicap_setup(&pdev->dev, pdev->id, res, | 821 | return hwicap_setup(&pdev->dev, pdev->id, res, |
822 | &buffer_icap_config, regs); | 822 | &buffer_icap_config, regs); |
823 | } | 823 | } |
824 | 824 | ||
825 | static int __devexit hwicap_drv_remove(struct platform_device *pdev) | 825 | static int __devexit hwicap_drv_remove(struct platform_device *pdev) |
826 | { | 826 | { |
827 | return hwicap_remove(&pdev->dev); | 827 | return hwicap_remove(&pdev->dev); |
828 | } | 828 | } |
829 | 829 | ||
830 | #ifdef CONFIG_OF | 830 | #ifdef CONFIG_OF |
831 | /* Match table for device tree binding */ | 831 | /* Match table for device tree binding */ |
832 | static const struct of_device_id __devinitconst hwicap_of_match[] = { | 832 | static const struct of_device_id __devinitconst hwicap_of_match[] = { |
833 | { .compatible = "xlnx,opb-hwicap-1.00.b", .data = &buffer_icap_config}, | 833 | { .compatible = "xlnx,opb-hwicap-1.00.b", .data = &buffer_icap_config}, |
834 | { .compatible = "xlnx,xps-hwicap-1.00.a", .data = &fifo_icap_config}, | 834 | { .compatible = "xlnx,xps-hwicap-1.00.a", .data = &fifo_icap_config}, |
835 | {}, | 835 | {}, |
836 | }; | 836 | }; |
837 | MODULE_DEVICE_TABLE(of, hwicap_of_match); | 837 | MODULE_DEVICE_TABLE(of, hwicap_of_match); |
838 | #else | 838 | #else |
839 | #define hwicap_of_match NULL | 839 | #define hwicap_of_match NULL |
840 | #endif | 840 | #endif |
841 | 841 | ||
842 | static struct platform_driver hwicap_platform_driver = { | 842 | static struct platform_driver hwicap_platform_driver = { |
843 | .probe = hwicap_drv_probe, | 843 | .probe = hwicap_drv_probe, |
844 | .remove = hwicap_drv_remove, | 844 | .remove = hwicap_drv_remove, |
845 | .driver = { | 845 | .driver = { |
846 | .owner = THIS_MODULE, | 846 | .owner = THIS_MODULE, |
847 | .name = DRIVER_NAME, | 847 | .name = DRIVER_NAME, |
848 | .of_match_table = hwicap_of_match, | 848 | .of_match_table = hwicap_of_match, |
849 | }, | 849 | }, |
850 | }; | 850 | }; |
851 | 851 | ||
852 | static int __init hwicap_module_init(void) | 852 | static int __init hwicap_module_init(void) |
853 | { | 853 | { |
854 | dev_t devt; | 854 | dev_t devt; |
855 | int retval; | 855 | int retval; |
856 | 856 | ||
857 | icap_class = class_create(THIS_MODULE, "xilinx_config"); | 857 | icap_class = class_create(THIS_MODULE, "xilinx_config"); |
858 | mutex_init(&icap_sem); | 858 | mutex_init(&icap_sem); |
859 | 859 | ||
860 | devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR); | 860 | devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR); |
861 | retval = register_chrdev_region(devt, | 861 | retval = register_chrdev_region(devt, |
862 | HWICAP_DEVICES, | 862 | HWICAP_DEVICES, |
863 | DRIVER_NAME); | 863 | DRIVER_NAME); |
864 | if (retval < 0) | 864 | if (retval < 0) |
865 | return retval; | 865 | return retval; |
866 | 866 | ||
867 | retval = platform_driver_register(&hwicap_platform_driver); | 867 | retval = platform_driver_register(&hwicap_platform_driver); |
868 | if (retval) | 868 | if (retval) |
869 | goto failed; | 869 | goto failed; |
870 | 870 | ||
871 | return retval; | 871 | return retval; |
872 | 872 | ||
873 | failed: | 873 | failed: |
874 | unregister_chrdev_region(devt, HWICAP_DEVICES); | 874 | unregister_chrdev_region(devt, HWICAP_DEVICES); |
875 | 875 | ||
876 | return retval; | 876 | return retval; |
877 | } | 877 | } |
878 | 878 | ||
879 | static void __exit hwicap_module_cleanup(void) | 879 | static void __exit hwicap_module_cleanup(void) |
880 | { | 880 | { |
881 | dev_t devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR); | 881 | dev_t devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR); |
882 | 882 | ||
883 | class_destroy(icap_class); | 883 | class_destroy(icap_class); |
884 | 884 | ||
885 | platform_driver_unregister(&hwicap_platform_driver); | 885 | platform_driver_unregister(&hwicap_platform_driver); |
886 | 886 | ||
887 | unregister_chrdev_region(devt, HWICAP_DEVICES); | 887 | unregister_chrdev_region(devt, HWICAP_DEVICES); |
888 | } | 888 | } |
889 | 889 | ||
890 | module_init(hwicap_module_init); | 890 | module_init(hwicap_module_init); |
891 | module_exit(hwicap_module_cleanup); | 891 | module_exit(hwicap_module_cleanup); |
892 | 892 | ||
893 | MODULE_AUTHOR("Xilinx, Inc; Xilinx Research Labs Group"); | 893 | MODULE_AUTHOR("Xilinx, Inc; Xilinx Research Labs Group"); |
894 | MODULE_DESCRIPTION("Xilinx ICAP Port Driver"); | 894 | MODULE_DESCRIPTION("Xilinx ICAP Port Driver"); |
895 | MODULE_LICENSE("GPL"); | 895 | MODULE_LICENSE("GPL"); |
896 | 896 | ||
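For readers skimming the hunk above, the state it leaves behind in this file is: the probe entry points (hwicap_of_probe, hwicap_drv_probe) no longer carry __devinit, while hwicap_remove/hwicap_drv_remove are still annotated __devexit and the hwicap_of_match table is still annotated __devinitconst. The sketch below is a minimal, hedged illustration of that resulting shape only; the example_* names, the compatible string, and the use of module_platform_driver() are illustrative assumptions, not part of the hwicap driver itself.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>

/*
 * Hypothetical driver used only to illustrate the annotation state left
 * by this hunk: probe callbacks carry no section marker, the remove
 * callback is still __devexit, and the OF match table is still
 * __devinitconst.
 */
static int example_probe(struct platform_device *pdev)
{
        /* probe path is no longer marked __devinit */
        return 0;
}

static int __devexit example_remove(struct platform_device *pdev)
{
        return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id __devinitconst example_of_match[] = {
        { .compatible = "example,device-1.00.a" },
        {},
};
MODULE_DEVICE_TABLE(of, example_of_match);
#else
#define example_of_match NULL
#endif

static struct platform_driver example_driver = {
        .probe  = example_probe,
        .remove = example_remove,
        .driver = {
                .owner          = THIS_MODULE,
                .name           = "example",
                .of_match_table = example_of_match,
        },
};

module_platform_driver(example_driver);

MODULE_LICENSE("GPL");

Compared with the file above, the only structural simplification in this sketch is module_platform_driver(); the hwicap driver keeps its explicit hwicap_module_init()/hwicap_module_cleanup() pair because, as shown in the diff, it also has a chrdev region and a device class to register and tear down around the platform driver.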