Commit 4c418ba9695a24917a1fcfa48f7db3fd76337eb7

Authored by Doe, YiCheng
Committed by Linus Torvalds
1 parent 4afcc10a65

ipmi: Fix IPMI errors due to timing problems

This patch fixes an issue in the OpenIPMI module where an ABORT command is
sometimes sent after an IPMI request has been sent to the BMC, causing that
request to fail.
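
The failure mode, as described by the comment this patch adds: the periodic
smi_timeout() handler passes an elapsed-time value to smi_event_handler(),
derived from smi_info->last_timeout_jiffies (the time the last timeout
occurred). If sender() re-arms the timer without refreshing that timestamp,
the first tick after an idle stretch can report a huge elapsed time, which
can cause the command that was just queued to be aborted. The fix is to set
last_timeout_jiffies = jiffies in sender() before mod_timer() re-arms the SI
timer (the eight added lines, new lines 903-910 in the listing below). The
snippet that follows is a rough, stand-alone C model of that arithmetic; the
SI_* constants mirror the driver, but HZ, the sample numbers, and the program
itself are illustrative assumptions, not kernel code:

/*
 * Hypothetical model of the timing problem: how a stale
 * last_timeout_jiffies turns into a very large elapsed time.
 */
#include <stdio.h>

#define HZ                   250              /* assumed tick rate */
#define SI_USEC_PER_JIFFY    (1000000 / HZ)
#define SI_TIMEOUT_TIME_USEC 10000            /* driver timer period, ~10 ms */

static long elapsed_usec(unsigned long now, unsigned long last)
{
	/* elapsed jiffies converted to microseconds */
	return (long)(now - last) * SI_USEC_PER_JIFFY;
}

int main(void)
{
	unsigned long jiffies = 1000000;                        /* when sender() runs */
	unsigned long last_timeout_jiffies = jiffies - 5 * HZ;  /* last tick ~5 s ago */
	unsigned long next_tick = jiffies + SI_TIMEOUT_TIME_USEC / SI_USEC_PER_JIFFY;

	/* Without the patch: the next tick reports the whole idle gap. */
	printf("stale timestamp: %ld us\n",
	       elapsed_usec(next_tick, last_timeout_jiffies));

	/* With the patch: sender() refreshes the timestamp first. */
	printf("refreshed      : %ld us\n",
	       elapsed_usec(next_tick, jiffies));

	return 0;
}

With a stale timestamp the state machine is told that several seconds have
elapsed instead of one 10 ms period, which is the "very large time_diff"
the added comment refers to.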

Signed-off-by: YiCheng Doe <yicheng.doe@hp.com>
Signed-off-by: Corey Minyard <cminyard@mvista.com>
Acked-by: Tom Mingarelli <thomas.mingarelli@hp.com>
Tested-by: Andy Cress <andy.cress@us.kontron.com>
Tested-by: Mika Lansirinne <Mika.Lansirinne@stonesoft.com>
Tested-by: Brian De Wolf <bldewolf@csupomona.edu>
Cc: Jean Michel Audet <Jean-Michel.Audet@ca.Kontron.com>
Cc: Jozef Sudolsky <jozef.sudolsky@elbiahosting.sk>
Acked-by: Matthew Garrett <mjg@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 1 changed file with 8 additions and 0 deletions

drivers/char/ipmi/ipmi_si_intf.c
1 /* 1 /*
2 * ipmi_si.c 2 * ipmi_si.c
3 * 3 *
4 * The interface to the IPMI driver for the system interfaces (KCS, SMIC, 4 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
5 * BT). 5 * BT).
6 * 6 *
7 * Author: MontaVista Software, Inc. 7 * Author: MontaVista Software, Inc.
8 * Corey Minyard <minyard@mvista.com> 8 * Corey Minyard <minyard@mvista.com>
9 * source@mvista.com 9 * source@mvista.com
10 * 10 *
11 * Copyright 2002 MontaVista Software Inc. 11 * Copyright 2002 MontaVista Software Inc.
12 * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com> 12 * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
13 * 13 *
14 * This program is free software; you can redistribute it and/or modify it 14 * This program is free software; you can redistribute it and/or modify it
15 * under the terms of the GNU General Public License as published by the 15 * under the terms of the GNU General Public License as published by the
16 * Free Software Foundation; either version 2 of the License, or (at your 16 * Free Software Foundation; either version 2 of the License, or (at your
17 * option) any later version. 17 * option) any later version.
18 * 18 *
19 * 19 *
20 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 20 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
21 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 21 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
22 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 22 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS 25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
26 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
27 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR 27 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
28 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE 28 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
29 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * 30 *
31 * You should have received a copy of the GNU General Public License along 31 * You should have received a copy of the GNU General Public License along
32 * with this program; if not, write to the Free Software Foundation, Inc., 32 * with this program; if not, write to the Free Software Foundation, Inc.,
33 * 675 Mass Ave, Cambridge, MA 02139, USA. 33 * 675 Mass Ave, Cambridge, MA 02139, USA.
34 */ 34 */
35 35
36 /* 36 /*
37 * This file holds the "policy" for the interface to the SMI state 37 * This file holds the "policy" for the interface to the SMI state
38 * machine. It does the configuration, handles timers and interrupts, 38 * machine. It does the configuration, handles timers and interrupts,
39 * and drives the real SMI state machine. 39 * and drives the real SMI state machine.
40 */ 40 */
41 41
42 #include <linux/module.h> 42 #include <linux/module.h>
43 #include <linux/moduleparam.h> 43 #include <linux/moduleparam.h>
44 #include <asm/system.h> 44 #include <asm/system.h>
45 #include <linux/sched.h> 45 #include <linux/sched.h>
46 #include <linux/timer.h> 46 #include <linux/timer.h>
47 #include <linux/errno.h> 47 #include <linux/errno.h>
48 #include <linux/spinlock.h> 48 #include <linux/spinlock.h>
49 #include <linux/slab.h> 49 #include <linux/slab.h>
50 #include <linux/delay.h> 50 #include <linux/delay.h>
51 #include <linux/list.h> 51 #include <linux/list.h>
52 #include <linux/pci.h> 52 #include <linux/pci.h>
53 #include <linux/ioport.h> 53 #include <linux/ioport.h>
54 #include <linux/notifier.h> 54 #include <linux/notifier.h>
55 #include <linux/mutex.h> 55 #include <linux/mutex.h>
56 #include <linux/kthread.h> 56 #include <linux/kthread.h>
57 #include <asm/irq.h> 57 #include <asm/irq.h>
58 #include <linux/interrupt.h> 58 #include <linux/interrupt.h>
59 #include <linux/rcupdate.h> 59 #include <linux/rcupdate.h>
60 #include <linux/ipmi.h> 60 #include <linux/ipmi.h>
61 #include <linux/ipmi_smi.h> 61 #include <linux/ipmi_smi.h>
62 #include <asm/io.h> 62 #include <asm/io.h>
63 #include "ipmi_si_sm.h" 63 #include "ipmi_si_sm.h"
64 #include <linux/init.h> 64 #include <linux/init.h>
65 #include <linux/dmi.h> 65 #include <linux/dmi.h>
66 #include <linux/string.h> 66 #include <linux/string.h>
67 #include <linux/ctype.h> 67 #include <linux/ctype.h>
68 #include <linux/pnp.h> 68 #include <linux/pnp.h>
69 69
70 #ifdef CONFIG_PPC_OF 70 #ifdef CONFIG_PPC_OF
71 #include <linux/of_device.h> 71 #include <linux/of_device.h>
72 #include <linux/of_platform.h> 72 #include <linux/of_platform.h>
73 #include <linux/of_address.h> 73 #include <linux/of_address.h>
74 #include <linux/of_irq.h> 74 #include <linux/of_irq.h>
75 #endif 75 #endif
76 76
77 #define PFX "ipmi_si: " 77 #define PFX "ipmi_si: "
78 78
79 /* Measure times between events in the driver. */ 79 /* Measure times between events in the driver. */
80 #undef DEBUG_TIMING 80 #undef DEBUG_TIMING
81 81
82 /* Call every 10 ms. */ 82 /* Call every 10 ms. */
83 #define SI_TIMEOUT_TIME_USEC 10000 83 #define SI_TIMEOUT_TIME_USEC 10000
84 #define SI_USEC_PER_JIFFY (1000000/HZ) 84 #define SI_USEC_PER_JIFFY (1000000/HZ)
85 #define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY) 85 #define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
86 #define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a 86 #define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a
87 short timeout */ 87 short timeout */
88 88
89 enum si_intf_state { 89 enum si_intf_state {
90 SI_NORMAL, 90 SI_NORMAL,
91 SI_GETTING_FLAGS, 91 SI_GETTING_FLAGS,
92 SI_GETTING_EVENTS, 92 SI_GETTING_EVENTS,
93 SI_CLEARING_FLAGS, 93 SI_CLEARING_FLAGS,
94 SI_CLEARING_FLAGS_THEN_SET_IRQ, 94 SI_CLEARING_FLAGS_THEN_SET_IRQ,
95 SI_GETTING_MESSAGES, 95 SI_GETTING_MESSAGES,
96 SI_ENABLE_INTERRUPTS1, 96 SI_ENABLE_INTERRUPTS1,
97 SI_ENABLE_INTERRUPTS2, 97 SI_ENABLE_INTERRUPTS2,
98 SI_DISABLE_INTERRUPTS1, 98 SI_DISABLE_INTERRUPTS1,
99 SI_DISABLE_INTERRUPTS2 99 SI_DISABLE_INTERRUPTS2
100 /* FIXME - add watchdog stuff. */ 100 /* FIXME - add watchdog stuff. */
101 }; 101 };
102 102
103 /* Some BT-specific defines we need here. */ 103 /* Some BT-specific defines we need here. */
104 #define IPMI_BT_INTMASK_REG 2 104 #define IPMI_BT_INTMASK_REG 2
105 #define IPMI_BT_INTMASK_CLEAR_IRQ_BIT 2 105 #define IPMI_BT_INTMASK_CLEAR_IRQ_BIT 2
106 #define IPMI_BT_INTMASK_ENABLE_IRQ_BIT 1 106 #define IPMI_BT_INTMASK_ENABLE_IRQ_BIT 1
107 107
108 enum si_type { 108 enum si_type {
109 SI_KCS, SI_SMIC, SI_BT 109 SI_KCS, SI_SMIC, SI_BT
110 }; 110 };
111 static char *si_to_str[] = { "kcs", "smic", "bt" }; 111 static char *si_to_str[] = { "kcs", "smic", "bt" };
112 112
113 static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI", 113 static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI",
114 "ACPI", "SMBIOS", "PCI", 114 "ACPI", "SMBIOS", "PCI",
115 "device-tree", "default" }; 115 "device-tree", "default" };
116 116
117 #define DEVICE_NAME "ipmi_si" 117 #define DEVICE_NAME "ipmi_si"
118 118
119 static struct platform_driver ipmi_driver = { 119 static struct platform_driver ipmi_driver = {
120 .driver = { 120 .driver = {
121 .name = DEVICE_NAME, 121 .name = DEVICE_NAME,
122 .bus = &platform_bus_type 122 .bus = &platform_bus_type
123 } 123 }
124 }; 124 };
125 125
126 126
127 /* 127 /*
128 * Indexes into stats[] in smi_info below. 128 * Indexes into stats[] in smi_info below.
129 */ 129 */
130 enum si_stat_indexes { 130 enum si_stat_indexes {
131 /* 131 /*
132 * Number of times the driver requested a timer while an operation 132 * Number of times the driver requested a timer while an operation
133 * was in progress. 133 * was in progress.
134 */ 134 */
135 SI_STAT_short_timeouts = 0, 135 SI_STAT_short_timeouts = 0,
136 136
137 /* 137 /*
138 * Number of times the driver requested a timer while nothing was in 138 * Number of times the driver requested a timer while nothing was in
139 * progress. 139 * progress.
140 */ 140 */
141 SI_STAT_long_timeouts, 141 SI_STAT_long_timeouts,
142 142
143 /* Number of times the interface was idle while being polled. */ 143 /* Number of times the interface was idle while being polled. */
144 SI_STAT_idles, 144 SI_STAT_idles,
145 145
146 /* Number of interrupts the driver handled. */ 146 /* Number of interrupts the driver handled. */
147 SI_STAT_interrupts, 147 SI_STAT_interrupts,
148 148
149 /* Number of time the driver got an ATTN from the hardware. */ 149 /* Number of time the driver got an ATTN from the hardware. */
150 SI_STAT_attentions, 150 SI_STAT_attentions,
151 151
152 /* Number of times the driver requested flags from the hardware. */ 152 /* Number of times the driver requested flags from the hardware. */
153 SI_STAT_flag_fetches, 153 SI_STAT_flag_fetches,
154 154
155 /* Number of times the hardware didn't follow the state machine. */ 155 /* Number of times the hardware didn't follow the state machine. */
156 SI_STAT_hosed_count, 156 SI_STAT_hosed_count,
157 157
158 /* Number of completed messages. */ 158 /* Number of completed messages. */
159 SI_STAT_complete_transactions, 159 SI_STAT_complete_transactions,
160 160
161 /* Number of IPMI events received from the hardware. */ 161 /* Number of IPMI events received from the hardware. */
162 SI_STAT_events, 162 SI_STAT_events,
163 163
164 /* Number of watchdog pretimeouts. */ 164 /* Number of watchdog pretimeouts. */
165 SI_STAT_watchdog_pretimeouts, 165 SI_STAT_watchdog_pretimeouts,
166 166
167 /* Number of asyncronous messages received. */ 167 /* Number of asyncronous messages received. */
168 SI_STAT_incoming_messages, 168 SI_STAT_incoming_messages,
169 169
170 170
171 /* This *must* remain last, add new values above this. */ 171 /* This *must* remain last, add new values above this. */
172 SI_NUM_STATS 172 SI_NUM_STATS
173 }; 173 };
174 174
175 struct smi_info { 175 struct smi_info {
176 int intf_num; 176 int intf_num;
177 ipmi_smi_t intf; 177 ipmi_smi_t intf;
178 struct si_sm_data *si_sm; 178 struct si_sm_data *si_sm;
179 struct si_sm_handlers *handlers; 179 struct si_sm_handlers *handlers;
180 enum si_type si_type; 180 enum si_type si_type;
181 spinlock_t si_lock; 181 spinlock_t si_lock;
182 spinlock_t msg_lock; 182 spinlock_t msg_lock;
183 struct list_head xmit_msgs; 183 struct list_head xmit_msgs;
184 struct list_head hp_xmit_msgs; 184 struct list_head hp_xmit_msgs;
185 struct ipmi_smi_msg *curr_msg; 185 struct ipmi_smi_msg *curr_msg;
186 enum si_intf_state si_state; 186 enum si_intf_state si_state;
187 187
188 /* 188 /*
189 * Used to handle the various types of I/O that can occur with 189 * Used to handle the various types of I/O that can occur with
190 * IPMI 190 * IPMI
191 */ 191 */
192 struct si_sm_io io; 192 struct si_sm_io io;
193 int (*io_setup)(struct smi_info *info); 193 int (*io_setup)(struct smi_info *info);
194 void (*io_cleanup)(struct smi_info *info); 194 void (*io_cleanup)(struct smi_info *info);
195 int (*irq_setup)(struct smi_info *info); 195 int (*irq_setup)(struct smi_info *info);
196 void (*irq_cleanup)(struct smi_info *info); 196 void (*irq_cleanup)(struct smi_info *info);
197 unsigned int io_size; 197 unsigned int io_size;
198 enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */ 198 enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
199 void (*addr_source_cleanup)(struct smi_info *info); 199 void (*addr_source_cleanup)(struct smi_info *info);
200 void *addr_source_data; 200 void *addr_source_data;
201 201
202 /* 202 /*
203 * Per-OEM handler, called from handle_flags(). Returns 1 203 * Per-OEM handler, called from handle_flags(). Returns 1
204 * when handle_flags() needs to be re-run or 0 indicating it 204 * when handle_flags() needs to be re-run or 0 indicating it
205 * set si_state itself. 205 * set si_state itself.
206 */ 206 */
207 int (*oem_data_avail_handler)(struct smi_info *smi_info); 207 int (*oem_data_avail_handler)(struct smi_info *smi_info);
208 208
209 /* 209 /*
210 * Flags from the last GET_MSG_FLAGS command, used when an ATTN 210 * Flags from the last GET_MSG_FLAGS command, used when an ATTN
211 * is set to hold the flags until we are done handling everything 211 * is set to hold the flags until we are done handling everything
212 * from the flags. 212 * from the flags.
213 */ 213 */
214 #define RECEIVE_MSG_AVAIL 0x01 214 #define RECEIVE_MSG_AVAIL 0x01
215 #define EVENT_MSG_BUFFER_FULL 0x02 215 #define EVENT_MSG_BUFFER_FULL 0x02
216 #define WDT_PRE_TIMEOUT_INT 0x08 216 #define WDT_PRE_TIMEOUT_INT 0x08
217 #define OEM0_DATA_AVAIL 0x20 217 #define OEM0_DATA_AVAIL 0x20
218 #define OEM1_DATA_AVAIL 0x40 218 #define OEM1_DATA_AVAIL 0x40
219 #define OEM2_DATA_AVAIL 0x80 219 #define OEM2_DATA_AVAIL 0x80
220 #define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \ 220 #define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \
221 OEM1_DATA_AVAIL | \ 221 OEM1_DATA_AVAIL | \
222 OEM2_DATA_AVAIL) 222 OEM2_DATA_AVAIL)
223 unsigned char msg_flags; 223 unsigned char msg_flags;
224 224
225 /* Does the BMC have an event buffer? */ 225 /* Does the BMC have an event buffer? */
226 char has_event_buffer; 226 char has_event_buffer;
227 227
228 /* 228 /*
229 * If set to true, this will request events the next time the 229 * If set to true, this will request events the next time the
230 * state machine is idle. 230 * state machine is idle.
231 */ 231 */
232 atomic_t req_events; 232 atomic_t req_events;
233 233
234 /* 234 /*
235 * If true, run the state machine to completion on every send 235 * If true, run the state machine to completion on every send
236 * call. Generally used after a panic to make sure stuff goes 236 * call. Generally used after a panic to make sure stuff goes
237 * out. 237 * out.
238 */ 238 */
239 int run_to_completion; 239 int run_to_completion;
240 240
241 /* The I/O port of an SI interface. */ 241 /* The I/O port of an SI interface. */
242 int port; 242 int port;
243 243
244 /* 244 /*
245 * The space between start addresses of the two ports. For 245 * The space between start addresses of the two ports. For
246 * instance, if the first port is 0xca2 and the spacing is 4, then 246 * instance, if the first port is 0xca2 and the spacing is 4, then
247 * the second port is 0xca6. 247 * the second port is 0xca6.
248 */ 248 */
249 unsigned int spacing; 249 unsigned int spacing;
250 250
251 /* zero if no irq; */ 251 /* zero if no irq; */
252 int irq; 252 int irq;
253 253
254 /* The timer for this si. */ 254 /* The timer for this si. */
255 struct timer_list si_timer; 255 struct timer_list si_timer;
256 256
257 /* The time (in jiffies) the last timeout occurred at. */ 257 /* The time (in jiffies) the last timeout occurred at. */
258 unsigned long last_timeout_jiffies; 258 unsigned long last_timeout_jiffies;
259 259
260 /* Used to gracefully stop the timer without race conditions. */ 260 /* Used to gracefully stop the timer without race conditions. */
261 atomic_t stop_operation; 261 atomic_t stop_operation;
262 262
263 /* 263 /*
264 * The driver will disable interrupts when it gets into a 264 * The driver will disable interrupts when it gets into a
265 * situation where it cannot handle messages due to lack of 265 * situation where it cannot handle messages due to lack of
266 * memory. Once that situation clears up, it will re-enable 266 * memory. Once that situation clears up, it will re-enable
267 * interrupts. 267 * interrupts.
268 */ 268 */
269 int interrupt_disabled; 269 int interrupt_disabled;
270 270
271 /* From the get device id response... */ 271 /* From the get device id response... */
272 struct ipmi_device_id device_id; 272 struct ipmi_device_id device_id;
273 273
274 /* Driver model stuff. */ 274 /* Driver model stuff. */
275 struct device *dev; 275 struct device *dev;
276 struct platform_device *pdev; 276 struct platform_device *pdev;
277 277
278 /* 278 /*
279 * True if we allocated the device, false if it came from 279 * True if we allocated the device, false if it came from
280 * someplace else (like PCI). 280 * someplace else (like PCI).
281 */ 281 */
282 int dev_registered; 282 int dev_registered;
283 283
284 /* Slave address, could be reported from DMI. */ 284 /* Slave address, could be reported from DMI. */
285 unsigned char slave_addr; 285 unsigned char slave_addr;
286 286
287 /* Counters and things for the proc filesystem. */ 287 /* Counters and things for the proc filesystem. */
288 atomic_t stats[SI_NUM_STATS]; 288 atomic_t stats[SI_NUM_STATS];
289 289
290 struct task_struct *thread; 290 struct task_struct *thread;
291 291
292 struct list_head link; 292 struct list_head link;
293 union ipmi_smi_info_union addr_info; 293 union ipmi_smi_info_union addr_info;
294 }; 294 };
295 295
296 #define smi_inc_stat(smi, stat) \ 296 #define smi_inc_stat(smi, stat) \
297 atomic_inc(&(smi)->stats[SI_STAT_ ## stat]) 297 atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
298 #define smi_get_stat(smi, stat) \ 298 #define smi_get_stat(smi, stat) \
299 ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat])) 299 ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
300 300
301 #define SI_MAX_PARMS 4 301 #define SI_MAX_PARMS 4
302 302
303 static int force_kipmid[SI_MAX_PARMS]; 303 static int force_kipmid[SI_MAX_PARMS];
304 static int num_force_kipmid; 304 static int num_force_kipmid;
305 #ifdef CONFIG_PCI 305 #ifdef CONFIG_PCI
306 static int pci_registered; 306 static int pci_registered;
307 #endif 307 #endif
308 #ifdef CONFIG_ACPI 308 #ifdef CONFIG_ACPI
309 static int pnp_registered; 309 static int pnp_registered;
310 #endif 310 #endif
311 #ifdef CONFIG_PPC_OF 311 #ifdef CONFIG_PPC_OF
312 static int of_registered; 312 static int of_registered;
313 #endif 313 #endif
314 314
315 static unsigned int kipmid_max_busy_us[SI_MAX_PARMS]; 315 static unsigned int kipmid_max_busy_us[SI_MAX_PARMS];
316 static int num_max_busy_us; 316 static int num_max_busy_us;
317 317
318 static int unload_when_empty = 1; 318 static int unload_when_empty = 1;
319 319
320 static int add_smi(struct smi_info *smi); 320 static int add_smi(struct smi_info *smi);
321 static int try_smi_init(struct smi_info *smi); 321 static int try_smi_init(struct smi_info *smi);
322 static void cleanup_one_si(struct smi_info *to_clean); 322 static void cleanup_one_si(struct smi_info *to_clean);
323 static void cleanup_ipmi_si(void); 323 static void cleanup_ipmi_si(void);
324 324
325 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list); 325 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
326 static int register_xaction_notifier(struct notifier_block *nb) 326 static int register_xaction_notifier(struct notifier_block *nb)
327 { 327 {
328 return atomic_notifier_chain_register(&xaction_notifier_list, nb); 328 return atomic_notifier_chain_register(&xaction_notifier_list, nb);
329 } 329 }
330 330
331 static void deliver_recv_msg(struct smi_info *smi_info, 331 static void deliver_recv_msg(struct smi_info *smi_info,
332 struct ipmi_smi_msg *msg) 332 struct ipmi_smi_msg *msg)
333 { 333 {
334 /* Deliver the message to the upper layer with the lock 334 /* Deliver the message to the upper layer with the lock
335 released. */ 335 released. */
336 336
337 if (smi_info->run_to_completion) { 337 if (smi_info->run_to_completion) {
338 ipmi_smi_msg_received(smi_info->intf, msg); 338 ipmi_smi_msg_received(smi_info->intf, msg);
339 } else { 339 } else {
340 spin_unlock(&(smi_info->si_lock)); 340 spin_unlock(&(smi_info->si_lock));
341 ipmi_smi_msg_received(smi_info->intf, msg); 341 ipmi_smi_msg_received(smi_info->intf, msg);
342 spin_lock(&(smi_info->si_lock)); 342 spin_lock(&(smi_info->si_lock));
343 } 343 }
344 } 344 }
345 345
346 static void return_hosed_msg(struct smi_info *smi_info, int cCode) 346 static void return_hosed_msg(struct smi_info *smi_info, int cCode)
347 { 347 {
348 struct ipmi_smi_msg *msg = smi_info->curr_msg; 348 struct ipmi_smi_msg *msg = smi_info->curr_msg;
349 349
350 if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED) 350 if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
351 cCode = IPMI_ERR_UNSPECIFIED; 351 cCode = IPMI_ERR_UNSPECIFIED;
352 /* else use it as is */ 352 /* else use it as is */
353 353
354 /* Make it a reponse */ 354 /* Make it a reponse */
355 msg->rsp[0] = msg->data[0] | 4; 355 msg->rsp[0] = msg->data[0] | 4;
356 msg->rsp[1] = msg->data[1]; 356 msg->rsp[1] = msg->data[1];
357 msg->rsp[2] = cCode; 357 msg->rsp[2] = cCode;
358 msg->rsp_size = 3; 358 msg->rsp_size = 3;
359 359
360 smi_info->curr_msg = NULL; 360 smi_info->curr_msg = NULL;
361 deliver_recv_msg(smi_info, msg); 361 deliver_recv_msg(smi_info, msg);
362 } 362 }
363 363
364 static enum si_sm_result start_next_msg(struct smi_info *smi_info) 364 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
365 { 365 {
366 int rv; 366 int rv;
367 struct list_head *entry = NULL; 367 struct list_head *entry = NULL;
368 #ifdef DEBUG_TIMING 368 #ifdef DEBUG_TIMING
369 struct timeval t; 369 struct timeval t;
370 #endif 370 #endif
371 371
372 /* 372 /*
373 * No need to save flags, we aleady have interrupts off and we 373 * No need to save flags, we aleady have interrupts off and we
374 * already hold the SMI lock. 374 * already hold the SMI lock.
375 */ 375 */
376 if (!smi_info->run_to_completion) 376 if (!smi_info->run_to_completion)
377 spin_lock(&(smi_info->msg_lock)); 377 spin_lock(&(smi_info->msg_lock));
378 378
379 /* Pick the high priority queue first. */ 379 /* Pick the high priority queue first. */
380 if (!list_empty(&(smi_info->hp_xmit_msgs))) { 380 if (!list_empty(&(smi_info->hp_xmit_msgs))) {
381 entry = smi_info->hp_xmit_msgs.next; 381 entry = smi_info->hp_xmit_msgs.next;
382 } else if (!list_empty(&(smi_info->xmit_msgs))) { 382 } else if (!list_empty(&(smi_info->xmit_msgs))) {
383 entry = smi_info->xmit_msgs.next; 383 entry = smi_info->xmit_msgs.next;
384 } 384 }
385 385
386 if (!entry) { 386 if (!entry) {
387 smi_info->curr_msg = NULL; 387 smi_info->curr_msg = NULL;
388 rv = SI_SM_IDLE; 388 rv = SI_SM_IDLE;
389 } else { 389 } else {
390 int err; 390 int err;
391 391
392 list_del(entry); 392 list_del(entry);
393 smi_info->curr_msg = list_entry(entry, 393 smi_info->curr_msg = list_entry(entry,
394 struct ipmi_smi_msg, 394 struct ipmi_smi_msg,
395 link); 395 link);
396 #ifdef DEBUG_TIMING 396 #ifdef DEBUG_TIMING
397 do_gettimeofday(&t); 397 do_gettimeofday(&t);
398 printk(KERN_DEBUG "**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec); 398 printk(KERN_DEBUG "**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
399 #endif 399 #endif
400 err = atomic_notifier_call_chain(&xaction_notifier_list, 400 err = atomic_notifier_call_chain(&xaction_notifier_list,
401 0, smi_info); 401 0, smi_info);
402 if (err & NOTIFY_STOP_MASK) { 402 if (err & NOTIFY_STOP_MASK) {
403 rv = SI_SM_CALL_WITHOUT_DELAY; 403 rv = SI_SM_CALL_WITHOUT_DELAY;
404 goto out; 404 goto out;
405 } 405 }
406 err = smi_info->handlers->start_transaction( 406 err = smi_info->handlers->start_transaction(
407 smi_info->si_sm, 407 smi_info->si_sm,
408 smi_info->curr_msg->data, 408 smi_info->curr_msg->data,
409 smi_info->curr_msg->data_size); 409 smi_info->curr_msg->data_size);
410 if (err) 410 if (err)
411 return_hosed_msg(smi_info, err); 411 return_hosed_msg(smi_info, err);
412 412
413 rv = SI_SM_CALL_WITHOUT_DELAY; 413 rv = SI_SM_CALL_WITHOUT_DELAY;
414 } 414 }
415 out: 415 out:
416 if (!smi_info->run_to_completion) 416 if (!smi_info->run_to_completion)
417 spin_unlock(&(smi_info->msg_lock)); 417 spin_unlock(&(smi_info->msg_lock));
418 418
419 return rv; 419 return rv;
420 } 420 }
421 421
422 static void start_enable_irq(struct smi_info *smi_info) 422 static void start_enable_irq(struct smi_info *smi_info)
423 { 423 {
424 unsigned char msg[2]; 424 unsigned char msg[2];
425 425
426 /* 426 /*
427 * If we are enabling interrupts, we have to tell the 427 * If we are enabling interrupts, we have to tell the
428 * BMC to use them. 428 * BMC to use them.
429 */ 429 */
430 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 430 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
431 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; 431 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
432 432
433 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); 433 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
434 smi_info->si_state = SI_ENABLE_INTERRUPTS1; 434 smi_info->si_state = SI_ENABLE_INTERRUPTS1;
435 } 435 }
436 436
437 static void start_disable_irq(struct smi_info *smi_info) 437 static void start_disable_irq(struct smi_info *smi_info)
438 { 438 {
439 unsigned char msg[2]; 439 unsigned char msg[2];
440 440
441 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 441 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
442 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; 442 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
443 443
444 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); 444 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
445 smi_info->si_state = SI_DISABLE_INTERRUPTS1; 445 smi_info->si_state = SI_DISABLE_INTERRUPTS1;
446 } 446 }
447 447
448 static void start_clear_flags(struct smi_info *smi_info) 448 static void start_clear_flags(struct smi_info *smi_info)
449 { 449 {
450 unsigned char msg[3]; 450 unsigned char msg[3];
451 451
452 /* Make sure the watchdog pre-timeout flag is not set at startup. */ 452 /* Make sure the watchdog pre-timeout flag is not set at startup. */
453 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 453 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
454 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; 454 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
455 msg[2] = WDT_PRE_TIMEOUT_INT; 455 msg[2] = WDT_PRE_TIMEOUT_INT;
456 456
457 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); 457 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
458 smi_info->si_state = SI_CLEARING_FLAGS; 458 smi_info->si_state = SI_CLEARING_FLAGS;
459 } 459 }
460 460
461 /* 461 /*
462 * When we have a situtaion where we run out of memory and cannot 462 * When we have a situtaion where we run out of memory and cannot
463 * allocate messages, we just leave them in the BMC and run the system 463 * allocate messages, we just leave them in the BMC and run the system
464 * polled until we can allocate some memory. Once we have some 464 * polled until we can allocate some memory. Once we have some
465 * memory, we will re-enable the interrupt. 465 * memory, we will re-enable the interrupt.
466 */ 466 */
467 static inline void disable_si_irq(struct smi_info *smi_info) 467 static inline void disable_si_irq(struct smi_info *smi_info)
468 { 468 {
469 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { 469 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
470 start_disable_irq(smi_info); 470 start_disable_irq(smi_info);
471 smi_info->interrupt_disabled = 1; 471 smi_info->interrupt_disabled = 1;
472 if (!atomic_read(&smi_info->stop_operation)) 472 if (!atomic_read(&smi_info->stop_operation))
473 mod_timer(&smi_info->si_timer, 473 mod_timer(&smi_info->si_timer,
474 jiffies + SI_TIMEOUT_JIFFIES); 474 jiffies + SI_TIMEOUT_JIFFIES);
475 } 475 }
476 } 476 }
477 477
478 static inline void enable_si_irq(struct smi_info *smi_info) 478 static inline void enable_si_irq(struct smi_info *smi_info)
479 { 479 {
480 if ((smi_info->irq) && (smi_info->interrupt_disabled)) { 480 if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
481 start_enable_irq(smi_info); 481 start_enable_irq(smi_info);
482 smi_info->interrupt_disabled = 0; 482 smi_info->interrupt_disabled = 0;
483 } 483 }
484 } 484 }
485 485
486 static void handle_flags(struct smi_info *smi_info) 486 static void handle_flags(struct smi_info *smi_info)
487 { 487 {
488 retry: 488 retry:
489 if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) { 489 if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
490 /* Watchdog pre-timeout */ 490 /* Watchdog pre-timeout */
491 smi_inc_stat(smi_info, watchdog_pretimeouts); 491 smi_inc_stat(smi_info, watchdog_pretimeouts);
492 492
493 start_clear_flags(smi_info); 493 start_clear_flags(smi_info);
494 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; 494 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
495 spin_unlock(&(smi_info->si_lock)); 495 spin_unlock(&(smi_info->si_lock));
496 ipmi_smi_watchdog_pretimeout(smi_info->intf); 496 ipmi_smi_watchdog_pretimeout(smi_info->intf);
497 spin_lock(&(smi_info->si_lock)); 497 spin_lock(&(smi_info->si_lock));
498 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) { 498 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
499 /* Messages available. */ 499 /* Messages available. */
500 smi_info->curr_msg = ipmi_alloc_smi_msg(); 500 smi_info->curr_msg = ipmi_alloc_smi_msg();
501 if (!smi_info->curr_msg) { 501 if (!smi_info->curr_msg) {
502 disable_si_irq(smi_info); 502 disable_si_irq(smi_info);
503 smi_info->si_state = SI_NORMAL; 503 smi_info->si_state = SI_NORMAL;
504 return; 504 return;
505 } 505 }
506 enable_si_irq(smi_info); 506 enable_si_irq(smi_info);
507 507
508 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 508 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
509 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD; 509 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
510 smi_info->curr_msg->data_size = 2; 510 smi_info->curr_msg->data_size = 2;
511 511
512 smi_info->handlers->start_transaction( 512 smi_info->handlers->start_transaction(
513 smi_info->si_sm, 513 smi_info->si_sm,
514 smi_info->curr_msg->data, 514 smi_info->curr_msg->data,
515 smi_info->curr_msg->data_size); 515 smi_info->curr_msg->data_size);
516 smi_info->si_state = SI_GETTING_MESSAGES; 516 smi_info->si_state = SI_GETTING_MESSAGES;
517 } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) { 517 } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
518 /* Events available. */ 518 /* Events available. */
519 smi_info->curr_msg = ipmi_alloc_smi_msg(); 519 smi_info->curr_msg = ipmi_alloc_smi_msg();
520 if (!smi_info->curr_msg) { 520 if (!smi_info->curr_msg) {
521 disable_si_irq(smi_info); 521 disable_si_irq(smi_info);
522 smi_info->si_state = SI_NORMAL; 522 smi_info->si_state = SI_NORMAL;
523 return; 523 return;
524 } 524 }
525 enable_si_irq(smi_info); 525 enable_si_irq(smi_info);
526 526
527 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 527 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
528 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; 528 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
529 smi_info->curr_msg->data_size = 2; 529 smi_info->curr_msg->data_size = 2;
530 530
531 smi_info->handlers->start_transaction( 531 smi_info->handlers->start_transaction(
532 smi_info->si_sm, 532 smi_info->si_sm,
533 smi_info->curr_msg->data, 533 smi_info->curr_msg->data,
534 smi_info->curr_msg->data_size); 534 smi_info->curr_msg->data_size);
535 smi_info->si_state = SI_GETTING_EVENTS; 535 smi_info->si_state = SI_GETTING_EVENTS;
536 } else if (smi_info->msg_flags & OEM_DATA_AVAIL && 536 } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
537 smi_info->oem_data_avail_handler) { 537 smi_info->oem_data_avail_handler) {
538 if (smi_info->oem_data_avail_handler(smi_info)) 538 if (smi_info->oem_data_avail_handler(smi_info))
539 goto retry; 539 goto retry;
540 } else 540 } else
541 smi_info->si_state = SI_NORMAL; 541 smi_info->si_state = SI_NORMAL;
542 } 542 }
543 543
544 static void handle_transaction_done(struct smi_info *smi_info) 544 static void handle_transaction_done(struct smi_info *smi_info)
545 { 545 {
546 struct ipmi_smi_msg *msg; 546 struct ipmi_smi_msg *msg;
547 #ifdef DEBUG_TIMING 547 #ifdef DEBUG_TIMING
548 struct timeval t; 548 struct timeval t;
549 549
550 do_gettimeofday(&t); 550 do_gettimeofday(&t);
551 printk(KERN_DEBUG "**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec); 551 printk(KERN_DEBUG "**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
552 #endif 552 #endif
553 switch (smi_info->si_state) { 553 switch (smi_info->si_state) {
554 case SI_NORMAL: 554 case SI_NORMAL:
555 if (!smi_info->curr_msg) 555 if (!smi_info->curr_msg)
556 break; 556 break;
557 557
558 smi_info->curr_msg->rsp_size 558 smi_info->curr_msg->rsp_size
559 = smi_info->handlers->get_result( 559 = smi_info->handlers->get_result(
560 smi_info->si_sm, 560 smi_info->si_sm,
561 smi_info->curr_msg->rsp, 561 smi_info->curr_msg->rsp,
562 IPMI_MAX_MSG_LENGTH); 562 IPMI_MAX_MSG_LENGTH);
563 563
564 /* 564 /*
565 * Do this here becase deliver_recv_msg() releases the 565 * Do this here becase deliver_recv_msg() releases the
566 * lock, and a new message can be put in during the 566 * lock, and a new message can be put in during the
567 * time the lock is released. 567 * time the lock is released.
568 */ 568 */
569 msg = smi_info->curr_msg; 569 msg = smi_info->curr_msg;
570 smi_info->curr_msg = NULL; 570 smi_info->curr_msg = NULL;
571 deliver_recv_msg(smi_info, msg); 571 deliver_recv_msg(smi_info, msg);
572 break; 572 break;
573 573
574 case SI_GETTING_FLAGS: 574 case SI_GETTING_FLAGS:
575 { 575 {
576 unsigned char msg[4]; 576 unsigned char msg[4];
577 unsigned int len; 577 unsigned int len;
578 578
579 /* We got the flags from the SMI, now handle them. */ 579 /* We got the flags from the SMI, now handle them. */
580 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4); 580 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
581 if (msg[2] != 0) { 581 if (msg[2] != 0) {
582 /* Error fetching flags, just give up for now. */ 582 /* Error fetching flags, just give up for now. */
583 smi_info->si_state = SI_NORMAL; 583 smi_info->si_state = SI_NORMAL;
584 } else if (len < 4) { 584 } else if (len < 4) {
585 /* 585 /*
586 * Hmm, no flags. That's technically illegal, but 586 * Hmm, no flags. That's technically illegal, but
587 * don't use uninitialized data. 587 * don't use uninitialized data.
588 */ 588 */
589 smi_info->si_state = SI_NORMAL; 589 smi_info->si_state = SI_NORMAL;
590 } else { 590 } else {
591 smi_info->msg_flags = msg[3]; 591 smi_info->msg_flags = msg[3];
592 handle_flags(smi_info); 592 handle_flags(smi_info);
593 } 593 }
594 break; 594 break;
595 } 595 }
596 596
597 case SI_CLEARING_FLAGS: 597 case SI_CLEARING_FLAGS:
598 case SI_CLEARING_FLAGS_THEN_SET_IRQ: 598 case SI_CLEARING_FLAGS_THEN_SET_IRQ:
599 { 599 {
600 unsigned char msg[3]; 600 unsigned char msg[3];
601 601
602 /* We cleared the flags. */ 602 /* We cleared the flags. */
603 smi_info->handlers->get_result(smi_info->si_sm, msg, 3); 603 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
604 if (msg[2] != 0) { 604 if (msg[2] != 0) {
605 /* Error clearing flags */ 605 /* Error clearing flags */
606 dev_warn(smi_info->dev, 606 dev_warn(smi_info->dev,
607 "Error clearing flags: %2.2x\n", msg[2]); 607 "Error clearing flags: %2.2x\n", msg[2]);
608 } 608 }
609 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ) 609 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
610 start_enable_irq(smi_info); 610 start_enable_irq(smi_info);
611 else 611 else
612 smi_info->si_state = SI_NORMAL; 612 smi_info->si_state = SI_NORMAL;
613 break; 613 break;
614 } 614 }
615 615
616 case SI_GETTING_EVENTS: 616 case SI_GETTING_EVENTS:
617 { 617 {
618 smi_info->curr_msg->rsp_size 618 smi_info->curr_msg->rsp_size
619 = smi_info->handlers->get_result( 619 = smi_info->handlers->get_result(
620 smi_info->si_sm, 620 smi_info->si_sm,
621 smi_info->curr_msg->rsp, 621 smi_info->curr_msg->rsp,
622 IPMI_MAX_MSG_LENGTH); 622 IPMI_MAX_MSG_LENGTH);
623 623
624 /* 624 /*
625 * Do this here becase deliver_recv_msg() releases the 625 * Do this here becase deliver_recv_msg() releases the
626 * lock, and a new message can be put in during the 626 * lock, and a new message can be put in during the
627 * time the lock is released. 627 * time the lock is released.
628 */ 628 */
629 msg = smi_info->curr_msg; 629 msg = smi_info->curr_msg;
630 smi_info->curr_msg = NULL; 630 smi_info->curr_msg = NULL;
631 if (msg->rsp[2] != 0) { 631 if (msg->rsp[2] != 0) {
632 /* Error getting event, probably done. */ 632 /* Error getting event, probably done. */
633 msg->done(msg); 633 msg->done(msg);
634 634
635 /* Take off the event flag. */ 635 /* Take off the event flag. */
636 smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL; 636 smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
637 handle_flags(smi_info); 637 handle_flags(smi_info);
638 } else { 638 } else {
639 smi_inc_stat(smi_info, events); 639 smi_inc_stat(smi_info, events);
640 640
641 /* 641 /*
642 * Do this before we deliver the message 642 * Do this before we deliver the message
643 * because delivering the message releases the 643 * because delivering the message releases the
644 * lock and something else can mess with the 644 * lock and something else can mess with the
645 * state. 645 * state.
646 */ 646 */
647 handle_flags(smi_info); 647 handle_flags(smi_info);
648 648
649 deliver_recv_msg(smi_info, msg); 649 deliver_recv_msg(smi_info, msg);
650 } 650 }
651 break; 651 break;
652 } 652 }
653 653
654 case SI_GETTING_MESSAGES: 654 case SI_GETTING_MESSAGES:
655 { 655 {
656 smi_info->curr_msg->rsp_size 656 smi_info->curr_msg->rsp_size
657 = smi_info->handlers->get_result( 657 = smi_info->handlers->get_result(
658 smi_info->si_sm, 658 smi_info->si_sm,
659 smi_info->curr_msg->rsp, 659 smi_info->curr_msg->rsp,
660 IPMI_MAX_MSG_LENGTH); 660 IPMI_MAX_MSG_LENGTH);
661 661
662 /* 662 /*
663 * Do this here becase deliver_recv_msg() releases the 663 * Do this here becase deliver_recv_msg() releases the
664 * lock, and a new message can be put in during the 664 * lock, and a new message can be put in during the
665 * time the lock is released. 665 * time the lock is released.
666 */ 666 */
667 msg = smi_info->curr_msg; 667 msg = smi_info->curr_msg;
668 smi_info->curr_msg = NULL; 668 smi_info->curr_msg = NULL;
669 if (msg->rsp[2] != 0) { 669 if (msg->rsp[2] != 0) {
670 /* Error getting event, probably done. */ 670 /* Error getting event, probably done. */
671 msg->done(msg); 671 msg->done(msg);
672 672
673 /* Take off the msg flag. */ 673 /* Take off the msg flag. */
674 smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL; 674 smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
675 handle_flags(smi_info); 675 handle_flags(smi_info);
676 } else { 676 } else {
677 smi_inc_stat(smi_info, incoming_messages); 677 smi_inc_stat(smi_info, incoming_messages);
678 678
679 /* 679 /*
680 * Do this before we deliver the message 680 * Do this before we deliver the message
681 * because delivering the message releases the 681 * because delivering the message releases the
682 * lock and something else can mess with the 682 * lock and something else can mess with the
683 * state. 683 * state.
684 */ 684 */
685 handle_flags(smi_info); 685 handle_flags(smi_info);
686 686
687 deliver_recv_msg(smi_info, msg); 687 deliver_recv_msg(smi_info, msg);
688 } 688 }
689 break; 689 break;
690 } 690 }
691 691
692 case SI_ENABLE_INTERRUPTS1: 692 case SI_ENABLE_INTERRUPTS1:
693 { 693 {
694 unsigned char msg[4]; 694 unsigned char msg[4];
695 695
696 /* We got the flags from the SMI, now handle them. */ 696 /* We got the flags from the SMI, now handle them. */
697 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); 697 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
698 if (msg[2] != 0) { 698 if (msg[2] != 0) {
699 dev_warn(smi_info->dev, "Could not enable interrupts" 699 dev_warn(smi_info->dev, "Could not enable interrupts"
700 ", failed get, using polled mode.\n"); 700 ", failed get, using polled mode.\n");
701 smi_info->si_state = SI_NORMAL; 701 smi_info->si_state = SI_NORMAL;
702 } else { 702 } else {
703 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 703 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
704 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; 704 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
705 msg[2] = (msg[3] | 705 msg[2] = (msg[3] |
706 IPMI_BMC_RCV_MSG_INTR | 706 IPMI_BMC_RCV_MSG_INTR |
707 IPMI_BMC_EVT_MSG_INTR); 707 IPMI_BMC_EVT_MSG_INTR);
708 smi_info->handlers->start_transaction( 708 smi_info->handlers->start_transaction(
709 smi_info->si_sm, msg, 3); 709 smi_info->si_sm, msg, 3);
710 smi_info->si_state = SI_ENABLE_INTERRUPTS2; 710 smi_info->si_state = SI_ENABLE_INTERRUPTS2;
711 } 711 }
712 break; 712 break;
713 } 713 }
714 714
715 case SI_ENABLE_INTERRUPTS2: 715 case SI_ENABLE_INTERRUPTS2:
716 { 716 {
717 unsigned char msg[4]; 717 unsigned char msg[4];
718 718
719 /* We got the flags from the SMI, now handle them. */ 719 /* We got the flags from the SMI, now handle them. */
720 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); 720 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
721 if (msg[2] != 0) 721 if (msg[2] != 0)
722 dev_warn(smi_info->dev, "Could not enable interrupts" 722 dev_warn(smi_info->dev, "Could not enable interrupts"
723 ", failed set, using polled mode.\n"); 723 ", failed set, using polled mode.\n");
724 else 724 else
725 smi_info->interrupt_disabled = 0; 725 smi_info->interrupt_disabled = 0;
726 smi_info->si_state = SI_NORMAL; 726 smi_info->si_state = SI_NORMAL;
727 break; 727 break;
728 } 728 }
729 729
730 case SI_DISABLE_INTERRUPTS1: 730 case SI_DISABLE_INTERRUPTS1:
731 { 731 {
732 unsigned char msg[4]; 732 unsigned char msg[4];
733 733
734 /* We got the flags from the SMI, now handle them. */ 734 /* We got the flags from the SMI, now handle them. */
735 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); 735 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
736 if (msg[2] != 0) { 736 if (msg[2] != 0) {
737 dev_warn(smi_info->dev, "Could not disable interrupts" 737 dev_warn(smi_info->dev, "Could not disable interrupts"
738 ", failed get.\n"); 738 ", failed get.\n");
739 smi_info->si_state = SI_NORMAL; 739 smi_info->si_state = SI_NORMAL;
740 } else { 740 } else {
741 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 741 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
742 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; 742 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
743 msg[2] = (msg[3] & 743 msg[2] = (msg[3] &
744 ~(IPMI_BMC_RCV_MSG_INTR | 744 ~(IPMI_BMC_RCV_MSG_INTR |
745 IPMI_BMC_EVT_MSG_INTR)); 745 IPMI_BMC_EVT_MSG_INTR));
746 smi_info->handlers->start_transaction( 746 smi_info->handlers->start_transaction(
747 smi_info->si_sm, msg, 3); 747 smi_info->si_sm, msg, 3);
748 smi_info->si_state = SI_DISABLE_INTERRUPTS2; 748 smi_info->si_state = SI_DISABLE_INTERRUPTS2;
749 } 749 }
750 break; 750 break;
751 } 751 }
752 752
753 case SI_DISABLE_INTERRUPTS2: 753 case SI_DISABLE_INTERRUPTS2:
754 { 754 {
755 unsigned char msg[4]; 755 unsigned char msg[4];
756 756
757 /* We got the flags from the SMI, now handle them. */ 757 /* We got the flags from the SMI, now handle them. */
758 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); 758 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
759 if (msg[2] != 0) { 759 if (msg[2] != 0) {
760 dev_warn(smi_info->dev, "Could not disable interrupts" 760 dev_warn(smi_info->dev, "Could not disable interrupts"
761 ", failed set.\n"); 761 ", failed set.\n");
762 } 762 }
763 smi_info->si_state = SI_NORMAL; 763 smi_info->si_state = SI_NORMAL;
764 break; 764 break;
765 } 765 }
766 } 766 }
767 } 767 }
768 768
769 /* 769 /*
770 * Called on timeouts and events. Timeouts should pass the elapsed 770 * Called on timeouts and events. Timeouts should pass the elapsed
771 * time, interrupts should pass in zero. Must be called with 771 * time, interrupts should pass in zero. Must be called with
772 * si_lock held and interrupts disabled. 772 * si_lock held and interrupts disabled.
773 */ 773 */
774 static enum si_sm_result smi_event_handler(struct smi_info *smi_info, 774 static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
775 int time) 775 int time)
776 { 776 {
777 enum si_sm_result si_sm_result; 777 enum si_sm_result si_sm_result;
778 778
779 restart: 779 restart:
780 /* 780 /*
781 * There used to be a loop here that waited a little while 781 * There used to be a loop here that waited a little while
782 * (around 25us) before giving up. That turned out to be 782 * (around 25us) before giving up. That turned out to be
783 * pointless, the minimum delays I was seeing were in the 300us 783 * pointless, the minimum delays I was seeing were in the 300us
784 * range, which is far too long to wait in an interrupt. So 784 * range, which is far too long to wait in an interrupt. So
785 * we just run until the state machine tells us something 785 * we just run until the state machine tells us something
786 * happened or it needs a delay. 786 * happened or it needs a delay.
787 */ 787 */
788 si_sm_result = smi_info->handlers->event(smi_info->si_sm, time); 788 si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
789 time = 0; 789 time = 0;
790 while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY) 790 while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
791 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); 791 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
792 792
793 if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) { 793 if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
794 smi_inc_stat(smi_info, complete_transactions); 794 smi_inc_stat(smi_info, complete_transactions);
795 795
796 handle_transaction_done(smi_info); 796 handle_transaction_done(smi_info);
797 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); 797 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
798 } else if (si_sm_result == SI_SM_HOSED) { 798 } else if (si_sm_result == SI_SM_HOSED) {
799 smi_inc_stat(smi_info, hosed_count); 799 smi_inc_stat(smi_info, hosed_count);
800 800
801 /* 801 /*
802 * Do the before return_hosed_msg, because that 802 * Do the before return_hosed_msg, because that
803 * releases the lock. 803 * releases the lock.
804 */ 804 */
805 smi_info->si_state = SI_NORMAL; 805 smi_info->si_state = SI_NORMAL;
806 if (smi_info->curr_msg != NULL) { 806 if (smi_info->curr_msg != NULL) {
807 /* 807 /*
808 * If we were handling a user message, format 808 * If we were handling a user message, format
809 * a response to send to the upper layer to 809 * a response to send to the upper layer to
810 * tell it about the error. 810 * tell it about the error.
811 */ 811 */
812 return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED); 812 return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
813 } 813 }
814 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); 814 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
815 } 815 }
816 816
817 /* 817 /*
818 * We prefer handling attn over new messages. But don't do 818 * We prefer handling attn over new messages. But don't do
819 * this if there is not yet an upper layer to handle anything. 819 * this if there is not yet an upper layer to handle anything.
820 */ 820 */
821 if (likely(smi_info->intf) && si_sm_result == SI_SM_ATTN) { 821 if (likely(smi_info->intf) && si_sm_result == SI_SM_ATTN) {
822 unsigned char msg[2]; 822 unsigned char msg[2];
823 823
824 smi_inc_stat(smi_info, attentions); 824 smi_inc_stat(smi_info, attentions);
825 825
826 /* 826 /*
827 * Got a attn, send down a get message flags to see 827 * Got a attn, send down a get message flags to see
828 * what's causing it. It would be better to handle 828 * what's causing it. It would be better to handle
829 * this in the upper layer, but due to the way 829 * this in the upper layer, but due to the way
830 * interrupts work with the SMI, that's not really 830 * interrupts work with the SMI, that's not really
831 * possible. 831 * possible.
832 */ 832 */
833 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 833 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
834 msg[1] = IPMI_GET_MSG_FLAGS_CMD; 834 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
835 835
836 smi_info->handlers->start_transaction( 836 smi_info->handlers->start_transaction(
837 smi_info->si_sm, msg, 2); 837 smi_info->si_sm, msg, 2);
838 smi_info->si_state = SI_GETTING_FLAGS; 838 smi_info->si_state = SI_GETTING_FLAGS;
839 goto restart; 839 goto restart;
840 } 840 }
841 841
842 /* If we are currently idle, try to start the next message. */ 842 /* If we are currently idle, try to start the next message. */
843 if (si_sm_result == SI_SM_IDLE) { 843 if (si_sm_result == SI_SM_IDLE) {
844 smi_inc_stat(smi_info, idles); 844 smi_inc_stat(smi_info, idles);
845 845
846 si_sm_result = start_next_msg(smi_info); 846 si_sm_result = start_next_msg(smi_info);
847 if (si_sm_result != SI_SM_IDLE) 847 if (si_sm_result != SI_SM_IDLE)
848 goto restart; 848 goto restart;
849 } 849 }
850 850
851 if ((si_sm_result == SI_SM_IDLE) 851 if ((si_sm_result == SI_SM_IDLE)
852 && (atomic_read(&smi_info->req_events))) { 852 && (atomic_read(&smi_info->req_events))) {
853 /* 853 /*
854 * We are idle and the upper layer requested that I fetch 854 * We are idle and the upper layer requested that I fetch
855 * events, so do so. 855 * events, so do so.
856 */ 856 */
857 atomic_set(&smi_info->req_events, 0); 857 atomic_set(&smi_info->req_events, 0);
858 858
859 smi_info->curr_msg = ipmi_alloc_smi_msg(); 859 smi_info->curr_msg = ipmi_alloc_smi_msg();
860 if (!smi_info->curr_msg) 860 if (!smi_info->curr_msg)
861 goto out; 861 goto out;
862 862
863 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 863 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
864 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; 864 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
865 smi_info->curr_msg->data_size = 2; 865 smi_info->curr_msg->data_size = 2;
866 866
867 smi_info->handlers->start_transaction( 867 smi_info->handlers->start_transaction(
868 smi_info->si_sm, 868 smi_info->si_sm,
869 smi_info->curr_msg->data, 869 smi_info->curr_msg->data,
870 smi_info->curr_msg->data_size); 870 smi_info->curr_msg->data_size);
871 smi_info->si_state = SI_GETTING_EVENTS; 871 smi_info->si_state = SI_GETTING_EVENTS;
872 goto restart; 872 goto restart;
873 } 873 }
874 out: 874 out:
875 return si_sm_result; 875 return si_sm_result;
876 } 876 }
877 877
878 static void sender(void *send_info, 878 static void sender(void *send_info,
879 struct ipmi_smi_msg *msg, 879 struct ipmi_smi_msg *msg,
880 int priority) 880 int priority)
881 { 881 {
882 struct smi_info *smi_info = send_info; 882 struct smi_info *smi_info = send_info;
883 enum si_sm_result result; 883 enum si_sm_result result;
884 unsigned long flags; 884 unsigned long flags;
885 #ifdef DEBUG_TIMING 885 #ifdef DEBUG_TIMING
886 struct timeval t; 886 struct timeval t;
887 #endif 887 #endif
888 888
889 if (atomic_read(&smi_info->stop_operation)) { 889 if (atomic_read(&smi_info->stop_operation)) {
890 msg->rsp[0] = msg->data[0] | 4; 890 msg->rsp[0] = msg->data[0] | 4;
891 msg->rsp[1] = msg->data[1]; 891 msg->rsp[1] = msg->data[1];
892 msg->rsp[2] = IPMI_ERR_UNSPECIFIED; 892 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
893 msg->rsp_size = 3; 893 msg->rsp_size = 3;
894 deliver_recv_msg(smi_info, msg); 894 deliver_recv_msg(smi_info, msg);
895 return; 895 return;
896 } 896 }
897 897
898 #ifdef DEBUG_TIMING 898 #ifdef DEBUG_TIMING
899 do_gettimeofday(&t); 899 do_gettimeofday(&t);
900 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec); 900 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
901 #endif 901 #endif
902 902
903 /*
904 * last_timeout_jiffies is updated here to avoid
905 * smi_timeout() handler passing very large time_diff
906 * value to smi_event_handler() that causes
907 * the send command to abort.
908 */
909 smi_info->last_timeout_jiffies = jiffies;
910
903 mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES); 911 mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
904 912
905 if (smi_info->thread) 913 if (smi_info->thread)
906 wake_up_process(smi_info->thread); 914 wake_up_process(smi_info->thread);
907 915
908 if (smi_info->run_to_completion) { 916 if (smi_info->run_to_completion) {
909 /* 917 /*
910 * If we are running to completion, then throw it in 918 * If we are running to completion, then throw it in
911 * the list and run transactions until everything is 919 * the list and run transactions until everything is
912 * clear. Priority doesn't matter here. 920 * clear. Priority doesn't matter here.
913 */ 921 */
914 922
915 /* 923 /*
916 * Run to completion means we are single-threaded, no 924 * Run to completion means we are single-threaded, no
917 * need for locks. 925 * need for locks.
918 */ 926 */
919 list_add_tail(&(msg->link), &(smi_info->xmit_msgs)); 927 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
920 928
921 result = smi_event_handler(smi_info, 0); 929 result = smi_event_handler(smi_info, 0);
922 while (result != SI_SM_IDLE) { 930 while (result != SI_SM_IDLE) {
923 udelay(SI_SHORT_TIMEOUT_USEC); 931 udelay(SI_SHORT_TIMEOUT_USEC);
924 result = smi_event_handler(smi_info, 932 result = smi_event_handler(smi_info,
925 SI_SHORT_TIMEOUT_USEC); 933 SI_SHORT_TIMEOUT_USEC);
926 } 934 }
927 return; 935 return;
928 } 936 }
929 937
930 spin_lock_irqsave(&smi_info->msg_lock, flags); 938 spin_lock_irqsave(&smi_info->msg_lock, flags);
931 if (priority > 0) 939 if (priority > 0)
932 list_add_tail(&msg->link, &smi_info->hp_xmit_msgs); 940 list_add_tail(&msg->link, &smi_info->hp_xmit_msgs);
933 else 941 else
934 list_add_tail(&msg->link, &smi_info->xmit_msgs); 942 list_add_tail(&msg->link, &smi_info->xmit_msgs);
935 spin_unlock_irqrestore(&smi_info->msg_lock, flags); 943 spin_unlock_irqrestore(&smi_info->msg_lock, flags);
936 944
937 spin_lock_irqsave(&smi_info->si_lock, flags); 945 spin_lock_irqsave(&smi_info->si_lock, flags);
938 if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) 946 if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL)
939 start_next_msg(smi_info); 947 start_next_msg(smi_info);
940 spin_unlock_irqrestore(&smi_info->si_lock, flags); 948 spin_unlock_irqrestore(&smi_info->si_lock, flags);
941 } 949 }
942 950
943 static void set_run_to_completion(void *send_info, int i_run_to_completion) 951 static void set_run_to_completion(void *send_info, int i_run_to_completion)
944 { 952 {
945 struct smi_info *smi_info = send_info; 953 struct smi_info *smi_info = send_info;
946 enum si_sm_result result; 954 enum si_sm_result result;
947 955
948 smi_info->run_to_completion = i_run_to_completion; 956 smi_info->run_to_completion = i_run_to_completion;
949 if (i_run_to_completion) { 957 if (i_run_to_completion) {
950 result = smi_event_handler(smi_info, 0); 958 result = smi_event_handler(smi_info, 0);
951 while (result != SI_SM_IDLE) { 959 while (result != SI_SM_IDLE) {
952 udelay(SI_SHORT_TIMEOUT_USEC); 960 udelay(SI_SHORT_TIMEOUT_USEC);
953 result = smi_event_handler(smi_info, 961 result = smi_event_handler(smi_info,
954 SI_SHORT_TIMEOUT_USEC); 962 SI_SHORT_TIMEOUT_USEC);
955 } 963 }
956 } 964 }
957 } 965 }
958 966
959 /* 967 /*
960 * Use -1 in the nsec value of the busy waiting timespec to tell that 968 * Use -1 in the nsec value of the busy waiting timespec to tell that
961 * we are spinning in kipmid looking for something and not delaying 969 * we are spinning in kipmid looking for something and not delaying
962 * between checks 970 * between checks
963 */ 971 */
964 static inline void ipmi_si_set_not_busy(struct timespec *ts) 972 static inline void ipmi_si_set_not_busy(struct timespec *ts)
965 { 973 {
966 ts->tv_nsec = -1; 974 ts->tv_nsec = -1;
967 } 975 }
968 static inline int ipmi_si_is_busy(struct timespec *ts) 976 static inline int ipmi_si_is_busy(struct timespec *ts)
969 { 977 {
970 return ts->tv_nsec != -1; 978 return ts->tv_nsec != -1;
971 } 979 }
972 980
973 static int ipmi_thread_busy_wait(enum si_sm_result smi_result, 981 static int ipmi_thread_busy_wait(enum si_sm_result smi_result,
974 const struct smi_info *smi_info, 982 const struct smi_info *smi_info,
975 struct timespec *busy_until) 983 struct timespec *busy_until)
976 { 984 {
977 unsigned int max_busy_us = 0; 985 unsigned int max_busy_us = 0;
978 986
979 if (smi_info->intf_num < num_max_busy_us) 987 if (smi_info->intf_num < num_max_busy_us)
980 max_busy_us = kipmid_max_busy_us[smi_info->intf_num]; 988 max_busy_us = kipmid_max_busy_us[smi_info->intf_num];
981 if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY) 989 if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
982 ipmi_si_set_not_busy(busy_until); 990 ipmi_si_set_not_busy(busy_until);
983 else if (!ipmi_si_is_busy(busy_until)) { 991 else if (!ipmi_si_is_busy(busy_until)) {
984 getnstimeofday(busy_until); 992 getnstimeofday(busy_until);
985 timespec_add_ns(busy_until, max_busy_us*NSEC_PER_USEC); 993 timespec_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
986 } else { 994 } else {
987 struct timespec now; 995 struct timespec now;
988 getnstimeofday(&now); 996 getnstimeofday(&now);
989 if (unlikely(timespec_compare(&now, busy_until) > 0)) { 997 if (unlikely(timespec_compare(&now, busy_until) > 0)) {
990 ipmi_si_set_not_busy(busy_until); 998 ipmi_si_set_not_busy(busy_until);
991 return 0; 999 return 0;
992 } 1000 }
993 } 1001 }
994 return 1; 1002 return 1;
995 } 1003 }
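
For readers tracing the busy-wait bookkeeping above, here is a minimal stand-alone sketch (not driver code) of the same decision logic, assuming an ordinary POSIX clock_gettime() environment; the sketch_* names are illustrative only. It returns 1 while the polling thread should stay hot and 0 once the busy window of max_busy_us has expired, using tv_nsec == -1 as the "no window open" marker exactly as ipmi_si_set_not_busy() does.

#include <stdbool.h>
#include <time.h>

#define SKETCH_NSEC_PER_SEC	1000000000L
#define SKETCH_NSEC_PER_USEC	1000L

/* Advance a timespec by ns nanoseconds, keeping tv_nsec normalised. */
static void sketch_add_ns(struct timespec *ts, long ns)
{
	ts->tv_nsec += ns;
	while (ts->tv_nsec >= SKETCH_NSEC_PER_SEC) {
		ts->tv_nsec -= SKETCH_NSEC_PER_SEC;
		ts->tv_sec++;
	}
}

static int sketch_busy_wait(struct timespec *busy_until, long max_busy_us,
			    bool wants_short_delay)
{
	struct timespec now;

	if (max_busy_us == 0 || !wants_short_delay) {
		busy_until->tv_nsec = -1;		/* no window to track */
	} else if (busy_until->tv_nsec == -1) {
		/* First short delay: open a window max_busy_us long. */
		clock_gettime(CLOCK_MONOTONIC, busy_until);
		sketch_add_ns(busy_until, max_busy_us * SKETCH_NSEC_PER_USEC);
	} else {
		clock_gettime(CLOCK_MONOTONIC, &now);
		if (now.tv_sec > busy_until->tv_sec ||
		    (now.tv_sec == busy_until->tv_sec &&
		     now.tv_nsec > busy_until->tv_nsec)) {
			busy_until->tv_nsec = -1;	/* window expired */
			return 0;			/* caller may sleep */
		}
	}
	return 1;					/* keep the thread hot */
}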
996 1004
997 1005
998 /* 1006 /*
999 * A busy-waiting loop for speeding up IPMI operation. 1007 * A busy-waiting loop for speeding up IPMI operation.
1000 * 1008 *
1001 * Lousy hardware makes this hard. This is only enabled for systems 1009 * Lousy hardware makes this hard. This is only enabled for systems
1002 * that are not BT and do not have interrupts. It keeps spinning 1010 * that are not BT and do not have interrupts. It keeps spinning
1003 * until an operation is complete or until max_busy tells it to stop 1011 * until an operation is complete or until max_busy tells it to stop
1004 * (if that is enabled). See the paragraph on kipmid_max_busy_us in 1012 * (if that is enabled). See the paragraph on kipmid_max_busy_us in
1005 * Documentation/IPMI.txt for details. 1013 * Documentation/IPMI.txt for details.
1006 */ 1014 */
1007 static int ipmi_thread(void *data) 1015 static int ipmi_thread(void *data)
1008 { 1016 {
1009 struct smi_info *smi_info = data; 1017 struct smi_info *smi_info = data;
1010 unsigned long flags; 1018 unsigned long flags;
1011 enum si_sm_result smi_result; 1019 enum si_sm_result smi_result;
1012 struct timespec busy_until; 1020 struct timespec busy_until;
1013 1021
1014 ipmi_si_set_not_busy(&busy_until); 1022 ipmi_si_set_not_busy(&busy_until);
1015 set_user_nice(current, 19); 1023 set_user_nice(current, 19);
1016 while (!kthread_should_stop()) { 1024 while (!kthread_should_stop()) {
1017 int busy_wait; 1025 int busy_wait;
1018 1026
1019 spin_lock_irqsave(&(smi_info->si_lock), flags); 1027 spin_lock_irqsave(&(smi_info->si_lock), flags);
1020 smi_result = smi_event_handler(smi_info, 0); 1028 smi_result = smi_event_handler(smi_info, 0);
1021 spin_unlock_irqrestore(&(smi_info->si_lock), flags); 1029 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1022 busy_wait = ipmi_thread_busy_wait(smi_result, smi_info, 1030 busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
1023 &busy_until); 1031 &busy_until);
1024 if (smi_result == SI_SM_CALL_WITHOUT_DELAY) 1032 if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
1025 ; /* do nothing */ 1033 ; /* do nothing */
1026 else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) 1034 else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
1027 schedule(); 1035 schedule();
1028 else if (smi_result == SI_SM_IDLE) 1036 else if (smi_result == SI_SM_IDLE)
1029 schedule_timeout_interruptible(100); 1037 schedule_timeout_interruptible(100);
1030 else 1038 else
1031 schedule_timeout_interruptible(1); 1039 schedule_timeout_interruptible(1);
1032 } 1040 }
1033 return 0; 1041 return 0;
1034 } 1042 }
1035 1043
1036 1044
1037 static void poll(void *send_info) 1045 static void poll(void *send_info)
1038 { 1046 {
1039 struct smi_info *smi_info = send_info; 1047 struct smi_info *smi_info = send_info;
1040 unsigned long flags; 1048 unsigned long flags;
1041 1049
1042 /* 1050 /*
1043 * Make sure there is some delay in the poll loop so we can 1051 * Make sure there is some delay in the poll loop so we can
1044 * drive time forward and timeout things. 1052 * drive time forward and timeout things.
1045 */ 1053 */
1046 udelay(10); 1054 udelay(10);
1047 spin_lock_irqsave(&smi_info->si_lock, flags); 1055 spin_lock_irqsave(&smi_info->si_lock, flags);
1048 smi_event_handler(smi_info, 10); 1056 smi_event_handler(smi_info, 10);
1049 spin_unlock_irqrestore(&smi_info->si_lock, flags); 1057 spin_unlock_irqrestore(&smi_info->si_lock, flags);
1050 } 1058 }
1051 1059
1052 static void request_events(void *send_info) 1060 static void request_events(void *send_info)
1053 { 1061 {
1054 struct smi_info *smi_info = send_info; 1062 struct smi_info *smi_info = send_info;
1055 1063
1056 if (atomic_read(&smi_info->stop_operation) || 1064 if (atomic_read(&smi_info->stop_operation) ||
1057 !smi_info->has_event_buffer) 1065 !smi_info->has_event_buffer)
1058 return; 1066 return;
1059 1067
1060 atomic_set(&smi_info->req_events, 1); 1068 atomic_set(&smi_info->req_events, 1);
1061 } 1069 }
1062 1070
1063 static int initialized; 1071 static int initialized;
1064 1072
1065 static void smi_timeout(unsigned long data) 1073 static void smi_timeout(unsigned long data)
1066 { 1074 {
1067 struct smi_info *smi_info = (struct smi_info *) data; 1075 struct smi_info *smi_info = (struct smi_info *) data;
1068 enum si_sm_result smi_result; 1076 enum si_sm_result smi_result;
1069 unsigned long flags; 1077 unsigned long flags;
1070 unsigned long jiffies_now; 1078 unsigned long jiffies_now;
1071 long time_diff; 1079 long time_diff;
1072 long timeout; 1080 long timeout;
1073 #ifdef DEBUG_TIMING 1081 #ifdef DEBUG_TIMING
1074 struct timeval t; 1082 struct timeval t;
1075 #endif 1083 #endif
1076 1084
1077 spin_lock_irqsave(&(smi_info->si_lock), flags); 1085 spin_lock_irqsave(&(smi_info->si_lock), flags);
1078 #ifdef DEBUG_TIMING 1086 #ifdef DEBUG_TIMING
1079 do_gettimeofday(&t); 1087 do_gettimeofday(&t);
1080 printk(KERN_DEBUG "**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec); 1088 printk(KERN_DEBUG "**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1081 #endif 1089 #endif
1082 jiffies_now = jiffies; 1090 jiffies_now = jiffies;
1083 time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) 1091 time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
1084 * SI_USEC_PER_JIFFY); 1092 * SI_USEC_PER_JIFFY);
1085 smi_result = smi_event_handler(smi_info, time_diff); 1093 smi_result = smi_event_handler(smi_info, time_diff);
1086 1094
1087 spin_unlock_irqrestore(&(smi_info->si_lock), flags); 1095 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1088 1096
1089 smi_info->last_timeout_jiffies = jiffies_now; 1097 smi_info->last_timeout_jiffies = jiffies_now;
1090 1098
1091 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { 1099 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
1092 /* Running with interrupts, only do long timeouts. */ 1100 /* Running with interrupts, only do long timeouts. */
1093 timeout = jiffies + SI_TIMEOUT_JIFFIES; 1101 timeout = jiffies + SI_TIMEOUT_JIFFIES;
1094 smi_inc_stat(smi_info, long_timeouts); 1102 smi_inc_stat(smi_info, long_timeouts);
1095 goto do_mod_timer; 1103 goto do_mod_timer;
1096 } 1104 }
1097 1105
1098 /* 1106 /*
1099 * If the state machine asks for a short delay, then shorten 1107 * If the state machine asks for a short delay, then shorten
1100 * the timer timeout. 1108 * the timer timeout.
1101 */ 1109 */
1102 if (smi_result == SI_SM_CALL_WITH_DELAY) { 1110 if (smi_result == SI_SM_CALL_WITH_DELAY) {
1103 smi_inc_stat(smi_info, short_timeouts); 1111 smi_inc_stat(smi_info, short_timeouts);
1104 timeout = jiffies + 1; 1112 timeout = jiffies + 1;
1105 } else { 1113 } else {
1106 smi_inc_stat(smi_info, long_timeouts); 1114 smi_inc_stat(smi_info, long_timeouts);
1107 timeout = jiffies + SI_TIMEOUT_JIFFIES; 1115 timeout = jiffies + SI_TIMEOUT_JIFFIES;
1108 } 1116 }
1109 1117
1110 do_mod_timer: 1118 do_mod_timer:
1111 if (smi_result != SI_SM_IDLE) 1119 if (smi_result != SI_SM_IDLE)
1112 mod_timer(&(smi_info->si_timer), timeout); 1120 mod_timer(&(smi_info->si_timer), timeout);
1113 } 1121 }
1114 1122
1115 static irqreturn_t si_irq_handler(int irq, void *data) 1123 static irqreturn_t si_irq_handler(int irq, void *data)
1116 { 1124 {
1117 struct smi_info *smi_info = data; 1125 struct smi_info *smi_info = data;
1118 unsigned long flags; 1126 unsigned long flags;
1119 #ifdef DEBUG_TIMING 1127 #ifdef DEBUG_TIMING
1120 struct timeval t; 1128 struct timeval t;
1121 #endif 1129 #endif
1122 1130
1123 spin_lock_irqsave(&(smi_info->si_lock), flags); 1131 spin_lock_irqsave(&(smi_info->si_lock), flags);
1124 1132
1125 smi_inc_stat(smi_info, interrupts); 1133 smi_inc_stat(smi_info, interrupts);
1126 1134
1127 #ifdef DEBUG_TIMING 1135 #ifdef DEBUG_TIMING
1128 do_gettimeofday(&t); 1136 do_gettimeofday(&t);
1129 printk(KERN_DEBUG "**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec); 1137 printk(KERN_DEBUG "**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1130 #endif 1138 #endif
1131 smi_event_handler(smi_info, 0); 1139 smi_event_handler(smi_info, 0);
1132 spin_unlock_irqrestore(&(smi_info->si_lock), flags); 1140 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1133 return IRQ_HANDLED; 1141 return IRQ_HANDLED;
1134 } 1142 }
1135 1143
1136 static irqreturn_t si_bt_irq_handler(int irq, void *data) 1144 static irqreturn_t si_bt_irq_handler(int irq, void *data)
1137 { 1145 {
1138 struct smi_info *smi_info = data; 1146 struct smi_info *smi_info = data;
1139 /* We need to clear the IRQ flag for the BT interface. */ 1147 /* We need to clear the IRQ flag for the BT interface. */
1140 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 1148 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
1141 IPMI_BT_INTMASK_CLEAR_IRQ_BIT 1149 IPMI_BT_INTMASK_CLEAR_IRQ_BIT
1142 | IPMI_BT_INTMASK_ENABLE_IRQ_BIT); 1150 | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1143 return si_irq_handler(irq, data); 1151 return si_irq_handler(irq, data);
1144 } 1152 }
1145 1153
1146 static int smi_start_processing(void *send_info, 1154 static int smi_start_processing(void *send_info,
1147 ipmi_smi_t intf) 1155 ipmi_smi_t intf)
1148 { 1156 {
1149 struct smi_info *new_smi = send_info; 1157 struct smi_info *new_smi = send_info;
1150 int enable = 0; 1158 int enable = 0;
1151 1159
1152 new_smi->intf = intf; 1160 new_smi->intf = intf;
1153 1161
1154 /* Try to claim any interrupts. */ 1162 /* Try to claim any interrupts. */
1155 if (new_smi->irq_setup) 1163 if (new_smi->irq_setup)
1156 new_smi->irq_setup(new_smi); 1164 new_smi->irq_setup(new_smi);
1157 1165
1158 /* Set up the timer that drives the interface. */ 1166 /* Set up the timer that drives the interface. */
1159 setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi); 1167 setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
1160 new_smi->last_timeout_jiffies = jiffies; 1168 new_smi->last_timeout_jiffies = jiffies;
1161 mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES); 1169 mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
1162 1170
1163 /* 1171 /*
1164 * Check if the user forcefully enabled the daemon. 1172 * Check if the user forcefully enabled the daemon.
1165 */ 1173 */
1166 if (new_smi->intf_num < num_force_kipmid) 1174 if (new_smi->intf_num < num_force_kipmid)
1167 enable = force_kipmid[new_smi->intf_num]; 1175 enable = force_kipmid[new_smi->intf_num];
1168 /* 1176 /*
1169 * The BT interface is efficient enough to not need a thread, 1177 * The BT interface is efficient enough to not need a thread,
1170 * and there is no need for a thread if we have interrupts. 1178 * and there is no need for a thread if we have interrupts.
1171 */ 1179 */
1172 else if ((new_smi->si_type != SI_BT) && (!new_smi->irq)) 1180 else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
1173 enable = 1; 1181 enable = 1;
1174 1182
1175 if (enable) { 1183 if (enable) {
1176 new_smi->thread = kthread_run(ipmi_thread, new_smi, 1184 new_smi->thread = kthread_run(ipmi_thread, new_smi,
1177 "kipmi%d", new_smi->intf_num); 1185 "kipmi%d", new_smi->intf_num);
1178 if (IS_ERR(new_smi->thread)) { 1186 if (IS_ERR(new_smi->thread)) {
1179 dev_notice(new_smi->dev, "Could not start" 1187 dev_notice(new_smi->dev, "Could not start"
1180 " kernel thread due to error %ld, only using" 1188 " kernel thread due to error %ld, only using"
1181 " timers to drive the interface\n", 1189 " timers to drive the interface\n",
1182 PTR_ERR(new_smi->thread)); 1190 PTR_ERR(new_smi->thread));
1183 new_smi->thread = NULL; 1191 new_smi->thread = NULL;
1184 } 1192 }
1185 } 1193 }
1186 1194
1187 return 0; 1195 return 0;
1188 } 1196 }
1189 1197
1190 static int get_smi_info(void *send_info, struct ipmi_smi_info *data) 1198 static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
1191 { 1199 {
1192 struct smi_info *smi = send_info; 1200 struct smi_info *smi = send_info;
1193 1201
1194 data->addr_src = smi->addr_source; 1202 data->addr_src = smi->addr_source;
1195 data->dev = smi->dev; 1203 data->dev = smi->dev;
1196 data->addr_info = smi->addr_info; 1204 data->addr_info = smi->addr_info;
1197 get_device(smi->dev); 1205 get_device(smi->dev);
1198 1206
1199 return 0; 1207 return 0;
1200 } 1208 }
1201 1209
1202 static void set_maintenance_mode(void *send_info, int enable) 1210 static void set_maintenance_mode(void *send_info, int enable)
1203 { 1211 {
1204 struct smi_info *smi_info = send_info; 1212 struct smi_info *smi_info = send_info;
1205 1213
1206 if (!enable) 1214 if (!enable)
1207 atomic_set(&smi_info->req_events, 0); 1215 atomic_set(&smi_info->req_events, 0);
1208 } 1216 }
1209 1217
1210 static struct ipmi_smi_handlers handlers = { 1218 static struct ipmi_smi_handlers handlers = {
1211 .owner = THIS_MODULE, 1219 .owner = THIS_MODULE,
1212 .start_processing = smi_start_processing, 1220 .start_processing = smi_start_processing,
1213 .get_smi_info = get_smi_info, 1221 .get_smi_info = get_smi_info,
1214 .sender = sender, 1222 .sender = sender,
1215 .request_events = request_events, 1223 .request_events = request_events,
1216 .set_maintenance_mode = set_maintenance_mode, 1224 .set_maintenance_mode = set_maintenance_mode,
1217 .set_run_to_completion = set_run_to_completion, 1225 .set_run_to_completion = set_run_to_completion,
1218 .poll = poll, 1226 .poll = poll,
1219 }; 1227 };
1220 1228
1221 /* 1229 /*
1222 * There can be 4 IO ports passed in (with or without IRQs), 4 addresses, 1230 * There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
1223 * a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS. 1231 * a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS.
1224 */ 1232 */
1225 1233
1226 static LIST_HEAD(smi_infos); 1234 static LIST_HEAD(smi_infos);
1227 static DEFINE_MUTEX(smi_infos_lock); 1235 static DEFINE_MUTEX(smi_infos_lock);
1228 static int smi_num; /* Used to sequence the SMIs */ 1236 static int smi_num; /* Used to sequence the SMIs */
1229 1237
1230 #define DEFAULT_REGSPACING 1 1238 #define DEFAULT_REGSPACING 1
1231 #define DEFAULT_REGSIZE 1 1239 #define DEFAULT_REGSIZE 1
1232 1240
1233 static int si_trydefaults = 1; 1241 static int si_trydefaults = 1;
1234 static char *si_type[SI_MAX_PARMS]; 1242 static char *si_type[SI_MAX_PARMS];
1235 #define MAX_SI_TYPE_STR 30 1243 #define MAX_SI_TYPE_STR 30
1236 static char si_type_str[MAX_SI_TYPE_STR]; 1244 static char si_type_str[MAX_SI_TYPE_STR];
1237 static unsigned long addrs[SI_MAX_PARMS]; 1245 static unsigned long addrs[SI_MAX_PARMS];
1238 static unsigned int num_addrs; 1246 static unsigned int num_addrs;
1239 static unsigned int ports[SI_MAX_PARMS]; 1247 static unsigned int ports[SI_MAX_PARMS];
1240 static unsigned int num_ports; 1248 static unsigned int num_ports;
1241 static int irqs[SI_MAX_PARMS]; 1249 static int irqs[SI_MAX_PARMS];
1242 static unsigned int num_irqs; 1250 static unsigned int num_irqs;
1243 static int regspacings[SI_MAX_PARMS]; 1251 static int regspacings[SI_MAX_PARMS];
1244 static unsigned int num_regspacings; 1252 static unsigned int num_regspacings;
1245 static int regsizes[SI_MAX_PARMS]; 1253 static int regsizes[SI_MAX_PARMS];
1246 static unsigned int num_regsizes; 1254 static unsigned int num_regsizes;
1247 static int regshifts[SI_MAX_PARMS]; 1255 static int regshifts[SI_MAX_PARMS];
1248 static unsigned int num_regshifts; 1256 static unsigned int num_regshifts;
1249 static int slave_addrs[SI_MAX_PARMS]; /* Leaving 0 chooses the default value */ 1257 static int slave_addrs[SI_MAX_PARMS]; /* Leaving 0 chooses the default value */
1250 static unsigned int num_slave_addrs; 1258 static unsigned int num_slave_addrs;
1251 1259
1252 #define IPMI_IO_ADDR_SPACE 0 1260 #define IPMI_IO_ADDR_SPACE 0
1253 #define IPMI_MEM_ADDR_SPACE 1 1261 #define IPMI_MEM_ADDR_SPACE 1
1254 static char *addr_space_to_str[] = { "i/o", "mem" }; 1262 static char *addr_space_to_str[] = { "i/o", "mem" };
1255 1263
1256 static int hotmod_handler(const char *val, struct kernel_param *kp); 1264 static int hotmod_handler(const char *val, struct kernel_param *kp);
1257 1265
1258 module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200); 1266 module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
1259 MODULE_PARM_DESC(hotmod, "Add and remove interfaces. See" 1267 MODULE_PARM_DESC(hotmod, "Add and remove interfaces. See"
1260 " Documentation/IPMI.txt in the kernel sources for the" 1268 " Documentation/IPMI.txt in the kernel sources for the"
1261 " gory details."); 1269 " gory details.");
1262 1270
1263 module_param_named(trydefaults, si_trydefaults, bool, 0); 1271 module_param_named(trydefaults, si_trydefaults, bool, 0);
1264 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the" 1272 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
1265 " default scan of the KCS and SMIC interface at the standard" 1273 " default scan of the KCS and SMIC interface at the standard"
1266 " address"); 1274 " address");
1267 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0); 1275 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
1268 MODULE_PARM_DESC(type, "Defines the type of each interface, each" 1276 MODULE_PARM_DESC(type, "Defines the type of each interface, each"
1269 " interface separated by commas. The types are 'kcs'," 1277 " interface separated by commas. The types are 'kcs',"
1270 " 'smic', and 'bt'. For example si_type=kcs,bt will set" 1278 " 'smic', and 'bt'. For example si_type=kcs,bt will set"
1271 " the first interface to kcs and the second to bt"); 1279 " the first interface to kcs and the second to bt");
1272 module_param_array(addrs, ulong, &num_addrs, 0); 1280 module_param_array(addrs, ulong, &num_addrs, 0);
1273 MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the" 1281 MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
1274 " addresses separated by commas. Only use if an interface" 1282 " addresses separated by commas. Only use if an interface"
1275 " is in memory. Otherwise, set it to zero or leave" 1283 " is in memory. Otherwise, set it to zero or leave"
1276 " it blank."); 1284 " it blank.");
1277 module_param_array(ports, uint, &num_ports, 0); 1285 module_param_array(ports, uint, &num_ports, 0);
1278 MODULE_PARM_DESC(ports, "Sets the port address of each interface, the" 1286 MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
1279 " addresses separated by commas. Only use if an interface" 1287 " addresses separated by commas. Only use if an interface"
1280 " is a port. Otherwise, set it to zero or leave" 1288 " is a port. Otherwise, set it to zero or leave"
1281 " it blank."); 1289 " it blank.");
1282 module_param_array(irqs, int, &num_irqs, 0); 1290 module_param_array(irqs, int, &num_irqs, 0);
1283 MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the" 1291 MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
1284 " addresses separated by commas. Only use if an interface" 1292 " addresses separated by commas. Only use if an interface"
1285 " has an interrupt. Otherwise, set it to zero or leave" 1293 " has an interrupt. Otherwise, set it to zero or leave"
1286 " it blank."); 1294 " it blank.");
1287 module_param_array(regspacings, int, &num_regspacings, 0); 1295 module_param_array(regspacings, int, &num_regspacings, 0);
1288 MODULE_PARM_DESC(regspacings, "The number of bytes between the start address" 1296 MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
1289 " and each successive register used by the interface. For" 1297 " and each successive register used by the interface. For"
1290 " instance, if the start address is 0xca2 and the spacing" 1298 " instance, if the start address is 0xca2 and the spacing"
1291 " is 2, then the second address is at 0xca4. Defaults" 1299 " is 2, then the second address is at 0xca4. Defaults"
1292 " to 1."); 1300 " to 1.");
1293 module_param_array(regsizes, int, &num_regsizes, 0); 1301 module_param_array(regsizes, int, &num_regsizes, 0);
1294 MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes." 1302 MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
1295 " This should generally be 1, 2, 4, or 8 for an 8-bit," 1303 " This should generally be 1, 2, 4, or 8 for an 8-bit,"
1296 " 16-bit, 32-bit, or 64-bit register. Use this if you" 1304 " 16-bit, 32-bit, or 64-bit register. Use this if you"
1297 " the 8-bit IPMI register has to be read from a larger" 1305 " the 8-bit IPMI register has to be read from a larger"
1298 " register."); 1306 " register.");
1299 module_param_array(regshifts, int, &num_regshifts, 0); 1307 module_param_array(regshifts, int, &num_regshifts, 0);
1300 MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the" 1308 MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
1301 " IPMI register, in bits. For instance, if the data" 1309 " IPMI register, in bits. For instance, if the data"
1302 " is read from a 32-bit word and the IPMI data is in" 1310 " is read from a 32-bit word and the IPMI data is in"
1303 " bit 8-15, then the shift would be 8"); 1311 " bit 8-15, then the shift would be 8");
1304 module_param_array(slave_addrs, int, &num_slave_addrs, 0); 1312 module_param_array(slave_addrs, int, &num_slave_addrs, 0);
1305 MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for" 1313 MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
1306 " the controller. Normally this is 0x20, but can be" 1314 " the controller. Normally this is 0x20, but can be"
1307 " overridden by this parm. This is an array indexed" 1315 " overridden by this parm. This is an array indexed"
1308 " by interface number."); 1316 " by interface number.");
1309 module_param_array(force_kipmid, int, &num_force_kipmid, 0); 1317 module_param_array(force_kipmid, int, &num_force_kipmid, 0);
1310 MODULE_PARM_DESC(force_kipmid, "Force the kipmid daemon to be enabled (1) or" 1318 MODULE_PARM_DESC(force_kipmid, "Force the kipmid daemon to be enabled (1) or"
1311 " disabled (0). Normally the IPMI driver auto-detects" 1319 " disabled (0). Normally the IPMI driver auto-detects"
1312 " this, but the value may be overridden by this parm."); 1320 " this, but the value may be overridden by this parm.");
1313 module_param(unload_when_empty, int, 0); 1321 module_param(unload_when_empty, int, 0);
1314 MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are" 1322 MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
1315 " specified or found, default is 1. Setting to 0" 1323 " specified or found, default is 1. Setting to 0"
1316 " is useful for hot add of devices using hotmod."); 1324 " is useful for hot add of devices using hotmod.");
1317 module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644); 1325 module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644);
1318 MODULE_PARM_DESC(kipmid_max_busy_us, 1326 MODULE_PARM_DESC(kipmid_max_busy_us,
1319 "Max time (in microseconds) to busy-wait for IPMI data before" 1327 "Max time (in microseconds) to busy-wait for IPMI data before"
1320 " sleeping. 0 (default) means to wait forever. Set to 100-500" 1328 " sleeping. 0 (default) means to wait forever. Set to 100-500"
1321 " if kipmid is using up a lot of CPU time."); 1329 " if kipmid is using up a lot of CPU time.");
1322 1330
1323 1331
1324 static void std_irq_cleanup(struct smi_info *info) 1332 static void std_irq_cleanup(struct smi_info *info)
1325 { 1333 {
1326 if (info->si_type == SI_BT) 1334 if (info->si_type == SI_BT)
1327 /* Disable the interrupt in the BT interface. */ 1335 /* Disable the interrupt in the BT interface. */
1328 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0); 1336 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1329 free_irq(info->irq, info); 1337 free_irq(info->irq, info);
1330 } 1338 }
1331 1339
1332 static int std_irq_setup(struct smi_info *info) 1340 static int std_irq_setup(struct smi_info *info)
1333 { 1341 {
1334 int rv; 1342 int rv;
1335 1343
1336 if (!info->irq) 1344 if (!info->irq)
1337 return 0; 1345 return 0;
1338 1346
1339 if (info->si_type == SI_BT) { 1347 if (info->si_type == SI_BT) {
1340 rv = request_irq(info->irq, 1348 rv = request_irq(info->irq,
1341 si_bt_irq_handler, 1349 si_bt_irq_handler,
1342 IRQF_SHARED | IRQF_DISABLED, 1350 IRQF_SHARED | IRQF_DISABLED,
1343 DEVICE_NAME, 1351 DEVICE_NAME,
1344 info); 1352 info);
1345 if (!rv) 1353 if (!rv)
1346 /* Enable the interrupt in the BT interface. */ 1354 /* Enable the interrupt in the BT interface. */
1347 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 1355 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1348 IPMI_BT_INTMASK_ENABLE_IRQ_BIT); 1356 IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1349 } else 1357 } else
1350 rv = request_irq(info->irq, 1358 rv = request_irq(info->irq,
1351 si_irq_handler, 1359 si_irq_handler,
1352 IRQF_SHARED | IRQF_DISABLED, 1360 IRQF_SHARED | IRQF_DISABLED,
1353 DEVICE_NAME, 1361 DEVICE_NAME,
1354 info); 1362 info);
1355 if (rv) { 1363 if (rv) {
1356 dev_warn(info->dev, "%s unable to claim interrupt %d," 1364 dev_warn(info->dev, "%s unable to claim interrupt %d,"
1357 " running polled\n", 1365 " running polled\n",
1358 DEVICE_NAME, info->irq); 1366 DEVICE_NAME, info->irq);
1359 info->irq = 0; 1367 info->irq = 0;
1360 } else { 1368 } else {
1361 info->irq_cleanup = std_irq_cleanup; 1369 info->irq_cleanup = std_irq_cleanup;
1362 dev_info(info->dev, "Using irq %d\n", info->irq); 1370 dev_info(info->dev, "Using irq %d\n", info->irq);
1363 } 1371 }
1364 1372
1365 return rv; 1373 return rv;
1366 } 1374 }
1367 1375
1368 static unsigned char port_inb(struct si_sm_io *io, unsigned int offset) 1376 static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1369 { 1377 {
1370 unsigned int addr = io->addr_data; 1378 unsigned int addr = io->addr_data;
1371 1379
1372 return inb(addr + (offset * io->regspacing)); 1380 return inb(addr + (offset * io->regspacing));
1373 } 1381 }
1374 1382
1375 static void port_outb(struct si_sm_io *io, unsigned int offset, 1383 static void port_outb(struct si_sm_io *io, unsigned int offset,
1376 unsigned char b) 1384 unsigned char b)
1377 { 1385 {
1378 unsigned int addr = io->addr_data; 1386 unsigned int addr = io->addr_data;
1379 1387
1380 outb(b, addr + (offset * io->regspacing)); 1388 outb(b, addr + (offset * io->regspacing));
1381 } 1389 }
1382 1390
1383 static unsigned char port_inw(struct si_sm_io *io, unsigned int offset) 1391 static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1384 { 1392 {
1385 unsigned int addr = io->addr_data; 1393 unsigned int addr = io->addr_data;
1386 1394
1387 return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff; 1395 return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1388 } 1396 }
1389 1397
1390 static void port_outw(struct si_sm_io *io, unsigned int offset, 1398 static void port_outw(struct si_sm_io *io, unsigned int offset,
1391 unsigned char b) 1399 unsigned char b)
1392 { 1400 {
1393 unsigned int addr = io->addr_data; 1401 unsigned int addr = io->addr_data;
1394 1402
1395 outw(b << io->regshift, addr + (offset * io->regspacing)); 1403 outw(b << io->regshift, addr + (offset * io->regspacing));
1396 } 1404 }
1397 1405
1398 static unsigned char port_inl(struct si_sm_io *io, unsigned int offset) 1406 static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1399 { 1407 {
1400 unsigned int addr = io->addr_data; 1408 unsigned int addr = io->addr_data;
1401 1409
1402 return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff; 1410 return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1403 } 1411 }
1404 1412
1405 static void port_outl(struct si_sm_io *io, unsigned int offset, 1413 static void port_outl(struct si_sm_io *io, unsigned int offset,
1406 unsigned char b) 1414 unsigned char b)
1407 { 1415 {
1408 unsigned int addr = io->addr_data; 1416 unsigned int addr = io->addr_data;
1409 1417
1410 outl(b << io->regshift, addr+(offset * io->regspacing)); 1418 outl(b << io->regshift, addr+(offset * io->regspacing));
1411 } 1419 }
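
To make the regspacing/regshift arithmetic in the accessors above concrete (hypothetical values): with io->addr_data = 0xca2, io->regspacing = 4 and io->regshift = 8, port_inw(io, 1) reads the 16-bit word at port 0xca2 + 1*4 = 0xca6 and returns (word >> 8) & 0xff, i.e. the IPMI byte carried in bits 8-15 of that word; port_outw() shifts the byte back into the same position before writing.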
1412 1420
1413 static void port_cleanup(struct smi_info *info) 1421 static void port_cleanup(struct smi_info *info)
1414 { 1422 {
1415 unsigned int addr = info->io.addr_data; 1423 unsigned int addr = info->io.addr_data;
1416 int idx; 1424 int idx;
1417 1425
1418 if (addr) { 1426 if (addr) {
1419 for (idx = 0; idx < info->io_size; idx++) 1427 for (idx = 0; idx < info->io_size; idx++)
1420 release_region(addr + idx * info->io.regspacing, 1428 release_region(addr + idx * info->io.regspacing,
1421 info->io.regsize); 1429 info->io.regsize);
1422 } 1430 }
1423 } 1431 }
1424 1432
1425 static int port_setup(struct smi_info *info) 1433 static int port_setup(struct smi_info *info)
1426 { 1434 {
1427 unsigned int addr = info->io.addr_data; 1435 unsigned int addr = info->io.addr_data;
1428 int idx; 1436 int idx;
1429 1437
1430 if (!addr) 1438 if (!addr)
1431 return -ENODEV; 1439 return -ENODEV;
1432 1440
1433 info->io_cleanup = port_cleanup; 1441 info->io_cleanup = port_cleanup;
1434 1442
1435 /* 1443 /*
1436 * Figure out the actual inb/inw/inl/etc routine to use based 1444 * Figure out the actual inb/inw/inl/etc routine to use based
1437 * upon the register size. 1445 * upon the register size.
1438 */ 1446 */
1439 switch (info->io.regsize) { 1447 switch (info->io.regsize) {
1440 case 1: 1448 case 1:
1441 info->io.inputb = port_inb; 1449 info->io.inputb = port_inb;
1442 info->io.outputb = port_outb; 1450 info->io.outputb = port_outb;
1443 break; 1451 break;
1444 case 2: 1452 case 2:
1445 info->io.inputb = port_inw; 1453 info->io.inputb = port_inw;
1446 info->io.outputb = port_outw; 1454 info->io.outputb = port_outw;
1447 break; 1455 break;
1448 case 4: 1456 case 4:
1449 info->io.inputb = port_inl; 1457 info->io.inputb = port_inl;
1450 info->io.outputb = port_outl; 1458 info->io.outputb = port_outl;
1451 break; 1459 break;
1452 default: 1460 default:
1453 dev_warn(info->dev, "Invalid register size: %d\n", 1461 dev_warn(info->dev, "Invalid register size: %d\n",
1454 info->io.regsize); 1462 info->io.regsize);
1455 return -EINVAL; 1463 return -EINVAL;
1456 } 1464 }
1457 1465
1458 /* 1466 /*
1459 * Some BIOSes reserve disjoint I/O regions in their ACPI 1467 * Some BIOSes reserve disjoint I/O regions in their ACPI
1460 * tables. This causes problems when trying to register the 1468 * tables. This causes problems when trying to register the
1461 * entire I/O region. Therefore we must register each I/O 1469 * entire I/O region. Therefore we must register each I/O
1462 * port separately. 1470 * port separately.
1463 */ 1471 */
1464 for (idx = 0; idx < info->io_size; idx++) { 1472 for (idx = 0; idx < info->io_size; idx++) {
1465 if (request_region(addr + idx * info->io.regspacing, 1473 if (request_region(addr + idx * info->io.regspacing,
1466 info->io.regsize, DEVICE_NAME) == NULL) { 1474 info->io.regsize, DEVICE_NAME) == NULL) {
1467 /* Undo allocations */ 1475 /* Undo allocations */
1468 while (idx--) { 1476 while (idx--) {
1469 release_region(addr + idx * info->io.regspacing, 1477 release_region(addr + idx * info->io.regspacing,
1470 info->io.regsize); 1478 info->io.regsize);
1471 } 1479 }
1472 return -EIO; 1480 return -EIO;
1473 } 1481 }
1474 } 1482 }
1475 return 0; 1483 return 0;
1476 } 1484 }
1477 1485
1478 static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset) 1486 static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
1479 { 1487 {
1480 return readb((io->addr)+(offset * io->regspacing)); 1488 return readb((io->addr)+(offset * io->regspacing));
1481 } 1489 }
1482 1490
1483 static void intf_mem_outb(struct si_sm_io *io, unsigned int offset, 1491 static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
1484 unsigned char b) 1492 unsigned char b)
1485 { 1493 {
1486 writeb(b, (io->addr)+(offset * io->regspacing)); 1494 writeb(b, (io->addr)+(offset * io->regspacing));
1487 } 1495 }
1488 1496
1489 static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset) 1497 static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
1490 { 1498 {
1491 return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift) 1499 return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
1492 & 0xff; 1500 & 0xff;
1493 } 1501 }
1494 1502
1495 static void intf_mem_outw(struct si_sm_io *io, unsigned int offset, 1503 static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
1496 unsigned char b) 1504 unsigned char b)
1497 { 1505 {
1498 writeb(b << io->regshift, (io->addr)+(offset * io->regspacing)); 1506 writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
1499 } 1507 }
1500 1508
1501 static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset) 1509 static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
1502 { 1510 {
1503 return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift) 1511 return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
1504 & 0xff; 1512 & 0xff;
1505 } 1513 }
1506 1514
1507 static void intf_mem_outl(struct si_sm_io *io, unsigned int offset, 1515 static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
1508 unsigned char b) 1516 unsigned char b)
1509 { 1517 {
1510 writel(b << io->regshift, (io->addr)+(offset * io->regspacing)); 1518 writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
1511 } 1519 }
1512 1520
1513 #ifdef readq 1521 #ifdef readq
1514 static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset) 1522 static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
1515 { 1523 {
1516 return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift) 1524 return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
1517 & 0xff; 1525 & 0xff;
1518 } 1526 }
1519 1527
1520 static void mem_outq(struct si_sm_io *io, unsigned int offset, 1528 static void mem_outq(struct si_sm_io *io, unsigned int offset,
1521 unsigned char b) 1529 unsigned char b)
1522 { 1530 {
1523 writeq(b << io->regshift, (io->addr)+(offset * io->regspacing)); 1531 writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
1524 } 1532 }
1525 #endif 1533 #endif
1526 1534
1527 static void mem_cleanup(struct smi_info *info) 1535 static void mem_cleanup(struct smi_info *info)
1528 { 1536 {
1529 unsigned long addr = info->io.addr_data; 1537 unsigned long addr = info->io.addr_data;
1530 int mapsize; 1538 int mapsize;
1531 1539
1532 if (info->io.addr) { 1540 if (info->io.addr) {
1533 iounmap(info->io.addr); 1541 iounmap(info->io.addr);
1534 1542
1535 mapsize = ((info->io_size * info->io.regspacing) 1543 mapsize = ((info->io_size * info->io.regspacing)
1536 - (info->io.regspacing - info->io.regsize)); 1544 - (info->io.regspacing - info->io.regsize));
1537 1545
1538 release_mem_region(addr, mapsize); 1546 release_mem_region(addr, mapsize);
1539 } 1547 }
1540 } 1548 }
1541 1549
1542 static int mem_setup(struct smi_info *info) 1550 static int mem_setup(struct smi_info *info)
1543 { 1551 {
1544 unsigned long addr = info->io.addr_data; 1552 unsigned long addr = info->io.addr_data;
1545 int mapsize; 1553 int mapsize;
1546 1554
1547 if (!addr) 1555 if (!addr)
1548 return -ENODEV; 1556 return -ENODEV;
1549 1557
1550 info->io_cleanup = mem_cleanup; 1558 info->io_cleanup = mem_cleanup;
1551 1559
1552 /* 1560 /*
1553 * Figure out the actual readb/readw/readl/etc routine to use based 1561 * Figure out the actual readb/readw/readl/etc routine to use based
1554 * upon the register size. 1562 * upon the register size.
1555 */ 1563 */
1556 switch (info->io.regsize) { 1564 switch (info->io.regsize) {
1557 case 1: 1565 case 1:
1558 info->io.inputb = intf_mem_inb; 1566 info->io.inputb = intf_mem_inb;
1559 info->io.outputb = intf_mem_outb; 1567 info->io.outputb = intf_mem_outb;
1560 break; 1568 break;
1561 case 2: 1569 case 2:
1562 info->io.inputb = intf_mem_inw; 1570 info->io.inputb = intf_mem_inw;
1563 info->io.outputb = intf_mem_outw; 1571 info->io.outputb = intf_mem_outw;
1564 break; 1572 break;
1565 case 4: 1573 case 4:
1566 info->io.inputb = intf_mem_inl; 1574 info->io.inputb = intf_mem_inl;
1567 info->io.outputb = intf_mem_outl; 1575 info->io.outputb = intf_mem_outl;
1568 break; 1576 break;
1569 #ifdef readq 1577 #ifdef readq
1570 case 8: 1578 case 8:
1571 info->io.inputb = mem_inq; 1579 info->io.inputb = mem_inq;
1572 info->io.outputb = mem_outq; 1580 info->io.outputb = mem_outq;
1573 break; 1581 break;
1574 #endif 1582 #endif
1575 default: 1583 default:
1576 dev_warn(info->dev, "Invalid register size: %d\n", 1584 dev_warn(info->dev, "Invalid register size: %d\n",
1577 info->io.regsize); 1585 info->io.regsize);
1578 return -EINVAL; 1586 return -EINVAL;
1579 } 1587 }
1580 1588
1581 /* 1589 /*
1582 * Calculate the total amount of memory to claim. This is an 1590 * Calculate the total amount of memory to claim. This is an
1583 * unusual looking calculation, but it avoids claiming any 1591 * unusual looking calculation, but it avoids claiming any
1584 * more memory than it has to. It will claim everything 1592 * more memory than it has to. It will claim everything
1585 * from the first address to the end of the last full 1593 * from the first address to the end of the last full
1586 * register. 1594 * register.
1587 */ 1595 */
1588 mapsize = ((info->io_size * info->io.regspacing) 1596 mapsize = ((info->io_size * info->io.regspacing)
1589 - (info->io.regspacing - info->io.regsize)); 1597 - (info->io.regspacing - info->io.regsize));
1590 1598
1591 if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL) 1599 if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
1592 return -EIO; 1600 return -EIO;
1593 1601
1594 info->io.addr = ioremap(addr, mapsize); 1602 info->io.addr = ioremap(addr, mapsize);
1595 if (info->io.addr == NULL) { 1603 if (info->io.addr == NULL) {
1596 release_mem_region(addr, mapsize); 1604 release_mem_region(addr, mapsize);
1597 return -EIO; 1605 return -EIO;
1598 } 1606 }
1599 return 0; 1607 return 0;
1600 } 1608 }
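
As a worked example of the mapsize formula (hypothetical values): with io_size = 3, regspacing = 4 and regsize = 1, mapsize = 3*4 - (4 - 1) = 9 bytes. The three registers sit at offsets 0, 4 and 8, each one byte wide, so the mapping ends at the last byte of the last register rather than claiming the full 12-byte span.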
1601 1609
1602 /* 1610 /*
1603 * Parms come in as <op1>[:op2[:op3...]]. ops are: 1611 * Parms come in as <op1>[:op2[:op3...]]. ops are:
1604 * add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]] 1612 * add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
1605 * Options are: 1613 * Options are:
1606 * rsp=<regspacing> 1614 * rsp=<regspacing>
1607 * rsi=<regsize> 1615 * rsi=<regsize>
1608 * rsh=<regshift> 1616 * rsh=<regshift>
1609 * irq=<irq> 1617 * irq=<irq>
1610 * ipmb=<ipmb addr> 1618 * ipmb=<ipmb addr>
1611 */ 1619 */
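
As a concrete illustration of this grammar (the address and options here are hypothetical; Documentation/IPMI.txt is the authoritative reference), a KCS interface at I/O port 0xca2 could be added and later removed at run time with:

	echo "add,kcs,i/o,0xca2,rsp=1,irq=0" > /sys/module/ipmi_si/parameters/hotmod
	echo "remove,kcs,i/o,0xca2" > /sys/module/ipmi_si/parameters/hotmod

hotmod_handler() below parses exactly these comma-separated fields, with multiple operations separated by colons.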
1612 enum hotmod_op { HM_ADD, HM_REMOVE }; 1620 enum hotmod_op { HM_ADD, HM_REMOVE };
1613 struct hotmod_vals { 1621 struct hotmod_vals {
1614 char *name; 1622 char *name;
1615 int val; 1623 int val;
1616 }; 1624 };
1617 static struct hotmod_vals hotmod_ops[] = { 1625 static struct hotmod_vals hotmod_ops[] = {
1618 { "add", HM_ADD }, 1626 { "add", HM_ADD },
1619 { "remove", HM_REMOVE }, 1627 { "remove", HM_REMOVE },
1620 { NULL } 1628 { NULL }
1621 }; 1629 };
1622 static struct hotmod_vals hotmod_si[] = { 1630 static struct hotmod_vals hotmod_si[] = {
1623 { "kcs", SI_KCS }, 1631 { "kcs", SI_KCS },
1624 { "smic", SI_SMIC }, 1632 { "smic", SI_SMIC },
1625 { "bt", SI_BT }, 1633 { "bt", SI_BT },
1626 { NULL } 1634 { NULL }
1627 }; 1635 };
1628 static struct hotmod_vals hotmod_as[] = { 1636 static struct hotmod_vals hotmod_as[] = {
1629 { "mem", IPMI_MEM_ADDR_SPACE }, 1637 { "mem", IPMI_MEM_ADDR_SPACE },
1630 { "i/o", IPMI_IO_ADDR_SPACE }, 1638 { "i/o", IPMI_IO_ADDR_SPACE },
1631 { NULL } 1639 { NULL }
1632 }; 1640 };
1633 1641
1634 static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr) 1642 static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
1635 { 1643 {
1636 char *s; 1644 char *s;
1637 int i; 1645 int i;
1638 1646
1639 s = strchr(*curr, ','); 1647 s = strchr(*curr, ',');
1640 if (!s) { 1648 if (!s) {
1641 printk(KERN_WARNING PFX "No hotmod %s given.\n", name); 1649 printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
1642 return -EINVAL; 1650 return -EINVAL;
1643 } 1651 }
1644 *s = '\0'; 1652 *s = '\0';
1645 s++; 1653 s++;
1646 for (i = 0; v[i].name; i++) { 1654 for (i = 0; v[i].name; i++) {
1647 if (strcmp(*curr, v[i].name) == 0) { 1655 if (strcmp(*curr, v[i].name) == 0) {
1648 *val = v[i].val; 1656 *val = v[i].val;
1649 *curr = s; 1657 *curr = s;
1650 return 0; 1658 return 0;
1651 } 1659 }
1652 } 1660 }
1653 1661
1654 printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr); 1662 printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
1655 return -EINVAL; 1663 return -EINVAL;
1656 } 1664 }
1657 1665
1658 static int check_hotmod_int_op(const char *curr, const char *option, 1666 static int check_hotmod_int_op(const char *curr, const char *option,
1659 const char *name, int *val) 1667 const char *name, int *val)
1660 { 1668 {
1661 char *n; 1669 char *n;
1662 1670
1663 if (strcmp(curr, name) == 0) { 1671 if (strcmp(curr, name) == 0) {
1664 if (!option) { 1672 if (!option) {
1665 printk(KERN_WARNING PFX 1673 printk(KERN_WARNING PFX
1666 "No option given for '%s'\n", 1674 "No option given for '%s'\n",
1667 curr); 1675 curr);
1668 return -EINVAL; 1676 return -EINVAL;
1669 } 1677 }
1670 *val = simple_strtoul(option, &n, 0); 1678 *val = simple_strtoul(option, &n, 0);
1671 if ((*n != '\0') || (*option == '\0')) { 1679 if ((*n != '\0') || (*option == '\0')) {
1672 printk(KERN_WARNING PFX 1680 printk(KERN_WARNING PFX
1673 "Bad option given for '%s'\n", 1681 "Bad option given for '%s'\n",
1674 curr); 1682 curr);
1675 return -EINVAL; 1683 return -EINVAL;
1676 } 1684 }
1677 return 1; 1685 return 1;
1678 } 1686 }
1679 return 0; 1687 return 0;
1680 } 1688 }
1681 1689
1682 static struct smi_info *smi_info_alloc(void) 1690 static struct smi_info *smi_info_alloc(void)
1683 { 1691 {
1684 struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL); 1692 struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
1685 1693
1686 if (info) { 1694 if (info) {
1687 spin_lock_init(&info->si_lock); 1695 spin_lock_init(&info->si_lock);
1688 spin_lock_init(&info->msg_lock); 1696 spin_lock_init(&info->msg_lock);
1689 } 1697 }
1690 return info; 1698 return info;
1691 } 1699 }
1692 1700
1693 static int hotmod_handler(const char *val, struct kernel_param *kp) 1701 static int hotmod_handler(const char *val, struct kernel_param *kp)
1694 { 1702 {
1695 char *str = kstrdup(val, GFP_KERNEL); 1703 char *str = kstrdup(val, GFP_KERNEL);
1696 int rv; 1704 int rv;
1697 char *next, *curr, *s, *n, *o; 1705 char *next, *curr, *s, *n, *o;
1698 enum hotmod_op op; 1706 enum hotmod_op op;
1699 enum si_type si_type; 1707 enum si_type si_type;
1700 int addr_space; 1708 int addr_space;
1701 unsigned long addr; 1709 unsigned long addr;
1702 int regspacing; 1710 int regspacing;
1703 int regsize; 1711 int regsize;
1704 int regshift; 1712 int regshift;
1705 int irq; 1713 int irq;
1706 int ipmb; 1714 int ipmb;
1707 int ival; 1715 int ival;
1708 int len; 1716 int len;
1709 struct smi_info *info; 1717 struct smi_info *info;
1710 1718
1711 if (!str) 1719 if (!str)
1712 return -ENOMEM; 1720 return -ENOMEM;
1713 1721
1714 /* Kill any trailing spaces, as we can get a "\n" from echo. */ 1722 /* Kill any trailing spaces, as we can get a "\n" from echo. */
1715 len = strlen(str); 1723 len = strlen(str);
1716 ival = len - 1; 1724 ival = len - 1;
1717 while ((ival >= 0) && isspace(str[ival])) { 1725 while ((ival >= 0) && isspace(str[ival])) {
1718 str[ival] = '\0'; 1726 str[ival] = '\0';
1719 ival--; 1727 ival--;
1720 } 1728 }
1721 1729
1722 for (curr = str; curr; curr = next) { 1730 for (curr = str; curr; curr = next) {
1723 regspacing = 1; 1731 regspacing = 1;
1724 regsize = 1; 1732 regsize = 1;
1725 regshift = 0; 1733 regshift = 0;
1726 irq = 0; 1734 irq = 0;
1727 ipmb = 0; /* Choose the default if not specified */ 1735 ipmb = 0; /* Choose the default if not specified */
1728 1736
1729 next = strchr(curr, ':'); 1737 next = strchr(curr, ':');
1730 if (next) { 1738 if (next) {
1731 *next = '\0'; 1739 *next = '\0';
1732 next++; 1740 next++;
1733 } 1741 }
1734 1742
1735 rv = parse_str(hotmod_ops, &ival, "operation", &curr); 1743 rv = parse_str(hotmod_ops, &ival, "operation", &curr);
1736 if (rv) 1744 if (rv)
1737 break; 1745 break;
1738 op = ival; 1746 op = ival;
1739 1747
1740 rv = parse_str(hotmod_si, &ival, "interface type", &curr); 1748 rv = parse_str(hotmod_si, &ival, "interface type", &curr);
1741 if (rv) 1749 if (rv)
1742 break; 1750 break;
1743 si_type = ival; 1751 si_type = ival;
1744 1752
1745 rv = parse_str(hotmod_as, &addr_space, "address space", &curr); 1753 rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
1746 if (rv) 1754 if (rv)
1747 break; 1755 break;
1748 1756
1749 s = strchr(curr, ','); 1757 s = strchr(curr, ',');
1750 if (s) { 1758 if (s) {
1751 *s = '\0'; 1759 *s = '\0';
1752 s++; 1760 s++;
1753 } 1761 }
1754 addr = simple_strtoul(curr, &n, 0); 1762 addr = simple_strtoul(curr, &n, 0);
1755 if ((*n != '\0') || (*curr == '\0')) { 1763 if ((*n != '\0') || (*curr == '\0')) {
1756 printk(KERN_WARNING PFX "Invalid hotmod address" 1764 printk(KERN_WARNING PFX "Invalid hotmod address"
1757 " '%s'\n", curr); 1765 " '%s'\n", curr);
1758 break; 1766 break;
1759 } 1767 }
1760 1768
1761 while (s) { 1769 while (s) {
1762 curr = s; 1770 curr = s;
1763 s = strchr(curr, ','); 1771 s = strchr(curr, ',');
1764 if (s) { 1772 if (s) {
1765 *s = '\0'; 1773 *s = '\0';
1766 s++; 1774 s++;
1767 } 1775 }
1768 o = strchr(curr, '='); 1776 o = strchr(curr, '=');
1769 if (o) { 1777 if (o) {
1770 *o = '\0'; 1778 *o = '\0';
1771 o++; 1779 o++;
1772 } 1780 }
1773 rv = check_hotmod_int_op(curr, o, "rsp", &regspacing); 1781 rv = check_hotmod_int_op(curr, o, "rsp", &regspacing);
1774 if (rv < 0) 1782 if (rv < 0)
1775 goto out; 1783 goto out;
1776 else if (rv) 1784 else if (rv)
1777 continue; 1785 continue;
1778 rv = check_hotmod_int_op(curr, o, "rsi", &regsize); 1786 rv = check_hotmod_int_op(curr, o, "rsi", &regsize);
1779 if (rv < 0) 1787 if (rv < 0)
1780 goto out; 1788 goto out;
1781 else if (rv) 1789 else if (rv)
1782 continue; 1790 continue;
1783 rv = check_hotmod_int_op(curr, o, "rsh", &regshift); 1791 rv = check_hotmod_int_op(curr, o, "rsh", &regshift);
1784 if (rv < 0) 1792 if (rv < 0)
1785 goto out; 1793 goto out;
1786 else if (rv) 1794 else if (rv)
1787 continue; 1795 continue;
1788 rv = check_hotmod_int_op(curr, o, "irq", &irq); 1796 rv = check_hotmod_int_op(curr, o, "irq", &irq);
1789 if (rv < 0) 1797 if (rv < 0)
1790 goto out; 1798 goto out;
1791 else if (rv) 1799 else if (rv)
1792 continue; 1800 continue;
1793 rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb); 1801 rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb);
1794 if (rv < 0) 1802 if (rv < 0)
1795 goto out; 1803 goto out;
1796 else if (rv) 1804 else if (rv)
1797 continue; 1805 continue;
1798 1806
1799 rv = -EINVAL; 1807 rv = -EINVAL;
1800 printk(KERN_WARNING PFX 1808 printk(KERN_WARNING PFX
1801 "Invalid hotmod option '%s'\n", 1809 "Invalid hotmod option '%s'\n",
1802 curr); 1810 curr);
1803 goto out; 1811 goto out;
1804 } 1812 }
1805 1813
1806 if (op == HM_ADD) { 1814 if (op == HM_ADD) {
1807 info = smi_info_alloc(); 1815 info = smi_info_alloc();
1808 if (!info) { 1816 if (!info) {
1809 rv = -ENOMEM; 1817 rv = -ENOMEM;
1810 goto out; 1818 goto out;
1811 } 1819 }
1812 1820
1813 info->addr_source = SI_HOTMOD; 1821 info->addr_source = SI_HOTMOD;
1814 info->si_type = si_type; 1822 info->si_type = si_type;
1815 info->io.addr_data = addr; 1823 info->io.addr_data = addr;
1816 info->io.addr_type = addr_space; 1824 info->io.addr_type = addr_space;
1817 if (addr_space == IPMI_MEM_ADDR_SPACE) 1825 if (addr_space == IPMI_MEM_ADDR_SPACE)
1818 info->io_setup = mem_setup; 1826 info->io_setup = mem_setup;
1819 else 1827 else
1820 info->io_setup = port_setup; 1828 info->io_setup = port_setup;
1821 1829
1822 info->io.addr = NULL; 1830 info->io.addr = NULL;
1823 info->io.regspacing = regspacing; 1831 info->io.regspacing = regspacing;
1824 if (!info->io.regspacing) 1832 if (!info->io.regspacing)
1825 info->io.regspacing = DEFAULT_REGSPACING; 1833 info->io.regspacing = DEFAULT_REGSPACING;
1826 info->io.regsize = regsize; 1834 info->io.regsize = regsize;
1827 if (!info->io.regsize) 1835 if (!info->io.regsize)
1828 info->io.regsize = DEFAULT_REGSPACING; 1836 info->io.regsize = DEFAULT_REGSPACING;
1829 info->io.regshift = regshift; 1837 info->io.regshift = regshift;
1830 info->irq = irq; 1838 info->irq = irq;
1831 if (info->irq) 1839 if (info->irq)
1832 info->irq_setup = std_irq_setup; 1840 info->irq_setup = std_irq_setup;
1833 info->slave_addr = ipmb; 1841 info->slave_addr = ipmb;
1834 1842
1835 if (!add_smi(info)) { 1843 if (!add_smi(info)) {
1836 if (try_smi_init(info)) 1844 if (try_smi_init(info))
1837 cleanup_one_si(info); 1845 cleanup_one_si(info);
1838 } else { 1846 } else {
1839 kfree(info); 1847 kfree(info);
1840 } 1848 }
1841 } else { 1849 } else {
1842 /* remove */ 1850 /* remove */
1843 struct smi_info *e, *tmp_e; 1851 struct smi_info *e, *tmp_e;
1844 1852
1845 mutex_lock(&smi_infos_lock); 1853 mutex_lock(&smi_infos_lock);
1846 list_for_each_entry_safe(e, tmp_e, &smi_infos, link) { 1854 list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
1847 if (e->io.addr_type != addr_space) 1855 if (e->io.addr_type != addr_space)
1848 continue; 1856 continue;
1849 if (e->si_type != si_type) 1857 if (e->si_type != si_type)
1850 continue; 1858 continue;
1851 if (e->io.addr_data == addr) 1859 if (e->io.addr_data == addr)
1852 cleanup_one_si(e); 1860 cleanup_one_si(e);
1853 } 1861 }
1854 mutex_unlock(&smi_infos_lock); 1862 mutex_unlock(&smi_infos_lock);
1855 } 1863 }
1856 } 1864 }
1857 rv = len; 1865 rv = len;
1858 out: 1866 out:
1859 kfree(str); 1867 kfree(str);
1860 return rv; 1868 return rv;
1861 } 1869 }
1862 1870
1863 static void __devinit hardcode_find_bmc(void) 1871 static void __devinit hardcode_find_bmc(void)
1864 { 1872 {
1865 int i; 1873 int i;
1866 struct smi_info *info; 1874 struct smi_info *info;
1867 1875
1868 for (i = 0; i < SI_MAX_PARMS; i++) { 1876 for (i = 0; i < SI_MAX_PARMS; i++) {
1869 if (!ports[i] && !addrs[i]) 1877 if (!ports[i] && !addrs[i])
1870 continue; 1878 continue;
1871 1879
1872 info = smi_info_alloc(); 1880 info = smi_info_alloc();
1873 if (!info) 1881 if (!info)
1874 return; 1882 return;
1875 1883
1876 info->addr_source = SI_HARDCODED; 1884 info->addr_source = SI_HARDCODED;
1877 printk(KERN_INFO PFX "probing via hardcoded address\n"); 1885 printk(KERN_INFO PFX "probing via hardcoded address\n");
1878 1886
1879 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) { 1887 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
1880 info->si_type = SI_KCS; 1888 info->si_type = SI_KCS;
1881 } else if (strcmp(si_type[i], "smic") == 0) { 1889 } else if (strcmp(si_type[i], "smic") == 0) {
1882 info->si_type = SI_SMIC; 1890 info->si_type = SI_SMIC;
1883 } else if (strcmp(si_type[i], "bt") == 0) { 1891 } else if (strcmp(si_type[i], "bt") == 0) {
1884 info->si_type = SI_BT; 1892 info->si_type = SI_BT;
1885 } else { 1893 } else {
1886 printk(KERN_WARNING PFX "Interface type specified " 1894 printk(KERN_WARNING PFX "Interface type specified "
1887 "for interface %d, was invalid: %s\n", 1895 "for interface %d, was invalid: %s\n",
1888 i, si_type[i]); 1896 i, si_type[i]);
1889 kfree(info); 1897 kfree(info);
1890 continue; 1898 continue;
1891 } 1899 }
1892 1900
1893 if (ports[i]) { 1901 if (ports[i]) {
1894 /* An I/O port */ 1902 /* An I/O port */
1895 info->io_setup = port_setup; 1903 info->io_setup = port_setup;
1896 info->io.addr_data = ports[i]; 1904 info->io.addr_data = ports[i];
1897 info->io.addr_type = IPMI_IO_ADDR_SPACE; 1905 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1898 } else if (addrs[i]) { 1906 } else if (addrs[i]) {
1899 /* A memory port */ 1907 /* A memory port */
1900 info->io_setup = mem_setup; 1908 info->io_setup = mem_setup;
1901 info->io.addr_data = addrs[i]; 1909 info->io.addr_data = addrs[i];
1902 info->io.addr_type = IPMI_MEM_ADDR_SPACE; 1910 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1903 } else { 1911 } else {
1904 printk(KERN_WARNING PFX "Interface type specified " 1912 printk(KERN_WARNING PFX "Interface type specified "
1905 "for interface %d, but port and address were " 1913 "for interface %d, but port and address were "
1906 "not set or set to zero.\n", i); 1914 "not set or set to zero.\n", i);
1907 kfree(info); 1915 kfree(info);
1908 continue; 1916 continue;
1909 } 1917 }
1910 1918
1911 info->io.addr = NULL; 1919 info->io.addr = NULL;
1912 info->io.regspacing = regspacings[i]; 1920 info->io.regspacing = regspacings[i];
1913 if (!info->io.regspacing) 1921 if (!info->io.regspacing)
1914 info->io.regspacing = DEFAULT_REGSPACING; 1922 info->io.regspacing = DEFAULT_REGSPACING;
1915 info->io.regsize = regsizes[i]; 1923 info->io.regsize = regsizes[i];
1916 if (!info->io.regsize) 1924 if (!info->io.regsize)
1917 info->io.regsize = DEFAULT_REGSPACING; 1925 info->io.regsize = DEFAULT_REGSPACING;
1918 info->io.regshift = regshifts[i]; 1926 info->io.regshift = regshifts[i];
1919 info->irq = irqs[i]; 1927 info->irq = irqs[i];
1920 if (info->irq) 1928 if (info->irq)
1921 info->irq_setup = std_irq_setup; 1929 info->irq_setup = std_irq_setup;
1922 info->slave_addr = slave_addrs[i]; 1930 info->slave_addr = slave_addrs[i];
1923 1931
1924 if (!add_smi(info)) { 1932 if (!add_smi(info)) {
1925 if (try_smi_init(info)) 1933 if (try_smi_init(info))
1926 cleanup_one_si(info); 1934 cleanup_one_si(info);
1927 } else { 1935 } else {
1928 kfree(info); 1936 kfree(info);
1929 } 1937 }
1930 } 1938 }
1931 } 1939 }
1932 1940
1933 #ifdef CONFIG_ACPI 1941 #ifdef CONFIG_ACPI
1934 1942
1935 #include <linux/acpi.h> 1943 #include <linux/acpi.h>
1936 1944
1937 /* 1945 /*
1938 * Once we get an ACPI failure, we don't try any more, because we go 1946 * Once we get an ACPI failure, we don't try any more, because we go
1939 * through the tables sequentially. Once we don't find a table, there 1947 * through the tables sequentially. Once we don't find a table, there
1940 * are no more. 1948 * are no more.
1941 */ 1949 */
1942 static int acpi_failure; 1950 static int acpi_failure;
1943 1951
1944 /* For GPE-type interrupts. */ 1952 /* For GPE-type interrupts. */
1945 static u32 ipmi_acpi_gpe(acpi_handle gpe_device, 1953 static u32 ipmi_acpi_gpe(acpi_handle gpe_device,
1946 u32 gpe_number, void *context) 1954 u32 gpe_number, void *context)
1947 { 1955 {
1948 struct smi_info *smi_info = context; 1956 struct smi_info *smi_info = context;
1949 unsigned long flags; 1957 unsigned long flags;
1950 #ifdef DEBUG_TIMING 1958 #ifdef DEBUG_TIMING
1951 struct timeval t; 1959 struct timeval t;
1952 #endif 1960 #endif
1953 1961
1954 spin_lock_irqsave(&(smi_info->si_lock), flags); 1962 spin_lock_irqsave(&(smi_info->si_lock), flags);
1955 1963
1956 smi_inc_stat(smi_info, interrupts); 1964 smi_inc_stat(smi_info, interrupts);
1957 1965
1958 #ifdef DEBUG_TIMING 1966 #ifdef DEBUG_TIMING
1959 do_gettimeofday(&t); 1967 do_gettimeofday(&t);
1960 printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec); 1968 printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1961 #endif 1969 #endif
1962 smi_event_handler(smi_info, 0); 1970 smi_event_handler(smi_info, 0);
1963 spin_unlock_irqrestore(&(smi_info->si_lock), flags); 1971 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1964 1972
1965 return ACPI_INTERRUPT_HANDLED; 1973 return ACPI_INTERRUPT_HANDLED;
1966 } 1974 }
1967 1975
1968 static void acpi_gpe_irq_cleanup(struct smi_info *info) 1976 static void acpi_gpe_irq_cleanup(struct smi_info *info)
1969 { 1977 {
1970 if (!info->irq) 1978 if (!info->irq)
1971 return; 1979 return;
1972 1980
1973 acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe); 1981 acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1974 } 1982 }
1975 1983
1976 static int acpi_gpe_irq_setup(struct smi_info *info) 1984 static int acpi_gpe_irq_setup(struct smi_info *info)
1977 { 1985 {
1978 acpi_status status; 1986 acpi_status status;
1979 1987
1980 if (!info->irq) 1988 if (!info->irq)
1981 return 0; 1989 return 0;
1982 1990
1983 /* FIXME - is level triggered right? */ 1991 /* FIXME - is level triggered right? */
1984 status = acpi_install_gpe_handler(NULL, 1992 status = acpi_install_gpe_handler(NULL,
1985 info->irq, 1993 info->irq,
1986 ACPI_GPE_LEVEL_TRIGGERED, 1994 ACPI_GPE_LEVEL_TRIGGERED,
1987 &ipmi_acpi_gpe, 1995 &ipmi_acpi_gpe,
1988 info); 1996 info);
1989 if (status != AE_OK) { 1997 if (status != AE_OK) {
1990 dev_warn(info->dev, "%s unable to claim ACPI GPE %d," 1998 dev_warn(info->dev, "%s unable to claim ACPI GPE %d,"
1991 " running polled\n", DEVICE_NAME, info->irq); 1999 " running polled\n", DEVICE_NAME, info->irq);
1992 info->irq = 0; 2000 info->irq = 0;
1993 return -EINVAL; 2001 return -EINVAL;
1994 } else { 2002 } else {
1995 info->irq_cleanup = acpi_gpe_irq_cleanup; 2003 info->irq_cleanup = acpi_gpe_irq_cleanup;
1996 dev_info(info->dev, "Using ACPI GPE %d\n", info->irq); 2004 dev_info(info->dev, "Using ACPI GPE %d\n", info->irq);
1997 return 0; 2005 return 0;
1998 } 2006 }
1999 } 2007 }
2000 2008
2001 /* 2009 /*
2002 * Defined at 2010 * Defined at
2003 * http://h21007.www2.hp.com/portal/download/files/unprot/hpspmi.pdf 2011 * http://h21007.www2.hp.com/portal/download/files/unprot/hpspmi.pdf
2004 */ 2012 */
2005 struct SPMITable { 2013 struct SPMITable {
2006 s8 Signature[4]; 2014 s8 Signature[4];
2007 u32 Length; 2015 u32 Length;
2008 u8 Revision; 2016 u8 Revision;
2009 u8 Checksum; 2017 u8 Checksum;
2010 s8 OEMID[6]; 2018 s8 OEMID[6];
2011 s8 OEMTableID[8]; 2019 s8 OEMTableID[8];
2012 s8 OEMRevision[4]; 2020 s8 OEMRevision[4];
2013 s8 CreatorID[4]; 2021 s8 CreatorID[4];
2014 s8 CreatorRevision[4]; 2022 s8 CreatorRevision[4];
2015 u8 InterfaceType; 2023 u8 InterfaceType;
2016 u8 IPMIlegacy; 2024 u8 IPMIlegacy;
2017 s16 SpecificationRevision; 2025 s16 SpecificationRevision;
2018 2026
2019 /* 2027 /*
2020 * Bit 0 - SCI interrupt supported 2028 * Bit 0 - SCI interrupt supported
2021 * Bit 1 - I/O APIC/SAPIC 2029 * Bit 1 - I/O APIC/SAPIC
2022 */ 2030 */
2023 u8 InterruptType; 2031 u8 InterruptType;
2024 2032
2025 /* 2033 /*
2026 * If bit 0 of InterruptType is set, then this is the SCI 2034 * If bit 0 of InterruptType is set, then this is the SCI
2027 * interrupt in the GPEx_STS register. 2035 * interrupt in the GPEx_STS register.
2028 */ 2036 */
2029 u8 GPE; 2037 u8 GPE;
2030 2038
2031 s16 Reserved; 2039 s16 Reserved;
2032 2040
2033 /* 2041 /*
2034 * If bit 1 of InterruptType is set, then this is the I/O 2042 * If bit 1 of InterruptType is set, then this is the I/O
2035 * APIC/SAPIC interrupt. 2043 * APIC/SAPIC interrupt.
2036 */ 2044 */
2037 u32 GlobalSystemInterrupt; 2045 u32 GlobalSystemInterrupt;
2038 2046
2039 /* The actual register address. */ 2047 /* The actual register address. */
2040 struct acpi_generic_address addr; 2048 struct acpi_generic_address addr;
2041 2049
2042 u8 UID[4]; 2050 u8 UID[4];
2043 2051
2044 s8 spmi_id[1]; /* A '\0' terminated array starts here. */ 2052 s8 spmi_id[1]; /* A '\0' terminated array starts here. */
2045 }; 2053 };
2046 2054
2047 static int __devinit try_init_spmi(struct SPMITable *spmi) 2055 static int __devinit try_init_spmi(struct SPMITable *spmi)
2048 { 2056 {
2049 struct smi_info *info; 2057 struct smi_info *info;
2050 2058
2051 if (spmi->IPMIlegacy != 1) { 2059 if (spmi->IPMIlegacy != 1) {
2052 printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy); 2060 printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
2053 return -ENODEV; 2061 return -ENODEV;
2054 } 2062 }
2055 2063
2056 info = smi_info_alloc(); 2064 info = smi_info_alloc();
2057 if (!info) { 2065 if (!info) {
2058 printk(KERN_ERR PFX "Could not allocate SI data (3)\n"); 2066 printk(KERN_ERR PFX "Could not allocate SI data (3)\n");
2059 return -ENOMEM; 2067 return -ENOMEM;
2060 } 2068 }
2061 2069
2062 info->addr_source = SI_SPMI; 2070 info->addr_source = SI_SPMI;
2063 printk(KERN_INFO PFX "probing via SPMI\n"); 2071 printk(KERN_INFO PFX "probing via SPMI\n");
2064 2072
2065 /* Figure out the interface type. */ 2073 /* Figure out the interface type. */
2066 switch (spmi->InterfaceType) { 2074 switch (spmi->InterfaceType) {
2067 case 1: /* KCS */ 2075 case 1: /* KCS */
2068 info->si_type = SI_KCS; 2076 info->si_type = SI_KCS;
2069 break; 2077 break;
2070 case 2: /* SMIC */ 2078 case 2: /* SMIC */
2071 info->si_type = SI_SMIC; 2079 info->si_type = SI_SMIC;
2072 break; 2080 break;
2073 case 3: /* BT */ 2081 case 3: /* BT */
2074 info->si_type = SI_BT; 2082 info->si_type = SI_BT;
2075 break; 2083 break;
2076 default: 2084 default:
2077 printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n", 2085 printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n",
2078 spmi->InterfaceType); 2086 spmi->InterfaceType);
2079 kfree(info); 2087 kfree(info);
2080 return -EIO; 2088 return -EIO;
2081 } 2089 }
2082 2090
2083 if (spmi->InterruptType & 1) { 2091 if (spmi->InterruptType & 1) {
2084 /* We've got a GPE interrupt. */ 2092 /* We've got a GPE interrupt. */
2085 info->irq = spmi->GPE; 2093 info->irq = spmi->GPE;
2086 info->irq_setup = acpi_gpe_irq_setup; 2094 info->irq_setup = acpi_gpe_irq_setup;
2087 } else if (spmi->InterruptType & 2) { 2095 } else if (spmi->InterruptType & 2) {
2088 /* We've got an APIC/SAPIC interrupt. */ 2096 /* We've got an APIC/SAPIC interrupt. */
2089 info->irq = spmi->GlobalSystemInterrupt; 2097 info->irq = spmi->GlobalSystemInterrupt;
2090 info->irq_setup = std_irq_setup; 2098 info->irq_setup = std_irq_setup;
2091 } else { 2099 } else {
2092 /* Use the default interrupt setting. */ 2100 /* Use the default interrupt setting. */
2093 info->irq = 0; 2101 info->irq = 0;
2094 info->irq_setup = NULL; 2102 info->irq_setup = NULL;
2095 } 2103 }
2096 2104
2097 if (spmi->addr.bit_width) { 2105 if (spmi->addr.bit_width) {
2098 /* A (hopefully) properly formed register bit width. */ 2106 /* A (hopefully) properly formed register bit width. */
2099 info->io.regspacing = spmi->addr.bit_width / 8; 2107 info->io.regspacing = spmi->addr.bit_width / 8;
2100 } else { 2108 } else {
2101 info->io.regspacing = DEFAULT_REGSPACING; 2109 info->io.regspacing = DEFAULT_REGSPACING;
2102 } 2110 }
2103 info->io.regsize = info->io.regspacing; 2111 info->io.regsize = info->io.regspacing;
2104 info->io.regshift = spmi->addr.bit_offset; 2112 info->io.regshift = spmi->addr.bit_offset;
2105 2113
2106 if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { 2114 if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
2107 info->io_setup = mem_setup; 2115 info->io_setup = mem_setup;
2108 info->io.addr_type = IPMI_MEM_ADDR_SPACE; 2116 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2109 } else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) { 2117 } else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
2110 info->io_setup = port_setup; 2118 info->io_setup = port_setup;
2111 info->io.addr_type = IPMI_IO_ADDR_SPACE; 2119 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2112 } else { 2120 } else {
2113 kfree(info); 2121 kfree(info);
2114 printk(KERN_WARNING PFX "Unknown ACPI I/O Address type\n"); 2122 printk(KERN_WARNING PFX "Unknown ACPI I/O Address type\n");
2115 return -EIO; 2123 return -EIO;
2116 } 2124 }
2117 info->io.addr_data = spmi->addr.address; 2125 info->io.addr_data = spmi->addr.address;
2118 2126
2119 pr_info("ipmi_si: SPMI: %s %#lx regsize %d spacing %d irq %d\n", 2127 pr_info("ipmi_si: SPMI: %s %#lx regsize %d spacing %d irq %d\n",
2120 (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem", 2128 (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
2121 info->io.addr_data, info->io.regsize, info->io.regspacing, 2129 info->io.addr_data, info->io.regsize, info->io.regspacing,
2122 info->irq); 2130 info->irq);
2123 2131
2124 if (add_smi(info)) 2132 if (add_smi(info))
2125 kfree(info); 2133 kfree(info);
2126 2134
2127 return 0; 2135 return 0;
2128 } 2136 }
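/*
 * Standalone illustration (not driver code): how the SPMI InterruptType
 * bits and the generic-address bit width described above translate into
 * an interrupt number and a register spacing.  The struct and values
 * here are invented for the example.
 */
#include <stdio.h>

#define DEFAULT_REGSPACING 1

struct spmi_example {
	unsigned char interrupt_type;	/* bit 0: SCI/GPE, bit 1: I/O APIC/SAPIC */
	unsigned char gpe;		/* used when bit 0 is set */
	unsigned int gsi;		/* used when bit 1 is set */
	unsigned char bit_width;	/* register width from the generic address */
};

static void decode_spmi(const struct spmi_example *s)
{
	int irq;
	int spacing;

	if (s->interrupt_type & 1)	/* GPE-type interrupt */
		irq = s->gpe;
	else if (s->interrupt_type & 2)	/* global system interrupt */
		irq = s->gsi;
	else				/* no interrupt described: poll */
		irq = 0;

	/* A register N bits wide implies a spacing of N/8 bytes; default to 1. */
	spacing = s->bit_width ? s->bit_width / 8 : DEFAULT_REGSPACING;

	printf("irq %d, register spacing %d byte(s)\n", irq, spacing);
}

int main(void)
{
	struct spmi_example s = { .interrupt_type = 1, .gpe = 9, .bit_width = 8 };

	decode_spmi(&s);	/* prints: irq 9, register spacing 1 byte(s) */
	return 0;
}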
2129 2137
2130 static void __devinit spmi_find_bmc(void) 2138 static void __devinit spmi_find_bmc(void)
2131 { 2139 {
2132 acpi_status status; 2140 acpi_status status;
2133 struct SPMITable *spmi; 2141 struct SPMITable *spmi;
2134 int i; 2142 int i;
2135 2143
2136 if (acpi_disabled) 2144 if (acpi_disabled)
2137 return; 2145 return;
2138 2146
2139 if (acpi_failure) 2147 if (acpi_failure)
2140 return; 2148 return;
2141 2149
2142 for (i = 0; ; i++) { 2150 for (i = 0; ; i++) {
2143 status = acpi_get_table(ACPI_SIG_SPMI, i+1, 2151 status = acpi_get_table(ACPI_SIG_SPMI, i+1,
2144 (struct acpi_table_header **)&spmi); 2152 (struct acpi_table_header **)&spmi);
2145 if (status != AE_OK) 2153 if (status != AE_OK)
2146 return; 2154 return;
2147 2155
2148 try_init_spmi(spmi); 2156 try_init_spmi(spmi);
2149 } 2157 }
2150 } 2158 }
2151 2159
2152 static int __devinit ipmi_pnp_probe(struct pnp_dev *dev, 2160 static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
2153 const struct pnp_device_id *dev_id) 2161 const struct pnp_device_id *dev_id)
2154 { 2162 {
2155 struct acpi_device *acpi_dev; 2163 struct acpi_device *acpi_dev;
2156 struct smi_info *info; 2164 struct smi_info *info;
2157 struct resource *res, *res_second; 2165 struct resource *res, *res_second;
2158 acpi_handle handle; 2166 acpi_handle handle;
2159 acpi_status status; 2167 acpi_status status;
2160 unsigned long long tmp; 2168 unsigned long long tmp;
2161 2169
2162 acpi_dev = pnp_acpi_device(dev); 2170 acpi_dev = pnp_acpi_device(dev);
2163 if (!acpi_dev) 2171 if (!acpi_dev)
2164 return -ENODEV; 2172 return -ENODEV;
2165 2173
2166 info = smi_info_alloc(); 2174 info = smi_info_alloc();
2167 if (!info) 2175 if (!info)
2168 return -ENOMEM; 2176 return -ENOMEM;
2169 2177
2170 info->addr_source = SI_ACPI; 2178 info->addr_source = SI_ACPI;
2171 printk(KERN_INFO PFX "probing via ACPI\n"); 2179 printk(KERN_INFO PFX "probing via ACPI\n");
2172 2180
2173 handle = acpi_dev->handle; 2181 handle = acpi_dev->handle;
2174 info->addr_info.acpi_info.acpi_handle = handle; 2182 info->addr_info.acpi_info.acpi_handle = handle;
2175 2183
2176 /* _IFT tells us the interface type: KCS, BT, etc */ 2184 /* _IFT tells us the interface type: KCS, BT, etc */
2177 status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp); 2185 status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp);
2178 if (ACPI_FAILURE(status)) 2186 if (ACPI_FAILURE(status))
2179 goto err_free; 2187 goto err_free;
2180 2188
2181 switch (tmp) { 2189 switch (tmp) {
2182 case 1: 2190 case 1:
2183 info->si_type = SI_KCS; 2191 info->si_type = SI_KCS;
2184 break; 2192 break;
2185 case 2: 2193 case 2:
2186 info->si_type = SI_SMIC; 2194 info->si_type = SI_SMIC;
2187 break; 2195 break;
2188 case 3: 2196 case 3:
2189 info->si_type = SI_BT; 2197 info->si_type = SI_BT;
2190 break; 2198 break;
2191 default: 2199 default:
2192 dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp); 2200 dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp);
2193 goto err_free; 2201 goto err_free;
2194 } 2202 }
2195 2203
2196 res = pnp_get_resource(dev, IORESOURCE_IO, 0); 2204 res = pnp_get_resource(dev, IORESOURCE_IO, 0);
2197 if (res) { 2205 if (res) {
2198 info->io_setup = port_setup; 2206 info->io_setup = port_setup;
2199 info->io.addr_type = IPMI_IO_ADDR_SPACE; 2207 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2200 } else { 2208 } else {
2201 res = pnp_get_resource(dev, IORESOURCE_MEM, 0); 2209 res = pnp_get_resource(dev, IORESOURCE_MEM, 0);
2202 if (res) { 2210 if (res) {
2203 info->io_setup = mem_setup; 2211 info->io_setup = mem_setup;
2204 info->io.addr_type = IPMI_MEM_ADDR_SPACE; 2212 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2205 } 2213 }
2206 } 2214 }
2207 if (!res) { 2215 if (!res) {
2208 dev_err(&dev->dev, "no I/O or memory address\n"); 2216 dev_err(&dev->dev, "no I/O or memory address\n");
2209 goto err_free; 2217 goto err_free;
2210 } 2218 }
2211 info->io.addr_data = res->start; 2219 info->io.addr_data = res->start;
2212 2220
2213 info->io.regspacing = DEFAULT_REGSPACING; 2221 info->io.regspacing = DEFAULT_REGSPACING;
2214 res_second = pnp_get_resource(dev, 2222 res_second = pnp_get_resource(dev,
2215 (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? 2223 (info->io.addr_type == IPMI_IO_ADDR_SPACE) ?
2216 IORESOURCE_IO : IORESOURCE_MEM, 2224 IORESOURCE_IO : IORESOURCE_MEM,
2217 1); 2225 1);
2218 if (res_second) { 2226 if (res_second) {
2219 if (res_second->start > info->io.addr_data) 2227 if (res_second->start > info->io.addr_data)
2220 info->io.regspacing = res_second->start - info->io.addr_data; 2228 info->io.regspacing = res_second->start - info->io.addr_data;
2221 } 2229 }
2222 info->io.regsize = DEFAULT_REGSPACING; 2230 info->io.regsize = DEFAULT_REGSPACING;
2223 info->io.regshift = 0; 2231 info->io.regshift = 0;
2224 2232
2225 /* If _GPE exists, use it; otherwise use standard interrupts */ 2233 /* If _GPE exists, use it; otherwise use standard interrupts */
2226 status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp); 2234 status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
2227 if (ACPI_SUCCESS(status)) { 2235 if (ACPI_SUCCESS(status)) {
2228 info->irq = tmp; 2236 info->irq = tmp;
2229 info->irq_setup = acpi_gpe_irq_setup; 2237 info->irq_setup = acpi_gpe_irq_setup;
2230 } else if (pnp_irq_valid(dev, 0)) { 2238 } else if (pnp_irq_valid(dev, 0)) {
2231 info->irq = pnp_irq(dev, 0); 2239 info->irq = pnp_irq(dev, 0);
2232 info->irq_setup = std_irq_setup; 2240 info->irq_setup = std_irq_setup;
2233 } 2241 }
2234 2242
2235 info->dev = &dev->dev; 2243 info->dev = &dev->dev;
2236 pnp_set_drvdata(dev, info); 2244 pnp_set_drvdata(dev, info);
2237 2245
2238 dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n", 2246 dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n",
2239 res, info->io.regsize, info->io.regspacing, 2247 res, info->io.regsize, info->io.regspacing,
2240 info->irq); 2248 info->irq);
2241 2249
2242 if (add_smi(info)) 2250 if (add_smi(info))
2243 goto err_free; 2251 goto err_free;
2244 2252
2245 return 0; 2253 return 0;
2246 2254
2247 err_free: 2255 err_free:
2248 kfree(info); 2256 kfree(info);
2249 return -EINVAL; 2257 return -EINVAL;
2250 } 2258 }
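/*
 * Standalone illustration (not driver code): ipmi_pnp_probe() above infers
 * the register spacing from the gap between the first and second firmware
 * resources when a second one exists.  The addresses below are invented.
 */
#include <stdio.h>

#define DEFAULT_REGSPACING 1

int main(void)
{
	unsigned long first_start = 0x0ca2;	/* start of the first I/O resource */
	unsigned long second_start = 0x0ca6;	/* start of the optional second one */
	int have_second = 1;
	int regspacing = DEFAULT_REGSPACING;

	if (have_second && second_start > first_start)
		regspacing = second_start - first_start;

	printf("register spacing: %d\n", regspacing);	/* prints 4 */
	return 0;
}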
2251 2259
2252 static void __devexit ipmi_pnp_remove(struct pnp_dev *dev) 2260 static void __devexit ipmi_pnp_remove(struct pnp_dev *dev)
2253 { 2261 {
2254 struct smi_info *info = pnp_get_drvdata(dev); 2262 struct smi_info *info = pnp_get_drvdata(dev);
2255 2263
2256 cleanup_one_si(info); 2264 cleanup_one_si(info);
2257 } 2265 }
2258 2266
2259 static const struct pnp_device_id pnp_dev_table[] = { 2267 static const struct pnp_device_id pnp_dev_table[] = {
2260 {"IPI0001", 0}, 2268 {"IPI0001", 0},
2261 {"", 0}, 2269 {"", 0},
2262 }; 2270 };
2263 2271
2264 static struct pnp_driver ipmi_pnp_driver = { 2272 static struct pnp_driver ipmi_pnp_driver = {
2265 .name = DEVICE_NAME, 2273 .name = DEVICE_NAME,
2266 .probe = ipmi_pnp_probe, 2274 .probe = ipmi_pnp_probe,
2267 .remove = __devexit_p(ipmi_pnp_remove), 2275 .remove = __devexit_p(ipmi_pnp_remove),
2268 .id_table = pnp_dev_table, 2276 .id_table = pnp_dev_table,
2269 }; 2277 };
2270 #endif 2278 #endif
2271 2279
2272 #ifdef CONFIG_DMI 2280 #ifdef CONFIG_DMI
2273 struct dmi_ipmi_data { 2281 struct dmi_ipmi_data {
2274 u8 type; 2282 u8 type;
2275 u8 addr_space; 2283 u8 addr_space;
2276 unsigned long base_addr; 2284 unsigned long base_addr;
2277 u8 irq; 2285 u8 irq;
2278 u8 offset; 2286 u8 offset;
2279 u8 slave_addr; 2287 u8 slave_addr;
2280 }; 2288 };
2281 2289
2282 static int __devinit decode_dmi(const struct dmi_header *dm, 2290 static int __devinit decode_dmi(const struct dmi_header *dm,
2283 struct dmi_ipmi_data *dmi) 2291 struct dmi_ipmi_data *dmi)
2284 { 2292 {
2285 const u8 *data = (const u8 *)dm; 2293 const u8 *data = (const u8 *)dm;
2286 unsigned long base_addr; 2294 unsigned long base_addr;
2287 u8 reg_spacing; 2295 u8 reg_spacing;
2288 u8 len = dm->length; 2296 u8 len = dm->length;
2289 2297
2290 dmi->type = data[4]; 2298 dmi->type = data[4];
2291 2299
2292 memcpy(&base_addr, data+8, sizeof(unsigned long)); 2300 memcpy(&base_addr, data+8, sizeof(unsigned long));
2293 if (len >= 0x11) { 2301 if (len >= 0x11) {
2294 if (base_addr & 1) { 2302 if (base_addr & 1) {
2295 /* I/O */ 2303 /* I/O */
2296 base_addr &= 0xFFFE; 2304 base_addr &= 0xFFFE;
2297 dmi->addr_space = IPMI_IO_ADDR_SPACE; 2305 dmi->addr_space = IPMI_IO_ADDR_SPACE;
2298 } else 2306 } else
2299 /* Memory */ 2307 /* Memory */
2300 dmi->addr_space = IPMI_MEM_ADDR_SPACE; 2308 dmi->addr_space = IPMI_MEM_ADDR_SPACE;
2301 2309
2302 /* If bit 4 of byte 0x10 is set, then the lsb for the address 2310 /* If bit 4 of byte 0x10 is set, then the lsb for the address
2303 is odd. */ 2311 is odd. */
2304 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4); 2312 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
2305 2313
2306 dmi->irq = data[0x11]; 2314 dmi->irq = data[0x11];
2307 2315
2308 /* The top two bits of byte 0x10 hold the register spacing. */ 2316 /* The top two bits of byte 0x10 hold the register spacing. */
2309 reg_spacing = (data[0x10] & 0xC0) >> 6; 2317 reg_spacing = (data[0x10] & 0xC0) >> 6;
2310 switch (reg_spacing) { 2318 switch (reg_spacing) {
2311 case 0x00: /* Byte boundaries */ 2319 case 0x00: /* Byte boundaries */
2312 dmi->offset = 1; 2320 dmi->offset = 1;
2313 break; 2321 break;
2314 case 0x01: /* 32-bit boundaries */ 2322 case 0x01: /* 32-bit boundaries */
2315 dmi->offset = 4; 2323 dmi->offset = 4;
2316 break; 2324 break;
2317 case 0x02: /* 16-byte boundaries */ 2325 case 0x02: /* 16-byte boundaries */
2318 dmi->offset = 16; 2326 dmi->offset = 16;
2319 break; 2327 break;
2320 default: 2328 default:
2321 /* Some other interface, just ignore it. */ 2329 /* Some other interface, just ignore it. */
2322 return -EIO; 2330 return -EIO;
2323 } 2331 }
2324 } else { 2332 } else {
2325 /* Old DMI spec. */ 2333 /* Old DMI spec. */
2326 /* 2334 /*
2327 * Note that technically, the lower bit of the base 2335 * Note that technically, the lower bit of the base
2328 * address should be 1 if the address is I/O and 0 if 2336 * address should be 1 if the address is I/O and 0 if
2329 * the address is in memory. So many systems get that 2337 * the address is in memory. So many systems get that
2330 * wrong (and all that I have seen are I/O) so we just 2338 * wrong (and all that I have seen are I/O) so we just
2331 * ignore that bit and assume I/O. Systems that use 2339 * ignore that bit and assume I/O. Systems that use
2332 * memory should use the newer spec, anyway. 2340 * memory should use the newer spec, anyway.
2333 */ 2341 */
2334 dmi->base_addr = base_addr & 0xfffe; 2342 dmi->base_addr = base_addr & 0xfffe;
2335 dmi->addr_space = IPMI_IO_ADDR_SPACE; 2343 dmi->addr_space = IPMI_IO_ADDR_SPACE;
2336 dmi->offset = 1; 2344 dmi->offset = 1;
2337 } 2345 }
2338 2346
2339 dmi->slave_addr = data[6]; 2347 dmi->slave_addr = data[6];
2340 2348
2341 return 0; 2349 return 0;
2342 } 2350 }
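/*
 * Standalone illustration (not driver code): the two SMBIOS fields that
 * decode_dmi() above pulls from byte 0x10 of an IPMI device record — the
 * register-spacing code in bits 7:6 and the address LSB in bit 4.  The
 * byte value and base address are invented.
 */
#include <stdio.h>

int main(void)
{
	unsigned char byte10 = 0x50;		/* spacing code 01, LSB bit set */
	unsigned long base = 0x0ca2;		/* even part of the base address */
	unsigned long addr;
	int offset;

	addr = base | ((byte10 & 0x10) >> 4);	/* restore the odd low bit */

	switch ((byte10 & 0xC0) >> 6) {
	case 0x00: offset = 1;  break;		/* byte boundaries */
	case 0x01: offset = 4;  break;		/* 32-bit boundaries */
	case 0x02: offset = 16; break;		/* 16-byte boundaries */
	default:   offset = -1; break;		/* anything else: ignore the interface */
	}

	printf("base %#lx, register spacing %d\n", addr, offset);	/* 0xca3, 4 */
	return 0;
}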
2343 2351
2344 static void __devinit try_init_dmi(struct dmi_ipmi_data *ipmi_data) 2352 static void __devinit try_init_dmi(struct dmi_ipmi_data *ipmi_data)
2345 { 2353 {
2346 struct smi_info *info; 2354 struct smi_info *info;
2347 2355
2348 info = smi_info_alloc(); 2356 info = smi_info_alloc();
2349 if (!info) { 2357 if (!info) {
2350 printk(KERN_ERR PFX "Could not allocate SI data\n"); 2358 printk(KERN_ERR PFX "Could not allocate SI data\n");
2351 return; 2359 return;
2352 } 2360 }
2353 2361
2354 info->addr_source = SI_SMBIOS; 2362 info->addr_source = SI_SMBIOS;
2355 printk(KERN_INFO PFX "probing via SMBIOS\n"); 2363 printk(KERN_INFO PFX "probing via SMBIOS\n");
2356 2364
2357 switch (ipmi_data->type) { 2365 switch (ipmi_data->type) {
2358 case 0x01: /* KCS */ 2366 case 0x01: /* KCS */
2359 info->si_type = SI_KCS; 2367 info->si_type = SI_KCS;
2360 break; 2368 break;
2361 case 0x02: /* SMIC */ 2369 case 0x02: /* SMIC */
2362 info->si_type = SI_SMIC; 2370 info->si_type = SI_SMIC;
2363 break; 2371 break;
2364 case 0x03: /* BT */ 2372 case 0x03: /* BT */
2365 info->si_type = SI_BT; 2373 info->si_type = SI_BT;
2366 break; 2374 break;
2367 default: 2375 default:
2368 kfree(info); 2376 kfree(info);
2369 return; 2377 return;
2370 } 2378 }
2371 2379
2372 switch (ipmi_data->addr_space) { 2380 switch (ipmi_data->addr_space) {
2373 case IPMI_MEM_ADDR_SPACE: 2381 case IPMI_MEM_ADDR_SPACE:
2374 info->io_setup = mem_setup; 2382 info->io_setup = mem_setup;
2375 info->io.addr_type = IPMI_MEM_ADDR_SPACE; 2383 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2376 break; 2384 break;
2377 2385
2378 case IPMI_IO_ADDR_SPACE: 2386 case IPMI_IO_ADDR_SPACE:
2379 info->io_setup = port_setup; 2387 info->io_setup = port_setup;
2380 info->io.addr_type = IPMI_IO_ADDR_SPACE; 2388 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2381 break; 2389 break;
2382 2390
2383 default: 2391 default:
2384 kfree(info); 2392 kfree(info);
2385 printk(KERN_WARNING PFX "Unknown SMBIOS I/O Address type: %d\n", 2393 printk(KERN_WARNING PFX "Unknown SMBIOS I/O Address type: %d\n",
2386 ipmi_data->addr_space); 2394 ipmi_data->addr_space);
2387 return; 2395 return;
2388 } 2396 }
2389 info->io.addr_data = ipmi_data->base_addr; 2397 info->io.addr_data = ipmi_data->base_addr;
2390 2398
2391 info->io.regspacing = ipmi_data->offset; 2399 info->io.regspacing = ipmi_data->offset;
2392 if (!info->io.regspacing) 2400 if (!info->io.regspacing)
2393 info->io.regspacing = DEFAULT_REGSPACING; 2401 info->io.regspacing = DEFAULT_REGSPACING;
2394 info->io.regsize = DEFAULT_REGSPACING; 2402 info->io.regsize = DEFAULT_REGSPACING;
2395 info->io.regshift = 0; 2403 info->io.regshift = 0;
2396 2404
2397 info->slave_addr = ipmi_data->slave_addr; 2405 info->slave_addr = ipmi_data->slave_addr;
2398 2406
2399 info->irq = ipmi_data->irq; 2407 info->irq = ipmi_data->irq;
2400 if (info->irq) 2408 if (info->irq)
2401 info->irq_setup = std_irq_setup; 2409 info->irq_setup = std_irq_setup;
2402 2410
2403 pr_info("ipmi_si: SMBIOS: %s %#lx regsize %d spacing %d irq %d\n", 2411 pr_info("ipmi_si: SMBIOS: %s %#lx regsize %d spacing %d irq %d\n",
2404 (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem", 2412 (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
2405 info->io.addr_data, info->io.regsize, info->io.regspacing, 2413 info->io.addr_data, info->io.regsize, info->io.regspacing,
2406 info->irq); 2414 info->irq);
2407 2415
2408 if (add_smi(info)) 2416 if (add_smi(info))
2409 kfree(info); 2417 kfree(info);
2410 } 2418 }
2411 2419
2412 static void __devinit dmi_find_bmc(void) 2420 static void __devinit dmi_find_bmc(void)
2413 { 2421 {
2414 const struct dmi_device *dev = NULL; 2422 const struct dmi_device *dev = NULL;
2415 struct dmi_ipmi_data data; 2423 struct dmi_ipmi_data data;
2416 int rv; 2424 int rv;
2417 2425
2418 while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) { 2426 while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
2419 memset(&data, 0, sizeof(data)); 2427 memset(&data, 0, sizeof(data));
2420 rv = decode_dmi((const struct dmi_header *) dev->device_data, 2428 rv = decode_dmi((const struct dmi_header *) dev->device_data,
2421 &data); 2429 &data);
2422 if (!rv) 2430 if (!rv)
2423 try_init_dmi(&data); 2431 try_init_dmi(&data);
2424 } 2432 }
2425 } 2433 }
2426 #endif /* CONFIG_DMI */ 2434 #endif /* CONFIG_DMI */
2427 2435
2428 #ifdef CONFIG_PCI 2436 #ifdef CONFIG_PCI
2429 2437
2430 #define PCI_ERMC_CLASSCODE 0x0C0700 2438 #define PCI_ERMC_CLASSCODE 0x0C0700
2431 #define PCI_ERMC_CLASSCODE_MASK 0xffffff00 2439 #define PCI_ERMC_CLASSCODE_MASK 0xffffff00
2432 #define PCI_ERMC_CLASSCODE_TYPE_MASK 0xff 2440 #define PCI_ERMC_CLASSCODE_TYPE_MASK 0xff
2433 #define PCI_ERMC_CLASSCODE_TYPE_SMIC 0x00 2441 #define PCI_ERMC_CLASSCODE_TYPE_SMIC 0x00
2434 #define PCI_ERMC_CLASSCODE_TYPE_KCS 0x01 2442 #define PCI_ERMC_CLASSCODE_TYPE_KCS 0x01
2435 #define PCI_ERMC_CLASSCODE_TYPE_BT 0x02 2443 #define PCI_ERMC_CLASSCODE_TYPE_BT 0x02
2436 2444
2437 #define PCI_HP_VENDOR_ID 0x103C 2445 #define PCI_HP_VENDOR_ID 0x103C
2438 #define PCI_MMC_DEVICE_ID 0x121A 2446 #define PCI_MMC_DEVICE_ID 0x121A
2439 #define PCI_MMC_ADDR_CW 0x10 2447 #define PCI_MMC_ADDR_CW 0x10
2440 2448
2441 static void ipmi_pci_cleanup(struct smi_info *info) 2449 static void ipmi_pci_cleanup(struct smi_info *info)
2442 { 2450 {
2443 struct pci_dev *pdev = info->addr_source_data; 2451 struct pci_dev *pdev = info->addr_source_data;
2444 2452
2445 pci_disable_device(pdev); 2453 pci_disable_device(pdev);
2446 } 2454 }
2447 2455
2448 static int __devinit ipmi_pci_probe(struct pci_dev *pdev, 2456 static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2449 const struct pci_device_id *ent) 2457 const struct pci_device_id *ent)
2450 { 2458 {
2451 int rv; 2459 int rv;
2452 int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK; 2460 int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
2453 struct smi_info *info; 2461 struct smi_info *info;
2454 2462
2455 info = smi_info_alloc(); 2463 info = smi_info_alloc();
2456 if (!info) 2464 if (!info)
2457 return -ENOMEM; 2465 return -ENOMEM;
2458 2466
2459 info->addr_source = SI_PCI; 2467 info->addr_source = SI_PCI;
2460 dev_info(&pdev->dev, "probing via PCI"); 2468 dev_info(&pdev->dev, "probing via PCI");
2461 2469
2462 switch (class_type) { 2470 switch (class_type) {
2463 case PCI_ERMC_CLASSCODE_TYPE_SMIC: 2471 case PCI_ERMC_CLASSCODE_TYPE_SMIC:
2464 info->si_type = SI_SMIC; 2472 info->si_type = SI_SMIC;
2465 break; 2473 break;
2466 2474
2467 case PCI_ERMC_CLASSCODE_TYPE_KCS: 2475 case PCI_ERMC_CLASSCODE_TYPE_KCS:
2468 info->si_type = SI_KCS; 2476 info->si_type = SI_KCS;
2469 break; 2477 break;
2470 2478
2471 case PCI_ERMC_CLASSCODE_TYPE_BT: 2479 case PCI_ERMC_CLASSCODE_TYPE_BT:
2472 info->si_type = SI_BT; 2480 info->si_type = SI_BT;
2473 break; 2481 break;
2474 2482
2475 default: 2483 default:
2476 kfree(info); 2484 kfree(info);
2477 dev_info(&pdev->dev, "Unknown IPMI type: %d\n", class_type); 2485 dev_info(&pdev->dev, "Unknown IPMI type: %d\n", class_type);
2478 return -ENOMEM; 2486 return -ENOMEM;
2479 } 2487 }
2480 2488
2481 rv = pci_enable_device(pdev); 2489 rv = pci_enable_device(pdev);
2482 if (rv) { 2490 if (rv) {
2483 dev_err(&pdev->dev, "couldn't enable PCI device\n"); 2491 dev_err(&pdev->dev, "couldn't enable PCI device\n");
2484 kfree(info); 2492 kfree(info);
2485 return rv; 2493 return rv;
2486 } 2494 }
2487 2495
2488 info->addr_source_cleanup = ipmi_pci_cleanup; 2496 info->addr_source_cleanup = ipmi_pci_cleanup;
2489 info->addr_source_data = pdev; 2497 info->addr_source_data = pdev;
2490 2498
2491 if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) { 2499 if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
2492 info->io_setup = port_setup; 2500 info->io_setup = port_setup;
2493 info->io.addr_type = IPMI_IO_ADDR_SPACE; 2501 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2494 } else { 2502 } else {
2495 info->io_setup = mem_setup; 2503 info->io_setup = mem_setup;
2496 info->io.addr_type = IPMI_MEM_ADDR_SPACE; 2504 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2497 } 2505 }
2498 info->io.addr_data = pci_resource_start(pdev, 0); 2506 info->io.addr_data = pci_resource_start(pdev, 0);
2499 2507
2500 info->io.regspacing = DEFAULT_REGSPACING; 2508 info->io.regspacing = DEFAULT_REGSPACING;
2501 info->io.regsize = DEFAULT_REGSPACING; 2509 info->io.regsize = DEFAULT_REGSPACING;
2502 info->io.regshift = 0; 2510 info->io.regshift = 0;
2503 2511
2504 info->irq = pdev->irq; 2512 info->irq = pdev->irq;
2505 if (info->irq) 2513 if (info->irq)
2506 info->irq_setup = std_irq_setup; 2514 info->irq_setup = std_irq_setup;
2507 2515
2508 info->dev = &pdev->dev; 2516 info->dev = &pdev->dev;
2509 pci_set_drvdata(pdev, info); 2517 pci_set_drvdata(pdev, info);
2510 2518
2511 dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n", 2519 dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n",
2512 &pdev->resource[0], info->io.regsize, info->io.regspacing, 2520 &pdev->resource[0], info->io.regsize, info->io.regspacing,
2513 info->irq); 2521 info->irq);
2514 2522
2515 if (add_smi(info)) 2523 if (add_smi(info))
2516 kfree(info); 2524 kfree(info);
2517 2525
2518 return 0; 2526 return 0;
2519 } 2527 }
2520 2528
2521 static void __devexit ipmi_pci_remove(struct pci_dev *pdev) 2529 static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
2522 { 2530 {
2523 struct smi_info *info = pci_get_drvdata(pdev); 2531 struct smi_info *info = pci_get_drvdata(pdev);
2524 cleanup_one_si(info); 2532 cleanup_one_si(info);
2525 } 2533 }
2526 2534
2527 #ifdef CONFIG_PM 2535 #ifdef CONFIG_PM
2528 static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state) 2536 static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2529 { 2537 {
2530 return 0; 2538 return 0;
2531 } 2539 }
2532 2540
2533 static int ipmi_pci_resume(struct pci_dev *pdev) 2541 static int ipmi_pci_resume(struct pci_dev *pdev)
2534 { 2542 {
2535 return 0; 2543 return 0;
2536 } 2544 }
2537 #endif 2545 #endif
2538 2546
2539 static struct pci_device_id ipmi_pci_devices[] = { 2547 static struct pci_device_id ipmi_pci_devices[] = {
2540 { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) }, 2548 { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
2541 { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) }, 2549 { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
2542 { 0, } 2550 { 0, }
2543 }; 2551 };
2544 MODULE_DEVICE_TABLE(pci, ipmi_pci_devices); 2552 MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
2545 2553
2546 static struct pci_driver ipmi_pci_driver = { 2554 static struct pci_driver ipmi_pci_driver = {
2547 .name = DEVICE_NAME, 2555 .name = DEVICE_NAME,
2548 .id_table = ipmi_pci_devices, 2556 .id_table = ipmi_pci_devices,
2549 .probe = ipmi_pci_probe, 2557 .probe = ipmi_pci_probe,
2550 .remove = __devexit_p(ipmi_pci_remove), 2558 .remove = __devexit_p(ipmi_pci_remove),
2551 #ifdef CONFIG_PM 2559 #ifdef CONFIG_PM
2552 .suspend = ipmi_pci_suspend, 2560 .suspend = ipmi_pci_suspend,
2553 .resume = ipmi_pci_resume, 2561 .resume = ipmi_pci_resume,
2554 #endif 2562 #endif
2555 }; 2563 };
2556 #endif /* CONFIG_PCI */ 2564 #endif /* CONFIG_PCI */
2557 2565
2558 2566
2559 #ifdef CONFIG_PPC_OF 2567 #ifdef CONFIG_PPC_OF
2560 static int __devinit ipmi_of_probe(struct platform_device *dev, 2568 static int __devinit ipmi_of_probe(struct platform_device *dev,
2561 const struct of_device_id *match) 2569 const struct of_device_id *match)
2562 { 2570 {
2563 struct smi_info *info; 2571 struct smi_info *info;
2564 struct resource resource; 2572 struct resource resource;
2565 const __be32 *regsize, *regspacing, *regshift; 2573 const __be32 *regsize, *regspacing, *regshift;
2566 struct device_node *np = dev->dev.of_node; 2574 struct device_node *np = dev->dev.of_node;
2567 int ret; 2575 int ret;
2568 int proplen; 2576 int proplen;
2569 2577
2570 dev_info(&dev->dev, "probing via device tree\n"); 2578 dev_info(&dev->dev, "probing via device tree\n");
2571 2579
2572 ret = of_address_to_resource(np, 0, &resource); 2580 ret = of_address_to_resource(np, 0, &resource);
2573 if (ret) { 2581 if (ret) {
2574 dev_warn(&dev->dev, PFX "invalid address from OF\n"); 2582 dev_warn(&dev->dev, PFX "invalid address from OF\n");
2575 return ret; 2583 return ret;
2576 } 2584 }
2577 2585
2578 regsize = of_get_property(np, "reg-size", &proplen); 2586 regsize = of_get_property(np, "reg-size", &proplen);
2579 if (regsize && proplen != 4) { 2587 if (regsize && proplen != 4) {
2580 dev_warn(&dev->dev, PFX "invalid regsize from OF\n"); 2588 dev_warn(&dev->dev, PFX "invalid regsize from OF\n");
2581 return -EINVAL; 2589 return -EINVAL;
2582 } 2590 }
2583 2591
2584 regspacing = of_get_property(np, "reg-spacing", &proplen); 2592 regspacing = of_get_property(np, "reg-spacing", &proplen);
2585 if (regspacing && proplen != 4) { 2593 if (regspacing && proplen != 4) {
2586 dev_warn(&dev->dev, PFX "invalid regspacing from OF\n"); 2594 dev_warn(&dev->dev, PFX "invalid regspacing from OF\n");
2587 return -EINVAL; 2595 return -EINVAL;
2588 } 2596 }
2589 2597
2590 regshift = of_get_property(np, "reg-shift", &proplen); 2598 regshift = of_get_property(np, "reg-shift", &proplen);
2591 if (regshift && proplen != 4) { 2599 if (regshift && proplen != 4) {
2592 dev_warn(&dev->dev, PFX "invalid regshift from OF\n"); 2600 dev_warn(&dev->dev, PFX "invalid regshift from OF\n");
2593 return -EINVAL; 2601 return -EINVAL;
2594 } 2602 }
2595 2603
2596 info = smi_info_alloc(); 2604 info = smi_info_alloc();
2597 2605
2598 if (!info) { 2606 if (!info) {
2599 dev_err(&dev->dev, 2607 dev_err(&dev->dev,
2600 "could not allocate memory for OF probe\n"); 2608 "could not allocate memory for OF probe\n");
2601 return -ENOMEM; 2609 return -ENOMEM;
2602 } 2610 }
2603 2611
2604 info->si_type = (enum si_type) match->data; 2612 info->si_type = (enum si_type) match->data;
2605 info->addr_source = SI_DEVICETREE; 2613 info->addr_source = SI_DEVICETREE;
2606 info->irq_setup = std_irq_setup; 2614 info->irq_setup = std_irq_setup;
2607 2615
2608 if (resource.flags & IORESOURCE_IO) { 2616 if (resource.flags & IORESOURCE_IO) {
2609 info->io_setup = port_setup; 2617 info->io_setup = port_setup;
2610 info->io.addr_type = IPMI_IO_ADDR_SPACE; 2618 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2611 } else { 2619 } else {
2612 info->io_setup = mem_setup; 2620 info->io_setup = mem_setup;
2613 info->io.addr_type = IPMI_MEM_ADDR_SPACE; 2621 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2614 } 2622 }
2615 2623
2616 info->io.addr_data = resource.start; 2624 info->io.addr_data = resource.start;
2617 2625
2618 info->io.regsize = regsize ? be32_to_cpup(regsize) : DEFAULT_REGSIZE; 2626 info->io.regsize = regsize ? be32_to_cpup(regsize) : DEFAULT_REGSIZE;
2619 info->io.regspacing = regspacing ? be32_to_cpup(regspacing) : DEFAULT_REGSPACING; 2627 info->io.regspacing = regspacing ? be32_to_cpup(regspacing) : DEFAULT_REGSPACING;
2620 info->io.regshift = regshift ? be32_to_cpup(regshift) : 0; 2628 info->io.regshift = regshift ? be32_to_cpup(regshift) : 0;
2621 2629
2622 info->irq = irq_of_parse_and_map(dev->dev.of_node, 0); 2630 info->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
2623 info->dev = &dev->dev; 2631 info->dev = &dev->dev;
2624 2632
2625 dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n", 2633 dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n",
2626 info->io.addr_data, info->io.regsize, info->io.regspacing, 2634 info->io.addr_data, info->io.regsize, info->io.regspacing,
2627 info->irq); 2635 info->irq);
2628 2636
2629 dev_set_drvdata(&dev->dev, info); 2637 dev_set_drvdata(&dev->dev, info);
2630 2638
2631 if (add_smi(info)) { 2639 if (add_smi(info)) {
2632 kfree(info); 2640 kfree(info);
2633 return -EBUSY; 2641 return -EBUSY;
2634 } 2642 }
2635 2643
2636 return 0; 2644 return 0;
2637 } 2645 }
2638 2646
2639 static int __devexit ipmi_of_remove(struct platform_device *dev) 2647 static int __devexit ipmi_of_remove(struct platform_device *dev)
2640 { 2648 {
2641 cleanup_one_si(dev_get_drvdata(&dev->dev)); 2649 cleanup_one_si(dev_get_drvdata(&dev->dev));
2642 return 0; 2650 return 0;
2643 } 2651 }
2644 2652
2645 static struct of_device_id ipmi_match[] = 2653 static struct of_device_id ipmi_match[] =
2646 { 2654 {
2647 { .type = "ipmi", .compatible = "ipmi-kcs", 2655 { .type = "ipmi", .compatible = "ipmi-kcs",
2648 .data = (void *)(unsigned long) SI_KCS }, 2656 .data = (void *)(unsigned long) SI_KCS },
2649 { .type = "ipmi", .compatible = "ipmi-smic", 2657 { .type = "ipmi", .compatible = "ipmi-smic",
2650 .data = (void *)(unsigned long) SI_SMIC }, 2658 .data = (void *)(unsigned long) SI_SMIC },
2651 { .type = "ipmi", .compatible = "ipmi-bt", 2659 { .type = "ipmi", .compatible = "ipmi-bt",
2652 .data = (void *)(unsigned long) SI_BT }, 2660 .data = (void *)(unsigned long) SI_BT },
2653 {}, 2661 {},
2654 }; 2662 };
2655 2663
2656 static struct of_platform_driver ipmi_of_platform_driver = { 2664 static struct of_platform_driver ipmi_of_platform_driver = {
2657 .driver = { 2665 .driver = {
2658 .name = "ipmi", 2666 .name = "ipmi",
2659 .owner = THIS_MODULE, 2667 .owner = THIS_MODULE,
2660 .of_match_table = ipmi_match, 2668 .of_match_table = ipmi_match,
2661 }, 2669 },
2662 .probe = ipmi_of_probe, 2670 .probe = ipmi_of_probe,
2663 .remove = __devexit_p(ipmi_of_remove), 2671 .remove = __devexit_p(ipmi_of_remove),
2664 }; 2672 };
2665 #endif /* CONFIG_PPC_OF */ 2673 #endif /* CONFIG_PPC_OF */
2666 2674
2667 static int wait_for_msg_done(struct smi_info *smi_info) 2675 static int wait_for_msg_done(struct smi_info *smi_info)
2668 { 2676 {
2669 enum si_sm_result smi_result; 2677 enum si_sm_result smi_result;
2670 2678
2671 smi_result = smi_info->handlers->event(smi_info->si_sm, 0); 2679 smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
2672 for (;;) { 2680 for (;;) {
2673 if (smi_result == SI_SM_CALL_WITH_DELAY || 2681 if (smi_result == SI_SM_CALL_WITH_DELAY ||
2674 smi_result == SI_SM_CALL_WITH_TICK_DELAY) { 2682 smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
2675 schedule_timeout_uninterruptible(1); 2683 schedule_timeout_uninterruptible(1);
2676 smi_result = smi_info->handlers->event( 2684 smi_result = smi_info->handlers->event(
2677 smi_info->si_sm, 100); 2685 smi_info->si_sm, 100);
2678 } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) { 2686 } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
2679 smi_result = smi_info->handlers->event( 2687 smi_result = smi_info->handlers->event(
2680 smi_info->si_sm, 0); 2688 smi_info->si_sm, 0);
2681 } else 2689 } else
2682 break; 2690 break;
2683 } 2691 }
2684 if (smi_result == SI_SM_HOSED) 2692 if (smi_result == SI_SM_HOSED)
2685 /* 2693 /*
2686 * We couldn't get the state machine to run, so whatever's at 2694 * We couldn't get the state machine to run, so whatever's at
2687 * the port is probably not an IPMI SMI interface. 2695 * the port is probably not an IPMI SMI interface.
2688 */ 2696 */
2689 return -ENODEV; 2697 return -ENODEV;
2690 2698
2691 return 0; 2699 return 0;
2692 } 2700 }
2693 2701
2694 static int try_get_dev_id(struct smi_info *smi_info) 2702 static int try_get_dev_id(struct smi_info *smi_info)
2695 { 2703 {
2696 unsigned char msg[2]; 2704 unsigned char msg[2];
2697 unsigned char *resp; 2705 unsigned char *resp;
2698 unsigned long resp_len; 2706 unsigned long resp_len;
2699 int rv = 0; 2707 int rv = 0;
2700 2708
2701 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); 2709 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
2702 if (!resp) 2710 if (!resp)
2703 return -ENOMEM; 2711 return -ENOMEM;
2704 2712
2705 /* 2713 /*
2706 * Do a Get Device ID command, since it comes back with some 2714 * Do a Get Device ID command, since it comes back with some
2707 * useful info. 2715 * useful info.
2708 */ 2716 */
2709 msg[0] = IPMI_NETFN_APP_REQUEST << 2; 2717 msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2710 msg[1] = IPMI_GET_DEVICE_ID_CMD; 2718 msg[1] = IPMI_GET_DEVICE_ID_CMD;
2711 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); 2719 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
2712 2720
2713 rv = wait_for_msg_done(smi_info); 2721 rv = wait_for_msg_done(smi_info);
2714 if (rv) 2722 if (rv)
2715 goto out; 2723 goto out;
2716 2724
2717 resp_len = smi_info->handlers->get_result(smi_info->si_sm, 2725 resp_len = smi_info->handlers->get_result(smi_info->si_sm,
2718 resp, IPMI_MAX_MSG_LENGTH); 2726 resp, IPMI_MAX_MSG_LENGTH);
2719 2727
2720 /* Check and record info from the get device id, in case we need it. */ 2728 /* Check and record info from the get device id, in case we need it. */
2721 rv = ipmi_demangle_device_id(resp, resp_len, &smi_info->device_id); 2729 rv = ipmi_demangle_device_id(resp, resp_len, &smi_info->device_id);
2722 2730
2723 out: 2731 out:
2724 kfree(resp); 2732 kfree(resp);
2725 return rv; 2733 return rv;
2726 } 2734 }
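/*
 * Standalone illustration (not driver code): the framing of the two-byte
 * Get Device ID request built in try_get_dev_id() above, and the header of
 * the matching response.  The numeric constants follow the driver's message
 * definitions (netfn 0x06, command 0x01).
 */
#include <stdio.h>

#define IPMI_NETFN_APP_REQUEST	0x06
#define IPMI_GET_DEVICE_ID_CMD	0x01

int main(void)
{
	unsigned char req[2];
	unsigned char rsp0, rsp1;

	/* Request: network function in the top six bits, LUN 0, then the command. */
	req[0] = IPMI_NETFN_APP_REQUEST << 2;
	req[1] = IPMI_GET_DEVICE_ID_CMD;

	/* The response carries the request netfn + 1 and echoes the command. */
	rsp0 = (IPMI_NETFN_APP_REQUEST | 1) << 2;
	rsp1 = IPMI_GET_DEVICE_ID_CMD;

	printf("request  %02x %02x\n", req[0], req[1]);		/* 18 01 */
	printf("response %02x %02x ...\n", rsp0, rsp1);		/* 1c 01 ... */
	return 0;
}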
2727 2735
2728 static int try_enable_event_buffer(struct smi_info *smi_info) 2736 static int try_enable_event_buffer(struct smi_info *smi_info)
2729 { 2737 {
2730 unsigned char msg[3]; 2738 unsigned char msg[3];
2731 unsigned char *resp; 2739 unsigned char *resp;
2732 unsigned long resp_len; 2740 unsigned long resp_len;
2733 int rv = 0; 2741 int rv = 0;
2734 2742
2735 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); 2743 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
2736 if (!resp) 2744 if (!resp)
2737 return -ENOMEM; 2745 return -ENOMEM;
2738 2746
2739 msg[0] = IPMI_NETFN_APP_REQUEST << 2; 2747 msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2740 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; 2748 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
2741 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); 2749 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
2742 2750
2743 rv = wait_for_msg_done(smi_info); 2751 rv = wait_for_msg_done(smi_info);
2744 if (rv) { 2752 if (rv) {
2745 printk(KERN_WARNING PFX "Error getting response from get" 2753 printk(KERN_WARNING PFX "Error getting response from get"
2746 " global enables command, the event buffer is not" 2754 " global enables command, the event buffer is not"
2747 " enabled.\n"); 2755 " enabled.\n");
2748 goto out; 2756 goto out;
2749 } 2757 }
2750 2758
2751 resp_len = smi_info->handlers->get_result(smi_info->si_sm, 2759 resp_len = smi_info->handlers->get_result(smi_info->si_sm,
2752 resp, IPMI_MAX_MSG_LENGTH); 2760 resp, IPMI_MAX_MSG_LENGTH);
2753 2761
2754 if (resp_len < 4 || 2762 if (resp_len < 4 ||
2755 resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || 2763 resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
2756 resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD || 2764 resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
2757 resp[2] != 0) { 2765 resp[2] != 0) {
2758 printk(KERN_WARNING PFX "Invalid return from get global" 2766 printk(KERN_WARNING PFX "Invalid return from get global"
2759 " enables command, cannot enable the event buffer.\n"); 2767 " enables command, cannot enable the event buffer.\n");
2760 rv = -EINVAL; 2768 rv = -EINVAL;
2761 goto out; 2769 goto out;
2762 } 2770 }
2763 2771
2764 if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) 2772 if (resp[3] & IPMI_BMC_EVT_MSG_BUFF)
2765 /* buffer is already enabled, nothing to do. */ 2773 /* buffer is already enabled, nothing to do. */
2766 goto out; 2774 goto out;
2767 2775
2768 msg[0] = IPMI_NETFN_APP_REQUEST << 2; 2776 msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2769 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; 2777 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
2770 msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF; 2778 msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
2771 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); 2779 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
2772 2780
2773 rv = wait_for_msg_done(smi_info); 2781 rv = wait_for_msg_done(smi_info);
2774 if (rv) { 2782 if (rv) {
2775 printk(KERN_WARNING PFX "Error getting response from set" 2783 printk(KERN_WARNING PFX "Error getting response from set"
2776 " global, enables command, the event buffer is not" 2784 " global, enables command, the event buffer is not"
2777 " enabled.\n"); 2785 " enabled.\n");
2778 goto out; 2786 goto out;
2779 } 2787 }
2780 2788
2781 resp_len = smi_info->handlers->get_result(smi_info->si_sm, 2789 resp_len = smi_info->handlers->get_result(smi_info->si_sm,
2782 resp, IPMI_MAX_MSG_LENGTH); 2790 resp, IPMI_MAX_MSG_LENGTH);
2783 2791
2784 if (resp_len < 3 || 2792 if (resp_len < 3 ||
2785 resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || 2793 resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
2786 resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) { 2794 resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
2787 printk(KERN_WARNING PFX "Invalid return from get global," 2795 printk(KERN_WARNING PFX "Invalid return from get global,"
2788 "enables command, not enable the event buffer.\n"); 2796 "enables command, not enable the event buffer.\n");
2789 rv = -EINVAL; 2797 rv = -EINVAL;
2790 goto out; 2798 goto out;
2791 } 2799 }
2792 2800
2793 if (resp[2] != 0) 2801 if (resp[2] != 0)
2794 /* 2802 /*
2795 * An error when setting the event buffer bit means 2803 * An error when setting the event buffer bit means
2796 * that the event buffer is not supported. 2804 * that the event buffer is not supported.
2797 */ 2805 */
2798 rv = -ENOENT; 2806 rv = -ENOENT;
2799 out: 2807 out:
2800 kfree(resp); 2808 kfree(resp);
2801 return rv; 2809 return rv;
2802 } 2810 }
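/*
 * Standalone illustration (not driver code): the read-modify-write that
 * try_enable_event_buffer() above performs on the BMC global-enables byte.
 * The starting value is invented; the bit value matches the driver's
 * IPMI_BMC_EVT_MSG_BUFF definition.
 */
#include <stdio.h>

#define IPMI_BMC_EVT_MSG_BUFF	0x08	/* event message buffer enable bit */

int main(void)
{
	unsigned char enables = 0x01;	/* as if "get global enables" returned this */

	if (enables & IPMI_BMC_EVT_MSG_BUFF) {
		printf("event buffer already enabled, nothing to do\n");
		return 0;
	}

	/* Otherwise OR the bit in and send the result back via "set global enables". */
	enables |= IPMI_BMC_EVT_MSG_BUFF;
	printf("new global enables byte: %#x\n", enables);	/* 0x9 */
	return 0;
}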
2803 2811
2804 static int type_file_read_proc(char *page, char **start, off_t off, 2812 static int type_file_read_proc(char *page, char **start, off_t off,
2805 int count, int *eof, void *data) 2813 int count, int *eof, void *data)
2806 { 2814 {
2807 struct smi_info *smi = data; 2815 struct smi_info *smi = data;
2808 2816
2809 return sprintf(page, "%s\n", si_to_str[smi->si_type]); 2817 return sprintf(page, "%s\n", si_to_str[smi->si_type]);
2810 } 2818 }
2811 2819
2812 static int stat_file_read_proc(char *page, char **start, off_t off, 2820 static int stat_file_read_proc(char *page, char **start, off_t off,
2813 int count, int *eof, void *data) 2821 int count, int *eof, void *data)
2814 { 2822 {
2815 char *out = (char *) page; 2823 char *out = (char *) page;
2816 struct smi_info *smi = data; 2824 struct smi_info *smi = data;
2817 2825
2818 out += sprintf(out, "interrupts_enabled: %d\n", 2826 out += sprintf(out, "interrupts_enabled: %d\n",
2819 smi->irq && !smi->interrupt_disabled); 2827 smi->irq && !smi->interrupt_disabled);
2820 out += sprintf(out, "short_timeouts: %u\n", 2828 out += sprintf(out, "short_timeouts: %u\n",
2821 smi_get_stat(smi, short_timeouts)); 2829 smi_get_stat(smi, short_timeouts));
2822 out += sprintf(out, "long_timeouts: %u\n", 2830 out += sprintf(out, "long_timeouts: %u\n",
2823 smi_get_stat(smi, long_timeouts)); 2831 smi_get_stat(smi, long_timeouts));
2824 out += sprintf(out, "idles: %u\n", 2832 out += sprintf(out, "idles: %u\n",
2825 smi_get_stat(smi, idles)); 2833 smi_get_stat(smi, idles));
2826 out += sprintf(out, "interrupts: %u\n", 2834 out += sprintf(out, "interrupts: %u\n",
2827 smi_get_stat(smi, interrupts)); 2835 smi_get_stat(smi, interrupts));
2828 out += sprintf(out, "attentions: %u\n", 2836 out += sprintf(out, "attentions: %u\n",
2829 smi_get_stat(smi, attentions)); 2837 smi_get_stat(smi, attentions));
2830 out += sprintf(out, "flag_fetches: %u\n", 2838 out += sprintf(out, "flag_fetches: %u\n",
2831 smi_get_stat(smi, flag_fetches)); 2839 smi_get_stat(smi, flag_fetches));
2832 out += sprintf(out, "hosed_count: %u\n", 2840 out += sprintf(out, "hosed_count: %u\n",
2833 smi_get_stat(smi, hosed_count)); 2841 smi_get_stat(smi, hosed_count));
2834 out += sprintf(out, "complete_transactions: %u\n", 2842 out += sprintf(out, "complete_transactions: %u\n",
2835 smi_get_stat(smi, complete_transactions)); 2843 smi_get_stat(smi, complete_transactions));
2836 out += sprintf(out, "events: %u\n", 2844 out += sprintf(out, "events: %u\n",
2837 smi_get_stat(smi, events)); 2845 smi_get_stat(smi, events));
2838 out += sprintf(out, "watchdog_pretimeouts: %u\n", 2846 out += sprintf(out, "watchdog_pretimeouts: %u\n",
2839 smi_get_stat(smi, watchdog_pretimeouts)); 2847 smi_get_stat(smi, watchdog_pretimeouts));
2840 out += sprintf(out, "incoming_messages: %u\n", 2848 out += sprintf(out, "incoming_messages: %u\n",
2841 smi_get_stat(smi, incoming_messages)); 2849 smi_get_stat(smi, incoming_messages));
2842 2850
2843 return out - page; 2851 return out - page;
2844 } 2852 }
2845 2853
2846 static int param_read_proc(char *page, char **start, off_t off, 2854 static int param_read_proc(char *page, char **start, off_t off,
2847 int count, int *eof, void *data) 2855 int count, int *eof, void *data)
2848 { 2856 {
2849 struct smi_info *smi = data; 2857 struct smi_info *smi = data;
2850 2858
2851 return sprintf(page, 2859 return sprintf(page,
2852 "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n", 2860 "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
2853 si_to_str[smi->si_type], 2861 si_to_str[smi->si_type],
2854 addr_space_to_str[smi->io.addr_type], 2862 addr_space_to_str[smi->io.addr_type],
2855 smi->io.addr_data, 2863 smi->io.addr_data,
2856 smi->io.regspacing, 2864 smi->io.regspacing,
2857 smi->io.regsize, 2865 smi->io.regsize,
2858 smi->io.regshift, 2866 smi->io.regshift,
2859 smi->irq, 2867 smi->irq,
2860 smi->slave_addr); 2868 smi->slave_addr);
2861 } 2869 }
2862 2870
2863 /* 2871 /*
2864 * oem_data_avail_to_receive_msg_avail 2872 * oem_data_avail_to_receive_msg_avail
2865 * @info - smi_info structure with msg_flags set 2873 * @info - smi_info structure with msg_flags set
2866 * 2874 *
2867 * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL 2875 * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
2868 * Returns 1, indicating that handle_flags() needs to be re-run. 2876 * Returns 1, indicating that handle_flags() needs to be re-run.
2869 */ 2877 */
2870 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info) 2878 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
2871 { 2879 {
2872 smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) | 2880 smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
2873 RECEIVE_MSG_AVAIL); 2881 RECEIVE_MSG_AVAIL);
2874 return 1; 2882 return 1;
2875 } 2883 }
2876 2884
2877 /* 2885 /*
2878 * setup_dell_poweredge_oem_data_handler 2886 * setup_dell_poweredge_oem_data_handler
2879 * @info - smi_info.device_id must be populated 2887 * @info - smi_info.device_id must be populated
2880 * 2888 *
2881 * Systems that match, but have firmware version < 1.40 may assert 2889 * Systems that match, but have firmware version < 1.40 may assert
2882 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that 2890 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
2883 * it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL 2891 * it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL
2884 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags 2892 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
2885 * as RECEIVE_MSG_AVAIL instead. 2893 * as RECEIVE_MSG_AVAIL instead.
2886 * 2894 *
2887 * As Dell has no plans to release IPMI 1.5 firmware that *ever* 2895 * As Dell has no plans to release IPMI 1.5 firmware that *ever*
2888 * assert the OEM[012] bits, and if it did, the driver would have to 2896 * assert the OEM[012] bits, and if it did, the driver would have to
2889 * change to handle that properly, we don't actually check for the 2897 * change to handle that properly, we don't actually check for the
2890 * firmware version. 2898 * firmware version.
2891 * Device ID = 0x20 BMC on PowerEdge 8G servers 2899 * Device ID = 0x20 BMC on PowerEdge 8G servers
2892 * Device Revision = 0x80 2900 * Device Revision = 0x80
2893 * Firmware Revision1 = 0x01 BMC version 1.40 2901 * Firmware Revision1 = 0x01 BMC version 1.40
2894 * Firmware Revision2 = 0x40 BCD encoded 2902 * Firmware Revision2 = 0x40 BCD encoded
2895 * IPMI Version = 0x51 IPMI 1.5 2903 * IPMI Version = 0x51 IPMI 1.5
2896 * Manufacturer ID = A2 02 00 Dell IANA 2904 * Manufacturer ID = A2 02 00 Dell IANA
2897 * 2905 *
2898 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert 2906 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
2899 * OEM0_DATA_AVAIL, which needs to be treated as RECEIVE_MSG_AVAIL. 2907 * OEM0_DATA_AVAIL, which needs to be treated as RECEIVE_MSG_AVAIL.
2900 * 2908 *
2901 */ 2909 */
2902 #define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20 2910 #define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20
2903 #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80 2911 #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
2904 #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51 2912 #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
2905 #define DELL_IANA_MFR_ID 0x0002a2 2913 #define DELL_IANA_MFR_ID 0x0002a2
2906 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info) 2914 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2907 { 2915 {
2908 struct ipmi_device_id *id = &smi_info->device_id; 2916 struct ipmi_device_id *id = &smi_info->device_id;
2909 if (id->manufacturer_id == DELL_IANA_MFR_ID) { 2917 if (id->manufacturer_id == DELL_IANA_MFR_ID) {
2910 if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID && 2918 if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID &&
2911 id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV && 2919 id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
2912 id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) { 2920 id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
2913 smi_info->oem_data_avail_handler = 2921 smi_info->oem_data_avail_handler =
2914 oem_data_avail_to_receive_msg_avail; 2922 oem_data_avail_to_receive_msg_avail;
2915 } else if (ipmi_version_major(id) < 1 || 2923 } else if (ipmi_version_major(id) < 1 ||
2916 (ipmi_version_major(id) == 1 && 2924 (ipmi_version_major(id) == 1 &&
2917 ipmi_version_minor(id) < 5)) { 2925 ipmi_version_minor(id) < 5)) {
2918 smi_info->oem_data_avail_handler = 2926 smi_info->oem_data_avail_handler =
2919 oem_data_avail_to_receive_msg_avail; 2927 oem_data_avail_to_receive_msg_avail;
2920 } 2928 }
2921 } 2929 }
2922 } 2930 }
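/*
 * Standalone illustration (not driver code): how the IPMI version byte
 * quoted in the comment above (0x51 for IPMI 1.5) splits into the major and
 * minor numbers compared by setup_dell_poweredge_oem_data_handler(),
 * assuming the low-nibble-major encoding used by the driver.
 */
#include <stdio.h>

int main(void)
{
	unsigned char ipmi_version = 0x51;	/* low nibble major, high nibble minor */
	int major = ipmi_version & 0x0f;
	int minor = ipmi_version >> 4;

	printf("IPMI %d.%d\n", major, minor);	/* prints: IPMI 1.5 */

	/* The OEM quirk also applies to anything older than IPMI 1.5. */
	if (major < 1 || (major == 1 && minor < 5))
		printf("treat OEM0_DATA_AVAIL as RECEIVE_MSG_AVAIL\n");
	return 0;
}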
2923 2931
2924 #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA 2932 #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
2925 static void return_hosed_msg_badsize(struct smi_info *smi_info) 2933 static void return_hosed_msg_badsize(struct smi_info *smi_info)
2926 { 2934 {
2927 struct ipmi_smi_msg *msg = smi_info->curr_msg; 2935 struct ipmi_smi_msg *msg = smi_info->curr_msg;
2928 2936
2929 /* Make it a response */ 2937 /* Make it a response */
2930 msg->rsp[0] = msg->data[0] | 4; 2938 msg->rsp[0] = msg->data[0] | 4;
2931 msg->rsp[1] = msg->data[1]; 2939 msg->rsp[1] = msg->data[1];
2932 msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH; 2940 msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
2933 msg->rsp_size = 3; 2941 msg->rsp_size = 3;
2934 smi_info->curr_msg = NULL; 2942 smi_info->curr_msg = NULL;
2935 deliver_recv_msg(smi_info, msg); 2943 deliver_recv_msg(smi_info, msg);
2936 } 2944 }
2937 2945
2938 /* 2946 /*
2939 * dell_poweredge_bt_xaction_handler 2947 * dell_poweredge_bt_xaction_handler
2940 * @info - smi_info.device_id must be populated 2948 * @info - smi_info.device_id must be populated
2941 * 2949 *
2942 * Dell PowerEdge servers with the BT interface (x6xx and 1750) will 2950 * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
2943 * not respond to a Get SDR command if the length of the data 2951 * not respond to a Get SDR command if the length of the data
2944 * requested is exactly 0x3A, which leads to command timeouts and no 2952 * requested is exactly 0x3A, which leads to command timeouts and no
2945 * data returned. This intercepts such commands, and causes userspace 2953 * data returned. This intercepts such commands, and causes userspace
2946 * callers to try again with a different-sized buffer, which succeeds. 2954 * callers to try again with a different-sized buffer, which succeeds.
2947 */ 2955 */
2948 2956
2949 #define STORAGE_NETFN 0x0A 2957 #define STORAGE_NETFN 0x0A
2950 #define STORAGE_CMD_GET_SDR 0x23 2958 #define STORAGE_CMD_GET_SDR 0x23
2951 static int dell_poweredge_bt_xaction_handler(struct notifier_block *self, 2959 static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
2952 unsigned long unused, 2960 unsigned long unused,
2953 void *in) 2961 void *in)
2954 { 2962 {
2955 struct smi_info *smi_info = in; 2963 struct smi_info *smi_info = in;
2956 unsigned char *data = smi_info->curr_msg->data; 2964 unsigned char *data = smi_info->curr_msg->data;
2957 unsigned int size = smi_info->curr_msg->data_size; 2965 unsigned int size = smi_info->curr_msg->data_size;
2958 if (size >= 8 && 2966 if (size >= 8 &&
2959 (data[0]>>2) == STORAGE_NETFN && 2967 (data[0]>>2) == STORAGE_NETFN &&
2960 data[1] == STORAGE_CMD_GET_SDR && 2968 data[1] == STORAGE_CMD_GET_SDR &&
2961 data[7] == 0x3A) { 2969 data[7] == 0x3A) {
2962 return_hosed_msg_badsize(smi_info); 2970 return_hosed_msg_badsize(smi_info);
2963 return NOTIFY_STOP; 2971 return NOTIFY_STOP;
2964 } 2972 }
2965 return NOTIFY_DONE; 2973 return NOTIFY_DONE;
2966 } 2974 }
2967 2975
2968 static struct notifier_block dell_poweredge_bt_xaction_notifier = { 2976 static struct notifier_block dell_poweredge_bt_xaction_notifier = {
2969 .notifier_call = dell_poweredge_bt_xaction_handler, 2977 .notifier_call = dell_poweredge_bt_xaction_handler,
2970 }; 2978 };
2971 2979
2972 /* 2980 /*
2973 * setup_dell_poweredge_bt_xaction_handler 2981 * setup_dell_poweredge_bt_xaction_handler
2974 * @info - smi_info.device_id must be filled in already 2982 * @info - smi_info.device_id must be filled in already
2975 * 2983 *
2976 * Registers the Dell PowerEdge BT transaction notifier 2984 * Registers the Dell PowerEdge BT transaction notifier
2977 * once we know this is a Dell BMC using the BT interface. 2985 * once we know this is a Dell BMC using the BT interface.
2978 */ 2986 */
2979 static void 2987 static void
2980 setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info) 2988 setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
2981 { 2989 {
2982 struct ipmi_device_id *id = &smi_info->device_id; 2990 struct ipmi_device_id *id = &smi_info->device_id;
2983 if (id->manufacturer_id == DELL_IANA_MFR_ID && 2991 if (id->manufacturer_id == DELL_IANA_MFR_ID &&
2984 smi_info->si_type == SI_BT) 2992 smi_info->si_type == SI_BT)
2985 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier); 2993 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
2986 } 2994 }
2987 2995
2988 /* 2996 /*
2989 * setup_oem_data_handler 2997 * setup_oem_data_handler
2990 * @info - smi_info.device_id must be filled in already 2998 * @info - smi_info.device_id must be filled in already
2991 * 2999 *
2992 * Fills in smi_info.device_id.oem_data_available_handler 3000 * Fills in smi_info.device_id.oem_data_available_handler
2993 * when we know what function to use there. 3001 * when we know what function to use there.
2994 */ 3002 */
2995 3003
2996 static void setup_oem_data_handler(struct smi_info *smi_info) 3004 static void setup_oem_data_handler(struct smi_info *smi_info)
2997 { 3005 {
2998 setup_dell_poweredge_oem_data_handler(smi_info); 3006 setup_dell_poweredge_oem_data_handler(smi_info);
2999 } 3007 }
3000 3008
3001 static void setup_xaction_handlers(struct smi_info *smi_info) 3009 static void setup_xaction_handlers(struct smi_info *smi_info)
3002 { 3010 {
3003 setup_dell_poweredge_bt_xaction_handler(smi_info); 3011 setup_dell_poweredge_bt_xaction_handler(smi_info);
3004 } 3012 }
3005 3013
3006 static inline void wait_for_timer_and_thread(struct smi_info *smi_info) 3014 static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
3007 { 3015 {
3008 if (smi_info->intf) { 3016 if (smi_info->intf) {
3009 /* 3017 /*
3010 * The timer and thread are only running if the 3018 * The timer and thread are only running if the
3011 * interface has been started up and registered. 3019 * interface has been started up and registered.
3012 */ 3020 */
3013 if (smi_info->thread != NULL) 3021 if (smi_info->thread != NULL)
3014 kthread_stop(smi_info->thread); 3022 kthread_stop(smi_info->thread);
3015 del_timer_sync(&smi_info->si_timer); 3023 del_timer_sync(&smi_info->si_timer);
3016 } 3024 }
3017 } 3025 }
3018 3026
3019 static __devinitdata struct ipmi_default_vals 3027 static __devinitdata struct ipmi_default_vals
3020 { 3028 {
3021 int type; 3029 int type;
3022 int port; 3030 int port;
3023 } ipmi_defaults[] = 3031 } ipmi_defaults[] =
3024 { 3032 {
3025 { .type = SI_KCS, .port = 0xca2 }, 3033 { .type = SI_KCS, .port = 0xca2 },
3026 { .type = SI_SMIC, .port = 0xca9 }, 3034 { .type = SI_SMIC, .port = 0xca9 },
3027 { .type = SI_BT, .port = 0xe4 }, 3035 { .type = SI_BT, .port = 0xe4 },
3028 { .port = 0 } 3036 { .port = 0 }
3029 }; 3037 };
3030 3038
3031 static void __devinit default_find_bmc(void) 3039 static void __devinit default_find_bmc(void)
3032 { 3040 {
3033 struct smi_info *info; 3041 struct smi_info *info;
3034 int i; 3042 int i;
3035 3043
3036 for (i = 0; ; i++) { 3044 for (i = 0; ; i++) {
3037 if (!ipmi_defaults[i].port) 3045 if (!ipmi_defaults[i].port)
3038 break; 3046 break;
3039 #ifdef CONFIG_PPC 3047 #ifdef CONFIG_PPC
3040 if (check_legacy_ioport(ipmi_defaults[i].port)) 3048 if (check_legacy_ioport(ipmi_defaults[i].port))
3041 continue; 3049 continue;
3042 #endif 3050 #endif
3043 info = smi_info_alloc(); 3051 info = smi_info_alloc();
3044 if (!info) 3052 if (!info)
3045 return; 3053 return;
3046 3054
3047 info->addr_source = SI_DEFAULT; 3055 info->addr_source = SI_DEFAULT;
3048 3056
3049 info->si_type = ipmi_defaults[i].type; 3057 info->si_type = ipmi_defaults[i].type;
3050 info->io_setup = port_setup; 3058 info->io_setup = port_setup;
3051 info->io.addr_data = ipmi_defaults[i].port; 3059 info->io.addr_data = ipmi_defaults[i].port;
3052 info->io.addr_type = IPMI_IO_ADDR_SPACE; 3060 info->io.addr_type = IPMI_IO_ADDR_SPACE;
3053 3061
3054 info->io.addr = NULL; 3062 info->io.addr = NULL;
3055 info->io.regspacing = DEFAULT_REGSPACING; 3063 info->io.regspacing = DEFAULT_REGSPACING;
3056 info->io.regsize = DEFAULT_REGSPACING; 3064 info->io.regsize = DEFAULT_REGSPACING;
3057 info->io.regshift = 0; 3065 info->io.regshift = 0;
3058 3066
3059 if (add_smi(info) == 0) { 3067 if (add_smi(info) == 0) {
3060 if ((try_smi_init(info)) == 0) { 3068 if ((try_smi_init(info)) == 0) {
3061 /* Found one... */ 3069 /* Found one... */
3062 printk(KERN_INFO PFX "Found default %s" 3070 printk(KERN_INFO PFX "Found default %s"
3063 " state machine at %s address 0x%lx\n", 3071 " state machine at %s address 0x%lx\n",
3064 si_to_str[info->si_type], 3072 si_to_str[info->si_type],
3065 addr_space_to_str[info->io.addr_type], 3073 addr_space_to_str[info->io.addr_type],
3066 info->io.addr_data); 3074 info->io.addr_data);
3067 } else 3075 } else
3068 cleanup_one_si(info); 3076 cleanup_one_si(info);
3069 } else { 3077 } else {
3070 kfree(info); 3078 kfree(info);
3071 } 3079 }
3072 } 3080 }
3073 } 3081 }
3074 3082
3075 static int is_new_interface(struct smi_info *info) 3083 static int is_new_interface(struct smi_info *info)
3076 { 3084 {
3077 struct smi_info *e; 3085 struct smi_info *e;
3078 3086
3079 list_for_each_entry(e, &smi_infos, link) { 3087 list_for_each_entry(e, &smi_infos, link) {
3080 if (e->io.addr_type != info->io.addr_type) 3088 if (e->io.addr_type != info->io.addr_type)
3081 continue; 3089 continue;
3082 if (e->io.addr_data == info->io.addr_data) 3090 if (e->io.addr_data == info->io.addr_data)
3083 return 0; 3091 return 0;
3084 } 3092 }
3085 3093
3086 return 1; 3094 return 1;
3087 } 3095 }
3088 3096
3089 static int add_smi(struct smi_info *new_smi) 3097 static int add_smi(struct smi_info *new_smi)
3090 { 3098 {
3091 int rv = 0; 3099 int rv = 0;
3092 3100
3093 printk(KERN_INFO PFX "Adding %s-specified %s state machine", 3101 printk(KERN_INFO PFX "Adding %s-specified %s state machine",
3094 ipmi_addr_src_to_str[new_smi->addr_source], 3102 ipmi_addr_src_to_str[new_smi->addr_source],
3095 si_to_str[new_smi->si_type]); 3103 si_to_str[new_smi->si_type]);
3096 mutex_lock(&smi_infos_lock); 3104 mutex_lock(&smi_infos_lock);
3097 if (!is_new_interface(new_smi)) { 3105 if (!is_new_interface(new_smi)) {
3098 printk(KERN_CONT " duplicate interface\n"); 3106 printk(KERN_CONT " duplicate interface\n");
3099 rv = -EBUSY; 3107 rv = -EBUSY;
3100 goto out_err; 3108 goto out_err;
3101 } 3109 }
3102 3110
3103 printk(KERN_CONT "\n"); 3111 printk(KERN_CONT "\n");
3104 3112
3105 /* So we know not to free it unless we have allocated one. */ 3113 /* So we know not to free it unless we have allocated one. */
3106 new_smi->intf = NULL; 3114 new_smi->intf = NULL;
3107 new_smi->si_sm = NULL; 3115 new_smi->si_sm = NULL;
3108 new_smi->handlers = NULL; 3116 new_smi->handlers = NULL;
3109 3117
3110 list_add_tail(&new_smi->link, &smi_infos); 3118 list_add_tail(&new_smi->link, &smi_infos);
3111 3119
3112 out_err: 3120 out_err:
3113 mutex_unlock(&smi_infos_lock); 3121 mutex_unlock(&smi_infos_lock);
3114 return rv; 3122 return rv;
3115 } 3123 }
3116 3124
3117 static int try_smi_init(struct smi_info *new_smi) 3125 static int try_smi_init(struct smi_info *new_smi)
3118 { 3126 {
3119 int rv = 0; 3127 int rv = 0;
3120 int i; 3128 int i;
3121 3129
3122 printk(KERN_INFO PFX "Trying %s-specified %s state" 3130 printk(KERN_INFO PFX "Trying %s-specified %s state"
3123 " machine at %s address 0x%lx, slave address 0x%x," 3131 " machine at %s address 0x%lx, slave address 0x%x,"
3124 " irq %d\n", 3132 " irq %d\n",
3125 ipmi_addr_src_to_str[new_smi->addr_source], 3133 ipmi_addr_src_to_str[new_smi->addr_source],
3126 si_to_str[new_smi->si_type], 3134 si_to_str[new_smi->si_type],
3127 addr_space_to_str[new_smi->io.addr_type], 3135 addr_space_to_str[new_smi->io.addr_type],
3128 new_smi->io.addr_data, 3136 new_smi->io.addr_data,
3129 new_smi->slave_addr, new_smi->irq); 3137 new_smi->slave_addr, new_smi->irq);
3130 3138
3131 switch (new_smi->si_type) { 3139 switch (new_smi->si_type) {
3132 case SI_KCS: 3140 case SI_KCS:
3133 new_smi->handlers = &kcs_smi_handlers; 3141 new_smi->handlers = &kcs_smi_handlers;
3134 break; 3142 break;
3135 3143
3136 case SI_SMIC: 3144 case SI_SMIC:
3137 new_smi->handlers = &smic_smi_handlers; 3145 new_smi->handlers = &smic_smi_handlers;
3138 break; 3146 break;
3139 3147
3140 case SI_BT: 3148 case SI_BT:
3141 new_smi->handlers = &bt_smi_handlers; 3149 new_smi->handlers = &bt_smi_handlers;
3142 break; 3150 break;
3143 3151
3144 default: 3152 default:
3145 /* No support for anything else yet. */ 3153 /* No support for anything else yet. */
3146 rv = -EIO; 3154 rv = -EIO;
3147 goto out_err; 3155 goto out_err;
3148 } 3156 }
3149 3157
3150 /* Allocate the state machine's data and initialize it. */ 3158 /* Allocate the state machine's data and initialize it. */
3151 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); 3159 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
3152 if (!new_smi->si_sm) { 3160 if (!new_smi->si_sm) {
3153 printk(KERN_ERR PFX 3161 printk(KERN_ERR PFX
3154 "Could not allocate state machine memory\n"); 3162 "Could not allocate state machine memory\n");
3155 rv = -ENOMEM; 3163 rv = -ENOMEM;
3156 goto out_err; 3164 goto out_err;
3157 } 3165 }
3158 new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm, 3166 new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
3159 &new_smi->io); 3167 &new_smi->io);
3160 3168
3161 /* Now that we know the I/O size, we can set up the I/O. */ 3169 /* Now that we know the I/O size, we can set up the I/O. */
3162 rv = new_smi->io_setup(new_smi); 3170 rv = new_smi->io_setup(new_smi);
3163 if (rv) { 3171 if (rv) {
3164 printk(KERN_ERR PFX "Could not set up I/O space\n"); 3172 printk(KERN_ERR PFX "Could not set up I/O space\n");
3165 goto out_err; 3173 goto out_err;
3166 } 3174 }
3167 3175
3168 /* Do low-level detection first. */ 3176 /* Do low-level detection first. */
3169 if (new_smi->handlers->detect(new_smi->si_sm)) { 3177 if (new_smi->handlers->detect(new_smi->si_sm)) {
3170 if (new_smi->addr_source) 3178 if (new_smi->addr_source)
3171 printk(KERN_INFO PFX "Interface detection failed\n"); 3179 printk(KERN_INFO PFX "Interface detection failed\n");
3172 rv = -ENODEV; 3180 rv = -ENODEV;
3173 goto out_err; 3181 goto out_err;
3174 } 3182 }
3175 3183
3176 /* 3184 /*
3177 * Attempt a get device id command. If it fails, we probably 3185 * Attempt a get device id command. If it fails, we probably
3178 * don't have a BMC here. 3186 * don't have a BMC here.
3179 */ 3187 */
3180 rv = try_get_dev_id(new_smi); 3188 rv = try_get_dev_id(new_smi);
3181 if (rv) { 3189 if (rv) {
3182 if (new_smi->addr_source) 3190 if (new_smi->addr_source)
3183 printk(KERN_INFO PFX "There appears to be no BMC" 3191 printk(KERN_INFO PFX "There appears to be no BMC"
3184 " at this location\n"); 3192 " at this location\n");
3185 goto out_err; 3193 goto out_err;
3186 } 3194 }
3187 3195
3188 setup_oem_data_handler(new_smi); 3196 setup_oem_data_handler(new_smi);
3189 setup_xaction_handlers(new_smi); 3197 setup_xaction_handlers(new_smi);
3190 3198
3191 INIT_LIST_HEAD(&(new_smi->xmit_msgs)); 3199 INIT_LIST_HEAD(&(new_smi->xmit_msgs));
3192 INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs)); 3200 INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
3193 new_smi->curr_msg = NULL; 3201 new_smi->curr_msg = NULL;
3194 atomic_set(&new_smi->req_events, 0); 3202 atomic_set(&new_smi->req_events, 0);
3195 new_smi->run_to_completion = 0; 3203 new_smi->run_to_completion = 0;
3196 for (i = 0; i < SI_NUM_STATS; i++) 3204 for (i = 0; i < SI_NUM_STATS; i++)
3197 atomic_set(&new_smi->stats[i], 0); 3205 atomic_set(&new_smi->stats[i], 0);
3198 3206
3199 new_smi->interrupt_disabled = 1; 3207 new_smi->interrupt_disabled = 1;
3200 atomic_set(&new_smi->stop_operation, 0); 3208 atomic_set(&new_smi->stop_operation, 0);
3201 new_smi->intf_num = smi_num; 3209 new_smi->intf_num = smi_num;
3202 smi_num++; 3210 smi_num++;
3203 3211
3204 rv = try_enable_event_buffer(new_smi); 3212 rv = try_enable_event_buffer(new_smi);
3205 if (rv == 0) 3213 if (rv == 0)
3206 new_smi->has_event_buffer = 1; 3214 new_smi->has_event_buffer = 1;
3207 3215
3208 /* 3216 /*
3209 * Start clearing the flags before we enable interrupts or the 3217 * Start clearing the flags before we enable interrupts or the
3210 * timer to avoid racing with the timer. 3218 * timer to avoid racing with the timer.
3211 */ 3219 */
3212 start_clear_flags(new_smi); 3220 start_clear_flags(new_smi);
3213 /* IRQ is defined to be set when non-zero. */ 3221 /* IRQ is defined to be set when non-zero. */
3214 if (new_smi->irq) 3222 if (new_smi->irq)
3215 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ; 3223 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
3216 3224
3217 if (!new_smi->dev) { 3225 if (!new_smi->dev) {
3218 /* 3226 /*
3219 * If we don't already have a device from something 3227 * If we don't already have a device from something
3220 * else (like PCI), then register a new one. 3228 * else (like PCI), then register a new one.
3221 */ 3229 */
3222 new_smi->pdev = platform_device_alloc("ipmi_si", 3230 new_smi->pdev = platform_device_alloc("ipmi_si",
3223 new_smi->intf_num); 3231 new_smi->intf_num);
3224 if (!new_smi->pdev) { 3232 if (!new_smi->pdev) {
3225 printk(KERN_ERR PFX 3233 printk(KERN_ERR PFX
3226 "Unable to allocate platform device\n"); 3234 "Unable to allocate platform device\n");
3227 goto out_err; 3235 goto out_err;
3228 } 3236 }
3229 new_smi->dev = &new_smi->pdev->dev; 3237 new_smi->dev = &new_smi->pdev->dev;
3230 new_smi->dev->driver = &ipmi_driver.driver; 3238 new_smi->dev->driver = &ipmi_driver.driver;
3231 3239
3232 rv = platform_device_add(new_smi->pdev); 3240 rv = platform_device_add(new_smi->pdev);
3233 if (rv) { 3241 if (rv) {
3234 printk(KERN_ERR PFX 3242 printk(KERN_ERR PFX
3235 "Unable to register system interface device:" 3243 "Unable to register system interface device:"
3236 " %d\n", 3244 " %d\n",
3237 rv); 3245 rv);
3238 goto out_err; 3246 goto out_err;
3239 } 3247 }
3240 new_smi->dev_registered = 1; 3248 new_smi->dev_registered = 1;
3241 } 3249 }
3242 3250
3243 rv = ipmi_register_smi(&handlers, 3251 rv = ipmi_register_smi(&handlers,
3244 new_smi, 3252 new_smi,
3245 &new_smi->device_id, 3253 &new_smi->device_id,
3246 new_smi->dev, 3254 new_smi->dev,
3247 "bmc", 3255 "bmc",
3248 new_smi->slave_addr); 3256 new_smi->slave_addr);
3249 if (rv) { 3257 if (rv) {
3250 dev_err(new_smi->dev, "Unable to register device: error %d\n", 3258 dev_err(new_smi->dev, "Unable to register device: error %d\n",
3251 rv); 3259 rv);
3252 goto out_err_stop_timer; 3260 goto out_err_stop_timer;
3253 } 3261 }
3254 3262
3255 rv = ipmi_smi_add_proc_entry(new_smi->intf, "type", 3263 rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
3256 type_file_read_proc, 3264 type_file_read_proc,
3257 new_smi); 3265 new_smi);
3258 if (rv) { 3266 if (rv) {
3259 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); 3267 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
3260 goto out_err_stop_timer; 3268 goto out_err_stop_timer;
3261 } 3269 }
3262 3270
3263 rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats", 3271 rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
3264 stat_file_read_proc, 3272 stat_file_read_proc,
3265 new_smi); 3273 new_smi);
3266 if (rv) { 3274 if (rv) {
3267 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); 3275 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
3268 goto out_err_stop_timer; 3276 goto out_err_stop_timer;
3269 } 3277 }
3270 3278
3271 rv = ipmi_smi_add_proc_entry(new_smi->intf, "params", 3279 rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
3272 param_read_proc, 3280 param_read_proc,
3273 new_smi); 3281 new_smi);
3274 if (rv) { 3282 if (rv) {
3275 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); 3283 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
3276 goto out_err_stop_timer; 3284 goto out_err_stop_timer;
3277 } 3285 }
3278 3286
3279 dev_info(new_smi->dev, "IPMI %s interface initialized\n", 3287 dev_info(new_smi->dev, "IPMI %s interface initialized\n",
3280 si_to_str[new_smi->si_type]); 3288 si_to_str[new_smi->si_type]);
3281 3289
3282 return 0; 3290 return 0;
3283 3291
3284 out_err_stop_timer: 3292 out_err_stop_timer:
3285 atomic_inc(&new_smi->stop_operation); 3293 atomic_inc(&new_smi->stop_operation);
3286 wait_for_timer_and_thread(new_smi); 3294 wait_for_timer_and_thread(new_smi);
3287 3295
3288 out_err: 3296 out_err:
3289 new_smi->interrupt_disabled = 1; 3297 new_smi->interrupt_disabled = 1;
3290 3298
3291 if (new_smi->intf) { 3299 if (new_smi->intf) {
3292 ipmi_unregister_smi(new_smi->intf); 3300 ipmi_unregister_smi(new_smi->intf);
3293 new_smi->intf = NULL; 3301 new_smi->intf = NULL;
3294 } 3302 }
3295 3303
3296 if (new_smi->irq_cleanup) { 3304 if (new_smi->irq_cleanup) {
3297 new_smi->irq_cleanup(new_smi); 3305 new_smi->irq_cleanup(new_smi);
3298 new_smi->irq_cleanup = NULL; 3306 new_smi->irq_cleanup = NULL;
3299 } 3307 }
3300 3308
3301 /* 3309 /*
3302 * Wait until we know that we are out of any interrupt 3310 * Wait until we know that we are out of any interrupt
3303 * handlers that might have been running before we freed the 3311 * handlers that might have been running before we freed the
3304 * interrupt. 3312 * interrupt.
3305 */ 3313 */
3306 synchronize_sched(); 3314 synchronize_sched();
3307 3315
3308 if (new_smi->si_sm) { 3316 if (new_smi->si_sm) {
3309 if (new_smi->handlers) 3317 if (new_smi->handlers)
3310 new_smi->handlers->cleanup(new_smi->si_sm); 3318 new_smi->handlers->cleanup(new_smi->si_sm);
3311 kfree(new_smi->si_sm); 3319 kfree(new_smi->si_sm);
3312 new_smi->si_sm = NULL; 3320 new_smi->si_sm = NULL;
3313 } 3321 }
3314 if (new_smi->addr_source_cleanup) { 3322 if (new_smi->addr_source_cleanup) {
3315 new_smi->addr_source_cleanup(new_smi); 3323 new_smi->addr_source_cleanup(new_smi);
3316 new_smi->addr_source_cleanup = NULL; 3324 new_smi->addr_source_cleanup = NULL;
3317 } 3325 }
3318 if (new_smi->io_cleanup) { 3326 if (new_smi->io_cleanup) {
3319 new_smi->io_cleanup(new_smi); 3327 new_smi->io_cleanup(new_smi);
3320 new_smi->io_cleanup = NULL; 3328 new_smi->io_cleanup = NULL;
3321 } 3329 }
3322 3330
3323 if (new_smi->dev_registered) { 3331 if (new_smi->dev_registered) {
3324 platform_device_unregister(new_smi->pdev); 3332 platform_device_unregister(new_smi->pdev);
3325 new_smi->dev_registered = 0; 3333 new_smi->dev_registered = 0;
3326 } 3334 }
3327 3335
3328 return rv; 3336 return rv;
3329 } 3337 }
3330 3338
3331 static int __devinit init_ipmi_si(void) 3339 static int __devinit init_ipmi_si(void)
3332 { 3340 {
3333 int i; 3341 int i;
3334 char *str; 3342 char *str;
3335 int rv; 3343 int rv;
3336 struct smi_info *e; 3344 struct smi_info *e;
3337 enum ipmi_addr_src type = SI_INVALID; 3345 enum ipmi_addr_src type = SI_INVALID;
3338 3346
3339 if (initialized) 3347 if (initialized)
3340 return 0; 3348 return 0;
3341 initialized = 1; 3349 initialized = 1;
3342 3350
3343 /* Register the device drivers. */ 3351 /* Register the device drivers. */
3344 rv = driver_register(&ipmi_driver.driver); 3352 rv = driver_register(&ipmi_driver.driver);
3345 if (rv) { 3353 if (rv) {
3346 printk(KERN_ERR PFX "Unable to register driver: %d\n", rv); 3354 printk(KERN_ERR PFX "Unable to register driver: %d\n", rv);
3347 return rv; 3355 return rv;
3348 } 3356 }
3349 3357
3350 3358
3351 /* Parse out the si_type string into its components. */ 3359 /* Parse out the si_type string into its components. */
3352 str = si_type_str; 3360 str = si_type_str;
3353 if (*str != '\0') { 3361 if (*str != '\0') {
3354 for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) { 3362 for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
3355 si_type[i] = str; 3363 si_type[i] = str;
3356 str = strchr(str, ','); 3364 str = strchr(str, ',');
3357 if (str) { 3365 if (str) {
3358 *str = '\0'; 3366 *str = '\0';
3359 str++; 3367 str++;
3360 } else { 3368 } else {
3361 break; 3369 break;
3362 } 3370 }
3363 } 3371 }
3364 } 3372 }
3365 3373
3366 printk(KERN_INFO "IPMI System Interface driver.\n"); 3374 printk(KERN_INFO "IPMI System Interface driver.\n");
3367 3375
3368 hardcode_find_bmc(); 3376 hardcode_find_bmc();
3369 3377
3370 /* If the user gave us a device, they presumably want us to use it */ 3378 /* If the user gave us a device, they presumably want us to use it */
3371 mutex_lock(&smi_infos_lock); 3379 mutex_lock(&smi_infos_lock);
3372 if (!list_empty(&smi_infos)) { 3380 if (!list_empty(&smi_infos)) {
3373 mutex_unlock(&smi_infos_lock); 3381 mutex_unlock(&smi_infos_lock);
3374 return 0; 3382 return 0;
3375 } 3383 }
3376 mutex_unlock(&smi_infos_lock); 3384 mutex_unlock(&smi_infos_lock);
3377 3385
3378 #ifdef CONFIG_PCI 3386 #ifdef CONFIG_PCI
3379 rv = pci_register_driver(&ipmi_pci_driver); 3387 rv = pci_register_driver(&ipmi_pci_driver);
3380 if (rv) 3388 if (rv)
3381 printk(KERN_ERR PFX "Unable to register PCI driver: %d\n", rv); 3389 printk(KERN_ERR PFX "Unable to register PCI driver: %d\n", rv);
3382 else 3390 else
3383 pci_registered = 1; 3391 pci_registered = 1;
3384 #endif 3392 #endif
3385 3393
3386 #ifdef CONFIG_ACPI 3394 #ifdef CONFIG_ACPI
3387 pnp_register_driver(&ipmi_pnp_driver); 3395 pnp_register_driver(&ipmi_pnp_driver);
3388 pnp_registered = 1; 3396 pnp_registered = 1;
3389 #endif 3397 #endif
3390 3398
3391 #ifdef CONFIG_DMI 3399 #ifdef CONFIG_DMI
3392 dmi_find_bmc(); 3400 dmi_find_bmc();
3393 #endif 3401 #endif
3394 3402
3395 #ifdef CONFIG_ACPI 3403 #ifdef CONFIG_ACPI
3396 spmi_find_bmc(); 3404 spmi_find_bmc();
3397 #endif 3405 #endif
3398 3406
3399 #ifdef CONFIG_PPC_OF 3407 #ifdef CONFIG_PPC_OF
3400 of_register_platform_driver(&ipmi_of_platform_driver); 3408 of_register_platform_driver(&ipmi_of_platform_driver);
3401 of_registered = 1; 3409 of_registered = 1;
3402 #endif 3410 #endif
3403 3411
3404 /* We prefer devices with interrupts, but in the case of a machine 3412 /* We prefer devices with interrupts, but in the case of a machine
3405 with multiple BMCs we assume that there will be several instances 3413 with multiple BMCs we assume that there will be several instances
3406 of a given type so if we succeed in registering a type then also 3414 of a given type so if we succeed in registering a type then also
3407 try to register everything else of the same type */ 3415 try to register everything else of the same type */
3408 3416
3409 mutex_lock(&smi_infos_lock); 3417 mutex_lock(&smi_infos_lock);
3410 list_for_each_entry(e, &smi_infos, link) { 3418 list_for_each_entry(e, &smi_infos, link) {
3411 /* Try to register a device if it has an IRQ and we either 3419 /* Try to register a device if it has an IRQ and we either
3412 haven't successfully registered a device yet or this 3420 haven't successfully registered a device yet or this
3413 device has the same type as one we successfully registered */ 3421 device has the same type as one we successfully registered */
3414 if (e->irq && (!type || e->addr_source == type)) { 3422 if (e->irq && (!type || e->addr_source == type)) {
3415 if (!try_smi_init(e)) { 3423 if (!try_smi_init(e)) {
3416 type = e->addr_source; 3424 type = e->addr_source;
3417 } 3425 }
3418 } 3426 }
3419 } 3427 }
3420 3428
3421 /* type will only have been set if we successfully registered an si */ 3429 /* type will only have been set if we successfully registered an si */
3422 if (type) { 3430 if (type) {
3423 mutex_unlock(&smi_infos_lock); 3431 mutex_unlock(&smi_infos_lock);
3424 return 0; 3432 return 0;
3425 } 3433 }
3426 3434
3427 /* Fall back to the preferred device */ 3435 /* Fall back to the preferred device */
3428 3436
3429 list_for_each_entry(e, &smi_infos, link) { 3437 list_for_each_entry(e, &smi_infos, link) {
3430 if (!e->irq && (!type || e->addr_source == type)) { 3438 if (!e->irq && (!type || e->addr_source == type)) {
3431 if (!try_smi_init(e)) { 3439 if (!try_smi_init(e)) {
3432 type = e->addr_source; 3440 type = e->addr_source;
3433 } 3441 }
3434 } 3442 }
3435 } 3443 }
3436 mutex_unlock(&smi_infos_lock); 3444 mutex_unlock(&smi_infos_lock);
3437 3445
3438 if (type) 3446 if (type)
3439 return 0; 3447 return 0;
3440 3448
3441 if (si_trydefaults) { 3449 if (si_trydefaults) {
3442 mutex_lock(&smi_infos_lock); 3450 mutex_lock(&smi_infos_lock);
3443 if (list_empty(&smi_infos)) { 3451 if (list_empty(&smi_infos)) {
3444 /* No BMC was found, try defaults. */ 3452 /* No BMC was found, try defaults. */
3445 mutex_unlock(&smi_infos_lock); 3453 mutex_unlock(&smi_infos_lock);
3446 default_find_bmc(); 3454 default_find_bmc();
3447 } else 3455 } else
3448 mutex_unlock(&smi_infos_lock); 3456 mutex_unlock(&smi_infos_lock);
3449 } 3457 }
3450 3458
3451 mutex_lock(&smi_infos_lock); 3459 mutex_lock(&smi_infos_lock);
3452 if (unload_when_empty && list_empty(&smi_infos)) { 3460 if (unload_when_empty && list_empty(&smi_infos)) {
3453 mutex_unlock(&smi_infos_lock); 3461 mutex_unlock(&smi_infos_lock);
3454 cleanup_ipmi_si(); 3462 cleanup_ipmi_si();
3455 printk(KERN_WARNING PFX 3463 printk(KERN_WARNING PFX
3456 "Unable to find any System Interface(s)\n"); 3464 "Unable to find any System Interface(s)\n");
3457 return -ENODEV; 3465 return -ENODEV;
3458 } else { 3466 } else {
3459 mutex_unlock(&smi_infos_lock); 3467 mutex_unlock(&smi_infos_lock);
3460 return 0; 3468 return 0;
3461 } 3469 }
3462 } 3470 }
3463 module_init(init_ipmi_si); 3471 module_init(init_ipmi_si);
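
As a side note, the comment inside init_ipmi_si() above describes a two-pass registration policy: prefer interfaces that report an IRQ, stick with the first address source that succeeds, and only fall back to polled (IRQ-less) interfaces when nothing with an IRQ registered. The standalone sketch below spells that policy out over toy data; the struct, list contents and toy_try_init() are invented for the example and are not driver code.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the real candidate list and try_smi_init(). */
struct candidate { const char *name; int irq; int addr_source; };

static bool toy_try_init(const struct candidate *c)
{
	(void)c;
	return true;	/* pretend initialization always succeeds */
}

int main(void)
{
	struct candidate list[] = {
		{ "acpi-kcs", 10, 1 },
		{ "dmi-smic",  0, 2 },
		{ "acpi-bt",  11, 1 },
	};
	const size_t n = sizeof(list) / sizeof(list[0]);
	int type = 0;	/* 0 == no address source has registered yet */
	size_t i;

	/* Pass 1: only candidates with an IRQ, locked to one address source. */
	for (i = 0; i < n; i++)
		if (list[i].irq && (!type || list[i].addr_source == type) &&
		    toy_try_init(&list[i])) {
			type = list[i].addr_source;
			printf("registered %s (irq %d)\n", list[i].name, list[i].irq);
		}

	/* Pass 2: only reached when pass 1 registered nothing. */
	if (!type)
		for (i = 0; i < n; i++)
			if (!list[i].irq && (!type || list[i].addr_source == type) &&
			    toy_try_init(&list[i])) {
				type = list[i].addr_source;
				printf("registered %s (polled)\n", list[i].name);
			}

	return type ? 0 : 1;
}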
3464 3472
3465 static void cleanup_one_si(struct smi_info *to_clean) 3473 static void cleanup_one_si(struct smi_info *to_clean)
3466 { 3474 {
3467 int rv = 0; 3475 int rv = 0;
3468 unsigned long flags; 3476 unsigned long flags;
3469 3477
3470 if (!to_clean) 3478 if (!to_clean)
3471 return; 3479 return;
3472 3480
3473 list_del(&to_clean->link); 3481 list_del(&to_clean->link);
3474 3482
3475 /* Tell the driver that we are shutting down. */ 3483 /* Tell the driver that we are shutting down. */
3476 atomic_inc(&to_clean->stop_operation); 3484 atomic_inc(&to_clean->stop_operation);
3477 3485
3478 /* 3486 /*
3479 * Make sure the timer and thread are stopped and will not run 3487 * Make sure the timer and thread are stopped and will not run
3480 * again. 3488 * again.
3481 */ 3489 */
3482 wait_for_timer_and_thread(to_clean); 3490 wait_for_timer_and_thread(to_clean);
3483 3491
3484 /* 3492 /*
3485 * Timeouts are stopped, now make sure the interrupts are off 3493 * Timeouts are stopped, now make sure the interrupts are off
3486 * for the device. A little tricky with locks to make sure 3494 * for the device. A little tricky with locks to make sure
3487 * there are no races. 3495 * there are no races.
3488 */ 3496 */
3489 spin_lock_irqsave(&to_clean->si_lock, flags); 3497 spin_lock_irqsave(&to_clean->si_lock, flags);
3490 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { 3498 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3491 spin_unlock_irqrestore(&to_clean->si_lock, flags); 3499 spin_unlock_irqrestore(&to_clean->si_lock, flags);
3492 poll(to_clean); 3500 poll(to_clean);
3493 schedule_timeout_uninterruptible(1); 3501 schedule_timeout_uninterruptible(1);
3494 spin_lock_irqsave(&to_clean->si_lock, flags); 3502 spin_lock_irqsave(&to_clean->si_lock, flags);
3495 } 3503 }
3496 disable_si_irq(to_clean); 3504 disable_si_irq(to_clean);
3497 spin_unlock_irqrestore(&to_clean->si_lock, flags); 3505 spin_unlock_irqrestore(&to_clean->si_lock, flags);
3498 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { 3506 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3499 poll(to_clean); 3507 poll(to_clean);
3500 schedule_timeout_uninterruptible(1); 3508 schedule_timeout_uninterruptible(1);
3501 } 3509 }
3502 3510
3503 /* Clean up interrupts and make sure that everything is done. */ 3511 /* Clean up interrupts and make sure that everything is done. */
3504 if (to_clean->irq_cleanup) 3512 if (to_clean->irq_cleanup)
3505 to_clean->irq_cleanup(to_clean); 3513 to_clean->irq_cleanup(to_clean);
3506 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { 3514 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3507 poll(to_clean); 3515 poll(to_clean);
3508 schedule_timeout_uninterruptible(1); 3516 schedule_timeout_uninterruptible(1);
3509 } 3517 }
3510 3518
3511 if (to_clean->intf) 3519 if (to_clean->intf)
3512 rv = ipmi_unregister_smi(to_clean->intf); 3520 rv = ipmi_unregister_smi(to_clean->intf);
3513 3521
3514 if (rv) { 3522 if (rv) {
3515 printk(KERN_ERR PFX "Unable to unregister device: errno=%d\n", 3523 printk(KERN_ERR PFX "Unable to unregister device: errno=%d\n",
3516 rv); 3524 rv);
3517 } 3525 }
3518 3526
3519 if (to_clean->handlers) 3527 if (to_clean->handlers)
3520 to_clean->handlers->cleanup(to_clean->si_sm); 3528 to_clean->handlers->cleanup(to_clean->si_sm);
3521 3529
3522 kfree(to_clean->si_sm); 3530 kfree(to_clean->si_sm);
3523 3531
3524 if (to_clean->addr_source_cleanup) 3532 if (to_clean->addr_source_cleanup)
3525 to_clean->addr_source_cleanup(to_clean); 3533 to_clean->addr_source_cleanup(to_clean);
3526 if (to_clean->io_cleanup) 3534 if (to_clean->io_cleanup)
3527 to_clean->io_cleanup(to_clean); 3535 to_clean->io_cleanup(to_clean);
3528 3536
3529 if (to_clean->dev_registered) 3537 if (to_clean->dev_registered)
3530 platform_device_unregister(to_clean->pdev); 3538 platform_device_unregister(to_clean->pdev);
3531 3539
3532 kfree(to_clean); 3540 kfree(to_clean);
3533 } 3541 }
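
The part of cleanup_one_si() that its comments call "a little tricky" is the ordering: stop anything that could queue new work, drain the state machine, and only then mask and free the interrupt, draining again after each step before the interface is unregistered. The stripped-down sketch below restates just that ordering; every function in it is a placeholder standing in for a driver operation, not a real API.

#include <stdbool.h>
#include <stdio.h>

/* Placeholders only -- none of these are driver functions. */
static void stop_timer_and_thread(void)  { }
static bool state_machine_idle(void)     { return true; }
static void poll_once(void)              { }
static void irq_off(void)                { }
static void free_irq_handler(void)       { }
static void unregister_interface(void)   { }

static void drain(void)
{
	/* Keep servicing the state machine until nothing is in flight. */
	while (!state_machine_idle())
		poll_once();
}

int main(void)
{
	stop_timer_and_thread();	/* 1. no new work can be queued */
	drain();			/* 2. finish what is already in flight */
	irq_off();			/* 3. mask the interrupt under control */
	drain();			/* 4. catch anything that raced in */
	free_irq_handler();		/* 5. now the handler can be released */
	drain();			/* 6. final pass before unregistering */
	unregister_interface();
	puts("interface torn down");
	return 0;
}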
3534 3542
3535 static void __exit cleanup_ipmi_si(void) 3543 static void __exit cleanup_ipmi_si(void)
3536 { 3544 {
3537 struct smi_info *e, *tmp_e; 3545 struct smi_info *e, *tmp_e;
3538 3546
3539 if (!initialized) 3547 if (!initialized)
3540 return; 3548 return;
3541 3549
3542 #ifdef CONFIG_PCI 3550 #ifdef CONFIG_PCI
3543 if (pci_registered) 3551 if (pci_registered)
3544 pci_unregister_driver(&ipmi_pci_driver); 3552 pci_unregister_driver(&ipmi_pci_driver);
3545 #endif 3553 #endif
3546 #ifdef CONFIG_ACPI 3554 #ifdef CONFIG_ACPI
3547 if (pnp_registered) 3555 if (pnp_registered)
3548 pnp_unregister_driver(&ipmi_pnp_driver); 3556 pnp_unregister_driver(&ipmi_pnp_driver);
3549 #endif 3557 #endif
3550 3558
3551 #ifdef CONFIG_PPC_OF 3559 #ifdef CONFIG_PPC_OF
3552 if (of_registered) 3560 if (of_registered)
3553 of_unregister_platform_driver(&ipmi_of_platform_driver); 3561 of_unregister_platform_driver(&ipmi_of_platform_driver);
3554 #endif 3562 #endif
3555 3563
3556 mutex_lock(&smi_infos_lock); 3564 mutex_lock(&smi_infos_lock);
3557 list_for_each_entry_safe(e, tmp_e, &smi_infos, link) 3565 list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
3558 cleanup_one_si(e); 3566 cleanup_one_si(e);
3559 mutex_unlock(&smi_infos_lock); 3567 mutex_unlock(&smi_infos_lock);
3560 3568
3561 driver_unregister(&ipmi_driver.driver); 3569 driver_unregister(&ipmi_driver.driver);
3562 } 3570 }
3563 module_exit(cleanup_ipmi_si); 3571 module_exit(cleanup_ipmi_si);
3564 3572
3565 MODULE_LICENSE("GPL"); 3573 MODULE_LICENSE("GPL");
3566 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); 3574 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
3567 MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT" 3575 MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT"
3568 " system interfaces."); 3576 " system interfaces.");
3569 3577