Commit d4d5291c8cd499b1b590336059d5cc3e24c1ced6

Authored by Arjan van de Ven
Committed by Linus Torvalds
1 parent 5dd559f020

driver synchronization: make scsi_wait_scan more advanced

There is currently only one way for userspace to say "wait for my storage
devices to become ready for the modules I just loaded": loading the
scsi_wait_scan module. Userspace expects that once this module has been
loaded, all the (storage) devices whose drivers were loaded before the
module load are present.

Now, there are some issues with the implementation, and the async
infrastructure got caught in the middle of this: the existing code only
waits for SCSI async probing to finish, but it does not take into
account that probing might not have begun yet.
(Russell ran into this problem on his machine, and this fix works for him.)

This patch fixes the problem more thoroughly than the previous "fix", which
had a bad side effect: kernel code that wanted to wait for the SCSI scan
would also perform a full async synchronization, which deadlocks if called
from async context (there is a report about this on lkml).
With this patch, the module first waits for all device driver probes to
complete, and then waits for the SCSI parallel scan to finish.

Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Tested-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
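
For illustration, a minimal sketch of the resulting wait sequence in the
scsi_wait_scan module (that file is among the 4 changed files but is not
shown in the excerpt below; the init-function name and module boilerplate
here are assumed). It relies only on the two functions visible in this
diff: wait_for_device_probe(), newly exported by this patch, and
scsi_complete_async_scans(), which no longer does a full async sync itself:

	static int __init wait_scan_init(void)
	{
		/*
		 * First wait for all driver probes to finish: drivers
		 * loaded just before this module may not even have
		 * started probing yet.
		 */
		wait_for_device_probe();
		/*
		 * Then wait for the SCSI parallel (async) scans that
		 * those probes kicked off.
		 */
		return scsi_complete_async_scans();
	}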

Showing 4 changed files with 13 additions and 2 deletions

drivers/base/dd.c
1 /* 1 /*
2 * drivers/base/dd.c - The core device/driver interactions. 2 * drivers/base/dd.c - The core device/driver interactions.
3 * 3 *
4 * This file contains the (sometimes tricky) code that controls the 4 * This file contains the (sometimes tricky) code that controls the
5 * interactions between devices and drivers, which primarily includes 5 * interactions between devices and drivers, which primarily includes
6 * driver binding and unbinding. 6 * driver binding and unbinding.
7 * 7 *
8 * All of this code used to exist in drivers/base/bus.c, but was 8 * All of this code used to exist in drivers/base/bus.c, but was
9 * relocated to here in the name of compartmentalization (since it wasn't 9 * relocated to here in the name of compartmentalization (since it wasn't
10 * strictly code just for the 'struct bus_type'. 10 * strictly code just for the 'struct bus_type'.
11 * 11 *
12 * Copyright (c) 2002-5 Patrick Mochel 12 * Copyright (c) 2002-5 Patrick Mochel
13 * Copyright (c) 2002-3 Open Source Development Labs 13 * Copyright (c) 2002-3 Open Source Development Labs
14 * Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de> 14 * Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de>
15 * Copyright (c) 2007 Novell Inc. 15 * Copyright (c) 2007 Novell Inc.
16 * 16 *
17 * This file is released under the GPLv2 17 * This file is released under the GPLv2
18 */ 18 */
19 19
20 #include <linux/device.h> 20 #include <linux/device.h>
21 #include <linux/delay.h> 21 #include <linux/delay.h>
22 #include <linux/module.h> 22 #include <linux/module.h>
23 #include <linux/kthread.h> 23 #include <linux/kthread.h>
24 #include <linux/wait.h> 24 #include <linux/wait.h>
25 #include <linux/async.h> 25 #include <linux/async.h>
26 26
27 #include "base.h" 27 #include "base.h"
28 #include "power/power.h" 28 #include "power/power.h"
29 29
30 30
31 static void driver_bound(struct device *dev) 31 static void driver_bound(struct device *dev)
32 { 32 {
33 if (klist_node_attached(&dev->p->knode_driver)) { 33 if (klist_node_attached(&dev->p->knode_driver)) {
34 printk(KERN_WARNING "%s: device %s already bound\n", 34 printk(KERN_WARNING "%s: device %s already bound\n",
35 __func__, kobject_name(&dev->kobj)); 35 __func__, kobject_name(&dev->kobj));
36 return; 36 return;
37 } 37 }
38 38
39 pr_debug("driver: '%s': %s: bound to device '%s'\n", dev_name(dev), 39 pr_debug("driver: '%s': %s: bound to device '%s'\n", dev_name(dev),
40 __func__, dev->driver->name); 40 __func__, dev->driver->name);
41 41
42 if (dev->bus) 42 if (dev->bus)
43 blocking_notifier_call_chain(&dev->bus->p->bus_notifier, 43 blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
44 BUS_NOTIFY_BOUND_DRIVER, dev); 44 BUS_NOTIFY_BOUND_DRIVER, dev);
45 45
46 klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices); 46 klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
47 } 47 }
48 48
49 static int driver_sysfs_add(struct device *dev) 49 static int driver_sysfs_add(struct device *dev)
50 { 50 {
51 int ret; 51 int ret;
52 52
53 ret = sysfs_create_link(&dev->driver->p->kobj, &dev->kobj, 53 ret = sysfs_create_link(&dev->driver->p->kobj, &dev->kobj,
54 kobject_name(&dev->kobj)); 54 kobject_name(&dev->kobj));
55 if (ret == 0) { 55 if (ret == 0) {
56 ret = sysfs_create_link(&dev->kobj, &dev->driver->p->kobj, 56 ret = sysfs_create_link(&dev->kobj, &dev->driver->p->kobj,
57 "driver"); 57 "driver");
58 if (ret) 58 if (ret)
59 sysfs_remove_link(&dev->driver->p->kobj, 59 sysfs_remove_link(&dev->driver->p->kobj,
60 kobject_name(&dev->kobj)); 60 kobject_name(&dev->kobj));
61 } 61 }
62 return ret; 62 return ret;
63 } 63 }
64 64
65 static void driver_sysfs_remove(struct device *dev) 65 static void driver_sysfs_remove(struct device *dev)
66 { 66 {
67 struct device_driver *drv = dev->driver; 67 struct device_driver *drv = dev->driver;
68 68
69 if (drv) { 69 if (drv) {
70 sysfs_remove_link(&drv->p->kobj, kobject_name(&dev->kobj)); 70 sysfs_remove_link(&drv->p->kobj, kobject_name(&dev->kobj));
71 sysfs_remove_link(&dev->kobj, "driver"); 71 sysfs_remove_link(&dev->kobj, "driver");
72 } 72 }
73 } 73 }
74 74
75 /** 75 /**
76 * device_bind_driver - bind a driver to one device. 76 * device_bind_driver - bind a driver to one device.
77 * @dev: device. 77 * @dev: device.
78 * 78 *
79 * Allow manual attachment of a driver to a device. 79 * Allow manual attachment of a driver to a device.
80 * Caller must have already set @dev->driver. 80 * Caller must have already set @dev->driver.
81 * 81 *
82 * Note that this does not modify the bus reference count 82 * Note that this does not modify the bus reference count
83 * nor take the bus's rwsem. Please verify those are accounted 83 * nor take the bus's rwsem. Please verify those are accounted
84 * for before calling this. (It is ok to call with no other effort 84 * for before calling this. (It is ok to call with no other effort
85 * from a driver's probe() method.) 85 * from a driver's probe() method.)
86 * 86 *
87 * This function must be called with @dev->sem held. 87 * This function must be called with @dev->sem held.
88 */ 88 */
89 int device_bind_driver(struct device *dev) 89 int device_bind_driver(struct device *dev)
90 { 90 {
91 int ret; 91 int ret;
92 92
93 ret = driver_sysfs_add(dev); 93 ret = driver_sysfs_add(dev);
94 if (!ret) 94 if (!ret)
95 driver_bound(dev); 95 driver_bound(dev);
96 return ret; 96 return ret;
97 } 97 }
98 EXPORT_SYMBOL_GPL(device_bind_driver); 98 EXPORT_SYMBOL_GPL(device_bind_driver);
99 99
100 static atomic_t probe_count = ATOMIC_INIT(0); 100 static atomic_t probe_count = ATOMIC_INIT(0);
101 static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue); 101 static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
102 102
103 static int really_probe(struct device *dev, struct device_driver *drv) 103 static int really_probe(struct device *dev, struct device_driver *drv)
104 { 104 {
105 int ret = 0; 105 int ret = 0;
106 106
107 atomic_inc(&probe_count); 107 atomic_inc(&probe_count);
108 pr_debug("bus: '%s': %s: probing driver %s with device %s\n", 108 pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
109 drv->bus->name, __func__, drv->name, dev_name(dev)); 109 drv->bus->name, __func__, drv->name, dev_name(dev));
110 WARN_ON(!list_empty(&dev->devres_head)); 110 WARN_ON(!list_empty(&dev->devres_head));
111 111
112 dev->driver = drv; 112 dev->driver = drv;
113 if (driver_sysfs_add(dev)) { 113 if (driver_sysfs_add(dev)) {
114 printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n", 114 printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n",
115 __func__, dev_name(dev)); 115 __func__, dev_name(dev));
116 goto probe_failed; 116 goto probe_failed;
117 } 117 }
118 118
119 if (dev->bus->probe) { 119 if (dev->bus->probe) {
120 ret = dev->bus->probe(dev); 120 ret = dev->bus->probe(dev);
121 if (ret) 121 if (ret)
122 goto probe_failed; 122 goto probe_failed;
123 } else if (drv->probe) { 123 } else if (drv->probe) {
124 ret = drv->probe(dev); 124 ret = drv->probe(dev);
125 if (ret) 125 if (ret)
126 goto probe_failed; 126 goto probe_failed;
127 } 127 }
128 128
129 driver_bound(dev); 129 driver_bound(dev);
130 ret = 1; 130 ret = 1;
131 pr_debug("bus: '%s': %s: bound device %s to driver %s\n", 131 pr_debug("bus: '%s': %s: bound device %s to driver %s\n",
132 drv->bus->name, __func__, dev_name(dev), drv->name); 132 drv->bus->name, __func__, dev_name(dev), drv->name);
133 goto done; 133 goto done;
134 134
135 probe_failed: 135 probe_failed:
136 devres_release_all(dev); 136 devres_release_all(dev);
137 driver_sysfs_remove(dev); 137 driver_sysfs_remove(dev);
138 dev->driver = NULL; 138 dev->driver = NULL;
139 139
140 if (ret != -ENODEV && ret != -ENXIO) { 140 if (ret != -ENODEV && ret != -ENXIO) {
141 /* driver matched but the probe failed */ 141 /* driver matched but the probe failed */
142 printk(KERN_WARNING 142 printk(KERN_WARNING
143 "%s: probe of %s failed with error %d\n", 143 "%s: probe of %s failed with error %d\n",
144 drv->name, dev_name(dev), ret); 144 drv->name, dev_name(dev), ret);
145 } 145 }
146 /* 146 /*
147 * Ignore errors returned by ->probe so that the next driver can try 147 * Ignore errors returned by ->probe so that the next driver can try
148 * its luck. 148 * its luck.
149 */ 149 */
150 ret = 0; 150 ret = 0;
151 done: 151 done:
152 atomic_dec(&probe_count); 152 atomic_dec(&probe_count);
153 wake_up(&probe_waitqueue); 153 wake_up(&probe_waitqueue);
154 return ret; 154 return ret;
155 } 155 }
156 156
157 /** 157 /**
158 * driver_probe_done 158 * driver_probe_done
159 * Determine if the probe sequence is finished or not. 159 * Determine if the probe sequence is finished or not.
160 * 160 *
161 * Should somehow figure out how to use a semaphore, not an atomic variable... 161 * Should somehow figure out how to use a semaphore, not an atomic variable...
162 */ 162 */
163 int driver_probe_done(void) 163 int driver_probe_done(void)
164 { 164 {
165 pr_debug("%s: probe_count = %d\n", __func__, 165 pr_debug("%s: probe_count = %d\n", __func__,
166 atomic_read(&probe_count)); 166 atomic_read(&probe_count));
167 if (atomic_read(&probe_count)) 167 if (atomic_read(&probe_count))
168 return -EBUSY; 168 return -EBUSY;
169 return 0; 169 return 0;
170 } 170 }
171 171
172 /** 172 /**
173 * wait_for_device_probe 173 * wait_for_device_probe
174 * Wait for device probing to be completed. 174 * Wait for device probing to be completed.
175 */ 175 */
176 void wait_for_device_probe(void) 176 void wait_for_device_probe(void)
177 { 177 {
178 /* wait for the known devices to complete their probing */ 178 /* wait for the known devices to complete their probing */
179 wait_event(probe_waitqueue, atomic_read(&probe_count) == 0); 179 wait_event(probe_waitqueue, atomic_read(&probe_count) == 0);
180 async_synchronize_full(); 180 async_synchronize_full();
181 } 181 }
182 EXPORT_SYMBOL_GPL(wait_for_device_probe);
182 183
183 /** 184 /**
184 * driver_probe_device - attempt to bind device & driver together 185 * driver_probe_device - attempt to bind device & driver together
185 * @drv: driver to bind a device to 186 * @drv: driver to bind a device to
186 * @dev: device to try to bind to the driver 187 * @dev: device to try to bind to the driver
187 * 188 *
188 * This function returns -ENODEV if the device is not registered, 189 * This function returns -ENODEV if the device is not registered,
189 * 1 if the device is bound sucessfully and 0 otherwise. 190 * 1 if the device is bound sucessfully and 0 otherwise.
190 * 191 *
191 * This function must be called with @dev->sem held. When called for a 192 * This function must be called with @dev->sem held. When called for a
192 * USB interface, @dev->parent->sem must be held as well. 193 * USB interface, @dev->parent->sem must be held as well.
193 */ 194 */
194 int driver_probe_device(struct device_driver *drv, struct device *dev) 195 int driver_probe_device(struct device_driver *drv, struct device *dev)
195 { 196 {
196 int ret = 0; 197 int ret = 0;
197 198
198 if (!device_is_registered(dev)) 199 if (!device_is_registered(dev))
199 return -ENODEV; 200 return -ENODEV;
200 201
201 pr_debug("bus: '%s': %s: matched device %s with driver %s\n", 202 pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
202 drv->bus->name, __func__, dev_name(dev), drv->name); 203 drv->bus->name, __func__, dev_name(dev), drv->name);
203 204
204 ret = really_probe(dev, drv); 205 ret = really_probe(dev, drv);
205 206
206 return ret; 207 return ret;
207 } 208 }
208 209
209 static int __device_attach(struct device_driver *drv, void *data) 210 static int __device_attach(struct device_driver *drv, void *data)
210 { 211 {
211 struct device *dev = data; 212 struct device *dev = data;
212 213
213 if (!driver_match_device(drv, dev)) 214 if (!driver_match_device(drv, dev))
214 return 0; 215 return 0;
215 216
216 return driver_probe_device(drv, dev); 217 return driver_probe_device(drv, dev);
217 } 218 }
218 219
219 /** 220 /**
220 * device_attach - try to attach device to a driver. 221 * device_attach - try to attach device to a driver.
221 * @dev: device. 222 * @dev: device.
222 * 223 *
223 * Walk the list of drivers that the bus has and call 224 * Walk the list of drivers that the bus has and call
224 * driver_probe_device() for each pair. If a compatible 225 * driver_probe_device() for each pair. If a compatible
225 * pair is found, break out and return. 226 * pair is found, break out and return.
226 * 227 *
227 * Returns 1 if the device was bound to a driver; 228 * Returns 1 if the device was bound to a driver;
228 * 0 if no matching device was found; 229 * 0 if no matching device was found;
229 * -ENODEV if the device is not registered. 230 * -ENODEV if the device is not registered.
230 * 231 *
231 * When called for a USB interface, @dev->parent->sem must be held. 232 * When called for a USB interface, @dev->parent->sem must be held.
232 */ 233 */
233 int device_attach(struct device *dev) 234 int device_attach(struct device *dev)
234 { 235 {
235 int ret = 0; 236 int ret = 0;
236 237
237 down(&dev->sem); 238 down(&dev->sem);
238 if (dev->driver) { 239 if (dev->driver) {
239 ret = device_bind_driver(dev); 240 ret = device_bind_driver(dev);
240 if (ret == 0) 241 if (ret == 0)
241 ret = 1; 242 ret = 1;
242 else { 243 else {
243 dev->driver = NULL; 244 dev->driver = NULL;
244 ret = 0; 245 ret = 0;
245 } 246 }
246 } else { 247 } else {
247 ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach); 248 ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach);
248 } 249 }
249 up(&dev->sem); 250 up(&dev->sem);
250 return ret; 251 return ret;
251 } 252 }
252 EXPORT_SYMBOL_GPL(device_attach); 253 EXPORT_SYMBOL_GPL(device_attach);
253 254
254 static int __driver_attach(struct device *dev, void *data) 255 static int __driver_attach(struct device *dev, void *data)
255 { 256 {
256 struct device_driver *drv = data; 257 struct device_driver *drv = data;
257 258
258 /* 259 /*
259 * Lock device and try to bind to it. We drop the error 260 * Lock device and try to bind to it. We drop the error
260 * here and always return 0, because we need to keep trying 261 * here and always return 0, because we need to keep trying
261 * to bind to devices and some drivers will return an error 262 * to bind to devices and some drivers will return an error
262 * simply if it didn't support the device. 263 * simply if it didn't support the device.
263 * 264 *
264 * driver_probe_device() will spit a warning if there 265 * driver_probe_device() will spit a warning if there
265 * is an error. 266 * is an error.
266 */ 267 */
267 268
268 if (!driver_match_device(drv, dev)) 269 if (!driver_match_device(drv, dev))
269 return 0; 270 return 0;
270 271
271 if (dev->parent) /* Needed for USB */ 272 if (dev->parent) /* Needed for USB */
272 down(&dev->parent->sem); 273 down(&dev->parent->sem);
273 down(&dev->sem); 274 down(&dev->sem);
274 if (!dev->driver) 275 if (!dev->driver)
275 driver_probe_device(drv, dev); 276 driver_probe_device(drv, dev);
276 up(&dev->sem); 277 up(&dev->sem);
277 if (dev->parent) 278 if (dev->parent)
278 up(&dev->parent->sem); 279 up(&dev->parent->sem);
279 280
280 return 0; 281 return 0;
281 } 282 }
282 283
283 /** 284 /**
284 * driver_attach - try to bind driver to devices. 285 * driver_attach - try to bind driver to devices.
285 * @drv: driver. 286 * @drv: driver.
286 * 287 *
287 * Walk the list of devices that the bus has on it and try to 288 * Walk the list of devices that the bus has on it and try to
288 * match the driver with each one. If driver_probe_device() 289 * match the driver with each one. If driver_probe_device()
289 * returns 0 and the @dev->driver is set, we've found a 290 * returns 0 and the @dev->driver is set, we've found a
290 * compatible pair. 291 * compatible pair.
291 */ 292 */
292 int driver_attach(struct device_driver *drv) 293 int driver_attach(struct device_driver *drv)
293 { 294 {
294 return bus_for_each_dev(drv->bus, NULL, drv, __driver_attach); 295 return bus_for_each_dev(drv->bus, NULL, drv, __driver_attach);
295 } 296 }
296 EXPORT_SYMBOL_GPL(driver_attach); 297 EXPORT_SYMBOL_GPL(driver_attach);
297 298
298 /* 299 /*
299 * __device_release_driver() must be called with @dev->sem held. 300 * __device_release_driver() must be called with @dev->sem held.
300 * When called for a USB interface, @dev->parent->sem must be held as well. 301 * When called for a USB interface, @dev->parent->sem must be held as well.
301 */ 302 */
302 static void __device_release_driver(struct device *dev) 303 static void __device_release_driver(struct device *dev)
303 { 304 {
304 struct device_driver *drv; 305 struct device_driver *drv;
305 306
306 drv = dev->driver; 307 drv = dev->driver;
307 if (drv) { 308 if (drv) {
308 driver_sysfs_remove(dev); 309 driver_sysfs_remove(dev);
309 310
310 if (dev->bus) 311 if (dev->bus)
311 blocking_notifier_call_chain(&dev->bus->p->bus_notifier, 312 blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
312 BUS_NOTIFY_UNBIND_DRIVER, 313 BUS_NOTIFY_UNBIND_DRIVER,
313 dev); 314 dev);
314 315
315 if (dev->bus && dev->bus->remove) 316 if (dev->bus && dev->bus->remove)
316 dev->bus->remove(dev); 317 dev->bus->remove(dev);
317 else if (drv->remove) 318 else if (drv->remove)
318 drv->remove(dev); 319 drv->remove(dev);
319 devres_release_all(dev); 320 devres_release_all(dev);
320 dev->driver = NULL; 321 dev->driver = NULL;
321 klist_remove(&dev->p->knode_driver); 322 klist_remove(&dev->p->knode_driver);
322 } 323 }
323 } 324 }
324 325
325 /** 326 /**
326 * device_release_driver - manually detach device from driver. 327 * device_release_driver - manually detach device from driver.
327 * @dev: device. 328 * @dev: device.
328 * 329 *
329 * Manually detach device from driver. 330 * Manually detach device from driver.
330 * When called for a USB interface, @dev->parent->sem must be held. 331 * When called for a USB interface, @dev->parent->sem must be held.
331 */ 332 */
332 void device_release_driver(struct device *dev) 333 void device_release_driver(struct device *dev)
333 { 334 {
334 /* 335 /*
335 * If anyone calls device_release_driver() recursively from 336 * If anyone calls device_release_driver() recursively from
336 * within their ->remove callback for the same device, they 337 * within their ->remove callback for the same device, they
337 * will deadlock right here. 338 * will deadlock right here.
338 */ 339 */
339 down(&dev->sem); 340 down(&dev->sem);
340 __device_release_driver(dev); 341 __device_release_driver(dev);
341 up(&dev->sem); 342 up(&dev->sem);
342 } 343 }
343 EXPORT_SYMBOL_GPL(device_release_driver); 344 EXPORT_SYMBOL_GPL(device_release_driver);
344 345
345 /** 346 /**
346 * driver_detach - detach driver from all devices it controls. 347 * driver_detach - detach driver from all devices it controls.
347 * @drv: driver. 348 * @drv: driver.
348 */ 349 */
349 void driver_detach(struct device_driver *drv) 350 void driver_detach(struct device_driver *drv)
350 { 351 {
351 struct device_private *dev_prv; 352 struct device_private *dev_prv;
352 struct device *dev; 353 struct device *dev;
353 354
354 for (;;) { 355 for (;;) {
355 spin_lock(&drv->p->klist_devices.k_lock); 356 spin_lock(&drv->p->klist_devices.k_lock);
356 if (list_empty(&drv->p->klist_devices.k_list)) { 357 if (list_empty(&drv->p->klist_devices.k_list)) {
357 spin_unlock(&drv->p->klist_devices.k_lock); 358 spin_unlock(&drv->p->klist_devices.k_lock);
358 break; 359 break;
359 } 360 }
360 dev_prv = list_entry(drv->p->klist_devices.k_list.prev, 361 dev_prv = list_entry(drv->p->klist_devices.k_list.prev,
361 struct device_private, 362 struct device_private,
362 knode_driver.n_node); 363 knode_driver.n_node);
363 dev = dev_prv->device; 364 dev = dev_prv->device;
364 get_device(dev); 365 get_device(dev);
365 spin_unlock(&drv->p->klist_devices.k_lock); 366 spin_unlock(&drv->p->klist_devices.k_lock);
366 367
367 if (dev->parent) /* Needed for USB */ 368 if (dev->parent) /* Needed for USB */
368 down(&dev->parent->sem); 369 down(&dev->parent->sem);
369 down(&dev->sem); 370 down(&dev->sem);
370 if (dev->driver == drv) 371 if (dev->driver == drv)
371 __device_release_driver(dev); 372 __device_release_driver(dev);
372 up(&dev->sem); 373 up(&dev->sem);
373 if (dev->parent) 374 if (dev->parent)
374 up(&dev->parent->sem); 375 up(&dev->parent->sem);
375 put_device(dev); 376 put_device(dev);
376 } 377 }
377 } 378 }
378 379
drivers/scsi/scsi_scan.c
1 /* 1 /*
2 * scsi_scan.c 2 * scsi_scan.c
3 * 3 *
4 * Copyright (C) 2000 Eric Youngdale, 4 * Copyright (C) 2000 Eric Youngdale,
5 * Copyright (C) 2002 Patrick Mansfield 5 * Copyright (C) 2002 Patrick Mansfield
6 * 6 *
7 * The general scanning/probing algorithm is as follows, exceptions are 7 * The general scanning/probing algorithm is as follows, exceptions are
8 * made to it depending on device specific flags, compilation options, and 8 * made to it depending on device specific flags, compilation options, and
9 * global variable (boot or module load time) settings. 9 * global variable (boot or module load time) settings.
10 * 10 *
11 * A specific LUN is scanned via an INQUIRY command; if the LUN has a 11 * A specific LUN is scanned via an INQUIRY command; if the LUN has a
12 * device attached, a scsi_device is allocated and setup for it. 12 * device attached, a scsi_device is allocated and setup for it.
13 * 13 *
14 * For every id of every channel on the given host: 14 * For every id of every channel on the given host:
15 * 15 *
16 * Scan LUN 0; if the target responds to LUN 0 (even if there is no 16 * Scan LUN 0; if the target responds to LUN 0 (even if there is no
17 * device or storage attached to LUN 0): 17 * device or storage attached to LUN 0):
18 * 18 *
19 * If LUN 0 has a device attached, allocate and setup a 19 * If LUN 0 has a device attached, allocate and setup a
20 * scsi_device for it. 20 * scsi_device for it.
21 * 21 *
22 * If target is SCSI-3 or up, issue a REPORT LUN, and scan 22 * If target is SCSI-3 or up, issue a REPORT LUN, and scan
23 * all of the LUNs returned by the REPORT LUN; else, 23 * all of the LUNs returned by the REPORT LUN; else,
24 * sequentially scan LUNs up until some maximum is reached, 24 * sequentially scan LUNs up until some maximum is reached,
25 * or a LUN is seen that cannot have a device attached to it. 25 * or a LUN is seen that cannot have a device attached to it.
26 */ 26 */
27 27
28 #include <linux/module.h> 28 #include <linux/module.h>
29 #include <linux/moduleparam.h> 29 #include <linux/moduleparam.h>
30 #include <linux/init.h> 30 #include <linux/init.h>
31 #include <linux/blkdev.h> 31 #include <linux/blkdev.h>
32 #include <linux/delay.h> 32 #include <linux/delay.h>
33 #include <linux/kthread.h> 33 #include <linux/kthread.h>
34 #include <linux/spinlock.h> 34 #include <linux/spinlock.h>
35 #include <linux/async.h> 35 #include <linux/async.h>
36 36
37 #include <scsi/scsi.h> 37 #include <scsi/scsi.h>
38 #include <scsi/scsi_cmnd.h> 38 #include <scsi/scsi_cmnd.h>
39 #include <scsi/scsi_device.h> 39 #include <scsi/scsi_device.h>
40 #include <scsi/scsi_driver.h> 40 #include <scsi/scsi_driver.h>
41 #include <scsi/scsi_devinfo.h> 41 #include <scsi/scsi_devinfo.h>
42 #include <scsi/scsi_host.h> 42 #include <scsi/scsi_host.h>
43 #include <scsi/scsi_transport.h> 43 #include <scsi/scsi_transport.h>
44 #include <scsi/scsi_eh.h> 44 #include <scsi/scsi_eh.h>
45 45
46 #include "scsi_priv.h" 46 #include "scsi_priv.h"
47 #include "scsi_logging.h" 47 #include "scsi_logging.h"
48 48
49 #define ALLOC_FAILURE_MSG KERN_ERR "%s: Allocation failure during" \ 49 #define ALLOC_FAILURE_MSG KERN_ERR "%s: Allocation failure during" \
50 " SCSI scanning, some SCSI devices might not be configured\n" 50 " SCSI scanning, some SCSI devices might not be configured\n"
51 51
52 /* 52 /*
53 * Default timeout 53 * Default timeout
54 */ 54 */
55 #define SCSI_TIMEOUT (2*HZ) 55 #define SCSI_TIMEOUT (2*HZ)
56 56
57 /* 57 /*
58 * Prefix values for the SCSI id's (stored in sysfs name field) 58 * Prefix values for the SCSI id's (stored in sysfs name field)
59 */ 59 */
60 #define SCSI_UID_SER_NUM 'S' 60 #define SCSI_UID_SER_NUM 'S'
61 #define SCSI_UID_UNKNOWN 'Z' 61 #define SCSI_UID_UNKNOWN 'Z'
62 62
63 /* 63 /*
64 * Return values of some of the scanning functions. 64 * Return values of some of the scanning functions.
65 * 65 *
66 * SCSI_SCAN_NO_RESPONSE: no valid response received from the target, this 66 * SCSI_SCAN_NO_RESPONSE: no valid response received from the target, this
67 * includes allocation or general failures preventing IO from being sent. 67 * includes allocation or general failures preventing IO from being sent.
68 * 68 *
69 * SCSI_SCAN_TARGET_PRESENT: target responded, but no device is available 69 * SCSI_SCAN_TARGET_PRESENT: target responded, but no device is available
70 * on the given LUN. 70 * on the given LUN.
71 * 71 *
72 * SCSI_SCAN_LUN_PRESENT: target responded, and a device is available on a 72 * SCSI_SCAN_LUN_PRESENT: target responded, and a device is available on a
73 * given LUN. 73 * given LUN.
74 */ 74 */
75 #define SCSI_SCAN_NO_RESPONSE 0 75 #define SCSI_SCAN_NO_RESPONSE 0
76 #define SCSI_SCAN_TARGET_PRESENT 1 76 #define SCSI_SCAN_TARGET_PRESENT 1
77 #define SCSI_SCAN_LUN_PRESENT 2 77 #define SCSI_SCAN_LUN_PRESENT 2
78 78
79 static const char *scsi_null_device_strs = "nullnullnullnull"; 79 static const char *scsi_null_device_strs = "nullnullnullnull";
80 80
81 #define MAX_SCSI_LUNS 512 81 #define MAX_SCSI_LUNS 512
82 82
83 #ifdef CONFIG_SCSI_MULTI_LUN 83 #ifdef CONFIG_SCSI_MULTI_LUN
84 static unsigned int max_scsi_luns = MAX_SCSI_LUNS; 84 static unsigned int max_scsi_luns = MAX_SCSI_LUNS;
85 #else 85 #else
86 static unsigned int max_scsi_luns = 1; 86 static unsigned int max_scsi_luns = 1;
87 #endif 87 #endif
88 88
89 module_param_named(max_luns, max_scsi_luns, uint, S_IRUGO|S_IWUSR); 89 module_param_named(max_luns, max_scsi_luns, uint, S_IRUGO|S_IWUSR);
90 MODULE_PARM_DESC(max_luns, 90 MODULE_PARM_DESC(max_luns,
91 "last scsi LUN (should be between 1 and 2^32-1)"); 91 "last scsi LUN (should be between 1 and 2^32-1)");
92 92
93 #ifdef CONFIG_SCSI_SCAN_ASYNC 93 #ifdef CONFIG_SCSI_SCAN_ASYNC
94 #define SCSI_SCAN_TYPE_DEFAULT "async" 94 #define SCSI_SCAN_TYPE_DEFAULT "async"
95 #else 95 #else
96 #define SCSI_SCAN_TYPE_DEFAULT "sync" 96 #define SCSI_SCAN_TYPE_DEFAULT "sync"
97 #endif 97 #endif
98 98
99 static char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT; 99 static char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT;
100 100
101 module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO); 101 module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO);
102 MODULE_PARM_DESC(scan, "sync, async or none"); 102 MODULE_PARM_DESC(scan, "sync, async or none");
103 103
104 /* 104 /*
105 * max_scsi_report_luns: the maximum number of LUNS that will be 105 * max_scsi_report_luns: the maximum number of LUNS that will be
106 * returned from the REPORT LUNS command. 8 times this value must 106 * returned from the REPORT LUNS command. 8 times this value must
107 * be allocated. In theory this could be up to an 8 byte value, but 107 * be allocated. In theory this could be up to an 8 byte value, but
108 * in practice, the maximum number of LUNs suppored by any device 108 * in practice, the maximum number of LUNs suppored by any device
109 * is about 16k. 109 * is about 16k.
110 */ 110 */
111 static unsigned int max_scsi_report_luns = 511; 111 static unsigned int max_scsi_report_luns = 511;
112 112
113 module_param_named(max_report_luns, max_scsi_report_luns, uint, S_IRUGO|S_IWUSR); 113 module_param_named(max_report_luns, max_scsi_report_luns, uint, S_IRUGO|S_IWUSR);
114 MODULE_PARM_DESC(max_report_luns, 114 MODULE_PARM_DESC(max_report_luns,
115 "REPORT LUNS maximum number of LUNS received (should be" 115 "REPORT LUNS maximum number of LUNS received (should be"
116 " between 1 and 16384)"); 116 " between 1 and 16384)");
117 117
118 static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ+3; 118 static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ+3;
119 119
120 module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR); 120 module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR);
121 MODULE_PARM_DESC(inq_timeout, 121 MODULE_PARM_DESC(inq_timeout,
122 "Timeout (in seconds) waiting for devices to answer INQUIRY." 122 "Timeout (in seconds) waiting for devices to answer INQUIRY."
123 " Default is 5. Some non-compliant devices need more."); 123 " Default is 5. Some non-compliant devices need more.");
124 124
125 /* This lock protects only this list */ 125 /* This lock protects only this list */
126 static DEFINE_SPINLOCK(async_scan_lock); 126 static DEFINE_SPINLOCK(async_scan_lock);
127 static LIST_HEAD(scanning_hosts); 127 static LIST_HEAD(scanning_hosts);
128 128
129 struct async_scan_data { 129 struct async_scan_data {
130 struct list_head list; 130 struct list_head list;
131 struct Scsi_Host *shost; 131 struct Scsi_Host *shost;
132 struct completion prev_finished; 132 struct completion prev_finished;
133 }; 133 };
134 134
135 /** 135 /**
136 * scsi_complete_async_scans - Wait for asynchronous scans to complete 136 * scsi_complete_async_scans - Wait for asynchronous scans to complete
137 * 137 *
138 * When this function returns, any host which started scanning before 138 * When this function returns, any host which started scanning before
139 * this function was called will have finished its scan. Hosts which 139 * this function was called will have finished its scan. Hosts which
140 * started scanning after this function was called may or may not have 140 * started scanning after this function was called may or may not have
141 * finished. 141 * finished.
142 */ 142 */
143 int scsi_complete_async_scans(void) 143 int scsi_complete_async_scans(void)
144 { 144 {
145 struct async_scan_data *data; 145 struct async_scan_data *data;
146 146
147 do { 147 do {
148 if (list_empty(&scanning_hosts)) 148 if (list_empty(&scanning_hosts))
149 return 0; 149 return 0;
150 /* If we can't get memory immediately, that's OK. Just 150 /* If we can't get memory immediately, that's OK. Just
151 * sleep a little. Even if we never get memory, the async 151 * sleep a little. Even if we never get memory, the async
152 * scans will finish eventually. 152 * scans will finish eventually.
153 */ 153 */
154 data = kmalloc(sizeof(*data), GFP_KERNEL); 154 data = kmalloc(sizeof(*data), GFP_KERNEL);
155 if (!data) 155 if (!data)
156 msleep(1); 156 msleep(1);
157 } while (!data); 157 } while (!data);
158 158
159 data->shost = NULL; 159 data->shost = NULL;
160 init_completion(&data->prev_finished); 160 init_completion(&data->prev_finished);
161 161
162 spin_lock(&async_scan_lock); 162 spin_lock(&async_scan_lock);
163 /* Check that there's still somebody else on the list */ 163 /* Check that there's still somebody else on the list */
164 if (list_empty(&scanning_hosts)) 164 if (list_empty(&scanning_hosts))
165 goto done; 165 goto done;
166 list_add_tail(&data->list, &scanning_hosts); 166 list_add_tail(&data->list, &scanning_hosts);
167 spin_unlock(&async_scan_lock); 167 spin_unlock(&async_scan_lock);
168 168
169 printk(KERN_INFO "scsi: waiting for bus probes to complete ...\n"); 169 printk(KERN_INFO "scsi: waiting for bus probes to complete ...\n");
170 wait_for_completion(&data->prev_finished); 170 wait_for_completion(&data->prev_finished);
171 171
172 spin_lock(&async_scan_lock); 172 spin_lock(&async_scan_lock);
173 list_del(&data->list); 173 list_del(&data->list);
174 if (!list_empty(&scanning_hosts)) { 174 if (!list_empty(&scanning_hosts)) {
175 struct async_scan_data *next = list_entry(scanning_hosts.next, 175 struct async_scan_data *next = list_entry(scanning_hosts.next,
176 struct async_scan_data, list); 176 struct async_scan_data, list);
177 complete(&next->prev_finished); 177 complete(&next->prev_finished);
178 } 178 }
179 done: 179 done:
180 spin_unlock(&async_scan_lock); 180 spin_unlock(&async_scan_lock);
181 181
182 kfree(data); 182 kfree(data);
183 /* Synchronize async operations globally */
184 async_synchronize_full();
185 return 0; 183 return 0;
186 } 184 }
187 185
188 /* Only exported for the benefit of scsi_wait_scan */ 186 /* Only exported for the benefit of scsi_wait_scan */
189 EXPORT_SYMBOL_GPL(scsi_complete_async_scans); 187 EXPORT_SYMBOL_GPL(scsi_complete_async_scans);
190 188
191 #ifndef MODULE 189 #ifndef MODULE
192 /* 190 /*
193 * For async scanning we need to wait for all the scans to complete before 191 * For async scanning we need to wait for all the scans to complete before
194 * trying to mount the root fs. Otherwise non-modular drivers may not be ready 192 * trying to mount the root fs. Otherwise non-modular drivers may not be ready
195 * yet. 193 * yet.
196 */ 194 */
197 late_initcall(scsi_complete_async_scans); 195 late_initcall(scsi_complete_async_scans);
198 #endif 196 #endif
199 197
200 /** 198 /**
201 * scsi_unlock_floptical - unlock device via a special MODE SENSE command 199 * scsi_unlock_floptical - unlock device via a special MODE SENSE command
202 * @sdev: scsi device to send command to 200 * @sdev: scsi device to send command to
203 * @result: area to store the result of the MODE SENSE 201 * @result: area to store the result of the MODE SENSE
204 * 202 *
205 * Description: 203 * Description:
206 * Send a vendor specific MODE SENSE (not a MODE SELECT) command. 204 * Send a vendor specific MODE SENSE (not a MODE SELECT) command.
207 * Called for BLIST_KEY devices. 205 * Called for BLIST_KEY devices.
208 **/ 206 **/
209 static void scsi_unlock_floptical(struct scsi_device *sdev, 207 static void scsi_unlock_floptical(struct scsi_device *sdev,
210 unsigned char *result) 208 unsigned char *result)
211 { 209 {
212 unsigned char scsi_cmd[MAX_COMMAND_SIZE]; 210 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
213 211
214 printk(KERN_NOTICE "scsi: unlocking floptical drive\n"); 212 printk(KERN_NOTICE "scsi: unlocking floptical drive\n");
215 scsi_cmd[0] = MODE_SENSE; 213 scsi_cmd[0] = MODE_SENSE;
216 scsi_cmd[1] = 0; 214 scsi_cmd[1] = 0;
217 scsi_cmd[2] = 0x2e; 215 scsi_cmd[2] = 0x2e;
218 scsi_cmd[3] = 0; 216 scsi_cmd[3] = 0;
219 scsi_cmd[4] = 0x2a; /* size */ 217 scsi_cmd[4] = 0x2a; /* size */
220 scsi_cmd[5] = 0; 218 scsi_cmd[5] = 0;
221 scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, result, 0x2a, NULL, 219 scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, result, 0x2a, NULL,
222 SCSI_TIMEOUT, 3, NULL); 220 SCSI_TIMEOUT, 3, NULL);
223 } 221 }
224 222
225 /** 223 /**
226 * scsi_alloc_sdev - allocate and setup a scsi_Device 224 * scsi_alloc_sdev - allocate and setup a scsi_Device
227 * @starget: which target to allocate a &scsi_device for 225 * @starget: which target to allocate a &scsi_device for
228 * @lun: which lun 226 * @lun: which lun
229 * @hostdata: usually NULL and set by ->slave_alloc instead 227 * @hostdata: usually NULL and set by ->slave_alloc instead
230 * 228 *
231 * Description: 229 * Description:
232 * Allocate, initialize for io, and return a pointer to a scsi_Device. 230 * Allocate, initialize for io, and return a pointer to a scsi_Device.
233 * Stores the @shost, @channel, @id, and @lun in the scsi_Device, and 231 * Stores the @shost, @channel, @id, and @lun in the scsi_Device, and
234 * adds scsi_Device to the appropriate list. 232 * adds scsi_Device to the appropriate list.
235 * 233 *
236 * Return value: 234 * Return value:
237 * scsi_Device pointer, or NULL on failure. 235 * scsi_Device pointer, or NULL on failure.
238 **/ 236 **/
239 static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, 237 static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
240 unsigned int lun, void *hostdata) 238 unsigned int lun, void *hostdata)
241 { 239 {
242 struct scsi_device *sdev; 240 struct scsi_device *sdev;
243 int display_failure_msg = 1, ret; 241 int display_failure_msg = 1, ret;
244 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 242 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
245 extern void scsi_evt_thread(struct work_struct *work); 243 extern void scsi_evt_thread(struct work_struct *work);
246 244
247 sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size, 245 sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
248 GFP_ATOMIC); 246 GFP_ATOMIC);
249 if (!sdev) 247 if (!sdev)
250 goto out; 248 goto out;
251 249
252 sdev->vendor = scsi_null_device_strs; 250 sdev->vendor = scsi_null_device_strs;
253 sdev->model = scsi_null_device_strs; 251 sdev->model = scsi_null_device_strs;
254 sdev->rev = scsi_null_device_strs; 252 sdev->rev = scsi_null_device_strs;
255 sdev->host = shost; 253 sdev->host = shost;
256 sdev->id = starget->id; 254 sdev->id = starget->id;
257 sdev->lun = lun; 255 sdev->lun = lun;
258 sdev->channel = starget->channel; 256 sdev->channel = starget->channel;
259 sdev->sdev_state = SDEV_CREATED; 257 sdev->sdev_state = SDEV_CREATED;
260 INIT_LIST_HEAD(&sdev->siblings); 258 INIT_LIST_HEAD(&sdev->siblings);
261 INIT_LIST_HEAD(&sdev->same_target_siblings); 259 INIT_LIST_HEAD(&sdev->same_target_siblings);
262 INIT_LIST_HEAD(&sdev->cmd_list); 260 INIT_LIST_HEAD(&sdev->cmd_list);
263 INIT_LIST_HEAD(&sdev->starved_entry); 261 INIT_LIST_HEAD(&sdev->starved_entry);
264 INIT_LIST_HEAD(&sdev->event_list); 262 INIT_LIST_HEAD(&sdev->event_list);
265 spin_lock_init(&sdev->list_lock); 263 spin_lock_init(&sdev->list_lock);
266 INIT_WORK(&sdev->event_work, scsi_evt_thread); 264 INIT_WORK(&sdev->event_work, scsi_evt_thread);
267 265
268 sdev->sdev_gendev.parent = get_device(&starget->dev); 266 sdev->sdev_gendev.parent = get_device(&starget->dev);
269 sdev->sdev_target = starget; 267 sdev->sdev_target = starget;
270 268
271 /* usually NULL and set by ->slave_alloc instead */ 269 /* usually NULL and set by ->slave_alloc instead */
272 sdev->hostdata = hostdata; 270 sdev->hostdata = hostdata;
273 271
274 /* if the device needs this changing, it may do so in the 272 /* if the device needs this changing, it may do so in the
275 * slave_configure function */ 273 * slave_configure function */
276 sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED; 274 sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED;
277 275
278 /* 276 /*
279 * Some low level driver could use device->type 277 * Some low level driver could use device->type
280 */ 278 */
281 sdev->type = -1; 279 sdev->type = -1;
282 280
283 /* 281 /*
284 * Assume that the device will have handshaking problems, 282 * Assume that the device will have handshaking problems,
285 * and then fix this field later if it turns out it 283 * and then fix this field later if it turns out it
286 * doesn't 284 * doesn't
287 */ 285 */
288 sdev->borken = 1; 286 sdev->borken = 1;
289 287
290 sdev->request_queue = scsi_alloc_queue(sdev); 288 sdev->request_queue = scsi_alloc_queue(sdev);
291 if (!sdev->request_queue) { 289 if (!sdev->request_queue) {
292 /* release fn is set up in scsi_sysfs_device_initialise, so 290 /* release fn is set up in scsi_sysfs_device_initialise, so
293 * have to free and put manually here */ 291 * have to free and put manually here */
294 put_device(&starget->dev); 292 put_device(&starget->dev);
295 kfree(sdev); 293 kfree(sdev);
296 goto out; 294 goto out;
297 } 295 }
298 296
299 sdev->request_queue->queuedata = sdev; 297 sdev->request_queue->queuedata = sdev;
300 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); 298 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
301 299
302 scsi_sysfs_device_initialize(sdev); 300 scsi_sysfs_device_initialize(sdev);
303 301
304 if (shost->hostt->slave_alloc) { 302 if (shost->hostt->slave_alloc) {
305 ret = shost->hostt->slave_alloc(sdev); 303 ret = shost->hostt->slave_alloc(sdev);
306 if (ret) { 304 if (ret) {
307 /* 305 /*
308 * if LLDD reports slave not present, don't clutter 306 * if LLDD reports slave not present, don't clutter
309 * console with alloc failure messages 307 * console with alloc failure messages
310 */ 308 */
311 if (ret == -ENXIO) 309 if (ret == -ENXIO)
312 display_failure_msg = 0; 310 display_failure_msg = 0;
313 goto out_device_destroy; 311 goto out_device_destroy;
314 } 312 }
315 } 313 }
316 314
317 return sdev; 315 return sdev;
318 316
319 out_device_destroy: 317 out_device_destroy:
320 scsi_device_set_state(sdev, SDEV_DEL); 318 scsi_device_set_state(sdev, SDEV_DEL);
321 transport_destroy_device(&sdev->sdev_gendev); 319 transport_destroy_device(&sdev->sdev_gendev);
322 put_device(&sdev->sdev_gendev); 320 put_device(&sdev->sdev_gendev);
323 out: 321 out:
324 if (display_failure_msg) 322 if (display_failure_msg)
325 printk(ALLOC_FAILURE_MSG, __func__); 323 printk(ALLOC_FAILURE_MSG, __func__);
326 return NULL; 324 return NULL;
327 } 325 }
328 326
329 static void scsi_target_destroy(struct scsi_target *starget) 327 static void scsi_target_destroy(struct scsi_target *starget)
330 { 328 {
331 struct device *dev = &starget->dev; 329 struct device *dev = &starget->dev;
332 struct Scsi_Host *shost = dev_to_shost(dev->parent); 330 struct Scsi_Host *shost = dev_to_shost(dev->parent);
333 unsigned long flags; 331 unsigned long flags;
334 332
335 transport_destroy_device(dev); 333 transport_destroy_device(dev);
336 spin_lock_irqsave(shost->host_lock, flags); 334 spin_lock_irqsave(shost->host_lock, flags);
337 if (shost->hostt->target_destroy) 335 if (shost->hostt->target_destroy)
338 shost->hostt->target_destroy(starget); 336 shost->hostt->target_destroy(starget);
339 list_del_init(&starget->siblings); 337 list_del_init(&starget->siblings);
340 spin_unlock_irqrestore(shost->host_lock, flags); 338 spin_unlock_irqrestore(shost->host_lock, flags);
341 put_device(dev); 339 put_device(dev);
342 } 340 }
343 341
344 static void scsi_target_dev_release(struct device *dev) 342 static void scsi_target_dev_release(struct device *dev)
345 { 343 {
346 struct device *parent = dev->parent; 344 struct device *parent = dev->parent;
347 struct scsi_target *starget = to_scsi_target(dev); 345 struct scsi_target *starget = to_scsi_target(dev);
348 346
349 kfree(starget); 347 kfree(starget);
350 put_device(parent); 348 put_device(parent);
351 } 349 }
352 350
353 static struct device_type scsi_target_type = { 351 static struct device_type scsi_target_type = {
354 .name = "scsi_target", 352 .name = "scsi_target",
355 .release = scsi_target_dev_release, 353 .release = scsi_target_dev_release,
356 }; 354 };
357 355
358 int scsi_is_target_device(const struct device *dev) 356 int scsi_is_target_device(const struct device *dev)
359 { 357 {
360 return dev->type == &scsi_target_type; 358 return dev->type == &scsi_target_type;
361 } 359 }
362 EXPORT_SYMBOL(scsi_is_target_device); 360 EXPORT_SYMBOL(scsi_is_target_device);
363 361
364 static struct scsi_target *__scsi_find_target(struct device *parent, 362 static struct scsi_target *__scsi_find_target(struct device *parent,
365 int channel, uint id) 363 int channel, uint id)
366 { 364 {
367 struct scsi_target *starget, *found_starget = NULL; 365 struct scsi_target *starget, *found_starget = NULL;
368 struct Scsi_Host *shost = dev_to_shost(parent); 366 struct Scsi_Host *shost = dev_to_shost(parent);
369 /* 367 /*
370 * Search for an existing target for this sdev. 368 * Search for an existing target for this sdev.
371 */ 369 */
372 list_for_each_entry(starget, &shost->__targets, siblings) { 370 list_for_each_entry(starget, &shost->__targets, siblings) {
373 if (starget->id == id && 371 if (starget->id == id &&
374 starget->channel == channel) { 372 starget->channel == channel) {
375 found_starget = starget; 373 found_starget = starget;
376 break; 374 break;
377 } 375 }
378 } 376 }
379 if (found_starget) 377 if (found_starget)
380 get_device(&found_starget->dev); 378 get_device(&found_starget->dev);
381 379
382 return found_starget; 380 return found_starget;
383 } 381 }
384 382
385 /** 383 /**
386 * scsi_alloc_target - allocate a new or find an existing target 384 * scsi_alloc_target - allocate a new or find an existing target
387 * @parent: parent of the target (need not be a scsi host) 385 * @parent: parent of the target (need not be a scsi host)
388 * @channel: target channel number (zero if no channels) 386 * @channel: target channel number (zero if no channels)
389 * @id: target id number 387 * @id: target id number
390 * 388 *
391 * Return an existing target if one exists, provided it hasn't already 389 * Return an existing target if one exists, provided it hasn't already
392 * gone into STARGET_DEL state, otherwise allocate a new target. 390 * gone into STARGET_DEL state, otherwise allocate a new target.
393 * 391 *
394 * The target is returned with an incremented reference, so the caller 392 * The target is returned with an incremented reference, so the caller
395 * is responsible for both reaping and doing a last put 393 * is responsible for both reaping and doing a last put
396 */ 394 */
397 static struct scsi_target *scsi_alloc_target(struct device *parent, 395 static struct scsi_target *scsi_alloc_target(struct device *parent,
398 int channel, uint id) 396 int channel, uint id)
399 { 397 {
400 struct Scsi_Host *shost = dev_to_shost(parent); 398 struct Scsi_Host *shost = dev_to_shost(parent);
401 struct device *dev = NULL; 399 struct device *dev = NULL;
402 unsigned long flags; 400 unsigned long flags;
403 const int size = sizeof(struct scsi_target) 401 const int size = sizeof(struct scsi_target)
404 + shost->transportt->target_size; 402 + shost->transportt->target_size;
405 struct scsi_target *starget; 403 struct scsi_target *starget;
406 struct scsi_target *found_target; 404 struct scsi_target *found_target;
407 int error; 405 int error;
408 406
409 starget = kzalloc(size, GFP_KERNEL); 407 starget = kzalloc(size, GFP_KERNEL);
410 if (!starget) { 408 if (!starget) {
411 printk(KERN_ERR "%s: allocation failure\n", __func__); 409 printk(KERN_ERR "%s: allocation failure\n", __func__);
412 return NULL; 410 return NULL;
413 } 411 }
414 dev = &starget->dev; 412 dev = &starget->dev;
415 device_initialize(dev); 413 device_initialize(dev);
416 starget->reap_ref = 1; 414 starget->reap_ref = 1;
417 dev->parent = get_device(parent); 415 dev->parent = get_device(parent);
418 dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id); 416 dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
419 #ifndef CONFIG_SYSFS_DEPRECATED 417 #ifndef CONFIG_SYSFS_DEPRECATED
420 dev->bus = &scsi_bus_type; 418 dev->bus = &scsi_bus_type;
421 #endif 419 #endif
422 dev->type = &scsi_target_type; 420 dev->type = &scsi_target_type;
423 starget->id = id; 421 starget->id = id;
424 starget->channel = channel; 422 starget->channel = channel;
425 starget->can_queue = 0; 423 starget->can_queue = 0;
426 INIT_LIST_HEAD(&starget->siblings); 424 INIT_LIST_HEAD(&starget->siblings);
427 INIT_LIST_HEAD(&starget->devices); 425 INIT_LIST_HEAD(&starget->devices);
428 starget->state = STARGET_CREATED; 426 starget->state = STARGET_CREATED;
429 starget->scsi_level = SCSI_2; 427 starget->scsi_level = SCSI_2;
430 retry: 428 retry:
431 spin_lock_irqsave(shost->host_lock, flags); 429 spin_lock_irqsave(shost->host_lock, flags);
432 430
433 found_target = __scsi_find_target(parent, channel, id); 431 found_target = __scsi_find_target(parent, channel, id);
434 if (found_target) 432 if (found_target)
435 goto found; 433 goto found;
436 434
437 list_add_tail(&starget->siblings, &shost->__targets); 435 list_add_tail(&starget->siblings, &shost->__targets);
438 spin_unlock_irqrestore(shost->host_lock, flags); 436 spin_unlock_irqrestore(shost->host_lock, flags);
439 /* allocate and add */ 437 /* allocate and add */
440 transport_setup_device(dev); 438 transport_setup_device(dev);
441 if (shost->hostt->target_alloc) { 439 if (shost->hostt->target_alloc) {
442 error = shost->hostt->target_alloc(starget); 440 error = shost->hostt->target_alloc(starget);
443 441
444 if(error) { 442 if(error) {
445 dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error); 443 dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error);
446 /* don't want scsi_target_reap to do the final 444 /* don't want scsi_target_reap to do the final
447 * put because it will be under the host lock */ 445 * put because it will be under the host lock */
448 scsi_target_destroy(starget); 446 scsi_target_destroy(starget);
449 return NULL; 447 return NULL;
450 } 448 }
451 } 449 }
452 get_device(dev); 450 get_device(dev);
453 451
454 return starget; 452 return starget;
455 453
456 found: 454 found:
457 found_target->reap_ref++; 455 found_target->reap_ref++;
458 spin_unlock_irqrestore(shost->host_lock, flags); 456 spin_unlock_irqrestore(shost->host_lock, flags);
459 if (found_target->state != STARGET_DEL) { 457 if (found_target->state != STARGET_DEL) {
460 put_device(parent); 458 put_device(parent);
461 kfree(starget); 459 kfree(starget);
462 return found_target; 460 return found_target;
463 } 461 }
464 /* Unfortunately, we found a dying target; need to 462 /* Unfortunately, we found a dying target; need to
465 * wait until it's dead before we can get a new one */ 463 * wait until it's dead before we can get a new one */
466 put_device(&found_target->dev); 464 put_device(&found_target->dev);
467 flush_scheduled_work(); 465 flush_scheduled_work();
468 goto retry; 466 goto retry;
469 } 467 }
470 468
471 static void scsi_target_reap_usercontext(struct work_struct *work) 469 static void scsi_target_reap_usercontext(struct work_struct *work)
472 { 470 {
473 struct scsi_target *starget = 471 struct scsi_target *starget =
474 container_of(work, struct scsi_target, ew.work); 472 container_of(work, struct scsi_target, ew.work);
475 473
476 transport_remove_device(&starget->dev); 474 transport_remove_device(&starget->dev);
477 device_del(&starget->dev); 475 device_del(&starget->dev);
478 scsi_target_destroy(starget); 476 scsi_target_destroy(starget);
479 } 477 }
480 478
481 /** 479 /**
482 * scsi_target_reap - check to see if target is in use and destroy if not 480 * scsi_target_reap - check to see if target is in use and destroy if not
483 * @starget: target to be checked 481 * @starget: target to be checked
484 * 482 *
485 * This is used after removing a LUN or doing a last put of the target 483 * This is used after removing a LUN or doing a last put of the target
486 * it checks atomically that nothing is using the target and removes 484 * it checks atomically that nothing is using the target and removes
487 * it if so. 485 * it if so.
488 */ 486 */
489 void scsi_target_reap(struct scsi_target *starget) 487 void scsi_target_reap(struct scsi_target *starget)
490 { 488 {
491 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 489 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
492 unsigned long flags; 490 unsigned long flags;
493 enum scsi_target_state state; 491 enum scsi_target_state state;
494 int empty; 492 int empty;
495 493
496 spin_lock_irqsave(shost->host_lock, flags); 494 spin_lock_irqsave(shost->host_lock, flags);
497 state = starget->state; 495 state = starget->state;
498 empty = --starget->reap_ref == 0 && 496 empty = --starget->reap_ref == 0 &&
499 list_empty(&starget->devices) ? 1 : 0; 497 list_empty(&starget->devices) ? 1 : 0;
500 spin_unlock_irqrestore(shost->host_lock, flags); 498 spin_unlock_irqrestore(shost->host_lock, flags);
501 499
502 if (!empty) 500 if (!empty)
503 return; 501 return;
504 502
505 BUG_ON(state == STARGET_DEL); 503 BUG_ON(state == STARGET_DEL);
506 starget->state = STARGET_DEL; 504 starget->state = STARGET_DEL;
507 if (state == STARGET_CREATED) 505 if (state == STARGET_CREATED)
508 scsi_target_destroy(starget); 506 scsi_target_destroy(starget);
509 else 507 else
510 execute_in_process_context(scsi_target_reap_usercontext, 508 execute_in_process_context(scsi_target_reap_usercontext,
511 &starget->ew); 509 &starget->ew);
512 } 510 }
513 511
514 /** 512 /**
515 * sanitize_inquiry_string - remove non-graphical chars from an INQUIRY result string 513 * sanitize_inquiry_string - remove non-graphical chars from an INQUIRY result string
516 * @s: INQUIRY result string to sanitize 514 * @s: INQUIRY result string to sanitize
517 * @len: length of the string 515 * @len: length of the string
518 * 516 *
519 * Description: 517 * Description:
520 * The SCSI spec says that INQUIRY vendor, product, and revision 518 * The SCSI spec says that INQUIRY vendor, product, and revision
521 * strings must consist entirely of graphic ASCII characters, 519 * strings must consist entirely of graphic ASCII characters,
522 * padded on the right with spaces. Since not all devices obey 520 * padded on the right with spaces. Since not all devices obey
523 * this rule, we will replace non-graphic or non-ASCII characters 521 * this rule, we will replace non-graphic or non-ASCII characters
524 * with spaces. Exception: a NUL character is interpreted as a 522 * with spaces. Exception: a NUL character is interpreted as a
525 * string terminator, so all the following characters are set to 523 * string terminator, so all the following characters are set to
526 * spaces. 524 * spaces.
527 **/ 525 **/
528 static void sanitize_inquiry_string(unsigned char *s, int len) 526 static void sanitize_inquiry_string(unsigned char *s, int len)
529 { 527 {
530 int terminated = 0; 528 int terminated = 0;
531 529
532 for (; len > 0; (--len, ++s)) { 530 for (; len > 0; (--len, ++s)) {
533 if (*s == 0) 531 if (*s == 0)
534 terminated = 1; 532 terminated = 1;
535 if (terminated || *s < 0x20 || *s > 0x7e) 533 if (terminated || *s < 0x20 || *s > 0x7e)
536 *s = ' '; 534 *s = ' ';
537 } 535 }
538 } 536 }
539 537
540 /** 538 /**
541 * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY 539 * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
542 * @sdev: scsi_device to probe 540 * @sdev: scsi_device to probe
543 * @inq_result: area to store the INQUIRY result 541 * @inq_result: area to store the INQUIRY result
544 * @result_len: len of inq_result 542 * @result_len: len of inq_result
545 * @bflags: store any bflags found here 543 * @bflags: store any bflags found here
546 * 544 *
547 * Description: 545 * Description:
548 * Probe the lun associated with @req using a standard SCSI INQUIRY; 546 * Probe the lun associated with @req using a standard SCSI INQUIRY;
549 * 547 *
550 * If the INQUIRY is successful, zero is returned and the 548 * If the INQUIRY is successful, zero is returned and the
551 * INQUIRY data is in @inq_result; the scsi_level and INQUIRY length 549 * INQUIRY data is in @inq_result; the scsi_level and INQUIRY length
552 * are copied to the scsi_device any flags value is stored in *@bflags. 550 * are copied to the scsi_device any flags value is stored in *@bflags.
553 **/ 551 **/
554 static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result, 552 static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
555 int result_len, int *bflags) 553 int result_len, int *bflags)
556 { 554 {
557 unsigned char scsi_cmd[MAX_COMMAND_SIZE]; 555 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
558 int first_inquiry_len, try_inquiry_len, next_inquiry_len; 556 int first_inquiry_len, try_inquiry_len, next_inquiry_len;
559 int response_len = 0; 557 int response_len = 0;
560 int pass, count, result; 558 int pass, count, result;
561 struct scsi_sense_hdr sshdr; 559 struct scsi_sense_hdr sshdr;
562 560
563 *bflags = 0; 561 *bflags = 0;
564 562
565 /* Perform up to 3 passes. The first pass uses a conservative 563 /* Perform up to 3 passes. The first pass uses a conservative
566 * transfer length of 36 unless sdev->inquiry_len specifies a 564 * transfer length of 36 unless sdev->inquiry_len specifies a
567 * different value. */ 565 * different value. */
568 first_inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36; 566 first_inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
569 try_inquiry_len = first_inquiry_len; 567 try_inquiry_len = first_inquiry_len;
570 pass = 1; 568 pass = 1;
571 569
572 next_pass: 570 next_pass:
573 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev, 571 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
574 "scsi scan: INQUIRY pass %d length %d\n", 572 "scsi scan: INQUIRY pass %d length %d\n",
575 pass, try_inquiry_len)); 573 pass, try_inquiry_len));
576 574
577 /* Each pass gets up to three chances to ignore Unit Attention */ 575 /* Each pass gets up to three chances to ignore Unit Attention */
578 for (count = 0; count < 3; ++count) { 576 for (count = 0; count < 3; ++count) {
579 int resid; 577 int resid;
580 578
581 memset(scsi_cmd, 0, 6); 579 memset(scsi_cmd, 0, 6);
582 scsi_cmd[0] = INQUIRY; 580 scsi_cmd[0] = INQUIRY;
583 scsi_cmd[4] = (unsigned char) try_inquiry_len; 581 scsi_cmd[4] = (unsigned char) try_inquiry_len;
584 582
585 memset(inq_result, 0, try_inquiry_len); 583 memset(inq_result, 0, try_inquiry_len);
586 584
587 result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, 585 result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
588 inq_result, try_inquiry_len, &sshdr, 586 inq_result, try_inquiry_len, &sshdr,
589 HZ / 2 + HZ * scsi_inq_timeout, 3, 587 HZ / 2 + HZ * scsi_inq_timeout, 3,
590 &resid); 588 &resid);
591 589
592 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: INQUIRY %s " 590 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: INQUIRY %s "
593 "with code 0x%x\n", 591 "with code 0x%x\n",
594 result ? "failed" : "successful", result)); 592 result ? "failed" : "successful", result));
595 593
596 if (result) { 594 if (result) {
597 /* 595 /*
598 * not-ready to ready transition [asc/ascq=0x28/0x0] 596 * not-ready to ready transition [asc/ascq=0x28/0x0]
599 * or power-on, reset [asc/ascq=0x29/0x0], continue. 597 * or power-on, reset [asc/ascq=0x29/0x0], continue.
600 * INQUIRY should not yield UNIT_ATTENTION 598 * INQUIRY should not yield UNIT_ATTENTION
601 * but many buggy devices do so anyway. 599 * but many buggy devices do so anyway.
602 */ 600 */
603 if ((driver_byte(result) & DRIVER_SENSE) && 601 if ((driver_byte(result) & DRIVER_SENSE) &&
604 scsi_sense_valid(&sshdr)) { 602 scsi_sense_valid(&sshdr)) {
605 if ((sshdr.sense_key == UNIT_ATTENTION) && 603 if ((sshdr.sense_key == UNIT_ATTENTION) &&
606 ((sshdr.asc == 0x28) || 604 ((sshdr.asc == 0x28) ||
607 (sshdr.asc == 0x29)) && 605 (sshdr.asc == 0x29)) &&
608 (sshdr.ascq == 0)) 606 (sshdr.ascq == 0))
609 continue; 607 continue;
610 } 608 }
611 } else { 609 } else {
612 /* 610 /*
613 * if nothing was transferred, we try 611 * if nothing was transferred, we try
614 * again. It's a workaround for some USB 612 * again. It's a workaround for some USB
615 * devices. 613 * devices.
616 */ 614 */
617 if (resid == try_inquiry_len) 615 if (resid == try_inquiry_len)
618 continue; 616 continue;
619 } 617 }
620 break; 618 break;
621 } 619 }
622 620
623 if (result == 0) { 621 if (result == 0) {
624 sanitize_inquiry_string(&inq_result[8], 8); 622 sanitize_inquiry_string(&inq_result[8], 8);
625 sanitize_inquiry_string(&inq_result[16], 16); 623 sanitize_inquiry_string(&inq_result[16], 16);
626 sanitize_inquiry_string(&inq_result[32], 4); 624 sanitize_inquiry_string(&inq_result[32], 4);
627 625
628 response_len = inq_result[4] + 5; 626 response_len = inq_result[4] + 5;
629 if (response_len > 255) 627 if (response_len > 255)
630 response_len = first_inquiry_len; /* sanity */ 628 response_len = first_inquiry_len; /* sanity */
631 629
632 /* 630 /*
633 * Get any flags for this device. 631 * Get any flags for this device.
634 * 632 *
635 * XXX add a bflags to scsi_device, and replace the 633 * XXX add a bflags to scsi_device, and replace the
636 * corresponding bit fields in scsi_device, so bflags 634 * corresponding bit fields in scsi_device, so bflags
637 * need not be passed as an argument. 635 * need not be passed as an argument.
638 */ 636 */
639 *bflags = scsi_get_device_flags(sdev, &inq_result[8], 637 *bflags = scsi_get_device_flags(sdev, &inq_result[8],
640 &inq_result[16]); 638 &inq_result[16]);
641 639
642 /* When the first pass succeeds we gain information about 640 /* When the first pass succeeds we gain information about
643 * what larger transfer lengths might work. */ 641 * what larger transfer lengths might work. */
644 if (pass == 1) { 642 if (pass == 1) {
645 if (BLIST_INQUIRY_36 & *bflags) 643 if (BLIST_INQUIRY_36 & *bflags)
646 next_inquiry_len = 36; 644 next_inquiry_len = 36;
647 else if (BLIST_INQUIRY_58 & *bflags) 645 else if (BLIST_INQUIRY_58 & *bflags)
648 next_inquiry_len = 58; 646 next_inquiry_len = 58;
649 else if (sdev->inquiry_len) 647 else if (sdev->inquiry_len)
650 next_inquiry_len = sdev->inquiry_len; 648 next_inquiry_len = sdev->inquiry_len;
651 else 649 else
652 next_inquiry_len = response_len; 650 next_inquiry_len = response_len;
653 651
654 /* If more data is available perform the second pass */ 652 /* If more data is available perform the second pass */
655 if (next_inquiry_len > try_inquiry_len) { 653 if (next_inquiry_len > try_inquiry_len) {
656 try_inquiry_len = next_inquiry_len; 654 try_inquiry_len = next_inquiry_len;
657 pass = 2; 655 pass = 2;
658 goto next_pass; 656 goto next_pass;
659 } 657 }
660 } 658 }
661 659
662 } else if (pass == 2) { 660 } else if (pass == 2) {
663 printk(KERN_INFO "scsi scan: %d byte inquiry failed. " 661 printk(KERN_INFO "scsi scan: %d byte inquiry failed. "
664 "Consider BLIST_INQUIRY_36 for this device\n", 662 "Consider BLIST_INQUIRY_36 for this device\n",
665 try_inquiry_len); 663 try_inquiry_len);
666 664
667 /* If this pass failed, the third pass goes back and transfers 665 /* If this pass failed, the third pass goes back and transfers
668 * the same amount as we successfully got in the first pass. */ 666 * the same amount as we successfully got in the first pass. */
669 try_inquiry_len = first_inquiry_len; 667 try_inquiry_len = first_inquiry_len;
670 pass = 3; 668 pass = 3;
671 goto next_pass; 669 goto next_pass;
672 } 670 }
673 671
674 /* If the last transfer attempt got an error, assume the 672 /* If the last transfer attempt got an error, assume the
675 * peripheral doesn't exist or is dead. */ 673 * peripheral doesn't exist or is dead. */
676 if (result) 674 if (result)
677 return -EIO; 675 return -EIO;
678 676
679 /* Don't report any more data than the device says is valid */ 677 /* Don't report any more data than the device says is valid */
680 sdev->inquiry_len = min(try_inquiry_len, response_len); 678 sdev->inquiry_len = min(try_inquiry_len, response_len);
681 679
682 /* 680 /*
683 * XXX Abort if the response length is less than 36? If less than 681 * XXX Abort if the response length is less than 36? If less than
684 * 32, the lookup of the device flags (above) could be invalid, 682 * 32, the lookup of the device flags (above) could be invalid,
685 * and it would be possible to take an incorrect action - we do 683 * and it would be possible to take an incorrect action - we do
686 * not want to hang because of a short INQUIRY. On the flip side, 684 * not want to hang because of a short INQUIRY. On the flip side,
687 * if the device is spun down or becoming ready (and so it gives a 685 * if the device is spun down or becoming ready (and so it gives a
688 * short INQUIRY), an abort here prevents any further use of the 686 * short INQUIRY), an abort here prevents any further use of the
689 * device, including spin up. 687 * device, including spin up.
690 * 688 *
691 * On the whole, the best approach seems to be to assume the first 689 * On the whole, the best approach seems to be to assume the first
692 * 36 bytes are valid no matter what the device says. That's 690 * 36 bytes are valid no matter what the device says. That's
693 * better than copying < 36 bytes to the inquiry-result buffer 691 * better than copying < 36 bytes to the inquiry-result buffer
694 * and displaying garbage for the Vendor, Product, or Revision 692 * and displaying garbage for the Vendor, Product, or Revision
695 * strings. 693 * strings.
696 */ 694 */
697 if (sdev->inquiry_len < 36) { 695 if (sdev->inquiry_len < 36) {
698 printk(KERN_INFO "scsi scan: INQUIRY result too short (%d)," 696 printk(KERN_INFO "scsi scan: INQUIRY result too short (%d),"
699 " using 36\n", sdev->inquiry_len); 697 " using 36\n", sdev->inquiry_len);
700 sdev->inquiry_len = 36; 698 sdev->inquiry_len = 36;
701 } 699 }
702 700
703 /* 701 /*
704 * Related to the above issue: 702 * Related to the above issue:
705 * 703 *
706 * XXX Devices (disk or all?) should be sent a TEST UNIT READY, 704 * XXX Devices (disk or all?) should be sent a TEST UNIT READY,
707 * and if not ready, sent a START_STOP to start (maybe spin up) and 705 * and if not ready, sent a START_STOP to start (maybe spin up) and
708 * then send the INQUIRY again, since the INQUIRY can change after 706 * then send the INQUIRY again, since the INQUIRY can change after
709 * a device is initialized. 707 * a device is initialized.
710 * 708 *
711 * Ideally, start a device if explicitly asked to do so. This 709 * Ideally, start a device if explicitly asked to do so. This
712 * assumes that a device is spun up on power on, spun down on 710 * assumes that a device is spun up on power on, spun down on
713 * request, and then spun up on request. 711 * request, and then spun up on request.
714 */ 712 */
715 713
716 /* 714 /*
717 * The scanning code needs to know the scsi_level, even if no 715 * The scanning code needs to know the scsi_level, even if no
718 * device is attached at LUN 0 (SCSI_SCAN_TARGET_PRESENT) so 716 * device is attached at LUN 0 (SCSI_SCAN_TARGET_PRESENT) so
719 * non-zero LUNs can be scanned. 717 * non-zero LUNs can be scanned.
720 */ 718 */
721 sdev->scsi_level = inq_result[2] & 0x07; 719 sdev->scsi_level = inq_result[2] & 0x07;
722 if (sdev->scsi_level >= 2 || 720 if (sdev->scsi_level >= 2 ||
723 (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1)) 721 (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
724 sdev->scsi_level++; 722 sdev->scsi_level++;
725 sdev->sdev_target->scsi_level = sdev->scsi_level; 723 sdev->sdev_target->scsi_level = sdev->scsi_level;
726 724
727 return 0; 725 return 0;
728 } 726 }
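For reference, the CDB built in the retry loop above is the plain 6-byte INQUIRY: opcode in byte 0, allocation length in byte 4, the rest zero. A standalone sketch of that layout (the helper name is invented; 0x12 is the standard INQUIRY opcode):

#include <stdio.h>
#include <string.h>

#define INQUIRY_OP	0x12	/* standard SCSI INQUIRY opcode */

/* Build the 6-byte CDB the way each pass above does. */
static void build_inquiry_cdb(unsigned char *cdb, unsigned char alloc_len)
{
	memset(cdb, 0, 6);
	cdb[0] = INQUIRY_OP;
	cdb[4] = alloc_len;
}

int main(void)
{
	unsigned char cdb[6];
	int i;

	build_inquiry_cdb(cdb, 36);	/* conservative first-pass length */
	for (i = 0; i < 6; i++)
		printf("%02x ", cdb[i]);
	printf("\n");			/* prints: 12 00 00 00 24 00 */
	return 0;
}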
729 727
730 /** 728 /**
731 * scsi_add_lun - allocate and fully initialize a scsi_device 729 * scsi_add_lun - allocate and fully initialize a scsi_device
732 * @sdev: holds information to be stored in the new scsi_device 730 * @sdev: holds information to be stored in the new scsi_device
733 * @inq_result: holds the result of a previous INQUIRY to the LUN 731 * @inq_result: holds the result of a previous INQUIRY to the LUN
734 * @bflags: black/white list flag 732 * @bflags: black/white list flag
735 * @async: 1 if this device is being scanned asynchronously 733 * @async: 1 if this device is being scanned asynchronously
736 * 734 *
737 * Description: 735 * Description:
738 * Initialize the scsi_device @sdev. Optionally set fields based 736 * Initialize the scsi_device @sdev. Optionally set fields based
739 * on values in *@bflags. 737 * on values in *@bflags.
740 * 738 *
741 * Return: 739 * Return:
742 * SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device 740 * SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
743 * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized 741 * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
744 **/ 742 **/
745 static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, 743 static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
746 int *bflags, int async) 744 int *bflags, int async)
747 { 745 {
748 int ret; 746 int ret;
749 747
750 /* 748 /*
751 * XXX do not save the inquiry, since it can change underneath us, 749 * XXX do not save the inquiry, since it can change underneath us,
752 * save just vendor/model/rev. 750 * save just vendor/model/rev.
753 * 751 *
754 * Rather than save it and have an ioctl that retrieves the saved 752 * Rather than save it and have an ioctl that retrieves the saved
755 * value, have an ioctl that executes the same INQUIRY code used 753 * value, have an ioctl that executes the same INQUIRY code used
756 * in scsi_probe_lun, let user level programs doing INQUIRY 754 * in scsi_probe_lun, let user level programs doing INQUIRY
757 * scanning run at their own risk, or supply a user level program 755 * scanning run at their own risk, or supply a user level program
758 * that can correctly scan. 756 * that can correctly scan.
759 */ 757 */
760 758
761 /* 759 /*
762 * Copy at least 36 bytes of INQUIRY data, so that we don't 760 * Copy at least 36 bytes of INQUIRY data, so that we don't
763 * dereference unallocated memory when accessing the Vendor, 761 * dereference unallocated memory when accessing the Vendor,
764 * Product, and Revision strings. Badly behaved devices may set 762 * Product, and Revision strings. Badly behaved devices may set
765 * the INQUIRY Additional Length byte to a small value, indicating 763 * the INQUIRY Additional Length byte to a small value, indicating
766 * these strings are invalid, but often they contain plausible data 764 * these strings are invalid, but often they contain plausible data
767 * nonetheless. It doesn't matter if the device sent < 36 bytes 765 * nonetheless. It doesn't matter if the device sent < 36 bytes
768 * total, since scsi_probe_lun() initializes inq_result with 0s. 766 * total, since scsi_probe_lun() initializes inq_result with 0s.
769 */ 767 */
770 sdev->inquiry = kmemdup(inq_result, 768 sdev->inquiry = kmemdup(inq_result,
771 max_t(size_t, sdev->inquiry_len, 36), 769 max_t(size_t, sdev->inquiry_len, 36),
772 GFP_ATOMIC); 770 GFP_ATOMIC);
773 if (sdev->inquiry == NULL) 771 if (sdev->inquiry == NULL)
774 return SCSI_SCAN_NO_RESPONSE; 772 return SCSI_SCAN_NO_RESPONSE;
775 773
776 sdev->vendor = (char *) (sdev->inquiry + 8); 774 sdev->vendor = (char *) (sdev->inquiry + 8);
777 sdev->model = (char *) (sdev->inquiry + 16); 775 sdev->model = (char *) (sdev->inquiry + 16);
778 sdev->rev = (char *) (sdev->inquiry + 32); 776 sdev->rev = (char *) (sdev->inquiry + 32);
779 777
780 if (*bflags & BLIST_ISROM) { 778 if (*bflags & BLIST_ISROM) {
781 sdev->type = TYPE_ROM; 779 sdev->type = TYPE_ROM;
782 sdev->removable = 1; 780 sdev->removable = 1;
783 } else { 781 } else {
784 sdev->type = (inq_result[0] & 0x1f); 782 sdev->type = (inq_result[0] & 0x1f);
785 sdev->removable = (inq_result[1] & 0x80) >> 7; 783 sdev->removable = (inq_result[1] & 0x80) >> 7;
786 } 784 }
787 785
788 switch (sdev->type) { 786 switch (sdev->type) {
789 case TYPE_RBC: 787 case TYPE_RBC:
790 case TYPE_TAPE: 788 case TYPE_TAPE:
791 case TYPE_DISK: 789 case TYPE_DISK:
792 case TYPE_PRINTER: 790 case TYPE_PRINTER:
793 case TYPE_MOD: 791 case TYPE_MOD:
794 case TYPE_PROCESSOR: 792 case TYPE_PROCESSOR:
795 case TYPE_SCANNER: 793 case TYPE_SCANNER:
796 case TYPE_MEDIUM_CHANGER: 794 case TYPE_MEDIUM_CHANGER:
797 case TYPE_ENCLOSURE: 795 case TYPE_ENCLOSURE:
798 case TYPE_COMM: 796 case TYPE_COMM:
799 case TYPE_RAID: 797 case TYPE_RAID:
800 case TYPE_OSD: 798 case TYPE_OSD:
801 sdev->writeable = 1; 799 sdev->writeable = 1;
802 break; 800 break;
803 case TYPE_ROM: 801 case TYPE_ROM:
804 case TYPE_WORM: 802 case TYPE_WORM:
805 sdev->writeable = 0; 803 sdev->writeable = 0;
806 break; 804 break;
807 default: 805 default:
808 printk(KERN_INFO "scsi: unknown device type %d\n", sdev->type); 806 printk(KERN_INFO "scsi: unknown device type %d\n", sdev->type);
809 } 807 }
810 808
811 if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) { 809 if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) {
812 /* RBC and MMC devices can return SCSI-3 compliance and yet 810 /* RBC and MMC devices can return SCSI-3 compliance and yet
813 * still not support REPORT LUNS, so make them act as 811 * still not support REPORT LUNS, so make them act as
814 * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is 812 * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is
815 * specifically set */ 813 * specifically set */
816 if ((*bflags & BLIST_REPORTLUN2) == 0) 814 if ((*bflags & BLIST_REPORTLUN2) == 0)
817 *bflags |= BLIST_NOREPORTLUN; 815 *bflags |= BLIST_NOREPORTLUN;
818 } 816 }
819 817
820 /* 818 /*
821 * For a peripheral qualifier (PQ) value of 1 (001b), the SCSI 819 * For a peripheral qualifier (PQ) value of 1 (001b), the SCSI
822 * spec says: The device server is capable of supporting the 820 * spec says: The device server is capable of supporting the
823 * specified peripheral device type on this logical unit. However, 821 * specified peripheral device type on this logical unit. However,
824 * the physical device is not currently connected to this logical 822 * the physical device is not currently connected to this logical
825 * unit. 823 * unit.
826 * 824 *
827 * The above is vague, as it implies that we could treat 001 and 825 * The above is vague, as it implies that we could treat 001 and
828 * 011 the same. Stay compatible with previous code, and create a 826 * 011 the same. Stay compatible with previous code, and create a
829 * scsi_device for a PQ of 1 827 * scsi_device for a PQ of 1
830 * 828 *
831 * Don't set the device offline here; rather let the upper 829 * Don't set the device offline here; rather let the upper
832 * level drivers eval the PQ to decide whether they should 830 * level drivers eval the PQ to decide whether they should
833 * attach. So remove ((inq_result[0] >> 5) & 7) == 1 check. 831 * attach. So remove ((inq_result[0] >> 5) & 7) == 1 check.
834 */ 832 */
835 833
836 sdev->inq_periph_qual = (inq_result[0] >> 5) & 7; 834 sdev->inq_periph_qual = (inq_result[0] >> 5) & 7;
837 sdev->lockable = sdev->removable; 835 sdev->lockable = sdev->removable;
838 sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2); 836 sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2);
839 837
840 if (sdev->scsi_level >= SCSI_3 || 838 if (sdev->scsi_level >= SCSI_3 ||
841 (sdev->inquiry_len > 56 && inq_result[56] & 0x04)) 839 (sdev->inquiry_len > 56 && inq_result[56] & 0x04))
842 sdev->ppr = 1; 840 sdev->ppr = 1;
843 if (inq_result[7] & 0x60) 841 if (inq_result[7] & 0x60)
844 sdev->wdtr = 1; 842 sdev->wdtr = 1;
845 if (inq_result[7] & 0x10) 843 if (inq_result[7] & 0x10)
846 sdev->sdtr = 1; 844 sdev->sdtr = 1;
847 845
848 sdev_printk(KERN_NOTICE, sdev, "%s %.8s %.16s %.4s PQ: %d " 846 sdev_printk(KERN_NOTICE, sdev, "%s %.8s %.16s %.4s PQ: %d "
849 "ANSI: %d%s\n", scsi_device_type(sdev->type), 847 "ANSI: %d%s\n", scsi_device_type(sdev->type),
850 sdev->vendor, sdev->model, sdev->rev, 848 sdev->vendor, sdev->model, sdev->rev,
851 sdev->inq_periph_qual, inq_result[2] & 0x07, 849 sdev->inq_periph_qual, inq_result[2] & 0x07,
852 (inq_result[3] & 0x0f) == 1 ? " CCS" : ""); 850 (inq_result[3] & 0x0f) == 1 ? " CCS" : "");
853 851
854 if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) && 852 if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) &&
855 !(*bflags & BLIST_NOTQ)) 853 !(*bflags & BLIST_NOTQ))
856 sdev->tagged_supported = 1; 854 sdev->tagged_supported = 1;
857 855
858 /* 856 /*
859 * Some devices (Texel CD ROM drives) have handshaking problems 857 * Some devices (Texel CD ROM drives) have handshaking problems
860 * when used with the Seagate controllers. borken is initialized 858 * when used with the Seagate controllers. borken is initialized
861 * to 1, and then set to 0 here. 859 * to 1, and then set to 0 here.
862 */ 860 */
863 if ((*bflags & BLIST_BORKEN) == 0) 861 if ((*bflags & BLIST_BORKEN) == 0)
864 sdev->borken = 0; 862 sdev->borken = 0;
865 863
866 if (*bflags & BLIST_NO_ULD_ATTACH) 864 if (*bflags & BLIST_NO_ULD_ATTACH)
867 sdev->no_uld_attach = 1; 865 sdev->no_uld_attach = 1;
868 866
869 /* 867 /*
870 * Apparently some really broken devices (contrary to the SCSI 868 * Apparently some really broken devices (contrary to the SCSI
871 * standards) need to be selected without asserting ATN 869 * standards) need to be selected without asserting ATN
872 */ 870 */
873 if (*bflags & BLIST_SELECT_NO_ATN) 871 if (*bflags & BLIST_SELECT_NO_ATN)
874 sdev->select_no_atn = 1; 872 sdev->select_no_atn = 1;
875 873
876 /* 874 /*
877 * Maximum 512 sector transfer length 875 * Maximum 512 sector transfer length
878 * broken RA4x00 Compaq Disk Array 876 * broken RA4x00 Compaq Disk Array
879 */ 877 */
880 if (*bflags & BLIST_MAX_512) 878 if (*bflags & BLIST_MAX_512)
881 blk_queue_max_sectors(sdev->request_queue, 512); 879 blk_queue_max_sectors(sdev->request_queue, 512);
882 880
883 /* 881 /*
884 * Some devices may not want to have a start command automatically 882 * Some devices may not want to have a start command automatically
885 * issued when a device is added. 883 * issued when a device is added.
886 */ 884 */
887 if (*bflags & BLIST_NOSTARTONADD) 885 if (*bflags & BLIST_NOSTARTONADD)
888 sdev->no_start_on_add = 1; 886 sdev->no_start_on_add = 1;
889 887
890 if (*bflags & BLIST_SINGLELUN) 888 if (*bflags & BLIST_SINGLELUN)
891 scsi_target(sdev)->single_lun = 1; 889 scsi_target(sdev)->single_lun = 1;
892 890
893 sdev->use_10_for_rw = 1; 891 sdev->use_10_for_rw = 1;
894 892
895 if (*bflags & BLIST_MS_SKIP_PAGE_08) 893 if (*bflags & BLIST_MS_SKIP_PAGE_08)
896 sdev->skip_ms_page_8 = 1; 894 sdev->skip_ms_page_8 = 1;
897 895
898 if (*bflags & BLIST_MS_SKIP_PAGE_3F) 896 if (*bflags & BLIST_MS_SKIP_PAGE_3F)
899 sdev->skip_ms_page_3f = 1; 897 sdev->skip_ms_page_3f = 1;
900 898
901 if (*bflags & BLIST_USE_10_BYTE_MS) 899 if (*bflags & BLIST_USE_10_BYTE_MS)
902 sdev->use_10_for_ms = 1; 900 sdev->use_10_for_ms = 1;
903 901
904 /* set the device running here so that slave configure 902 /* set the device running here so that slave configure
905 * may do I/O */ 903 * may do I/O */
906 ret = scsi_device_set_state(sdev, SDEV_RUNNING); 904 ret = scsi_device_set_state(sdev, SDEV_RUNNING);
907 if (ret) { 905 if (ret) {
908 ret = scsi_device_set_state(sdev, SDEV_BLOCK); 906 ret = scsi_device_set_state(sdev, SDEV_BLOCK);
909 907
910 if (ret) { 908 if (ret) {
911 sdev_printk(KERN_ERR, sdev, 909 sdev_printk(KERN_ERR, sdev,
912 "in wrong state %s to complete scan\n", 910 "in wrong state %s to complete scan\n",
913 scsi_device_state_name(sdev->sdev_state)); 911 scsi_device_state_name(sdev->sdev_state));
914 return SCSI_SCAN_NO_RESPONSE; 912 return SCSI_SCAN_NO_RESPONSE;
915 } 913 }
916 } 914 }
917 915
918 if (*bflags & BLIST_MS_192_BYTES_FOR_3F) 916 if (*bflags & BLIST_MS_192_BYTES_FOR_3F)
919 sdev->use_192_bytes_for_3f = 1; 917 sdev->use_192_bytes_for_3f = 1;
920 918
921 if (*bflags & BLIST_NOT_LOCKABLE) 919 if (*bflags & BLIST_NOT_LOCKABLE)
922 sdev->lockable = 0; 920 sdev->lockable = 0;
923 921
924 if (*bflags & BLIST_RETRY_HWERROR) 922 if (*bflags & BLIST_RETRY_HWERROR)
925 sdev->retry_hwerror = 1; 923 sdev->retry_hwerror = 1;
926 924
927 transport_configure_device(&sdev->sdev_gendev); 925 transport_configure_device(&sdev->sdev_gendev);
928 926
929 if (sdev->host->hostt->slave_configure) { 927 if (sdev->host->hostt->slave_configure) {
930 ret = sdev->host->hostt->slave_configure(sdev); 928 ret = sdev->host->hostt->slave_configure(sdev);
931 if (ret) { 929 if (ret) {
932 /* 930 /*
933 * if LLDD reports slave not present, don't clutter 931 * if LLDD reports slave not present, don't clutter
934 * console with alloc failure messages 932 * console with alloc failure messages
935 */ 933 */
936 if (ret != -ENXIO) { 934 if (ret != -ENXIO) {
937 sdev_printk(KERN_ERR, sdev, 935 sdev_printk(KERN_ERR, sdev,
938 "failed to configure device\n"); 936 "failed to configure device\n");
939 } 937 }
940 return SCSI_SCAN_NO_RESPONSE; 938 return SCSI_SCAN_NO_RESPONSE;
941 } 939 }
942 } 940 }
943 941
944 /* 942 /*
945 * Ok, the device is now all set up, we can 943 * Ok, the device is now all set up, we can
946 * register it and tell the rest of the kernel 944 * register it and tell the rest of the kernel
947 * about it. 945 * about it.
948 */ 946 */
949 if (!async && scsi_sysfs_add_sdev(sdev) != 0) 947 if (!async && scsi_sysfs_add_sdev(sdev) != 0)
950 return SCSI_SCAN_NO_RESPONSE; 948 return SCSI_SCAN_NO_RESPONSE;
951 949
952 return SCSI_SCAN_LUN_PRESENT; 950 return SCSI_SCAN_LUN_PRESENT;
953 } 951 }
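The type, removable and ANSI-level decoding near the top of scsi_add_lun() reads fixed bit fields out of the standard INQUIRY header. A minimal sketch with an invented response, just to show which bits land where:

#include <stdio.h>

int main(void)
{
	/* Hypothetical INQUIRY header bytes for a removable MMC device. */
	unsigned char inq[4] = { 0x05, 0x80, 0x05, 0x02 };

	printf("PQ=%d type=0x%02x removable=%d ANSI=%d format=%d\n",
	       (inq[0] >> 5) & 7,	/* peripheral qualifier, bits 7-5 */
	       inq[0] & 0x1f,		/* peripheral device type, bits 4-0 */
	       (inq[1] & 0x80) >> 7,	/* RMB bit */
	       inq[2] & 0x07,		/* ANSI version, used for scsi_level */
	       inq[3] & 0x0f);		/* response data format */
	return 0;
}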
954 952
955 static inline void scsi_destroy_sdev(struct scsi_device *sdev) 953 static inline void scsi_destroy_sdev(struct scsi_device *sdev)
956 { 954 {
957 scsi_device_set_state(sdev, SDEV_DEL); 955 scsi_device_set_state(sdev, SDEV_DEL);
958 if (sdev->host->hostt->slave_destroy) 956 if (sdev->host->hostt->slave_destroy)
959 sdev->host->hostt->slave_destroy(sdev); 957 sdev->host->hostt->slave_destroy(sdev);
960 transport_destroy_device(&sdev->sdev_gendev); 958 transport_destroy_device(&sdev->sdev_gendev);
961 put_device(&sdev->sdev_gendev); 959 put_device(&sdev->sdev_gendev);
962 } 960 }
963 961
964 #ifdef CONFIG_SCSI_LOGGING 962 #ifdef CONFIG_SCSI_LOGGING
965 /** 963 /**
966 * scsi_inq_str - print INQUIRY data from min to max index, strip trailing whitespace 964 * scsi_inq_str - print INQUIRY data from min to max index, strip trailing whitespace
967 * @buf: Output buffer with at least end-first+1 bytes of space 965 * @buf: Output buffer with at least end-first+1 bytes of space
968 * @inq: Inquiry buffer (input) 966 * @inq: Inquiry buffer (input)
969 * @first: Offset of string into inq 967 * @first: Offset of string into inq
970 * @end: Index after last character in inq 968 * @end: Index after last character in inq
971 */ 969 */
972 static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq, 970 static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
973 unsigned first, unsigned end) 971 unsigned first, unsigned end)
974 { 972 {
975 unsigned term = 0, idx; 973 unsigned term = 0, idx;
976 974
977 for (idx = 0; idx + first < end && idx + first < inq[4] + 5; idx++) { 975 for (idx = 0; idx + first < end && idx + first < inq[4] + 5; idx++) {
978 if (inq[idx+first] > ' ') { 976 if (inq[idx+first] > ' ') {
979 buf[idx] = inq[idx+first]; 977 buf[idx] = inq[idx+first];
980 term = idx+1; 978 term = idx+1;
981 } else { 979 } else {
982 buf[idx] = ' '; 980 buf[idx] = ' ';
983 } 981 }
984 } 982 }
985 buf[term] = 0; 983 buf[term] = 0;
986 return buf; 984 return buf;
987 } 985 }
988 #endif 986 #endif
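scsi_inq_str() is what later builds the vendor/model strings for the scsi_mod.dev_flags hint printed further down. A standalone copy with a sample buffer shows the trailing-space stripping (all data here is invented):

#include <stdio.h>
#include <string.h>

/* Standalone copy of the helper above: copy inq[first..end) into buf,
 * terminating after the last non-space character. */
static unsigned char *inq_str(unsigned char *buf, unsigned char *inq,
			      unsigned first, unsigned end)
{
	unsigned term = 0, idx;

	for (idx = 0; idx + first < end && idx + first < inq[4] + 5u; idx++) {
		if (inq[idx + first] > ' ') {
			buf[idx] = inq[idx + first];
			term = idx + 1;
		} else {
			buf[idx] = ' ';
		}
	}
	buf[term] = 0;
	return buf;
}

int main(void)
{
	unsigned char inq[36] = { 0 };
	unsigned char vend[9];

	inq[4] = 31;			/* additional length of a 36-byte response */
	memcpy(&inq[8], "ACME    ", 8);	/* space-padded vendor field */
	printf("\"%s\"\n", (char *)inq_str(vend, inq, 8, 16));	/* "ACME" */
	return 0;
}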
989 987
990 /** 988 /**
991 * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it 989 * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it
992 * @starget: pointer to target device structure 990 * @starget: pointer to target device structure
993 * @lun: LUN of target device 991 * @lun: LUN of target device
994 * @bflagsp: store bflags here if not NULL 992 * @bflagsp: store bflags here if not NULL
995 * @sdevp: if not NULL, the scsi_device found at @lun is returned here 993 * @sdevp: if not NULL, the scsi_device found at @lun is returned here
996 * @rescan: if nonzero skip some code only needed on first scan 994 * @rescan: if nonzero skip some code only needed on first scan
997 * @hostdata: passed to scsi_alloc_sdev() 995 * @hostdata: passed to scsi_alloc_sdev()
998 * 996 *
999 * Description: 997 * Description:
1000 * Call scsi_probe_lun, if a LUN with an attached device is found, 998 * Call scsi_probe_lun, if a LUN with an attached device is found,
1001 * allocate and set it up by calling scsi_add_lun. 999 * allocate and set it up by calling scsi_add_lun.
1002 * 1000 *
1003 * Return: 1001 * Return:
1004 * SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device 1002 * SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
1005 * SCSI_SCAN_TARGET_PRESENT: target responded, but no device is 1003 * SCSI_SCAN_TARGET_PRESENT: target responded, but no device is
1006 * attached at the LUN 1004 * attached at the LUN
1007 * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized 1005 * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
1008 **/ 1006 **/
1009 static int scsi_probe_and_add_lun(struct scsi_target *starget, 1007 static int scsi_probe_and_add_lun(struct scsi_target *starget,
1010 uint lun, int *bflagsp, 1008 uint lun, int *bflagsp,
1011 struct scsi_device **sdevp, int rescan, 1009 struct scsi_device **sdevp, int rescan,
1012 void *hostdata) 1010 void *hostdata)
1013 { 1011 {
1014 struct scsi_device *sdev; 1012 struct scsi_device *sdev;
1015 unsigned char *result; 1013 unsigned char *result;
1016 int bflags, res = SCSI_SCAN_NO_RESPONSE, result_len = 256; 1014 int bflags, res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
1017 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 1015 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1018 1016
1019 /* 1017 /*
1020 * The rescan flag is used as an optimization, the first scan of a 1018 * The rescan flag is used as an optimization, the first scan of a
1021 * host adapter calls into here with rescan == 0. 1019 * host adapter calls into here with rescan == 0.
1022 */ 1020 */
1023 sdev = scsi_device_lookup_by_target(starget, lun); 1021 sdev = scsi_device_lookup_by_target(starget, lun);
1024 if (sdev) { 1022 if (sdev) {
1025 if (rescan || !scsi_device_created(sdev)) { 1023 if (rescan || !scsi_device_created(sdev)) {
1026 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO 1024 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO
1027 "scsi scan: device exists on %s\n", 1025 "scsi scan: device exists on %s\n",
1028 dev_name(&sdev->sdev_gendev))); 1026 dev_name(&sdev->sdev_gendev)));
1029 if (sdevp) 1027 if (sdevp)
1030 *sdevp = sdev; 1028 *sdevp = sdev;
1031 else 1029 else
1032 scsi_device_put(sdev); 1030 scsi_device_put(sdev);
1033 1031
1034 if (bflagsp) 1032 if (bflagsp)
1035 *bflagsp = scsi_get_device_flags(sdev, 1033 *bflagsp = scsi_get_device_flags(sdev,
1036 sdev->vendor, 1034 sdev->vendor,
1037 sdev->model); 1035 sdev->model);
1038 return SCSI_SCAN_LUN_PRESENT; 1036 return SCSI_SCAN_LUN_PRESENT;
1039 } 1037 }
1040 scsi_device_put(sdev); 1038 scsi_device_put(sdev);
1041 } else 1039 } else
1042 sdev = scsi_alloc_sdev(starget, lun, hostdata); 1040 sdev = scsi_alloc_sdev(starget, lun, hostdata);
1043 if (!sdev) 1041 if (!sdev)
1044 goto out; 1042 goto out;
1045 1043
1046 result = kmalloc(result_len, GFP_ATOMIC | 1044 result = kmalloc(result_len, GFP_ATOMIC |
1047 ((shost->unchecked_isa_dma) ? __GFP_DMA : 0)); 1045 ((shost->unchecked_isa_dma) ? __GFP_DMA : 0));
1048 if (!result) 1046 if (!result)
1049 goto out_free_sdev; 1047 goto out_free_sdev;
1050 1048
1051 if (scsi_probe_lun(sdev, result, result_len, &bflags)) 1049 if (scsi_probe_lun(sdev, result, result_len, &bflags))
1052 goto out_free_result; 1050 goto out_free_result;
1053 1051
1054 if (bflagsp) 1052 if (bflagsp)
1055 *bflagsp = bflags; 1053 *bflagsp = bflags;
1056 /* 1054 /*
1057 * result contains valid SCSI INQUIRY data. 1055 * result contains valid SCSI INQUIRY data.
1058 */ 1056 */
1059 if (((result[0] >> 5) == 3) && !(bflags & BLIST_ATTACH_PQ3)) { 1057 if (((result[0] >> 5) == 3) && !(bflags & BLIST_ATTACH_PQ3)) {
1060 /* 1058 /*
1061 * For a Peripheral qualifier 3 (011b), the SCSI 1059 * For a Peripheral qualifier 3 (011b), the SCSI
1062 * spec says: The device server is not capable of 1060 * spec says: The device server is not capable of
1063 * supporting a physical device on this logical 1061 * supporting a physical device on this logical
1064 * unit. 1062 * unit.
1065 * 1063 *
1066 * For disks, this implies that there is no 1064 * For disks, this implies that there is no
1067 * logical disk configured at sdev->lun, but there 1065 * logical disk configured at sdev->lun, but there
1068 * is a target id responding. 1066 * is a target id responding.
1069 */ 1067 */
1070 SCSI_LOG_SCAN_BUS(2, sdev_printk(KERN_INFO, sdev, "scsi scan:" 1068 SCSI_LOG_SCAN_BUS(2, sdev_printk(KERN_INFO, sdev, "scsi scan:"
1071 " peripheral qualifier of 3, device not" 1069 " peripheral qualifier of 3, device not"
1072 " added\n")); 1070 " added\n"));
1073 if (lun == 0) { 1071 if (lun == 0) {
1074 SCSI_LOG_SCAN_BUS(1, { 1072 SCSI_LOG_SCAN_BUS(1, {
1075 unsigned char vend[9]; 1073 unsigned char vend[9];
1076 unsigned char mod[17]; 1074 unsigned char mod[17];
1077 1075
1078 sdev_printk(KERN_INFO, sdev, 1076 sdev_printk(KERN_INFO, sdev,
1079 "scsi scan: consider passing scsi_mod." 1077 "scsi scan: consider passing scsi_mod."
1080 "dev_flags=%s:%s:0x240 or 0x1000240\n", 1078 "dev_flags=%s:%s:0x240 or 0x1000240\n",
1081 scsi_inq_str(vend, result, 8, 16), 1079 scsi_inq_str(vend, result, 8, 16),
1082 scsi_inq_str(mod, result, 16, 32)); 1080 scsi_inq_str(mod, result, 16, 32));
1083 }); 1081 });
1084 1082
1085 } 1083 }
1086 1084
1087 res = SCSI_SCAN_TARGET_PRESENT; 1085 res = SCSI_SCAN_TARGET_PRESENT;
1088 goto out_free_result; 1086 goto out_free_result;
1089 } 1087 }
1090 1088
1091 /* 1089 /*
1092 * Some targets may set slight variations of PQ and PDT to signal 1090 * Some targets may set slight variations of PQ and PDT to signal
1093 * that no LUN is present, so don't add sdev in these cases. 1091 * that no LUN is present, so don't add sdev in these cases.
1094 * Two specific examples are: 1092 * Two specific examples are:
1095 * 1) NetApp targets: return PQ=1, PDT=0x1f 1093 * 1) NetApp targets: return PQ=1, PDT=0x1f
1096 * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved" 1094 * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
1097 * in the UFI 1.0 spec (we cannot rely on reserved bits). 1095 * in the UFI 1.0 spec (we cannot rely on reserved bits).
1098 * 1096 *
1099 * References: 1097 * References:
1100 * 1) SCSI SPC-3, pp. 145-146 1098 * 1) SCSI SPC-3, pp. 145-146
1101 * PQ=1: "A peripheral device having the specified peripheral 1099 * PQ=1: "A peripheral device having the specified peripheral
1102 * device type is not connected to this logical unit. However, the 1100 * device type is not connected to this logical unit. However, the
1103 * device server is capable of supporting the specified peripheral 1101 * device server is capable of supporting the specified peripheral
1104 * device type on this logical unit." 1102 * device type on this logical unit."
1105 * PDT=0x1f: "Unknown or no device type" 1103 * PDT=0x1f: "Unknown or no device type"
1106 * 2) USB UFI 1.0, p. 20 1104 * 2) USB UFI 1.0, p. 20
1107 * PDT=00h Direct-access device (floppy) 1105 * PDT=00h Direct-access device (floppy)
1108 * PDT=1Fh none (no FDD connected to the requested logical unit) 1106 * PDT=1Fh none (no FDD connected to the requested logical unit)
1109 */ 1107 */
1110 if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) && 1108 if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) &&
1111 (result[0] & 0x1f) == 0x1f && 1109 (result[0] & 0x1f) == 0x1f &&
1112 !scsi_is_wlun(lun)) { 1110 !scsi_is_wlun(lun)) {
1113 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO 1111 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO
1114 "scsi scan: peripheral device type" 1112 "scsi scan: peripheral device type"
1115 " of 31, no device added\n")); 1113 " of 31, no device added\n"));
1116 res = SCSI_SCAN_TARGET_PRESENT; 1114 res = SCSI_SCAN_TARGET_PRESENT;
1117 goto out_free_result; 1115 goto out_free_result;
1118 } 1116 }
1119 1117
1120 res = scsi_add_lun(sdev, result, &bflags, shost->async_scan); 1118 res = scsi_add_lun(sdev, result, &bflags, shost->async_scan);
1121 if (res == SCSI_SCAN_LUN_PRESENT) { 1119 if (res == SCSI_SCAN_LUN_PRESENT) {
1122 if (bflags & BLIST_KEY) { 1120 if (bflags & BLIST_KEY) {
1123 sdev->lockable = 0; 1121 sdev->lockable = 0;
1124 scsi_unlock_floptical(sdev, result); 1122 scsi_unlock_floptical(sdev, result);
1125 } 1123 }
1126 } 1124 }
1127 1125
1128 out_free_result: 1126 out_free_result:
1129 kfree(result); 1127 kfree(result);
1130 out_free_sdev: 1128 out_free_sdev:
1131 if (res == SCSI_SCAN_LUN_PRESENT) { 1129 if (res == SCSI_SCAN_LUN_PRESENT) {
1132 if (sdevp) { 1130 if (sdevp) {
1133 if (scsi_device_get(sdev) == 0) { 1131 if (scsi_device_get(sdev) == 0) {
1134 *sdevp = sdev; 1132 *sdevp = sdev;
1135 } else { 1133 } else {
1136 __scsi_remove_device(sdev); 1134 __scsi_remove_device(sdev);
1137 res = SCSI_SCAN_NO_RESPONSE; 1135 res = SCSI_SCAN_NO_RESPONSE;
1138 } 1136 }
1139 } 1137 }
1140 } else 1138 } else
1141 scsi_destroy_sdev(sdev); 1139 scsi_destroy_sdev(sdev);
1142 out: 1140 out:
1143 return res; 1141 return res;
1144 } 1142 }
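The PQ/PDT workaround above (NetApp targets and USB UFI devices) reduces to a small predicate on INQUIRY byte 0 plus the per-target pdt_1f_for_no_lun flag. A standalone sketch of just that test (the well-known-LUN exception is left out):

#include <stdio.h>

/* "Target responded, but no LUN here": PQ is bits 7-5 of byte 0,
 * PDT is bits 4-0. */
static int no_lun_here(unsigned char inq0, int pdt_1f_for_no_lun)
{
	int pq = (inq0 >> 5) & 7;
	int pdt = inq0 & 0x1f;

	return (pq == 1 || pdt_1f_for_no_lun) && pdt == 0x1f;
}

int main(void)
{
	printf("%d\n", no_lun_here(0x3f, 0));	/* PQ=1, PDT=0x1f -> 1 */
	printf("%d\n", no_lun_here(0x1f, 1));	/* PQ=0, PDT=0x1f, flag set -> 1 */
	printf("%d\n", no_lun_here(0x00, 0));	/* disk present -> 0 */
	return 0;
}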
1145 1143
1146 /** 1144 /**
1147 * scsi_sequential_lun_scan - sequentially scan a SCSI target 1145 * scsi_sequential_lun_scan - sequentially scan a SCSI target
1148 * @starget: pointer to target structure to scan 1146 * @starget: pointer to target structure to scan
1149 * @bflags: black/white list flag for LUN 0 1147 * @bflags: black/white list flag for LUN 0
1150 * @scsi_level: Which version of the standard does this device adhere to 1148 * @scsi_level: Which version of the standard does this device adhere to
1151 * @rescan: passed to scsi_probe_and_add_lun() 1149 * @rescan: passed to scsi_probe_and_add_lun()
1152 * 1150 *
1153 * Description: 1151 * Description:
1154 * Generally, scan from LUN 1 (LUN 0 is assumed to already have been 1152 * Generally, scan from LUN 1 (LUN 0 is assumed to already have been
1155 * scanned) to some maximum lun until a LUN is found with no device 1153 * scanned) to some maximum lun until a LUN is found with no device
1156 * attached. Use the bflags to figure out any oddities. 1154 * attached. Use the bflags to figure out any oddities.
1157 * 1155 *
1158 * Scans LUNs 1 and up on @starget. 1156 * Scans LUNs 1 and up on @starget.
1159 **/ 1157 **/
1160 static void scsi_sequential_lun_scan(struct scsi_target *starget, 1158 static void scsi_sequential_lun_scan(struct scsi_target *starget,
1161 int bflags, int scsi_level, int rescan) 1159 int bflags, int scsi_level, int rescan)
1162 { 1160 {
1163 unsigned int sparse_lun, lun, max_dev_lun; 1161 unsigned int sparse_lun, lun, max_dev_lun;
1164 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 1162 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1165 1163
1166 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: Sequential scan of " 1164 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: Sequential scan of "
1167 "%s\n", dev_name(&starget->dev))); 1165 "%s\n", dev_name(&starget->dev)));
1168 1166
1169 max_dev_lun = min(max_scsi_luns, shost->max_lun); 1167 max_dev_lun = min(max_scsi_luns, shost->max_lun);
1170 /* 1168 /*
1171 * If this device is known to support sparse multiple units, 1169 * If this device is known to support sparse multiple units,
1172 * override the other settings, and scan all of them. Normally, 1170 * override the other settings, and scan all of them. Normally,
1173 * SCSI-3 devices should be scanned via the REPORT LUNS. 1171 * SCSI-3 devices should be scanned via the REPORT LUNS.
1174 */ 1172 */
1175 if (bflags & BLIST_SPARSELUN) { 1173 if (bflags & BLIST_SPARSELUN) {
1176 max_dev_lun = shost->max_lun; 1174 max_dev_lun = shost->max_lun;
1177 sparse_lun = 1; 1175 sparse_lun = 1;
1178 } else 1176 } else
1179 sparse_lun = 0; 1177 sparse_lun = 0;
1180 1178
1181 /* 1179 /*
1182 * If less than SCSI_1_CCS, and no special lun scanning, stop 1180 * If less than SCSI_1_CCS, and no special lun scanning, stop
1183 * scanning; this matches 2.4 behaviour, but could just be a bug 1181 * scanning; this matches 2.4 behaviour, but could just be a bug
1184 * (to continue scanning a SCSI_1_CCS device). 1182 * (to continue scanning a SCSI_1_CCS device).
1185 * 1183 *
1186 * This test is broken. We might not have any device on lun0 for 1184 * This test is broken. We might not have any device on lun0 for
1187 * a sparselun device, and if that's the case then how would we 1185 * a sparselun device, and if that's the case then how would we
1188 * know the real scsi_level, eh? It might make sense to just not 1186 * know the real scsi_level, eh? It might make sense to just not
1189 * scan any SCSI_1 device for non-0 luns, but that check would best 1187 * scan any SCSI_1 device for non-0 luns, but that check would best
1190 * go into scsi_alloc_sdev() and just have it return null when asked 1188 * go into scsi_alloc_sdev() and just have it return null when asked
1191 * to alloc an sdev for lun > 0 on an already found SCSI_1 device. 1189 * to alloc an sdev for lun > 0 on an already found SCSI_1 device.
1192 * 1190 *
1193 if ((sdevscan->scsi_level < SCSI_1_CCS) && 1191 if ((sdevscan->scsi_level < SCSI_1_CCS) &&
1194 ((bflags & (BLIST_FORCELUN | BLIST_SPARSELUN | BLIST_MAX5LUN)) 1192 ((bflags & (BLIST_FORCELUN | BLIST_SPARSELUN | BLIST_MAX5LUN))
1195 == 0)) 1193 == 0))
1196 return; 1194 return;
1197 */ 1195 */
1198 /* 1196 /*
1199 * If this device is known to support multiple units, override 1197 * If this device is known to support multiple units, override
1200 * the other settings, and scan all of them. 1198 * the other settings, and scan all of them.
1201 */ 1199 */
1202 if (bflags & BLIST_FORCELUN) 1200 if (bflags & BLIST_FORCELUN)
1203 max_dev_lun = shost->max_lun; 1201 max_dev_lun = shost->max_lun;
1204 /* 1202 /*
1205 * REGAL CDC-4X: avoid hang after LUN 4 1203 * REGAL CDC-4X: avoid hang after LUN 4
1206 */ 1204 */
1207 if (bflags & BLIST_MAX5LUN) 1205 if (bflags & BLIST_MAX5LUN)
1208 max_dev_lun = min(5U, max_dev_lun); 1206 max_dev_lun = min(5U, max_dev_lun);
1209 /* 1207 /*
1210 * Do not scan SCSI-2 or lower device past LUN 7, unless 1208 * Do not scan SCSI-2 or lower device past LUN 7, unless
1211 * BLIST_LARGELUN. 1209 * BLIST_LARGELUN.
1212 */ 1210 */
1213 if (scsi_level < SCSI_3 && !(bflags & BLIST_LARGELUN)) 1211 if (scsi_level < SCSI_3 && !(bflags & BLIST_LARGELUN))
1214 max_dev_lun = min(8U, max_dev_lun); 1212 max_dev_lun = min(8U, max_dev_lun);
1215 1213
1216 /* 1214 /*
1217 * We have already scanned LUN 0, so start at LUN 1. Keep scanning 1215 * We have already scanned LUN 0, so start at LUN 1. Keep scanning
1218 * until we reach the max, or no LUN is found and we are not 1216 * until we reach the max, or no LUN is found and we are not
1219 * sparse_lun. 1217 * sparse_lun.
1220 */ 1218 */
1221 for (lun = 1; lun < max_dev_lun; ++lun) 1219 for (lun = 1; lun < max_dev_lun; ++lun)
1222 if ((scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan, 1220 if ((scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan,
1223 NULL) != SCSI_SCAN_LUN_PRESENT) && 1221 NULL) != SCSI_SCAN_LUN_PRESENT) &&
1224 !sparse_lun) 1222 !sparse_lun)
1225 return; 1223 return;
1226 } 1224 }
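The way the BLIST flags and the target's SCSI level combine into max_dev_lun can be summarized in isolation. A hedged sketch of the same clamping order (the flag bits and the SCSI_3 value are stand-ins, not the real definitions from scsi_devinfo.h and scsi.h):

#include <stdio.h>

/* Stand-in flag bits, for illustration only. */
#define BL_SPARSELUN	0x01
#define BL_FORCELUN	0x02
#define BL_MAX5LUN	0x04
#define BL_LARGELUN	0x08
#define LEVEL_SCSI_3	4	/* assumed numeric value of SCSI_3 */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Same order of overrides as scsi_sequential_lun_scan() above. */
static unsigned int clamp_max_lun(unsigned int max_scsi_luns,
				  unsigned int host_max_lun,
				  int bflags, int scsi_level)
{
	unsigned int max_dev_lun = min_u(max_scsi_luns, host_max_lun);

	if (bflags & (BL_SPARSELUN | BL_FORCELUN))
		max_dev_lun = host_max_lun;
	if (bflags & BL_MAX5LUN)
		max_dev_lun = min_u(5U, max_dev_lun);
	if (scsi_level < LEVEL_SCSI_3 && !(bflags & BL_LARGELUN))
		max_dev_lun = min_u(8U, max_dev_lun);
	return max_dev_lun;
}

int main(void)
{
	printf("%u\n", clamp_max_lun(512, 16384, 0, 3));		/* 8     */
	printf("%u\n", clamp_max_lun(512, 16384, BL_FORCELUN, 4));	/* 16384 */
	printf("%u\n", clamp_max_lun(512, 16384, BL_MAX5LUN, 4));	/* 5     */
	return 0;
}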
1227 1225
1228 /** 1226 /**
1229 * scsilun_to_int - convert a scsi_lun to an int 1227 * scsilun_to_int - convert a scsi_lun to an int
1230 * @scsilun: struct scsi_lun to be converted. 1228 * @scsilun: struct scsi_lun to be converted.
1231 * 1229 *
1232 * Description: 1230 * Description:
1233 * Convert @scsilun from a struct scsi_lun to a four byte host byte-ordered 1231 * Convert @scsilun from a struct scsi_lun to a four byte host byte-ordered
1234 * integer, and return the result. The caller must check for 1232 * integer, and return the result. The caller must check for
1235 * truncation before using this function. 1233 * truncation before using this function.
1236 * 1234 *
1237 * Notes: 1235 * Notes:
1238 * The struct scsi_lun is assumed to be four levels, with each level 1236 * The struct scsi_lun is assumed to be four levels, with each level
1239 * effectively containing a SCSI byte-ordered (big endian) short; the 1237 * effectively containing a SCSI byte-ordered (big endian) short; the
1240 * addressing bits of each level are ignored (the highest two bits). 1238 * addressing bits of each level are ignored (the highest two bits).
1241 * For a description of the LUN format, post SCSI-3 see the SCSI 1239 * For a description of the LUN format, post SCSI-3 see the SCSI
1242 * Architecture Model, for SCSI-3 see the SCSI Controller Commands. 1240 * Architecture Model, for SCSI-3 see the SCSI Controller Commands.
1243 * 1241 *
1244 * Given a struct scsi_lun of: 0a 04 0b 03 00 00 00 00, this function returns 1242 * Given a struct scsi_lun of: 0a 04 0b 03 00 00 00 00, this function returns
1245 * the integer: 0x0b030a04 1243 * the integer: 0x0b030a04
1246 **/ 1244 **/
1247 int scsilun_to_int(struct scsi_lun *scsilun) 1245 int scsilun_to_int(struct scsi_lun *scsilun)
1248 { 1246 {
1249 int i; 1247 int i;
1250 unsigned int lun; 1248 unsigned int lun;
1251 1249
1252 lun = 0; 1250 lun = 0;
1253 for (i = 0; i < sizeof(lun); i += 2) 1251 for (i = 0; i < sizeof(lun); i += 2)
1254 lun = lun | (((scsilun->scsi_lun[i] << 8) | 1252 lun = lun | (((scsilun->scsi_lun[i] << 8) |
1255 scsilun->scsi_lun[i + 1]) << (i * 8)); 1253 scsilun->scsi_lun[i + 1]) << (i * 8));
1256 return lun; 1254 return lun;
1257 } 1255 }
1258 EXPORT_SYMBOL(scsilun_to_int); 1256 EXPORT_SYMBOL(scsilun_to_int);
1259 1257
1260 /** 1258 /**
1261 * int_to_scsilun - convert an int back into a scsi_lun 1259 * int_to_scsilun - convert an int back into a scsi_lun
1262 * @lun: integer to be reverted 1260 * @lun: integer to be reverted
1263 * @scsilun: struct scsi_lun to be set. 1261 * @scsilun: struct scsi_lun to be set.
1264 * 1262 *
1265 * Description: 1263 * Description:
1266 * Reverses the functionality of scsilun_to_int(), which packed 1264 * Reverses the functionality of scsilun_to_int(), which packed
1267 * an 8-byte lun value into an int. This routine unpacks the int 1265 * an 8-byte lun value into an int. This routine unpacks the int
1268 * back into the lun value. 1266 * back into the lun value.
1269 * Note: the scsilun_to_int() routine does not truly handle all 1267 * Note: the scsilun_to_int() routine does not truly handle all
1270 * 8 bytes of the lun value. This function restores only as much 1268 * 8 bytes of the lun value. This function restores only as much
1271 * as was set by the routine. 1269 * as was set by the routine.
1272 * 1270 *
1273 * Notes: 1271 * Notes:
1274 * Given an integer: 0x0b030a04, this function returns a 1272 * Given an integer: 0x0b030a04, this function returns a
1275 * struct scsi_lun of: 0a 04 0b 03 00 00 00 00 1273 * struct scsi_lun of: 0a 04 0b 03 00 00 00 00
1276 * 1274 *
1277 **/ 1275 **/
1278 void int_to_scsilun(unsigned int lun, struct scsi_lun *scsilun) 1276 void int_to_scsilun(unsigned int lun, struct scsi_lun *scsilun)
1279 { 1277 {
1280 int i; 1278 int i;
1281 1279
1282 memset(scsilun->scsi_lun, 0, sizeof(scsilun->scsi_lun)); 1280 memset(scsilun->scsi_lun, 0, sizeof(scsilun->scsi_lun));
1283 1281
1284 for (i = 0; i < sizeof(lun); i += 2) { 1282 for (i = 0; i < sizeof(lun); i += 2) {
1285 scsilun->scsi_lun[i] = (lun >> 8) & 0xFF; 1283 scsilun->scsi_lun[i] = (lun >> 8) & 0xFF;
1286 scsilun->scsi_lun[i+1] = lun & 0xFF; 1284 scsilun->scsi_lun[i+1] = lun & 0xFF;
1287 lun = lun >> 16; 1285 lun = lun >> 16;
1288 } 1286 }
1289 } 1287 }
1290 EXPORT_SYMBOL(int_to_scsilun); 1288 EXPORT_SYMBOL(int_to_scsilun);
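The example given in the two kernel-doc blocks above can be verified with a short roundtrip. A standalone sketch of both conversions with a local stand-in for struct scsi_lun (only the packing rule is being illustrated):

#include <stdio.h>
#include <string.h>

struct lun8 { unsigned char b[8]; };	/* stand-in for struct scsi_lun */

/* Pack the four big-endian 16-bit levels into a host int, as
 * scsilun_to_int() does above. */
static unsigned int lun_to_int(const struct lun8 *l)
{
	unsigned int lun = 0;
	int i;

	for (i = 0; i < (int)sizeof(lun); i += 2)
		lun |= (unsigned int)((l->b[i] << 8) | l->b[i + 1]) << (i * 8);
	return lun;
}

/* Unpack it again, as int_to_scsilun() does above. */
static void int_to_lun(unsigned int lun, struct lun8 *l)
{
	int i;

	memset(l->b, 0, sizeof(l->b));
	for (i = 0; i < (int)sizeof(lun); i += 2) {
		l->b[i] = (lun >> 8) & 0xFF;
		l->b[i + 1] = lun & 0xFF;
		lun >>= 16;
	}
}

int main(void)
{
	struct lun8 l = { { 0x0a, 0x04, 0x0b, 0x03, 0, 0, 0, 0 } };
	struct lun8 back;
	unsigned int v = lun_to_int(&l);

	printf("0x%08x\n", v);			/* 0x0b030a04, as documented */
	int_to_lun(v, &back);
	printf("%d\n", memcmp(l.b, back.b, 8));	/* 0: the roundtrip matches */
	return 0;
}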
1291 1289
1292 /** 1290 /**
1293 * scsi_report_lun_scan - Scan using SCSI REPORT LUN results 1291 * scsi_report_lun_scan - Scan using SCSI REPORT LUN results
1294 * @starget: which target 1292 * @starget: which target
1295 * @bflags: Zero or a mix of BLIST_NOLUN, BLIST_REPORTLUN2, or BLIST_NOREPORTLUN 1293 * @bflags: Zero or a mix of BLIST_NOLUN, BLIST_REPORTLUN2, or BLIST_NOREPORTLUN
1296 * @rescan: nonzero if we can skip code only needed on first scan 1294 * @rescan: nonzero if we can skip code only needed on first scan
1297 * 1295 *
1298 * Description: 1296 * Description:
1299 * Fast scanning for modern (SCSI-3) devices by sending a REPORT LUN command. 1297 * Fast scanning for modern (SCSI-3) devices by sending a REPORT LUN command.
1300 * Scan the resulting list of LUNs by calling scsi_probe_and_add_lun. 1298 * Scan the resulting list of LUNs by calling scsi_probe_and_add_lun.
1301 * 1299 *
1302 * If BLIST_REPORTLUN2 is set, scan a target that supports more than 8 1300 * If BLIST_REPORTLUN2 is set, scan a target that supports more than 8
1303 * LUNs even if it's older than SCSI-3. 1301 * LUNs even if it's older than SCSI-3.
1304 * If BLIST_NOREPORTLUN is set, return 1 always. 1302 * If BLIST_NOREPORTLUN is set, return 1 always.
1305 * If BLIST_NOLUN is set, return 0 always. 1303 * If BLIST_NOLUN is set, return 0 always.
1306 * 1304 *
1307 * Return: 1305 * Return:
1308 * 0: scan completed (or no memory, so further scanning is futile) 1306 * 0: scan completed (or no memory, so further scanning is futile)
1309 * 1: could not scan with REPORT LUN 1307 * 1: could not scan with REPORT LUN
1310 **/ 1308 **/
1311 static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, 1309 static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
1312 int rescan) 1310 int rescan)
1313 { 1311 {
1314 char devname[64]; 1312 char devname[64];
1315 unsigned char scsi_cmd[MAX_COMMAND_SIZE]; 1313 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
1316 unsigned int length; 1314 unsigned int length;
1317 unsigned int lun; 1315 unsigned int lun;
1318 unsigned int num_luns; 1316 unsigned int num_luns;
1319 unsigned int retries; 1317 unsigned int retries;
1320 int result; 1318 int result;
1321 struct scsi_lun *lunp, *lun_data; 1319 struct scsi_lun *lunp, *lun_data;
1322 u8 *data; 1320 u8 *data;
1323 struct scsi_sense_hdr sshdr; 1321 struct scsi_sense_hdr sshdr;
1324 struct scsi_device *sdev; 1322 struct scsi_device *sdev;
1325 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 1323 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1326 int ret = 0; 1324 int ret = 0;
1327 1325
1328 /* 1326 /*
1329 * Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set. 1327 * Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set.
1330 * Also allow SCSI-2 if BLIST_REPORTLUN2 is set and host adapter does 1328 * Also allow SCSI-2 if BLIST_REPORTLUN2 is set and host adapter does
1331 * support more than 8 LUNs. 1329 * support more than 8 LUNs.
1332 */ 1330 */
1333 if (bflags & BLIST_NOREPORTLUN) 1331 if (bflags & BLIST_NOREPORTLUN)
1334 return 1; 1332 return 1;
1335 if (starget->scsi_level < SCSI_2 && 1333 if (starget->scsi_level < SCSI_2 &&
1336 starget->scsi_level != SCSI_UNKNOWN) 1334 starget->scsi_level != SCSI_UNKNOWN)
1337 return 1; 1335 return 1;
1338 if (starget->scsi_level < SCSI_3 && 1336 if (starget->scsi_level < SCSI_3 &&
1339 (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8)) 1337 (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8))
1340 return 1; 1338 return 1;
1341 if (bflags & BLIST_NOLUN) 1339 if (bflags & BLIST_NOLUN)
1342 return 0; 1340 return 0;
1343 1341
1344 if (!(sdev = scsi_device_lookup_by_target(starget, 0))) { 1342 if (!(sdev = scsi_device_lookup_by_target(starget, 0))) {
1345 sdev = scsi_alloc_sdev(starget, 0, NULL); 1343 sdev = scsi_alloc_sdev(starget, 0, NULL);
1346 if (!sdev) 1344 if (!sdev)
1347 return 0; 1345 return 0;
1348 if (scsi_device_get(sdev)) 1346 if (scsi_device_get(sdev))
1349 return 0; 1347 return 0;
1350 } 1348 }
1351 1349
1352 sprintf(devname, "host %d channel %d id %d", 1350 sprintf(devname, "host %d channel %d id %d",
1353 shost->host_no, sdev->channel, sdev->id); 1351 shost->host_no, sdev->channel, sdev->id);
1354 1352
1355 /* 1353 /*
1356 * Allocate enough to hold the header (the same size as one scsi_lun) 1354 * Allocate enough to hold the header (the same size as one scsi_lun)
1357 * plus the max number of luns we are requesting. 1355 * plus the max number of luns we are requesting.
1358 * 1356 *
1359 * Reallocating and trying again (with the exact amount we need) 1357 * Reallocating and trying again (with the exact amount we need)
1360 * would be nice, but then we need to somehow limit the size 1358 * would be nice, but then we need to somehow limit the size
1361 * allocated based on the available memory and the limits of 1359 * allocated based on the available memory and the limits of
1362 * kmalloc - we don't want a kmalloc() failure of a huge value to 1360 * kmalloc - we don't want a kmalloc() failure of a huge value to
1363 * prevent us from finding any LUNs on this target. 1361 * prevent us from finding any LUNs on this target.
1364 */ 1362 */
1365 length = (max_scsi_report_luns + 1) * sizeof(struct scsi_lun); 1363 length = (max_scsi_report_luns + 1) * sizeof(struct scsi_lun);
1366 lun_data = kmalloc(length, GFP_ATOMIC | 1364 lun_data = kmalloc(length, GFP_ATOMIC |
1367 (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0)); 1365 (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0));
1368 if (!lun_data) { 1366 if (!lun_data) {
1369 printk(ALLOC_FAILURE_MSG, __func__); 1367 printk(ALLOC_FAILURE_MSG, __func__);
1370 goto out; 1368 goto out;
1371 } 1369 }
1372 1370
1373 scsi_cmd[0] = REPORT_LUNS; 1371 scsi_cmd[0] = REPORT_LUNS;
1374 1372
1375 /* 1373 /*
1376 * bytes 1 - 5: reserved, set to zero. 1374 * bytes 1 - 5: reserved, set to zero.
1377 */ 1375 */
1378 memset(&scsi_cmd[1], 0, 5); 1376 memset(&scsi_cmd[1], 0, 5);
1379 1377
1380 /* 1378 /*
1381 * bytes 6 - 9: length of the command. 1379 * bytes 6 - 9: length of the command.
1382 */ 1380 */
1383 scsi_cmd[6] = (unsigned char) (length >> 24) & 0xff; 1381 scsi_cmd[6] = (unsigned char) (length >> 24) & 0xff;
1384 scsi_cmd[7] = (unsigned char) (length >> 16) & 0xff; 1382 scsi_cmd[7] = (unsigned char) (length >> 16) & 0xff;
1385 scsi_cmd[8] = (unsigned char) (length >> 8) & 0xff; 1383 scsi_cmd[8] = (unsigned char) (length >> 8) & 0xff;
1386 scsi_cmd[9] = (unsigned char) length & 0xff; 1384 scsi_cmd[9] = (unsigned char) length & 0xff;
1387 1385
1388 scsi_cmd[10] = 0; /* reserved */ 1386 scsi_cmd[10] = 0; /* reserved */
1389 scsi_cmd[11] = 0; /* control */ 1387 scsi_cmd[11] = 0; /* control */
1390 1388
1391 /* 1389 /*
1392 * We can get a UNIT ATTENTION, for example a power on/reset, so 1390 * We can get a UNIT ATTENTION, for example a power on/reset, so
1393 * retry a few times (like sd.c does for TEST UNIT READY). 1391 * retry a few times (like sd.c does for TEST UNIT READY).
1394 * Experience shows some combinations of adapter/devices get at 1392 * Experience shows some combinations of adapter/devices get at
1395 * least two power on/resets. 1393 * least two power on/resets.
1396 * 1394 *
1397 * Illegal requests (for devices that do not support REPORT LUNS) 1395 * Illegal requests (for devices that do not support REPORT LUNS)
1398 * should come through as a check condition, and will not generate 1396 * should come through as a check condition, and will not generate
1399 * a retry. 1397 * a retry.
1400 */ 1398 */
1401 for (retries = 0; retries < 3; retries++) { 1399 for (retries = 0; retries < 3; retries++) {
1402 SCSI_LOG_SCAN_BUS(3, printk (KERN_INFO "scsi scan: Sending" 1400 SCSI_LOG_SCAN_BUS(3, printk (KERN_INFO "scsi scan: Sending"
1403 " REPORT LUNS to %s (try %d)\n", devname, 1401 " REPORT LUNS to %s (try %d)\n", devname,
1404 retries)); 1402 retries));
1405 1403
1406 result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, 1404 result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
1407 lun_data, length, &sshdr, 1405 lun_data, length, &sshdr,
1408 SCSI_TIMEOUT + 4 * HZ, 3, NULL); 1406 SCSI_TIMEOUT + 4 * HZ, 3, NULL);
1409 1407
1410 SCSI_LOG_SCAN_BUS(3, printk (KERN_INFO "scsi scan: REPORT LUNS" 1408 SCSI_LOG_SCAN_BUS(3, printk (KERN_INFO "scsi scan: REPORT LUNS"
1411 " %s (try %d) result 0x%x\n", result 1409 " %s (try %d) result 0x%x\n", result
1412 ? "failed" : "successful", retries, result)); 1410 ? "failed" : "successful", retries, result));
1413 if (result == 0) 1411 if (result == 0)
1414 break; 1412 break;
1415 else if (scsi_sense_valid(&sshdr)) { 1413 else if (scsi_sense_valid(&sshdr)) {
1416 if (sshdr.sense_key != UNIT_ATTENTION) 1414 if (sshdr.sense_key != UNIT_ATTENTION)
1417 break; 1415 break;
1418 } 1416 }
1419 } 1417 }
1420 1418
1421 if (result) { 1419 if (result) {
1422 /* 1420 /*
1423 * The device probably does not support a REPORT LUN command 1421 * The device probably does not support a REPORT LUN command
1424 */ 1422 */
1425 ret = 1; 1423 ret = 1;
1426 goto out_err; 1424 goto out_err;
1427 } 1425 }
1428 1426
1429 /* 1427 /*
1430 * Get the length from the first four bytes of lun_data. 1428 * Get the length from the first four bytes of lun_data.
1431 */ 1429 */
1432 data = (u8 *) lun_data->scsi_lun; 1430 data = (u8 *) lun_data->scsi_lun;
1433 length = ((data[0] << 24) | (data[1] << 16) | 1431 length = ((data[0] << 24) | (data[1] << 16) |
1434 (data[2] << 8) | (data[3] << 0)); 1432 (data[2] << 8) | (data[3] << 0));
1435 1433
1436 num_luns = (length / sizeof(struct scsi_lun)); 1434 num_luns = (length / sizeof(struct scsi_lun));
1437 if (num_luns > max_scsi_report_luns) { 1435 if (num_luns > max_scsi_report_luns) {
1438 printk(KERN_WARNING "scsi: On %s only %d (max_scsi_report_luns)" 1436 printk(KERN_WARNING "scsi: On %s only %d (max_scsi_report_luns)"
1439 " of %d luns reported, try increasing" 1437 " of %d luns reported, try increasing"
1440 " max_scsi_report_luns.\n", devname, 1438 " max_scsi_report_luns.\n", devname,
1441 max_scsi_report_luns, num_luns); 1439 max_scsi_report_luns, num_luns);
1442 num_luns = max_scsi_report_luns; 1440 num_luns = max_scsi_report_luns;
1443 } 1441 }
1444 1442
1445 SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev, 1443 SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
1446 "scsi scan: REPORT LUN scan\n")); 1444 "scsi scan: REPORT LUN scan\n"));
1447 1445
1448 /* 1446 /*
1449 * Scan the luns in lun_data. The entry at offset 0 is really 1447 * Scan the luns in lun_data. The entry at offset 0 is really
1450 * the header, so start at 1 and go up to and including num_luns. 1448 * the header, so start at 1 and go up to and including num_luns.
1451 */ 1449 */
1452 for (lunp = &lun_data[1]; lunp <= &lun_data[num_luns]; lunp++) { 1450 for (lunp = &lun_data[1]; lunp <= &lun_data[num_luns]; lunp++) {
1453 lun = scsilun_to_int(lunp); 1451 lun = scsilun_to_int(lunp);
1454 1452
1455 /* 1453 /*
1456 * Check if the unused part of lunp is non-zero, and so 1454 * Check if the unused part of lunp is non-zero, and so
1457 * does not fit in lun. 1455 * does not fit in lun.
1458 */ 1456 */
1459 if (memcmp(&lunp->scsi_lun[sizeof(lun)], "\0\0\0\0", 4)) { 1457 if (memcmp(&lunp->scsi_lun[sizeof(lun)], "\0\0\0\0", 4)) {
1460 int i; 1458 int i;
1461 1459
1462 /* 1460 /*
1463 * Output an error displaying the LUN in byte order, 1461 * Output an error displaying the LUN in byte order,
1464 * this differs from what linux would print for the 1462 * this differs from what linux would print for the
1465 * integer LUN value. 1463 * integer LUN value.
1466 */ 1464 */
1467 printk(KERN_WARNING "scsi: %s lun 0x", devname); 1465 printk(KERN_WARNING "scsi: %s lun 0x", devname);
1468 data = (char *)lunp->scsi_lun; 1466 data = (char *)lunp->scsi_lun;
1469 for (i = 0; i < sizeof(struct scsi_lun); i++) 1467 for (i = 0; i < sizeof(struct scsi_lun); i++)
1470 printk("%02x", data[i]); 1468 printk("%02x", data[i]);
1471 printk(" has a LUN larger than currently supported.\n"); 1469 printk(" has a LUN larger than currently supported.\n");
1472 } else if (lun > sdev->host->max_lun) { 1470 } else if (lun > sdev->host->max_lun) {
1473 printk(KERN_WARNING "scsi: %s lun%d has a LUN larger" 1471 printk(KERN_WARNING "scsi: %s lun%d has a LUN larger"
1474 " than allowed by the host adapter\n", 1472 " than allowed by the host adapter\n",
1475 devname, lun); 1473 devname, lun);
1476 } else { 1474 } else {
1477 int res; 1475 int res;
1478 1476
1479 res = scsi_probe_and_add_lun(starget, 1477 res = scsi_probe_and_add_lun(starget,
1480 lun, NULL, NULL, rescan, NULL); 1478 lun, NULL, NULL, rescan, NULL);
1481 if (res == SCSI_SCAN_NO_RESPONSE) { 1479 if (res == SCSI_SCAN_NO_RESPONSE) {
1482 /* 1480 /*
1483 * Got some results, but now none, abort. 1481 * Got some results, but now none, abort.
1484 */ 1482 */
1485 sdev_printk(KERN_ERR, sdev, 1483 sdev_printk(KERN_ERR, sdev,
1486 "Unexpected response" 1484 "Unexpected response"
1487 " from lun %d while scanning, scan" 1485 " from lun %d while scanning, scan"
1488 " aborted\n", lun); 1486 " aborted\n", lun);
1489 break; 1487 break;
1490 } 1488 }
1491 } 1489 }
1492 } 1490 }
1493 1491
1494 out_err: 1492 out_err:
1495 kfree(lun_data); 1493 kfree(lun_data);
1496 out: 1494 out:
1497 scsi_device_put(sdev); 1495 scsi_device_put(sdev);
1498 if (scsi_device_created(sdev)) 1496 if (scsi_device_created(sdev))
1499 /* 1497 /*
1500 * the sdev we used didn't appear in the report luns scan 1498 * the sdev we used didn't appear in the report luns scan
1501 */ 1499 */
1502 scsi_destroy_sdev(sdev); 1500 scsi_destroy_sdev(sdev);
1503 return ret; 1501 return ret;
1504 } 1502 }
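
The length decode above follows the SPC REPORT LUNS layout: the parameter data begins with an 8-byte header whose first four bytes carry the LUN list length in bytes (big-endian), followed by one 8-byte struct scsi_lun per entry. A stand-alone sketch of the same decode, purely illustrative and not part of this file (the helper name is invented):

#include <linux/types.h>

/* hypothetical helper: number of 8-byte LUN entries after the header */
static unsigned int example_report_luns_count(const u8 *data, size_t len)
{
        u32 list_len;

        if (len < 8)                    /* need at least the header */
                return 0;

        /* bytes 0-3: LUN LIST LENGTH, big-endian, in bytes */
        list_len = ((u32)data[0] << 24) | (data[1] << 16) |
                   (data[2] << 8) | data[3];

        return list_len / 8;            /* one struct scsi_lun per 8 bytes */
}
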
1505 1503
1506 struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel, 1504 struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
1507 uint id, uint lun, void *hostdata) 1505 uint id, uint lun, void *hostdata)
1508 { 1506 {
1509 struct scsi_device *sdev = ERR_PTR(-ENODEV); 1507 struct scsi_device *sdev = ERR_PTR(-ENODEV);
1510 struct device *parent = &shost->shost_gendev; 1508 struct device *parent = &shost->shost_gendev;
1511 struct scsi_target *starget; 1509 struct scsi_target *starget;
1512 1510
1513 if (strncmp(scsi_scan_type, "none", 4) == 0) 1511 if (strncmp(scsi_scan_type, "none", 4) == 0)
1514 return ERR_PTR(-ENODEV); 1512 return ERR_PTR(-ENODEV);
1515 1513
1516 starget = scsi_alloc_target(parent, channel, id); 1514 starget = scsi_alloc_target(parent, channel, id);
1517 if (!starget) 1515 if (!starget)
1518 return ERR_PTR(-ENOMEM); 1516 return ERR_PTR(-ENOMEM);
1519 1517
1520 mutex_lock(&shost->scan_mutex); 1518 mutex_lock(&shost->scan_mutex);
1521 if (!shost->async_scan) 1519 if (!shost->async_scan)
1522 scsi_complete_async_scans(); 1520 scsi_complete_async_scans();
1523 1521
1524 if (scsi_host_scan_allowed(shost)) 1522 if (scsi_host_scan_allowed(shost))
1525 scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata); 1523 scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata);
1526 mutex_unlock(&shost->scan_mutex); 1524 mutex_unlock(&shost->scan_mutex);
1527 scsi_target_reap(starget); 1525 scsi_target_reap(starget);
1528 put_device(&starget->dev); 1526 put_device(&starget->dev);
1529 1527
1530 return sdev; 1528 return sdev;
1531 } 1529 }
1532 EXPORT_SYMBOL(__scsi_add_device); 1530 EXPORT_SYMBOL(__scsi_add_device);
1533 1531
1534 int scsi_add_device(struct Scsi_Host *host, uint channel, 1532 int scsi_add_device(struct Scsi_Host *host, uint channel,
1535 uint target, uint lun) 1533 uint target, uint lun)
1536 { 1534 {
1537 struct scsi_device *sdev = 1535 struct scsi_device *sdev =
1538 __scsi_add_device(host, channel, target, lun, NULL); 1536 __scsi_add_device(host, channel, target, lun, NULL);
1539 if (IS_ERR(sdev)) 1537 if (IS_ERR(sdev))
1540 return PTR_ERR(sdev); 1538 return PTR_ERR(sdev);
1541 1539
1542 scsi_device_put(sdev); 1540 scsi_device_put(sdev);
1543 return 0; 1541 return 0;
1544 } 1542 }
1545 EXPORT_SYMBOL(scsi_add_device); 1543 EXPORT_SYMBOL(scsi_add_device);
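
scsi_add_device() above is the exported hot-plug entry point for callers that already know the address of a new device; it returns 0 on success or the PTR_ERR value from __scsi_add_device(). A hedged sketch of a low-level driver using it (helper name and message are invented):

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

/* hypothetical hot-add path in a low-level driver */
static void example_hotplug_lun(struct Scsi_Host *shost, uint channel,
                                uint id, uint lun)
{
        int err = scsi_add_device(shost, channel, id, lun);

        if (err)
                shost_printk(KERN_WARNING, shost,
                             "hot add of %u:%u:%u failed: %d\n",
                             channel, id, lun, err);
}
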
1546 1544
1547 void scsi_rescan_device(struct device *dev) 1545 void scsi_rescan_device(struct device *dev)
1548 { 1546 {
1549 struct scsi_driver *drv; 1547 struct scsi_driver *drv;
1550 1548
1551 if (!dev->driver) 1549 if (!dev->driver)
1552 return; 1550 return;
1553 1551
1554 drv = to_scsi_driver(dev->driver); 1552 drv = to_scsi_driver(dev->driver);
1555 if (try_module_get(drv->owner)) { 1553 if (try_module_get(drv->owner)) {
1556 if (drv->rescan) 1554 if (drv->rescan)
1557 drv->rescan(dev); 1555 drv->rescan(dev);
1558 module_put(drv->owner); 1556 module_put(drv->owner);
1559 } 1557 }
1560 } 1558 }
1561 EXPORT_SYMBOL(scsi_rescan_device); 1559 EXPORT_SYMBOL(scsi_rescan_device);
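
The rescan bounce above lands in the upper-level driver's rescan hook (sd, sr, ...). A sketch of how such a driver might wire it up; the names are invented, and the gendrv embedding is assumed from the usual struct scsi_driver layout (the owner and rescan members are the ones dereferenced above):

#include <linux/module.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>

static void example_rescan(struct device *dev)
{
        struct scsi_device *sdev = to_scsi_device(dev);

        /* re-read capacity / mode pages for sdev here */
        sdev_printk(KERN_INFO, sdev, "rescan requested\n");
}

static struct scsi_driver example_template = {
        .owner  = THIS_MODULE,
        .gendrv = {
                .name = "example",
        },
        .rescan = example_rescan,
};
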
1562 1560
1563 static void __scsi_scan_target(struct device *parent, unsigned int channel, 1561 static void __scsi_scan_target(struct device *parent, unsigned int channel,
1564 unsigned int id, unsigned int lun, int rescan) 1562 unsigned int id, unsigned int lun, int rescan)
1565 { 1563 {
1566 struct Scsi_Host *shost = dev_to_shost(parent); 1564 struct Scsi_Host *shost = dev_to_shost(parent);
1567 int bflags = 0; 1565 int bflags = 0;
1568 int res; 1566 int res;
1569 struct scsi_target *starget; 1567 struct scsi_target *starget;
1570 1568
1571 if (shost->this_id == id) 1569 if (shost->this_id == id)
1572 /* 1570 /*
1573 * Don't scan the host adapter 1571 * Don't scan the host adapter
1574 */ 1572 */
1575 return; 1573 return;
1576 1574
1577 starget = scsi_alloc_target(parent, channel, id); 1575 starget = scsi_alloc_target(parent, channel, id);
1578 if (!starget) 1576 if (!starget)
1579 return; 1577 return;
1580 1578
1581 if (lun != SCAN_WILD_CARD) { 1579 if (lun != SCAN_WILD_CARD) {
1582 /* 1580 /*
1583 * Scan for a specific host/chan/id/lun. 1581 * Scan for a specific host/chan/id/lun.
1584 */ 1582 */
1585 scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan, NULL); 1583 scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan, NULL);
1586 goto out_reap; 1584 goto out_reap;
1587 } 1585 }
1588 1586
1589 /* 1587 /*
1590 * Scan LUN 0, if there is some response, scan further. Ideally, we 1588 * Scan LUN 0, if there is some response, scan further. Ideally, we
1591 * would not configure LUN 0 until all LUNs are scanned. 1589 * would not configure LUN 0 until all LUNs are scanned.
1592 */ 1590 */
1593 res = scsi_probe_and_add_lun(starget, 0, &bflags, NULL, rescan, NULL); 1591 res = scsi_probe_and_add_lun(starget, 0, &bflags, NULL, rescan, NULL);
1594 if (res == SCSI_SCAN_LUN_PRESENT || res == SCSI_SCAN_TARGET_PRESENT) { 1592 if (res == SCSI_SCAN_LUN_PRESENT || res == SCSI_SCAN_TARGET_PRESENT) {
1595 if (scsi_report_lun_scan(starget, bflags, rescan) != 0) 1593 if (scsi_report_lun_scan(starget, bflags, rescan) != 0)
1596 /* 1594 /*
1597 * The REPORT LUN did not scan the target, 1595 * The REPORT LUN did not scan the target,
1598 * do a sequential scan. 1596 * do a sequential scan.
1599 */ 1597 */
1600 scsi_sequential_lun_scan(starget, bflags, 1598 scsi_sequential_lun_scan(starget, bflags,
1601 starget->scsi_level, rescan); 1599 starget->scsi_level, rescan);
1602 } 1600 }
1603 1601
1604 out_reap: 1602 out_reap:
1605 /* now determine if the target has any children at all 1603 /* now determine if the target has any children at all
1606 * and if not, nuke it */ 1604 * and if not, nuke it */
1607 scsi_target_reap(starget); 1605 scsi_target_reap(starget);
1608 1606
1609 put_device(&starget->dev); 1607 put_device(&starget->dev);
1610 } 1608 }
1611 1609
1612 /** 1610 /**
1613 * scsi_scan_target - scan a target id, possibly including all LUNs on the target. 1611 * scsi_scan_target - scan a target id, possibly including all LUNs on the target.
1614 * @parent: host to scan 1612 * @parent: host to scan
1615 * @channel: channel to scan 1613 * @channel: channel to scan
1616 * @id: target id to scan 1614 * @id: target id to scan
1617 * @lun: Specific LUN to scan or SCAN_WILD_CARD 1615 * @lun: Specific LUN to scan or SCAN_WILD_CARD
1618 * @rescan: passed to LUN scanning routines 1616 * @rescan: passed to LUN scanning routines
1619 * 1617 *
1620 * Description: 1618 * Description:
1621 * Scan the target id on @parent, @channel, and @id. Scan at least LUN 0, 1619 * Scan the target id on @parent, @channel, and @id. Scan at least LUN 0,
1622 * and possibly all LUNs on the target id. 1620 * and possibly all LUNs on the target id.
1623 * 1621 *
1624 * First try a REPORT LUN scan, if that does not scan the target, do a 1622 * First try a REPORT LUN scan, if that does not scan the target, do a
1625 * sequential scan of LUNs on the target id. 1623 * sequential scan of LUNs on the target id.
1626 **/ 1624 **/
1627 void scsi_scan_target(struct device *parent, unsigned int channel, 1625 void scsi_scan_target(struct device *parent, unsigned int channel,
1628 unsigned int id, unsigned int lun, int rescan) 1626 unsigned int id, unsigned int lun, int rescan)
1629 { 1627 {
1630 struct Scsi_Host *shost = dev_to_shost(parent); 1628 struct Scsi_Host *shost = dev_to_shost(parent);
1631 1629
1632 if (strncmp(scsi_scan_type, "none", 4) == 0) 1630 if (strncmp(scsi_scan_type, "none", 4) == 0)
1633 return; 1631 return;
1634 1632
1635 mutex_lock(&shost->scan_mutex); 1633 mutex_lock(&shost->scan_mutex);
1636 if (!shost->async_scan) 1634 if (!shost->async_scan)
1637 scsi_complete_async_scans(); 1635 scsi_complete_async_scans();
1638 1636
1639 if (scsi_host_scan_allowed(shost)) 1637 if (scsi_host_scan_allowed(shost))
1640 __scsi_scan_target(parent, channel, id, lun, rescan); 1638 __scsi_scan_target(parent, channel, id, lun, rescan);
1641 mutex_unlock(&shost->scan_mutex); 1639 mutex_unlock(&shost->scan_mutex);
1642 } 1640 }
1643 EXPORT_SYMBOL(scsi_scan_target); 1641 EXPORT_SYMBOL(scsi_scan_target);
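
Transport classes are the usual callers of scsi_scan_target(): when a new remote port or session appears they ask for every LUN behind it by passing SCAN_WILD_CARD as the LUN. A minimal sketch with an invented callback name:

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

/* hypothetical "target discovered" callback in a transport class */
static void example_target_found(struct device *target_parent,
                                 unsigned int channel, unsigned int id)
{
        /* probes LUN 0, then REPORT LUNS or a sequential LUN scan */
        scsi_scan_target(target_parent, channel, id, SCAN_WILD_CARD, 0);
}
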
1644 1642
1645 static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel, 1643 static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel,
1646 unsigned int id, unsigned int lun, int rescan) 1644 unsigned int id, unsigned int lun, int rescan)
1647 { 1645 {
1648 uint order_id; 1646 uint order_id;
1649 1647
1650 if (id == SCAN_WILD_CARD) 1648 if (id == SCAN_WILD_CARD)
1651 for (id = 0; id < shost->max_id; ++id) { 1649 for (id = 0; id < shost->max_id; ++id) {
1652 /* 1650 /*
1653 * XXX adapter drivers when possible (FCP, iSCSI) 1651 * XXX adapter drivers when possible (FCP, iSCSI)
1654 * could modify max_id to match the current max, 1652 * could modify max_id to match the current max,
1655 * not the absolute max. 1653 * not the absolute max.
1656 * 1654 *
1657 * XXX add a shost id iterator, so for example, 1655 * XXX add a shost id iterator, so for example,
1658 * the FC ID can be the same as a target id 1656 * the FC ID can be the same as a target id
1659 * without a huge overhead of sparse id's. 1657 * without a huge overhead of sparse id's.
1660 */ 1658 */
1661 if (shost->reverse_ordering) 1659 if (shost->reverse_ordering)
1662 /* 1660 /*
1663 * Scan from high to low id. 1661 * Scan from high to low id.
1664 */ 1662 */
1665 order_id = shost->max_id - id - 1; 1663 order_id = shost->max_id - id - 1;
1666 else 1664 else
1667 order_id = id; 1665 order_id = id;
1668 __scsi_scan_target(&shost->shost_gendev, channel, 1666 __scsi_scan_target(&shost->shost_gendev, channel,
1669 order_id, lun, rescan); 1667 order_id, lun, rescan);
1670 } 1668 }
1671 else 1669 else
1672 __scsi_scan_target(&shost->shost_gendev, channel, 1670 __scsi_scan_target(&shost->shost_gendev, channel,
1673 id, lun, rescan); 1671 id, lun, rescan);
1674 } 1672 }
1675 1673
1676 int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel, 1674 int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
1677 unsigned int id, unsigned int lun, int rescan) 1675 unsigned int id, unsigned int lun, int rescan)
1678 { 1676 {
1679 SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost, 1677 SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost,
1680 "%s: <%u:%u:%u>\n", 1678 "%s: <%u:%u:%u>\n",
1681 __func__, channel, id, lun)); 1679 __func__, channel, id, lun));
1682 1680
1683 if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) || 1681 if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
1684 ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) || 1682 ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
1685 ((lun != SCAN_WILD_CARD) && (lun > shost->max_lun))) 1683 ((lun != SCAN_WILD_CARD) && (lun > shost->max_lun)))
1686 return -EINVAL; 1684 return -EINVAL;
1687 1685
1688 mutex_lock(&shost->scan_mutex); 1686 mutex_lock(&shost->scan_mutex);
1689 if (!shost->async_scan) 1687 if (!shost->async_scan)
1690 scsi_complete_async_scans(); 1688 scsi_complete_async_scans();
1691 1689
1692 if (scsi_host_scan_allowed(shost)) { 1690 if (scsi_host_scan_allowed(shost)) {
1693 if (channel == SCAN_WILD_CARD) 1691 if (channel == SCAN_WILD_CARD)
1694 for (channel = 0; channel <= shost->max_channel; 1692 for (channel = 0; channel <= shost->max_channel;
1695 channel++) 1693 channel++)
1696 scsi_scan_channel(shost, channel, id, lun, 1694 scsi_scan_channel(shost, channel, id, lun,
1697 rescan); 1695 rescan);
1698 else 1696 else
1699 scsi_scan_channel(shost, channel, id, lun, rescan); 1697 scsi_scan_channel(shost, channel, id, lun, rescan);
1700 } 1698 }
1701 mutex_unlock(&shost->scan_mutex); 1699 mutex_unlock(&shost->scan_mutex);
1702 1700
1703 return 0; 1701 return 0;
1704 } 1702 }
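
scsi_scan_host_selected() (declared in the midlayer-private scsi_priv.h) is roughly what the sysfs "scan" attribute ends up calling after parsing the user's "channel id lun" triple. A deliberately simplified sketch of such a caller inside the midlayer; the real parser also accepts "-" as a wildcard for each field:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <scsi/scsi_host.h>

static int example_user_scan(struct Scsi_Host *shost, const char *buf)
{
        unsigned int channel, id, lun;

        if (sscanf(buf, "%u %u %u", &channel, &id, &lun) != 3)
                return -EINVAL;

        return scsi_scan_host_selected(shost, channel, id, lun, 1);
}
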
1705 1703
1706 static void scsi_sysfs_add_devices(struct Scsi_Host *shost) 1704 static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
1707 { 1705 {
1708 struct scsi_device *sdev; 1706 struct scsi_device *sdev;
1709 shost_for_each_device(sdev, shost) { 1707 shost_for_each_device(sdev, shost) {
1710 if (!scsi_host_scan_allowed(shost) || 1708 if (!scsi_host_scan_allowed(shost) ||
1711 scsi_sysfs_add_sdev(sdev) != 0) 1709 scsi_sysfs_add_sdev(sdev) != 0)
1712 scsi_destroy_sdev(sdev); 1710 scsi_destroy_sdev(sdev);
1713 } 1711 }
1714 } 1712 }
1715 1713
1716 /** 1714 /**
1717 * scsi_prep_async_scan - prepare for an async scan 1715 * scsi_prep_async_scan - prepare for an async scan
1718 * @shost: the host which will be scanned 1716 * @shost: the host which will be scanned
1719 * Returns: a cookie to be passed to scsi_finish_async_scan() 1717 * Returns: a cookie to be passed to scsi_finish_async_scan()
1720 * 1718 *
1721 * Tells the midlayer this host is going to do an asynchronous scan. 1719 * Tells the midlayer this host is going to do an asynchronous scan.
1722 * It reserves the host's position in the scanning list and ensures 1720 * It reserves the host's position in the scanning list and ensures
1723 * that other asynchronous scans started after this one won't affect the 1721 * that other asynchronous scans started after this one won't affect the
1724 * ordering of the discovered devices. 1722 * ordering of the discovered devices.
1725 */ 1723 */
1726 static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost) 1724 static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
1727 { 1725 {
1728 struct async_scan_data *data; 1726 struct async_scan_data *data;
1729 unsigned long flags; 1727 unsigned long flags;
1730 1728
1731 if (strncmp(scsi_scan_type, "sync", 4) == 0) 1729 if (strncmp(scsi_scan_type, "sync", 4) == 0)
1732 return NULL; 1730 return NULL;
1733 1731
1734 if (shost->async_scan) { 1732 if (shost->async_scan) {
1735 printk("%s called twice for host %d", __func__, 1733 printk("%s called twice for host %d", __func__,
1736 shost->host_no); 1734 shost->host_no);
1737 dump_stack(); 1735 dump_stack();
1738 return NULL; 1736 return NULL;
1739 } 1737 }
1740 1738
1741 data = kmalloc(sizeof(*data), GFP_KERNEL); 1739 data = kmalloc(sizeof(*data), GFP_KERNEL);
1742 if (!data) 1740 if (!data)
1743 goto err; 1741 goto err;
1744 data->shost = scsi_host_get(shost); 1742 data->shost = scsi_host_get(shost);
1745 if (!data->shost) 1743 if (!data->shost)
1746 goto err; 1744 goto err;
1747 init_completion(&data->prev_finished); 1745 init_completion(&data->prev_finished);
1748 1746
1749 mutex_lock(&shost->scan_mutex); 1747 mutex_lock(&shost->scan_mutex);
1750 spin_lock_irqsave(shost->host_lock, flags); 1748 spin_lock_irqsave(shost->host_lock, flags);
1751 shost->async_scan = 1; 1749 shost->async_scan = 1;
1752 spin_unlock_irqrestore(shost->host_lock, flags); 1750 spin_unlock_irqrestore(shost->host_lock, flags);
1753 mutex_unlock(&shost->scan_mutex); 1751 mutex_unlock(&shost->scan_mutex);
1754 1752
1755 spin_lock(&async_scan_lock); 1753 spin_lock(&async_scan_lock);
1756 if (list_empty(&scanning_hosts)) 1754 if (list_empty(&scanning_hosts))
1757 complete(&data->prev_finished); 1755 complete(&data->prev_finished);
1758 list_add_tail(&data->list, &scanning_hosts); 1756 list_add_tail(&data->list, &scanning_hosts);
1759 spin_unlock(&async_scan_lock); 1757 spin_unlock(&async_scan_lock);
1760 1758
1761 return data; 1759 return data;
1762 1760
1763 err: 1761 err:
1764 kfree(data); 1762 kfree(data);
1765 return NULL; 1763 return NULL;
1766 } 1764 }
1767 1765
1768 /** 1766 /**
1769 * scsi_finish_async_scan - asynchronous scan has finished 1767 * scsi_finish_async_scan - asynchronous scan has finished
1770 * @data: cookie returned from earlier call to scsi_prep_async_scan() 1768 * @data: cookie returned from earlier call to scsi_prep_async_scan()
1771 * 1769 *
1772 * All the devices currently attached to this host have been found. 1770 * All the devices currently attached to this host have been found.
1773 * This function announces all the devices it has found to the rest 1771 * This function announces all the devices it has found to the rest
1774 * of the system. 1772 * of the system.
1775 */ 1773 */
1776 static void scsi_finish_async_scan(struct async_scan_data *data) 1774 static void scsi_finish_async_scan(struct async_scan_data *data)
1777 { 1775 {
1778 struct Scsi_Host *shost; 1776 struct Scsi_Host *shost;
1779 unsigned long flags; 1777 unsigned long flags;
1780 1778
1781 if (!data) 1779 if (!data)
1782 return; 1780 return;
1783 1781
1784 shost = data->shost; 1782 shost = data->shost;
1785 1783
1786 mutex_lock(&shost->scan_mutex); 1784 mutex_lock(&shost->scan_mutex);
1787 1785
1788 if (!shost->async_scan) { 1786 if (!shost->async_scan) {
1789 printk("%s called twice for host %d", __func__, 1787 printk("%s called twice for host %d", __func__,
1790 shost->host_no); 1788 shost->host_no);
1791 dump_stack(); 1789 dump_stack();
1792 mutex_unlock(&shost->scan_mutex); 1790 mutex_unlock(&shost->scan_mutex);
1793 return; 1791 return;
1794 } 1792 }
1795 1793
1796 wait_for_completion(&data->prev_finished); 1794 wait_for_completion(&data->prev_finished);
1797 1795
1798 scsi_sysfs_add_devices(shost); 1796 scsi_sysfs_add_devices(shost);
1799 1797
1800 spin_lock_irqsave(shost->host_lock, flags); 1798 spin_lock_irqsave(shost->host_lock, flags);
1801 shost->async_scan = 0; 1799 shost->async_scan = 0;
1802 spin_unlock_irqrestore(shost->host_lock, flags); 1800 spin_unlock_irqrestore(shost->host_lock, flags);
1803 1801
1804 mutex_unlock(&shost->scan_mutex); 1802 mutex_unlock(&shost->scan_mutex);
1805 1803
1806 spin_lock(&async_scan_lock); 1804 spin_lock(&async_scan_lock);
1807 list_del(&data->list); 1805 list_del(&data->list);
1808 if (!list_empty(&scanning_hosts)) { 1806 if (!list_empty(&scanning_hosts)) {
1809 struct async_scan_data *next = list_entry(scanning_hosts.next, 1807 struct async_scan_data *next = list_entry(scanning_hosts.next,
1810 struct async_scan_data, list); 1808 struct async_scan_data, list);
1811 complete(&next->prev_finished); 1809 complete(&next->prev_finished);
1812 } 1810 }
1813 spin_unlock(&async_scan_lock); 1811 spin_unlock(&async_scan_lock);
1814 1812
1815 scsi_host_put(shost); 1813 scsi_host_put(shost);
1816 kfree(data); 1814 kfree(data);
1817 } 1815 }
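
The two functions above keep asynchronous scans announcing their devices in registration order with a chain of completions: a scan adds itself to scanning_hosts (completing its own prev_finished immediately if nobody is ahead of it), and on finish it waits for prev_finished, publishes its devices, drops off the list, and completes whoever is now at the head. The same pattern in isolation, with invented names, might look like this:

#include <linux/list.h>
#include <linux/completion.h>
#include <linux/spinlock.h>

struct example_scan {
        struct list_head list;
        struct completion prev_finished;
};

static LIST_HEAD(example_pending);
static DEFINE_SPINLOCK(example_lock);

static void example_scan_begin(struct example_scan *s)
{
        init_completion(&s->prev_finished);
        spin_lock(&example_lock);
        if (list_empty(&example_pending))       /* nobody ahead of us */
                complete(&s->prev_finished);
        list_add_tail(&s->list, &example_pending);
        spin_unlock(&example_lock);
}

static void example_scan_end(struct example_scan *s)
{
        wait_for_completion(&s->prev_finished); /* preserve ordering */
        /* ... publish this scan's results here ... */
        spin_lock(&example_lock);
        list_del(&s->list);
        if (!list_empty(&example_pending)) {
                struct example_scan *next =
                        list_entry(example_pending.next,
                                   struct example_scan, list);
                complete(&next->prev_finished);
        }
        spin_unlock(&example_lock);
}
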
1818 1816
1819 static void do_scsi_scan_host(struct Scsi_Host *shost) 1817 static void do_scsi_scan_host(struct Scsi_Host *shost)
1820 { 1818 {
1821 if (shost->hostt->scan_finished) { 1819 if (shost->hostt->scan_finished) {
1822 unsigned long start = jiffies; 1820 unsigned long start = jiffies;
1823 if (shost->hostt->scan_start) 1821 if (shost->hostt->scan_start)
1824 shost->hostt->scan_start(shost); 1822 shost->hostt->scan_start(shost);
1825 1823
1826 while (!shost->hostt->scan_finished(shost, jiffies - start)) 1824 while (!shost->hostt->scan_finished(shost, jiffies - start))
1827 msleep(10); 1825 msleep(10);
1828 } else { 1826 } else {
1829 scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD, 1827 scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
1830 SCAN_WILD_CARD, 0); 1828 SCAN_WILD_CARD, 0);
1831 } 1829 }
1832 } 1830 }
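
do_scsi_scan_host() prefers the host template's scan_start()/scan_finished() pair and polls the latter every 10 ms, passing the elapsed jiffies so the driver can time out on its own. A hedged sketch of a low-level driver implementing the pair (struct example_hba and the ten-second limit are invented):

#include <linux/jiffies.h>
#include <scsi/scsi_host.h>

struct example_hba {
        int discovery_done;             /* set by the driver's discovery path */
};

static void example_scan_start(struct Scsi_Host *shost)
{
        struct example_hba *hba = shost_priv(shost);

        hba->discovery_done = 0;
        /* kick off firmware/fabric discovery here */
}

static int example_scan_finished(struct Scsi_Host *shost, unsigned long elapsed)
{
        struct example_hba *hba = shost_priv(shost);

        /* report completion, or give up after ten seconds of polling */
        return hba->discovery_done || elapsed > 10 * HZ;
}
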
1833 1831
1834 static int do_scan_async(void *_data) 1832 static int do_scan_async(void *_data)
1835 { 1833 {
1836 struct async_scan_data *data = _data; 1834 struct async_scan_data *data = _data;
1837 do_scsi_scan_host(data->shost); 1835 do_scsi_scan_host(data->shost);
1838 scsi_finish_async_scan(data); 1836 scsi_finish_async_scan(data);
1839 return 0; 1837 return 0;
1840 } 1838 }
1841 1839
1842 /** 1840 /**
1843 * scsi_scan_host - scan the given adapter 1841 * scsi_scan_host - scan the given adapter
1844 * @shost: adapter to scan 1842 * @shost: adapter to scan
1845 **/ 1843 **/
1846 void scsi_scan_host(struct Scsi_Host *shost) 1844 void scsi_scan_host(struct Scsi_Host *shost)
1847 { 1845 {
1848 struct task_struct *p; 1846 struct task_struct *p;
1849 struct async_scan_data *data; 1847 struct async_scan_data *data;
1850 1848
1851 if (strncmp(scsi_scan_type, "none", 4) == 0) 1849 if (strncmp(scsi_scan_type, "none", 4) == 0)
1852 return; 1850 return;
1853 1851
1854 data = scsi_prep_async_scan(shost); 1852 data = scsi_prep_async_scan(shost);
1855 if (!data) { 1853 if (!data) {
1856 do_scsi_scan_host(shost); 1854 do_scsi_scan_host(shost);
1857 return; 1855 return;
1858 } 1856 }
1859 1857
1860 p = kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no); 1858 p = kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no);
1861 if (IS_ERR(p)) 1859 if (IS_ERR(p))
1862 do_scan_async(data); 1860 do_scan_async(data);
1863 } 1861 }
1864 EXPORT_SYMBOL(scsi_scan_host); 1862 EXPORT_SYMBOL(scsi_scan_host);
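
From a low-level driver's point of view the usual sequence is scsi_host_alloc(), scsi_add_host(), then scsi_scan_host(); with asynchronous scanning enabled the last call returns as soon as the "scsi_scan_%d" thread has been started. A minimal probe-tail sketch (names invented, error handling trimmed):

#include <linux/errno.h>
#include <linux/pci.h>
#include <scsi/scsi_host.h>

static int example_setup_host(struct pci_dev *pdev,
                              struct scsi_host_template *tmpl)
{
        struct Scsi_Host *shost;
        int err;

        shost = scsi_host_alloc(tmpl, 0);       /* no private data here */
        if (!shost)
                return -ENOMEM;

        err = scsi_add_host(shost, &pdev->dev);
        if (err) {
                scsi_host_put(shost);
                return err;
        }

        scsi_scan_host(shost);  /* may return before scanning completes */
        return 0;
}
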
1865 1863
1866 void scsi_forget_host(struct Scsi_Host *shost) 1864 void scsi_forget_host(struct Scsi_Host *shost)
1867 { 1865 {
1868 struct scsi_device *sdev; 1866 struct scsi_device *sdev;
1869 unsigned long flags; 1867 unsigned long flags;
1870 1868
1871 restart: 1869 restart:
1872 spin_lock_irqsave(shost->host_lock, flags); 1870 spin_lock_irqsave(shost->host_lock, flags);
1873 list_for_each_entry(sdev, &shost->__devices, siblings) { 1871 list_for_each_entry(sdev, &shost->__devices, siblings) {
1874 if (sdev->sdev_state == SDEV_DEL) 1872 if (sdev->sdev_state == SDEV_DEL)
1875 continue; 1873 continue;
1876 spin_unlock_irqrestore(shost->host_lock, flags); 1874 spin_unlock_irqrestore(shost->host_lock, flags);
1877 __scsi_remove_device(sdev); 1875 __scsi_remove_device(sdev);
1878 goto restart; 1876 goto restart;
1879 } 1877 }
1880 spin_unlock_irqrestore(shost->host_lock, flags); 1878 spin_unlock_irqrestore(shost->host_lock, flags);
1881 } 1879 }
1882 1880
1883 /* 1881 /*
1884 * Function: scsi_get_host_dev() 1882 * Function: scsi_get_host_dev()
1885 * 1883 *
1886 * Purpose: Create a scsi_device that points to the host adapter itself. 1884 * Purpose: Create a scsi_device that points to the host adapter itself.
1887 * 1885 *
1888 * Arguments: SHpnt - Host that needs a scsi_device 1886 * Arguments: SHpnt - Host that needs a scsi_device
1889 * 1887 *
1890 * Lock status: None assumed. 1888 * Lock status: None assumed.
1891 * 1889 *
1892 * Returns: The scsi_device or NULL 1890 * Returns: The scsi_device or NULL
1893 * 1891 *
1894 * Notes: 1892 * Notes:
1895 * Attach a single scsi_device to the Scsi_Host - this should 1893 * Attach a single scsi_device to the Scsi_Host - this should
1896 * be made to look like a "pseudo-device" that points to the 1894 * be made to look like a "pseudo-device" that points to the
1897 * HA itself. 1895 * HA itself.
1898 * 1896 *
1899 * Note - this device is not accessible from any high-level 1897 * Note - this device is not accessible from any high-level
1900 * drivers (including generics), which is probably not 1898 * drivers (including generics), which is probably not
1901 * optimal. We can add hooks later to attach 1899 * optimal. We can add hooks later to attach
1902 */ 1900 */
1903 struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost) 1901 struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
1904 { 1902 {
1905 struct scsi_device *sdev = NULL; 1903 struct scsi_device *sdev = NULL;
1906 struct scsi_target *starget; 1904 struct scsi_target *starget;
1907 1905
1908 mutex_lock(&shost->scan_mutex); 1906 mutex_lock(&shost->scan_mutex);
1909 if (!scsi_host_scan_allowed(shost)) 1907 if (!scsi_host_scan_allowed(shost))
1910 goto out; 1908 goto out;
1911 starget = scsi_alloc_target(&shost->shost_gendev, 0, shost->this_id); 1909 starget = scsi_alloc_target(&shost->shost_gendev, 0, shost->this_id);
1912 if (!starget) 1910 if (!starget)
1913 goto out; 1911 goto out;
1914 1912
1915 sdev = scsi_alloc_sdev(starget, 0, NULL); 1913 sdev = scsi_alloc_sdev(starget, 0, NULL);
1916 if (sdev) { 1914 if (sdev) {
1917 sdev->sdev_gendev.parent = get_device(&starget->dev); 1915 sdev->sdev_gendev.parent = get_device(&starget->dev);
1918 sdev->borken = 0; 1916 sdev->borken = 0;
1919 } else 1917 } else
1920 scsi_target_reap(starget); 1918 scsi_target_reap(starget);
1921 put_device(&starget->dev); 1919 put_device(&starget->dev);
1922 out: 1920 out:
1923 mutex_unlock(&shost->scan_mutex); 1921 mutex_unlock(&shost->scan_mutex);
1924 return sdev; 1922 return sdev;
1925 } 1923 }
1926 EXPORT_SYMBOL(scsi_get_host_dev); 1924 EXPORT_SYMBOL(scsi_get_host_dev);
1927 1925
1928 /* 1926 /*
1929 * Function: scsi_free_host_dev() 1927 * Function: scsi_free_host_dev()
1930 * 1928 *
1931 * Purpose: Free a scsi_device that points to the host adapter itself. 1929 * Purpose: Free a scsi_device that points to the host adapter itself.
1932 * 1930 *
1933 * Arguments: SHpnt - Host that needs a scsi_device 1931 * Arguments: SHpnt - Host that needs a scsi_device
1934 * 1932 *
1935 * Lock status: None assumed. 1933 * Lock status: None assumed.
1936 * 1934 *
1937 * Returns: Nothing 1935 * Returns: Nothing
1938 * 1936 *
1939 * Notes: 1937 * Notes:
1940 */ 1938 */
1941 void scsi_free_host_dev(struct scsi_device *sdev) 1939 void scsi_free_host_dev(struct scsi_device *sdev)
1942 { 1940 {
1943 BUG_ON(sdev->id != sdev->host->this_id); 1941 BUG_ON(sdev->id != sdev->host->this_id);
1944 1942
1945 scsi_destroy_sdev(sdev); 1943 scsi_destroy_sdev(sdev);
1946 } 1944 }
1947 EXPORT_SYMBOL(scsi_free_host_dev); 1945 EXPORT_SYMBOL(scsi_free_host_dev);
1948 1946
1949 1947
drivers/scsi/scsi_wait_scan.c
1 /* 1 /*
2 * scsi_wait_scan.c 2 * scsi_wait_scan.c
3 * 3 *
4 * Copyright (C) 2006 James Bottomley <James.Bottomley@SteelEye.com> 4 * Copyright (C) 2006 James Bottomley <James.Bottomley@SteelEye.com>
5 * 5 *
6 * This is a simple module to wait until all the async scans are 6 * This is a simple module to wait until all the async scans are
7 * complete. The idea is to use it in initrd/initramfs scripts. You 7 * complete. The idea is to use it in initrd/initramfs scripts. You
8 * modprobe it after all the modprobes of the root SCSI drivers and it 8 * modprobe it after all the modprobes of the root SCSI drivers and it
9 * will wait until they have all finished scanning their busses before 9 * will wait until they have all finished scanning their busses before
10 * allowing the boot to proceed 10 * allowing the boot to proceed
11 */ 11 */
12 12
13 #include <linux/module.h> 13 #include <linux/module.h>
14 #include <linux/device.h>
14 #include <scsi/scsi_scan.h> 15 #include <scsi/scsi_scan.h>
15 16
16 static int __init wait_scan_init(void) 17 static int __init wait_scan_init(void)
17 { 18 {
19 /*
20 * First we need to wait for device probing to finish;
21 * the drivers we just loaded might still be probing
22 * and might not yet have reached the scsi async scan
21 * the drivers we just loaded might still be probing
22 * and might not yet have reached the scsi async scan
23 */
24 wait_for_device_probe();
25 /*
26 * and then we wait for the actual asynchronous scsi scan
27 * to finish.
28 */
18 scsi_complete_async_scans(); 29 scsi_complete_async_scans();
19 return 0; 30 return 0;
20 } 31 }
21 32
22 static void __exit wait_scan_exit(void) 33 static void __exit wait_scan_exit(void)
23 { 34 {
24 } 35 }
25 36
26 MODULE_DESCRIPTION("SCSI wait for scans"); 37 MODULE_DESCRIPTION("SCSI wait for scans");
27 MODULE_AUTHOR("James Bottomley"); 38 MODULE_AUTHOR("James Bottomley");
28 MODULE_LICENSE("GPL"); 39 MODULE_LICENSE("GPL");
29 40
30 late_initcall(wait_scan_init); 41 late_initcall(wait_scan_init);
31 module_exit(wait_scan_exit); 42 module_exit(wait_scan_exit);
32 43
include/linux/device.h
1 /* 1 /*
2 * device.h - generic, centralized driver model 2 * device.h - generic, centralized driver model
3 * 3 *
4 * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org> 4 * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
5 * Copyright (c) 2004-2007 Greg Kroah-Hartman <gregkh@suse.de> 5 * Copyright (c) 2004-2007 Greg Kroah-Hartman <gregkh@suse.de>
6 * 6 *
7 * This file is released under the GPLv2 7 * This file is released under the GPLv2
8 * 8 *
9 * See Documentation/driver-model/ for more information. 9 * See Documentation/driver-model/ for more information.
10 */ 10 */
11 11
12 #ifndef _DEVICE_H_ 12 #ifndef _DEVICE_H_
13 #define _DEVICE_H_ 13 #define _DEVICE_H_
14 14
15 #include <linux/ioport.h> 15 #include <linux/ioport.h>
16 #include <linux/kobject.h> 16 #include <linux/kobject.h>
17 #include <linux/klist.h> 17 #include <linux/klist.h>
18 #include <linux/list.h> 18 #include <linux/list.h>
19 #include <linux/lockdep.h> 19 #include <linux/lockdep.h>
20 #include <linux/compiler.h> 20 #include <linux/compiler.h>
21 #include <linux/types.h> 21 #include <linux/types.h>
22 #include <linux/module.h> 22 #include <linux/module.h>
23 #include <linux/pm.h> 23 #include <linux/pm.h>
24 #include <linux/semaphore.h> 24 #include <linux/semaphore.h>
25 #include <asm/atomic.h> 25 #include <asm/atomic.h>
26 #include <asm/device.h> 26 #include <asm/device.h>
27 27
28 #define BUS_ID_SIZE 20 28 #define BUS_ID_SIZE 20
29 29
30 struct device; 30 struct device;
31 struct device_private; 31 struct device_private;
32 struct device_driver; 32 struct device_driver;
33 struct driver_private; 33 struct driver_private;
34 struct class; 34 struct class;
35 struct class_private; 35 struct class_private;
36 struct bus_type; 36 struct bus_type;
37 struct bus_type_private; 37 struct bus_type_private;
38 38
39 struct bus_attribute { 39 struct bus_attribute {
40 struct attribute attr; 40 struct attribute attr;
41 ssize_t (*show)(struct bus_type *bus, char *buf); 41 ssize_t (*show)(struct bus_type *bus, char *buf);
42 ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count); 42 ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
43 }; 43 };
44 44
45 #define BUS_ATTR(_name, _mode, _show, _store) \ 45 #define BUS_ATTR(_name, _mode, _show, _store) \
46 struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store) 46 struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store)
47 47
48 extern int __must_check bus_create_file(struct bus_type *, 48 extern int __must_check bus_create_file(struct bus_type *,
49 struct bus_attribute *); 49 struct bus_attribute *);
50 extern void bus_remove_file(struct bus_type *, struct bus_attribute *); 50 extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
51 51
52 struct bus_type { 52 struct bus_type {
53 const char *name; 53 const char *name;
54 struct bus_attribute *bus_attrs; 54 struct bus_attribute *bus_attrs;
55 struct device_attribute *dev_attrs; 55 struct device_attribute *dev_attrs;
56 struct driver_attribute *drv_attrs; 56 struct driver_attribute *drv_attrs;
57 57
58 int (*match)(struct device *dev, struct device_driver *drv); 58 int (*match)(struct device *dev, struct device_driver *drv);
59 int (*uevent)(struct device *dev, struct kobj_uevent_env *env); 59 int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
60 int (*probe)(struct device *dev); 60 int (*probe)(struct device *dev);
61 int (*remove)(struct device *dev); 61 int (*remove)(struct device *dev);
62 void (*shutdown)(struct device *dev); 62 void (*shutdown)(struct device *dev);
63 63
64 int (*suspend)(struct device *dev, pm_message_t state); 64 int (*suspend)(struct device *dev, pm_message_t state);
65 int (*suspend_late)(struct device *dev, pm_message_t state); 65 int (*suspend_late)(struct device *dev, pm_message_t state);
66 int (*resume_early)(struct device *dev); 66 int (*resume_early)(struct device *dev);
67 int (*resume)(struct device *dev); 67 int (*resume)(struct device *dev);
68 68
69 struct dev_pm_ops *pm; 69 struct dev_pm_ops *pm;
70 70
71 struct bus_type_private *p; 71 struct bus_type_private *p;
72 }; 72 };
73 73
74 extern int __must_check bus_register(struct bus_type *bus); 74 extern int __must_check bus_register(struct bus_type *bus);
75 extern void bus_unregister(struct bus_type *bus); 75 extern void bus_unregister(struct bus_type *bus);
76 76
77 extern int __must_check bus_rescan_devices(struct bus_type *bus); 77 extern int __must_check bus_rescan_devices(struct bus_type *bus);
78 78
79 /* iterator helpers for buses */ 79 /* iterator helpers for buses */
80 80
81 int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data, 81 int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
82 int (*fn)(struct device *dev, void *data)); 82 int (*fn)(struct device *dev, void *data));
83 struct device *bus_find_device(struct bus_type *bus, struct device *start, 83 struct device *bus_find_device(struct bus_type *bus, struct device *start,
84 void *data, 84 void *data,
85 int (*match)(struct device *dev, void *data)); 85 int (*match)(struct device *dev, void *data));
86 struct device *bus_find_device_by_name(struct bus_type *bus, 86 struct device *bus_find_device_by_name(struct bus_type *bus,
87 struct device *start, 87 struct device *start,
88 const char *name); 88 const char *name);
89 89
90 int __must_check bus_for_each_drv(struct bus_type *bus, 90 int __must_check bus_for_each_drv(struct bus_type *bus,
91 struct device_driver *start, void *data, 91 struct device_driver *start, void *data,
92 int (*fn)(struct device_driver *, void *)); 92 int (*fn)(struct device_driver *, void *));
93 93
94 void bus_sort_breadthfirst(struct bus_type *bus, 94 void bus_sort_breadthfirst(struct bus_type *bus,
95 int (*compare)(const struct device *a, 95 int (*compare)(const struct device *a,
96 const struct device *b)); 96 const struct device *b));
97 /* 97 /*
98 * Bus notifiers: Get notified of addition/removal of devices 98 * Bus notifiers: Get notified of addition/removal of devices
99 * and binding/unbinding of drivers to devices. 99 * and binding/unbinding of drivers to devices.
100 * In the long run, it should be a replacement for the platform 100 * In the long run, it should be a replacement for the platform
101 * notify hooks. 101 * notify hooks.
102 */ 102 */
103 struct notifier_block; 103 struct notifier_block;
104 104
105 extern int bus_register_notifier(struct bus_type *bus, 105 extern int bus_register_notifier(struct bus_type *bus,
106 struct notifier_block *nb); 106 struct notifier_block *nb);
107 extern int bus_unregister_notifier(struct bus_type *bus, 107 extern int bus_unregister_notifier(struct bus_type *bus,
108 struct notifier_block *nb); 108 struct notifier_block *nb);
109 109
110 /* All 4 notifiers below get called with the target struct device * 110 /* All 4 notifiers below get called with the target struct device *
111 * as an argument. Note that those functions are likely to be called 111 * as an argument. Note that those functions are likely to be called
112 * with the device semaphore held in the core, so be careful. 112 * with the device semaphore held in the core, so be careful.
113 */ 113 */
114 #define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */ 114 #define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */
115 #define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device removed */ 115 #define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device removed */
116 #define BUS_NOTIFY_BOUND_DRIVER 0x00000003 /* driver bound to device */ 116 #define BUS_NOTIFY_BOUND_DRIVER 0x00000003 /* driver bound to device */
117 #define BUS_NOTIFY_UNBIND_DRIVER 0x00000004 /* driver about to be 117 #define BUS_NOTIFY_UNBIND_DRIVER 0x00000004 /* driver about to be
118 unbound */ 118 unbound */
119 119
120 extern struct kset *bus_get_kset(struct bus_type *bus); 120 extern struct kset *bus_get_kset(struct bus_type *bus);
121 extern struct klist *bus_get_device_klist(struct bus_type *bus); 121 extern struct klist *bus_get_device_klist(struct bus_type *bus);
122 122
123 struct device_driver { 123 struct device_driver {
124 const char *name; 124 const char *name;
125 struct bus_type *bus; 125 struct bus_type *bus;
126 126
127 struct module *owner; 127 struct module *owner;
128 const char *mod_name; /* used for built-in modules */ 128 const char *mod_name; /* used for built-in modules */
129 129
130 int (*probe) (struct device *dev); 130 int (*probe) (struct device *dev);
131 int (*remove) (struct device *dev); 131 int (*remove) (struct device *dev);
132 void (*shutdown) (struct device *dev); 132 void (*shutdown) (struct device *dev);
133 int (*suspend) (struct device *dev, pm_message_t state); 133 int (*suspend) (struct device *dev, pm_message_t state);
134 int (*resume) (struct device *dev); 134 int (*resume) (struct device *dev);
135 struct attribute_group **groups; 135 struct attribute_group **groups;
136 136
137 struct dev_pm_ops *pm; 137 struct dev_pm_ops *pm;
138 138
139 struct driver_private *p; 139 struct driver_private *p;
140 }; 140 };
141 141
142 142
143 extern int __must_check driver_register(struct device_driver *drv); 143 extern int __must_check driver_register(struct device_driver *drv);
144 extern void driver_unregister(struct device_driver *drv); 144 extern void driver_unregister(struct device_driver *drv);
145 145
146 extern struct device_driver *get_driver(struct device_driver *drv); 146 extern struct device_driver *get_driver(struct device_driver *drv);
147 extern void put_driver(struct device_driver *drv); 147 extern void put_driver(struct device_driver *drv);
148 extern struct device_driver *driver_find(const char *name, 148 extern struct device_driver *driver_find(const char *name,
149 struct bus_type *bus); 149 struct bus_type *bus);
150 extern int driver_probe_done(void); 150 extern int driver_probe_done(void);
151 extern void wait_for_device_probe(void); 151 extern void wait_for_device_probe(void);
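
wait_for_device_probe() is the driver-core primitive the reworked scsi_wait_scan module now calls first: it blocks until every probe already queued to the driver core has run, which is exactly the gap the old module left open. A sketch of another in-kernel user, e.g. a built-in late_initcall that must not look for its devices until probing has settled (names invented):

#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>

static int __init example_after_probe(void)
{
        /* returns once all currently pending driver probes have run */
        wait_for_device_probe();
        printk(KERN_INFO "example: device probing has settled\n");
        return 0;
}
late_initcall(example_after_probe);
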
152 152
153 153
154 /* sysfs interface for exporting driver attributes */ 154 /* sysfs interface for exporting driver attributes */
155 155
156 struct driver_attribute { 156 struct driver_attribute {
157 struct attribute attr; 157 struct attribute attr;
158 ssize_t (*show)(struct device_driver *driver, char *buf); 158 ssize_t (*show)(struct device_driver *driver, char *buf);
159 ssize_t (*store)(struct device_driver *driver, const char *buf, 159 ssize_t (*store)(struct device_driver *driver, const char *buf,
160 size_t count); 160 size_t count);
161 }; 161 };
162 162
163 #define DRIVER_ATTR(_name, _mode, _show, _store) \ 163 #define DRIVER_ATTR(_name, _mode, _show, _store) \
164 struct driver_attribute driver_attr_##_name = \ 164 struct driver_attribute driver_attr_##_name = \
165 __ATTR(_name, _mode, _show, _store) 165 __ATTR(_name, _mode, _show, _store)
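
DRIVER_ATTR() expands to a struct driver_attribute named driver_attr_<name>, which is then registered with driver_create_file() once the driver itself is registered. A small sketch matching the show() signature above (attribute name and contents invented):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/stat.h>

static ssize_t example_version_show(struct device_driver *drv, char *buf)
{
        return sprintf(buf, "1.0\n");
}
static DRIVER_ATTR(version, S_IRUGO, example_version_show, NULL);

/* after driver_register(drv) succeeds:
 *      err = driver_create_file(drv, &driver_attr_version);
 */
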
166 166
167 extern int __must_check driver_create_file(struct device_driver *driver, 167 extern int __must_check driver_create_file(struct device_driver *driver,
168 struct driver_attribute *attr); 168 struct driver_attribute *attr);
169 extern void driver_remove_file(struct device_driver *driver, 169 extern void driver_remove_file(struct device_driver *driver,
170 struct driver_attribute *attr); 170 struct driver_attribute *attr);
171 171
172 extern int __must_check driver_add_kobj(struct device_driver *drv, 172 extern int __must_check driver_add_kobj(struct device_driver *drv,
173 struct kobject *kobj, 173 struct kobject *kobj,
174 const char *fmt, ...); 174 const char *fmt, ...);
175 175
176 extern int __must_check driver_for_each_device(struct device_driver *drv, 176 extern int __must_check driver_for_each_device(struct device_driver *drv,
177 struct device *start, 177 struct device *start,
178 void *data, 178 void *data,
179 int (*fn)(struct device *dev, 179 int (*fn)(struct device *dev,
180 void *)); 180 void *));
181 struct device *driver_find_device(struct device_driver *drv, 181 struct device *driver_find_device(struct device_driver *drv,
182 struct device *start, void *data, 182 struct device *start, void *data,
183 int (*match)(struct device *dev, void *data)); 183 int (*match)(struct device *dev, void *data));
184 184
185 /* 185 /*
186 * device classes 186 * device classes
187 */ 187 */
188 struct class { 188 struct class {
189 const char *name; 189 const char *name;
190 struct module *owner; 190 struct module *owner;
191 191
192 struct class_attribute *class_attrs; 192 struct class_attribute *class_attrs;
193 struct device_attribute *dev_attrs; 193 struct device_attribute *dev_attrs;
194 struct kobject *dev_kobj; 194 struct kobject *dev_kobj;
195 195
196 int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env); 196 int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
197 197
198 void (*class_release)(struct class *class); 198 void (*class_release)(struct class *class);
199 void (*dev_release)(struct device *dev); 199 void (*dev_release)(struct device *dev);
200 200
201 int (*suspend)(struct device *dev, pm_message_t state); 201 int (*suspend)(struct device *dev, pm_message_t state);
202 int (*resume)(struct device *dev); 202 int (*resume)(struct device *dev);
203 203
204 struct dev_pm_ops *pm; 204 struct dev_pm_ops *pm;
205 struct class_private *p; 205 struct class_private *p;
206 }; 206 };
207 207
208 struct class_dev_iter { 208 struct class_dev_iter {
209 struct klist_iter ki; 209 struct klist_iter ki;
210 const struct device_type *type; 210 const struct device_type *type;
211 }; 211 };
212 212
213 extern struct kobject *sysfs_dev_block_kobj; 213 extern struct kobject *sysfs_dev_block_kobj;
214 extern struct kobject *sysfs_dev_char_kobj; 214 extern struct kobject *sysfs_dev_char_kobj;
215 extern int __must_check __class_register(struct class *class, 215 extern int __must_check __class_register(struct class *class,
216 struct lock_class_key *key); 216 struct lock_class_key *key);
217 extern void class_unregister(struct class *class); 217 extern void class_unregister(struct class *class);
218 218
219 /* This is a #define to keep the compiler from merging different 219 /* This is a #define to keep the compiler from merging different
220 * instances of the __key variable */ 220 * instances of the __key variable */
221 #define class_register(class) \ 221 #define class_register(class) \
222 ({ \ 222 ({ \
223 static struct lock_class_key __key; \ 223 static struct lock_class_key __key; \
224 __class_register(class, &__key); \ 224 __class_register(class, &__key); \
225 }) 225 })
226 226
227 extern void class_dev_iter_init(struct class_dev_iter *iter, 227 extern void class_dev_iter_init(struct class_dev_iter *iter,
228 struct class *class, 228 struct class *class,
229 struct device *start, 229 struct device *start,
230 const struct device_type *type); 230 const struct device_type *type);
231 extern struct device *class_dev_iter_next(struct class_dev_iter *iter); 231 extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
232 extern void class_dev_iter_exit(struct class_dev_iter *iter); 232 extern void class_dev_iter_exit(struct class_dev_iter *iter);
233 233
234 extern int class_for_each_device(struct class *class, struct device *start, 234 extern int class_for_each_device(struct class *class, struct device *start,
235 void *data, 235 void *data,
236 int (*fn)(struct device *dev, void *data)); 236 int (*fn)(struct device *dev, void *data));
237 extern struct device *class_find_device(struct class *class, 237 extern struct device *class_find_device(struct class *class,
238 struct device *start, void *data, 238 struct device *start, void *data,
239 int (*match)(struct device *, void *)); 239 int (*match)(struct device *, void *));
240 240
241 struct class_attribute { 241 struct class_attribute {
242 struct attribute attr; 242 struct attribute attr;
243 ssize_t (*show)(struct class *class, char *buf); 243 ssize_t (*show)(struct class *class, char *buf);
244 ssize_t (*store)(struct class *class, const char *buf, size_t count); 244 ssize_t (*store)(struct class *class, const char *buf, size_t count);
245 }; 245 };
246 246
247 #define CLASS_ATTR(_name, _mode, _show, _store) \ 247 #define CLASS_ATTR(_name, _mode, _show, _store) \
248 struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store) 248 struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store)
249 249
250 extern int __must_check class_create_file(struct class *class, 250 extern int __must_check class_create_file(struct class *class,
251 const struct class_attribute *attr); 251 const struct class_attribute *attr);
252 extern void class_remove_file(struct class *class, 252 extern void class_remove_file(struct class *class,
253 const struct class_attribute *attr); 253 const struct class_attribute *attr);
254 254
255 struct class_interface { 255 struct class_interface {
256 struct list_head node; 256 struct list_head node;
257 struct class *class; 257 struct class *class;
258 258
259 int (*add_dev) (struct device *, struct class_interface *); 259 int (*add_dev) (struct device *, struct class_interface *);
260 void (*remove_dev) (struct device *, struct class_interface *); 260 void (*remove_dev) (struct device *, struct class_interface *);
261 }; 261 };
262 262
263 extern int __must_check class_interface_register(struct class_interface *); 263 extern int __must_check class_interface_register(struct class_interface *);
264 extern void class_interface_unregister(struct class_interface *); 264 extern void class_interface_unregister(struct class_interface *);
265 265
266 extern struct class * __must_check __class_create(struct module *owner, 266 extern struct class * __must_check __class_create(struct module *owner,
267 const char *name, 267 const char *name,
268 struct lock_class_key *key); 268 struct lock_class_key *key);
269 extern void class_destroy(struct class *cls); 269 extern void class_destroy(struct class *cls);
270 270
271 /* This is a #define to keep the compiler from merging different 271 /* This is a #define to keep the compiler from merging different
272 * instances of the __key variable */ 272 * instances of the __key variable */
273 #define class_create(owner, name) \ 273 #define class_create(owner, name) \
274 ({ \ 274 ({ \
275 static struct lock_class_key __key; \ 275 static struct lock_class_key __key; \
276 __class_create(owner, name, &__key); \ 276 __class_create(owner, name, &__key); \
277 }) 277 })
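
class_create() is a macro so that every call site gets its own lock_class_key for lockdep; it hands back a struct class (or an ERR_PTR) that devices can later be registered against, and class_destroy() tears it down. A minimal module sketch with an invented class name:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>

static struct class *example_class;

static int __init example_class_init(void)
{
        example_class = class_create(THIS_MODULE, "example");
        if (IS_ERR(example_class))
                return PTR_ERR(example_class);
        return 0;               /* /sys/class/example now exists */
}

static void __exit example_class_exit(void)
{
        class_destroy(example_class);
}

module_init(example_class_init);
module_exit(example_class_exit);
MODULE_LICENSE("GPL");
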
278 278
279 /* 279 /*
280 * The type of device, "struct device" is embedded in. A class 280 * The type of device, "struct device" is embedded in. A class
281 * or bus can contain devices of different types 281 * or bus can contain devices of different types
282 * like "partitions" and "disks", "mouse" and "event". 282 * like "partitions" and "disks", "mouse" and "event".
283 * This identifies the device type and carries type-specific 283 * This identifies the device type and carries type-specific
284 * information, equivalent to the kobj_type of a kobject. 284 * information, equivalent to the kobj_type of a kobject.
285 * If "name" is specified, the uevent will contain it in 285 * If "name" is specified, the uevent will contain it in
286 * the DEVTYPE variable. 286 * the DEVTYPE variable.
287 */ 287 */
288 struct device_type { 288 struct device_type {
289 const char *name; 289 const char *name;
290 struct attribute_group **groups; 290 struct attribute_group **groups;
291 int (*uevent)(struct device *dev, struct kobj_uevent_env *env); 291 int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
292 void (*release)(struct device *dev); 292 void (*release)(struct device *dev);
293 293
294 int (*suspend)(struct device *dev, pm_message_t state); 294 int (*suspend)(struct device *dev, pm_message_t state);
295 int (*resume)(struct device *dev); 295 int (*resume)(struct device *dev);
296 296
297 struct dev_pm_ops *pm; 297 struct dev_pm_ops *pm;
298 }; 298 };
299 299
300 /* interface for exporting device attributes */ 300 /* interface for exporting device attributes */
301 struct device_attribute { 301 struct device_attribute {
302 struct attribute attr; 302 struct attribute attr;
303 ssize_t (*show)(struct device *dev, struct device_attribute *attr, 303 ssize_t (*show)(struct device *dev, struct device_attribute *attr,
304 char *buf); 304 char *buf);
305 ssize_t (*store)(struct device *dev, struct device_attribute *attr, 305 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
306 const char *buf, size_t count); 306 const char *buf, size_t count);
307 }; 307 };
308 308
309 #define DEVICE_ATTR(_name, _mode, _show, _store) \ 309 #define DEVICE_ATTR(_name, _mode, _show, _store) \
310 struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store) 310 struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
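
DEVICE_ATTR() is the per-device counterpart, producing dev_attr_<name> for use with device_create_file(). A sketch with both show() and store() against an invented driver-private structure:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/stat.h>

struct example_priv {
        int threshold;
};

static ssize_t threshold_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct example_priv *p = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", p->threshold);
}

static ssize_t threshold_store(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf, size_t count)
{
        struct example_priv *p = dev_get_drvdata(dev);

        p->threshold = simple_strtol(buf, NULL, 0);
        return count;
}
static DEVICE_ATTR(threshold, S_IRUGO | S_IWUSR, threshold_show, threshold_store);
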
311 311
312 extern int __must_check device_create_file(struct device *device, 312 extern int __must_check device_create_file(struct device *device,
313 struct device_attribute *entry); 313 struct device_attribute *entry);
314 extern void device_remove_file(struct device *dev, 314 extern void device_remove_file(struct device *dev,
315 struct device_attribute *attr); 315 struct device_attribute *attr);
316 extern int __must_check device_create_bin_file(struct device *dev, 316 extern int __must_check device_create_bin_file(struct device *dev,
317 struct bin_attribute *attr); 317 struct bin_attribute *attr);
318 extern void device_remove_bin_file(struct device *dev, 318 extern void device_remove_bin_file(struct device *dev,
319 struct bin_attribute *attr); 319 struct bin_attribute *attr);
320 extern int device_schedule_callback_owner(struct device *dev, 320 extern int device_schedule_callback_owner(struct device *dev,
321 void (*func)(struct device *dev), struct module *owner); 321 void (*func)(struct device *dev), struct module *owner);
322 322
323 /* This is a macro to avoid include problems with THIS_MODULE */ 323 /* This is a macro to avoid include problems with THIS_MODULE */
324 #define device_schedule_callback(dev, func) \ 324 #define device_schedule_callback(dev, func) \
325 device_schedule_callback_owner(dev, func, THIS_MODULE) 325 device_schedule_callback_owner(dev, func, THIS_MODULE)
326 326
327 /* device resource management */ 327 /* device resource management */
328 typedef void (*dr_release_t)(struct device *dev, void *res); 328 typedef void (*dr_release_t)(struct device *dev, void *res);
329 typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data); 329 typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
330 330
331 #ifdef CONFIG_DEBUG_DEVRES 331 #ifdef CONFIG_DEBUG_DEVRES
332 extern void *__devres_alloc(dr_release_t release, size_t size, gfp_t gfp, 332 extern void *__devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
333 const char *name); 333 const char *name);
334 #define devres_alloc(release, size, gfp) \ 334 #define devres_alloc(release, size, gfp) \
335 __devres_alloc(release, size, gfp, #release) 335 __devres_alloc(release, size, gfp, #release)
336 #else 336 #else
337 extern void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp); 337 extern void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp);
338 #endif 338 #endif
339 extern void devres_free(void *res); 339 extern void devres_free(void *res);
340 extern void devres_add(struct device *dev, void *res); 340 extern void devres_add(struct device *dev, void *res);
341 extern void *devres_find(struct device *dev, dr_release_t release, 341 extern void *devres_find(struct device *dev, dr_release_t release,
342 dr_match_t match, void *match_data); 342 dr_match_t match, void *match_data);
343 extern void *devres_get(struct device *dev, void *new_res, 343 extern void *devres_get(struct device *dev, void *new_res,
344 dr_match_t match, void *match_data); 344 dr_match_t match, void *match_data);
345 extern void *devres_remove(struct device *dev, dr_release_t release, 345 extern void *devres_remove(struct device *dev, dr_release_t release,
346 dr_match_t match, void *match_data); 346 dr_match_t match, void *match_data);
347 extern int devres_destroy(struct device *dev, dr_release_t release, 347 extern int devres_destroy(struct device *dev, dr_release_t release,
348 dr_match_t match, void *match_data); 348 dr_match_t match, void *match_data);
349 349
350 /* devres group */ 350 /* devres group */
351 extern void * __must_check devres_open_group(struct device *dev, void *id, 351 extern void * __must_check devres_open_group(struct device *dev, void *id,
352 gfp_t gfp); 352 gfp_t gfp);
353 extern void devres_close_group(struct device *dev, void *id); 353 extern void devres_close_group(struct device *dev, void *id);
354 extern void devres_remove_group(struct device *dev, void *id); 354 extern void devres_remove_group(struct device *dev, void *id);
355 extern int devres_release_group(struct device *dev, void *id); 355 extern int devres_release_group(struct device *dev, void *id);
356 356
357 /* managed kzalloc/kfree for device drivers, no kmalloc, always use kzalloc */ 357 /* managed kzalloc/kfree for device drivers, no kmalloc, always use kzalloc */
358 extern void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp); 358 extern void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp);
359 extern void devm_kfree(struct device *dev, void *p); 359 extern void devm_kfree(struct device *dev, void *p);
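
devm_kzalloc() ties the allocation's lifetime to the device: devres frees it automatically when probe() fails or the device is unbound, so neither remove() nor the error paths need a kfree(). A hedged probe() sketch (struct example_state is invented):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct example_state {
        int configured;
};

static int example_probe(struct device *dev)
{
        struct example_state *st;

        st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
        if (!st)
                return -ENOMEM;         /* nothing to unwind */

        dev_set_drvdata(dev, st);
        return 0;                       /* freed automatically on unbind */
}
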
360 360
361 struct device_dma_parameters { 361 struct device_dma_parameters {
362 /* 362 /*
363 * a low level driver may set these to teach IOMMU code about 363 * a low level driver may set these to teach IOMMU code about
364 * sg limitations. 364 * sg limitations.
365 */ 365 */
366 unsigned int max_segment_size; 366 unsigned int max_segment_size;
367 unsigned long segment_boundary_mask; 367 unsigned long segment_boundary_mask;
368 }; 368 };
369 369
370 struct device { 370 struct device {
371 struct device *parent; 371 struct device *parent;
372 372
373 struct device_private *p; 373 struct device_private *p;
374 374
375 struct kobject kobj; 375 struct kobject kobj;
376 const char *init_name; /* initial name of the device */ 376 const char *init_name; /* initial name of the device */
377 struct device_type *type; 377 struct device_type *type;
378 378
379 struct semaphore sem; /* semaphore to synchronize calls to 379 struct semaphore sem; /* semaphore to synchronize calls to
380 * its driver. 380 * its driver.
381 */ 381 */
382 382
383 struct bus_type *bus; /* type of bus device is on */ 383 struct bus_type *bus; /* type of bus device is on */
384 struct device_driver *driver; /* which driver has allocated this 384 struct device_driver *driver; /* which driver has allocated this
385 device */ 385 device */
386 void *driver_data; /* data private to the driver */ 386 void *driver_data; /* data private to the driver */
387 387
388 void *platform_data; /* We will remove platform_data 388 void *platform_data; /* We will remove platform_data
389 field if all platform devices 389 field if all platform devices
390 pass its platform specific data 390 pass its platform specific data
391 from platform_device->platform_data, 391 from platform_device->platform_data,
392 other kind of devices should not 392 other kind of devices should not
393 use platform_data. */ 393 use platform_data. */
394 struct dev_pm_info power; 394 struct dev_pm_info power;
395 395
396 #ifdef CONFIG_NUMA 396 #ifdef CONFIG_NUMA
397 int numa_node; /* NUMA node this device is close to */ 397 int numa_node; /* NUMA node this device is close to */
398 #endif 398 #endif
399 u64 *dma_mask; /* dma mask (if dma'able device) */ 399 u64 *dma_mask; /* dma mask (if dma'able device) */
400 u64 coherent_dma_mask;/* Like dma_mask, but for 400 u64 coherent_dma_mask;/* Like dma_mask, but for
401 alloc_coherent mappings as 401 alloc_coherent mappings as
402 not all hardware supports 402 not all hardware supports
403 64 bit addresses for consistent 403 64 bit addresses for consistent
404 allocations such descriptors. */ 404 allocations such descriptors. */
405 405
406 struct device_dma_parameters *dma_parms; 406 struct device_dma_parameters *dma_parms;
407 407
408 struct list_head dma_pools; /* dma pools (if dma'ble) */ 408 struct list_head dma_pools; /* dma pools (if dma'ble) */
409 409
410 struct dma_coherent_mem *dma_mem; /* internal for coherent mem 410 struct dma_coherent_mem *dma_mem; /* internal for coherent mem
411 override */ 411 override */
412 /* arch specific additions */ 412 /* arch specific additions */
413 struct dev_archdata archdata; 413 struct dev_archdata archdata;
414 414
415 dev_t devt; /* dev_t, creates the sysfs "dev" */ 415 dev_t devt; /* dev_t, creates the sysfs "dev" */
416 416
417 spinlock_t devres_lock; 417 spinlock_t devres_lock;
418 struct list_head devres_head; 418 struct list_head devres_head;
419 419
420 struct klist_node knode_class; 420 struct klist_node knode_class;
421 struct class *class; 421 struct class *class;
422 struct attribute_group **groups; /* optional groups */ 422 struct attribute_group **groups; /* optional groups */
423 423
424 void (*release)(struct device *dev); 424 void (*release)(struct device *dev);
425 }; 425 };
426 426
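
struct device is normally embedded in a larger bus- or class-specific structure rather than used bare; container_of() recovers the wrapper, and the release() callback is where that wrapper is finally freed once the last reference to the embedded kobject is dropped. A hedged sketch (struct my_widget is invented for illustration and is not defined anywhere in the tree):

#include <linux/device.h>
#include <linux/slab.h>

struct my_widget {
	struct device dev;		/* must stay allocated while references exist */
	int id;
};

#define to_my_widget(d) container_of(d, struct my_widget, dev)

static void my_widget_release(struct device *dev)
{
	kfree(to_my_widget(dev));	/* called when the last put_device() arrives */
}
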
427 /* Get the wakeup routines, which depend on struct device */ 427 /* Get the wakeup routines, which depend on struct device */
428 #include <linux/pm_wakeup.h> 428 #include <linux/pm_wakeup.h>
429 429
430 static inline const char *dev_name(const struct device *dev) 430 static inline const char *dev_name(const struct device *dev)
431 { 431 {
432 return kobject_name(&dev->kobj); 432 return kobject_name(&dev->kobj);
433 } 433 }
434 434
435 extern int dev_set_name(struct device *dev, const char *name, ...) 435 extern int dev_set_name(struct device *dev, const char *name, ...)
436 __attribute__((format(printf, 2, 3))); 436 __attribute__((format(printf, 2, 3)));
437 437
438 #ifdef CONFIG_NUMA 438 #ifdef CONFIG_NUMA
439 static inline int dev_to_node(struct device *dev) 439 static inline int dev_to_node(struct device *dev)
440 { 440 {
441 return dev->numa_node; 441 return dev->numa_node;
442 } 442 }
443 static inline void set_dev_node(struct device *dev, int node) 443 static inline void set_dev_node(struct device *dev, int node)
444 { 444 {
445 dev->numa_node = node; 445 dev->numa_node = node;
446 } 446 }
447 #else 447 #else
448 static inline int dev_to_node(struct device *dev) 448 static inline int dev_to_node(struct device *dev)
449 { 449 {
450 return -1; 450 return -1;
451 } 451 }
452 static inline void set_dev_node(struct device *dev, int node) 452 static inline void set_dev_node(struct device *dev, int node)
453 { 453 {
454 } 454 }
455 #endif 455 #endif
456 456
457 static inline void *dev_get_drvdata(const struct device *dev) 457 static inline void *dev_get_drvdata(const struct device *dev)
458 { 458 {
459 return dev->driver_data; 459 return dev->driver_data;
460 } 460 }
461 461
462 static inline void dev_set_drvdata(struct device *dev, void *data) 462 static inline void dev_set_drvdata(struct device *dev, void *data)
463 { 463 {
464 dev->driver_data = data; 464 dev->driver_data = data;
465 } 465 }
466 466
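
dev_get_drvdata()/dev_set_drvdata() are how a driver hangs its private state off the device it is bound to; the bus-specific wrappers (platform_get_drvdata() and friends) reduce to these. Continuing the hypothetical my_priv example from the devm_kzalloc sketch above:

static int my_remove(struct device *dev)
{
	struct my_priv *priv = dev_get_drvdata(dev);

	dev_info(dev, "shutting down, irq was %d\n", priv->irq);
	dev_set_drvdata(dev, NULL);
	return 0;			/* priv is devm-managed, nothing to kfree here */
}
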
467 static inline unsigned int dev_get_uevent_suppress(const struct device *dev) 467 static inline unsigned int dev_get_uevent_suppress(const struct device *dev)
468 { 468 {
469 return dev->kobj.uevent_suppress; 469 return dev->kobj.uevent_suppress;
470 } 470 }
471 471
472 static inline void dev_set_uevent_suppress(struct device *dev, int val) 472 static inline void dev_set_uevent_suppress(struct device *dev, int val)
473 { 473 {
474 dev->kobj.uevent_suppress = val; 474 dev->kobj.uevent_suppress = val;
475 } 475 }
476 476
477 static inline int device_is_registered(struct device *dev) 477 static inline int device_is_registered(struct device *dev)
478 { 478 {
479 return dev->kobj.state_in_sysfs; 479 return dev->kobj.state_in_sysfs;
480 } 480 }
481 481
482 void driver_init(void); 482 void driver_init(void);
483 483
484 /* 484 /*
485 * High level routines for use by the bus drivers 485 * High level routines for use by the bus drivers
486 */ 486 */
487 extern int __must_check device_register(struct device *dev); 487 extern int __must_check device_register(struct device *dev);
488 extern void device_unregister(struct device *dev); 488 extern void device_unregister(struct device *dev);
489 extern void device_initialize(struct device *dev); 489 extern void device_initialize(struct device *dev);
490 extern int __must_check device_add(struct device *dev); 490 extern int __must_check device_add(struct device *dev);
491 extern void device_del(struct device *dev); 491 extern void device_del(struct device *dev);
492 extern int device_for_each_child(struct device *dev, void *data, 492 extern int device_for_each_child(struct device *dev, void *data,
493 int (*fn)(struct device *dev, void *data)); 493 int (*fn)(struct device *dev, void *data));
494 extern struct device *device_find_child(struct device *dev, void *data, 494 extern struct device *device_find_child(struct device *dev, void *data,
495 int (*match)(struct device *dev, void *data)); 495 int (*match)(struct device *dev, void *data));
496 extern int device_rename(struct device *dev, char *new_name); 496 extern int device_rename(struct device *dev, char *new_name);
497 extern int device_move(struct device *dev, struct device *new_parent, 497 extern int device_move(struct device *dev, struct device *new_parent,
498 enum dpm_order dpm_order); 498 enum dpm_order dpm_order);
499 499
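
device_register() is simply device_initialize() followed by device_add(); callers that need to set fields or take references in between use the two-step form. A hedged sketch of that pattern, reusing the made-up my_widget type from the struct device example above:

static int my_widget_add(struct my_widget *w, struct device *parent, int id)
{
	int err;

	device_initialize(&w->dev);		/* reference count is now 1 */
	w->dev.parent = parent;
	w->dev.release = my_widget_release;

	err = dev_set_name(&w->dev, "my_widget%d", id);
	if (err)
		goto out_put;

	err = device_add(&w->dev);		/* makes the device visible in sysfs */
	if (err)
		goto out_put;
	return 0;

out_put:
	put_device(&w->dev);			/* release() will free the wrapper */
	return err;
}
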
500 /* 500 /*
501 * Root device objects for grouping under /sys/devices 501 * Root device objects for grouping under /sys/devices
502 */ 502 */
503 extern struct device *__root_device_register(const char *name, 503 extern struct device *__root_device_register(const char *name,
504 struct module *owner); 504 struct module *owner);
505 static inline struct device *root_device_register(const char *name) 505 static inline struct device *root_device_register(const char *name)
506 { 506 {
507 return __root_device_register(name, THIS_MODULE); 507 return __root_device_register(name, THIS_MODULE);
508 } 508 }
509 extern void root_device_unregister(struct device *root); 509 extern void root_device_unregister(struct device *root);
510 510
511 /* 511 /*
512 * Manual binding of a device to driver. See drivers/base/bus.c 512 * Manual binding of a device to driver. See drivers/base/bus.c
513 * for information on use. 513 * for information on use.
514 */ 514 */
515 extern int __must_check device_bind_driver(struct device *dev); 515 extern int __must_check device_bind_driver(struct device *dev);
516 extern void device_release_driver(struct device *dev); 516 extern void device_release_driver(struct device *dev);
517 extern int __must_check device_attach(struct device *dev); 517 extern int __must_check device_attach(struct device *dev);
518 extern int __must_check driver_attach(struct device_driver *drv); 518 extern int __must_check driver_attach(struct device_driver *drv);
519 extern int __must_check device_reprobe(struct device *dev); 519 extern int __must_check device_reprobe(struct device *dev);
520 520
521 /* 521 /*
522 * Easy functions for dynamically creating devices on the fly 522 * Easy functions for dynamically creating devices on the fly
523 */ 523 */
524 extern struct device *device_create_vargs(struct class *cls, 524 extern struct device *device_create_vargs(struct class *cls,
525 struct device *parent, 525 struct device *parent,
526 dev_t devt, 526 dev_t devt,
527 void *drvdata, 527 void *drvdata,
528 const char *fmt, 528 const char *fmt,
529 va_list vargs); 529 va_list vargs);
530 extern struct device *device_create(struct class *cls, struct device *parent, 530 extern struct device *device_create(struct class *cls, struct device *parent,
531 dev_t devt, void *drvdata, 531 dev_t devt, void *drvdata,
532 const char *fmt, ...) 532 const char *fmt, ...)
533 __attribute__((format(printf, 5, 6))); 533 __attribute__((format(printf, 5, 6)));
534 extern void device_destroy(struct class *cls, dev_t devt); 534 extern void device_destroy(struct class *cls, dev_t devt);
535 535
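
device_create()/device_destroy() are the convenience helpers most character-device drivers use to get a node under a class: given the class, an optional parent, a dev_t and a printf-style name, the core allocates and registers the struct device for you. A minimal sketch, assuming the class and dev_t were set up elsewhere (my_class, my_devt and the "mydev%d" name are illustrative):

#include <linux/device.h>
#include <linux/err.h>

static struct class *my_class;		/* e.g. from class_create(THIS_MODULE, "myclass") */
static dev_t my_devt;			/* e.g. from alloc_chrdev_region() */
static struct device *my_dev;

static int my_create_node(void)
{
	my_dev = device_create(my_class, NULL, my_devt, NULL, "mydev%d", 0);
	return IS_ERR(my_dev) ? PTR_ERR(my_dev) : 0;
}

static void my_remove_node(void)
{
	device_destroy(my_class, my_devt);
}
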
536 /* 536 /*
537 * Platform "fixup" functions - allow the platform to have their say 537 * Platform "fixup" functions - allow the platform to have their say
538 * about devices and actions that the general device layer doesn't 538 * about devices and actions that the general device layer doesn't
539 * know about. 539 * know about.
540 */ 540 */
541 /* Notify platform of device discovery */ 541 /* Notify platform of device discovery */
542 extern int (*platform_notify)(struct device *dev); 542 extern int (*platform_notify)(struct device *dev);
543 543
544 extern int (*platform_notify_remove)(struct device *dev); 544 extern int (*platform_notify_remove)(struct device *dev);
545 545
546 546
547 /** 547 /**
548 * get_device - atomically increment the reference count for the device. 548 * get_device - atomically increment the reference count for the device.
549 * 549 *
550 */ 550 */
551 extern struct device *get_device(struct device *dev); 551 extern struct device *get_device(struct device *dev);
552 extern void put_device(struct device *dev); 552 extern void put_device(struct device *dev);
553 553
554 extern void wait_for_device_probe(void);
554 555
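
wait_for_device_probe() is the export this commit adds to the header: it blocks until every pending driver probe, including probes pushed to the async machinery, has completed. That is exactly the ordering the reworked scsi_wait_scan module needs: first wait for probing, then wait for the parallel SCSI scan. A hedged sketch of that calling pattern, modelled on (but not verbatim from) the scsi_wait_scan change in this commit; the declaration of scsi_complete_async_scans() comes from the SCSI core's own headers:

#include <linux/module.h>
#include <linux/device.h>

static int __init wait_scan_init(void)
{
	/*
	 * Let every driver loaded before this module finish probing
	 * its devices; without this, probing may not even have started
	 * when we go looking for the async scans.
	 */
	wait_for_device_probe();

	/* ...and only then wait for the asynchronous SCSI scans themselves. */
	scsi_complete_async_scans();
	return 0;
}

module_init(wait_scan_init);
MODULE_LICENSE("GPL");
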
555 /* drivers/base/power/shutdown.c */ 556 /* drivers/base/power/shutdown.c */
556 extern void device_shutdown(void); 557 extern void device_shutdown(void);
557 558
558 /* drivers/base/sys.c */ 559 /* drivers/base/sys.c */
559 extern void sysdev_shutdown(void); 560 extern void sysdev_shutdown(void);
560 561
561 /* debugging and troubleshooting/diagnostic helpers. */ 562 /* debugging and troubleshooting/diagnostic helpers. */
562 extern const char *dev_driver_string(const struct device *dev); 563 extern const char *dev_driver_string(const struct device *dev);
563 #define dev_printk(level, dev, format, arg...) \ 564 #define dev_printk(level, dev, format, arg...) \
564 printk(level "%s %s: " format , dev_driver_string(dev) , \ 565 printk(level "%s %s: " format , dev_driver_string(dev) , \
565 dev_name(dev) , ## arg) 566 dev_name(dev) , ## arg)
566 567
567 #define dev_emerg(dev, format, arg...) \ 568 #define dev_emerg(dev, format, arg...) \
568 dev_printk(KERN_EMERG , dev , format , ## arg) 569 dev_printk(KERN_EMERG , dev , format , ## arg)
569 #define dev_alert(dev, format, arg...) \ 570 #define dev_alert(dev, format, arg...) \
570 dev_printk(KERN_ALERT , dev , format , ## arg) 571 dev_printk(KERN_ALERT , dev , format , ## arg)
571 #define dev_crit(dev, format, arg...) \ 572 #define dev_crit(dev, format, arg...) \
572 dev_printk(KERN_CRIT , dev , format , ## arg) 573 dev_printk(KERN_CRIT , dev , format , ## arg)
573 #define dev_err(dev, format, arg...) \ 574 #define dev_err(dev, format, arg...) \
574 dev_printk(KERN_ERR , dev , format , ## arg) 575 dev_printk(KERN_ERR , dev , format , ## arg)
575 #define dev_warn(dev, format, arg...) \ 576 #define dev_warn(dev, format, arg...) \
576 dev_printk(KERN_WARNING , dev , format , ## arg) 577 dev_printk(KERN_WARNING , dev , format , ## arg)
577 #define dev_notice(dev, format, arg...) \ 578 #define dev_notice(dev, format, arg...) \
578 dev_printk(KERN_NOTICE , dev , format , ## arg) 579 dev_printk(KERN_NOTICE , dev , format , ## arg)
579 #define dev_info(dev, format, arg...) \ 580 #define dev_info(dev, format, arg...) \
580 dev_printk(KERN_INFO , dev , format , ## arg) 581 dev_printk(KERN_INFO , dev , format , ## arg)
581 582
582 #if defined(DEBUG) 583 #if defined(DEBUG)
583 #define dev_dbg(dev, format, arg...) \ 584 #define dev_dbg(dev, format, arg...) \
584 dev_printk(KERN_DEBUG , dev , format , ## arg) 585 dev_printk(KERN_DEBUG , dev , format , ## arg)
585 #elif defined(CONFIG_DYNAMIC_DEBUG) 586 #elif defined(CONFIG_DYNAMIC_DEBUG)
586 #define dev_dbg(dev, format, ...) do { \ 587 #define dev_dbg(dev, format, ...) do { \
587 dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \ 588 dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
588 } while (0) 589 } while (0)
589 #else 590 #else
590 #define dev_dbg(dev, format, arg...) \ 591 #define dev_dbg(dev, format, arg...) \
591 ({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; }) 592 ({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; })
592 #endif 593 #endif
593 594
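
The dev_printk() family prefixes every message with the driver and device name, so log output stays attributable without each driver inventing its own format; dev_dbg() additionally compiles down to nothing unless DEBUG or CONFIG_DYNAMIC_DEBUG is in play. A small illustration, reusing the hypothetical my_priv state from earlier:

static int my_start(struct device *dev, struct my_priv *priv)
{
	if (!priv->regs) {
		dev_err(dev, "failed to map registers\n");
		return -ENODEV;
	}

	dev_dbg(dev, "irq=%d\n", priv->irq);	/* silent unless DEBUG/dynamic debug */
	dev_info(dev, "device ready\n");
	return 0;
}
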
594 #ifdef VERBOSE_DEBUG 595 #ifdef VERBOSE_DEBUG
595 #define dev_vdbg dev_dbg 596 #define dev_vdbg dev_dbg
596 #else 597 #else
597 598
598 #define dev_vdbg(dev, format, arg...) \ 599 #define dev_vdbg(dev, format, arg...) \
599 ({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; }) 600 ({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; })
600 #endif 601 #endif
601 602
602 /* 603 /*
603 * dev_WARN() acts like dev_printk(), but with the key difference 604 * dev_WARN() acts like dev_printk(), but with the key difference
604 * of using a WARN/WARN_ON to get the message out, including the 605 * of using a WARN/WARN_ON to get the message out, including the
605 * file/line information and a backtrace. 606 * file/line information and a backtrace.
606 */ 607 */
607 #define dev_WARN(dev, format, arg...) \ 608 #define dev_WARN(dev, format, arg...) \
608 WARN(1, "Device: %s\n" format, dev_driver_string(dev), ## arg); 609 WARN(1, "Device: %s\n" format, dev_driver_string(dev), ## arg);
609 610
610 /* Create alias, so I can be autoloaded. */ 611 /* Create alias, so I can be autoloaded. */
611 #define MODULE_ALIAS_CHARDEV(major,minor) \ 612 #define MODULE_ALIAS_CHARDEV(major,minor) \
612 MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor)) 613 MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
613 #define MODULE_ALIAS_CHARDEV_MAJOR(major) \ 614 #define MODULE_ALIAS_CHARDEV_MAJOR(major) \
614 MODULE_ALIAS("char-major-" __stringify(major) "-*") 615 MODULE_ALIAS("char-major-" __stringify(major) "-*")
615 #endif /* _DEVICE_H_ */ 616 #endif /* _DEVICE_H_ */
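
The MODULE_ALIAS_CHARDEV helpers emit aliases of the form "char-major-M-N"; when userspace first opens a character device node whose driver is not yet loaded, the kernel requests a module by exactly that name, so these aliases are what makes on-demand loading work. For instance, a hypothetical driver owning all of major 199 (a number made up for illustration) would tag itself as:

MODULE_ALIAS_CHARDEV_MAJOR(199);	/* expands to MODULE_ALIAS("char-major-199-*") */
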
616 617