Commit cd9070c9c512ff7995f9019392e0ae548df3a088

Authored by Christoph Hellwig
1 parent 71e75c97f9

scsi: fix the {host,target,device}_blocked counter mess

Seems like these counters are missing any sort of synchronization for
updates, as an over 10 year old comment from me noted.  Fix this by
using atomic counters, and while we're at it also make sure they are
in the same cacheline as the _busy counters and not needlessly stored
to in every I/O completion.

With the new model the _blocked counters can temporarily go negative,
so all the readers are updated to check for > 0 values.  Longer
term every successful I/O completion will reset the counters to zero,
so the temporarily negative values will not cause any harm.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Webb Scales <webbnh@hp.com>
Acked-by: Jens Axboe <axboe@kernel.dk>
Tested-by: Bart Van Assche <bvanassche@acm.org>
Tested-by: Robert Elliott <elliott@hp.com>
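
The model the commit message describes is small enough to sketch. Each _blocked count becomes an atomic that the completion path only writes when it is already non-zero, while the submission path treats any value greater than zero as "blocked" and decrements it on the way down, so a racing decrement that briefly pushes the count negative does no harm. The stand-alone sketch below uses C11 <stdatomic.h> in place of the kernel's atomic_t; fake_sdev, queue_ready and io_complete are invented names for illustration, and only the device_blocked field mirrors the real structure. It is an approximation of the pattern, not the actual scsi_lib.c code.

    /* Sketch of the atomic _blocked counter pattern described above; C11 atomics
     * stand in for the kernel's atomic_t and all names here are illustrative. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct fake_sdev {
        atomic_int device_blocked;      /* stands in for sdev->device_blocked */
    };

    /* Submission side: anything > 0 counts as blocked, so a counter that briefly
     * went negative because of a racing decrement is simply treated as unblocked. */
    static bool queue_ready(struct fake_sdev *sdev)
    {
        if (atomic_load(&sdev->device_blocked) > 0) {
            /* only unblock once the counter has iterated down to zero */
            if (atomic_fetch_sub(&sdev->device_blocked, 1) - 1 > 0)
                return false;
        }
        return true;
    }

    /* Completion side: store only when the counter is actually set, so the common
     * fast path of an unblocked device never writes the cacheline. */
    static void io_complete(struct fake_sdev *sdev)
    {
        if (atomic_load(&sdev->device_blocked))
            atomic_store(&sdev->device_blocked, 0);
    }

    int main(void)
    {
        struct fake_sdev sdev;
        int i;

        atomic_init(&sdev.device_blocked, 3);   /* e.g. set after a BUSY status */

        for (i = 0; i < 5; i++)
            printf("attempt %d: %s\n", i,
                   queue_ready(&sdev) ? "dispatch" : "requeue");

        io_complete(&sdev);     /* a successful completion clears any blockage */
        printf("after completion: device_blocked = %d\n",
               atomic_load(&sdev.device_blocked));
        return 0;
    }

Run as-is, this requeues twice while the counter iterates down to zero and then dispatches normally; the completion-side test means an unblocked device never takes a store on the hot path, which is the cacheline concern the message raises.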

Showing 5 changed files with 58 additions and 53 deletions

1 /* 1 /*
2 * scsi.c Copyright (C) 1992 Drew Eckhardt 2 * scsi.c Copyright (C) 1992 Drew Eckhardt
3 * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale 3 * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
4 * Copyright (C) 2002, 2003 Christoph Hellwig 4 * Copyright (C) 2002, 2003 Christoph Hellwig
5 * 5 *
6 * generic mid-level SCSI driver 6 * generic mid-level SCSI driver
7 * Initial versions: Drew Eckhardt 7 * Initial versions: Drew Eckhardt
8 * Subsequent revisions: Eric Youngdale 8 * Subsequent revisions: Eric Youngdale
9 * 9 *
10 * <drew@colorado.edu> 10 * <drew@colorado.edu>
11 * 11 *
12 * Bug correction thanks go to : 12 * Bug correction thanks go to :
13 * Rik Faith <faith@cs.unc.edu> 13 * Rik Faith <faith@cs.unc.edu>
14 * Tommy Thorn <tthorn> 14 * Tommy Thorn <tthorn>
15 * Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de> 15 * Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
16 * 16 *
17 * Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to 17 * Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
18 * add scatter-gather, multiple outstanding request, and other 18 * add scatter-gather, multiple outstanding request, and other
19 * enhancements. 19 * enhancements.
20 * 20 *
21 * Native multichannel, wide scsi, /proc/scsi and hot plugging 21 * Native multichannel, wide scsi, /proc/scsi and hot plugging
22 * support added by Michael Neuffer <mike@i-connect.net> 22 * support added by Michael Neuffer <mike@i-connect.net>
23 * 23 *
24 * Added request_module("scsi_hostadapter") for kerneld: 24 * Added request_module("scsi_hostadapter") for kerneld:
25 * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf) 25 * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf)
26 * Bjorn Ekwall <bj0rn@blox.se> 26 * Bjorn Ekwall <bj0rn@blox.se>
27 * (changed to kmod) 27 * (changed to kmod)
28 * 28 *
29 * Major improvements to the timeout, abort, and reset processing, 29 * Major improvements to the timeout, abort, and reset processing,
30 * as well as performance modifications for large queue depths by 30 * as well as performance modifications for large queue depths by
31 * Leonard N. Zubkoff <lnz@dandelion.com> 31 * Leonard N. Zubkoff <lnz@dandelion.com>
32 * 32 *
33 * Converted cli() code to spinlocks, Ingo Molnar 33 * Converted cli() code to spinlocks, Ingo Molnar
34 * 34 *
35 * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli 35 * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
36 * 36 *
37 * out_of_space hacks, D. Gilbert (dpg) 990608 37 * out_of_space hacks, D. Gilbert (dpg) 990608
38 */ 38 */
39 39
40 #include <linux/module.h> 40 #include <linux/module.h>
41 #include <linux/moduleparam.h> 41 #include <linux/moduleparam.h>
42 #include <linux/kernel.h> 42 #include <linux/kernel.h>
43 #include <linux/timer.h> 43 #include <linux/timer.h>
44 #include <linux/string.h> 44 #include <linux/string.h>
45 #include <linux/slab.h> 45 #include <linux/slab.h>
46 #include <linux/blkdev.h> 46 #include <linux/blkdev.h>
47 #include <linux/delay.h> 47 #include <linux/delay.h>
48 #include <linux/init.h> 48 #include <linux/init.h>
49 #include <linux/completion.h> 49 #include <linux/completion.h>
50 #include <linux/unistd.h> 50 #include <linux/unistd.h>
51 #include <linux/spinlock.h> 51 #include <linux/spinlock.h>
52 #include <linux/kmod.h> 52 #include <linux/kmod.h>
53 #include <linux/interrupt.h> 53 #include <linux/interrupt.h>
54 #include <linux/notifier.h> 54 #include <linux/notifier.h>
55 #include <linux/cpu.h> 55 #include <linux/cpu.h>
56 #include <linux/mutex.h> 56 #include <linux/mutex.h>
57 #include <linux/async.h> 57 #include <linux/async.h>
58 #include <asm/unaligned.h> 58 #include <asm/unaligned.h>
59 59
60 #include <scsi/scsi.h> 60 #include <scsi/scsi.h>
61 #include <scsi/scsi_cmnd.h> 61 #include <scsi/scsi_cmnd.h>
62 #include <scsi/scsi_dbg.h> 62 #include <scsi/scsi_dbg.h>
63 #include <scsi/scsi_device.h> 63 #include <scsi/scsi_device.h>
64 #include <scsi/scsi_driver.h> 64 #include <scsi/scsi_driver.h>
65 #include <scsi/scsi_eh.h> 65 #include <scsi/scsi_eh.h>
66 #include <scsi/scsi_host.h> 66 #include <scsi/scsi_host.h>
67 #include <scsi/scsi_tcq.h> 67 #include <scsi/scsi_tcq.h>
68 68
69 #include "scsi_priv.h" 69 #include "scsi_priv.h"
70 #include "scsi_logging.h" 70 #include "scsi_logging.h"
71 71
72 #define CREATE_TRACE_POINTS 72 #define CREATE_TRACE_POINTS
73 #include <trace/events/scsi.h> 73 #include <trace/events/scsi.h>
74 74
75 /* 75 /*
76 * Definitions and constants. 76 * Definitions and constants.
77 */ 77 */
78 78
79 /* 79 /*
80 * Note - the initial logging level can be set here to log events at boot time. 80 * Note - the initial logging level can be set here to log events at boot time.
81 * After the system is up, you may enable logging via the /proc interface. 81 * After the system is up, you may enable logging via the /proc interface.
82 */ 82 */
83 unsigned int scsi_logging_level; 83 unsigned int scsi_logging_level;
84 #if defined(CONFIG_SCSI_LOGGING) 84 #if defined(CONFIG_SCSI_LOGGING)
85 EXPORT_SYMBOL(scsi_logging_level); 85 EXPORT_SYMBOL(scsi_logging_level);
86 #endif 86 #endif
87 87
88 /* sd, scsi core and power management need to coordinate flushing async actions */ 88 /* sd, scsi core and power management need to coordinate flushing async actions */
89 ASYNC_DOMAIN(scsi_sd_probe_domain); 89 ASYNC_DOMAIN(scsi_sd_probe_domain);
90 EXPORT_SYMBOL(scsi_sd_probe_domain); 90 EXPORT_SYMBOL(scsi_sd_probe_domain);
91 91
92 /* 92 /*
93 * Separate domain (from scsi_sd_probe_domain) to maximize the benefit of 93 * Separate domain (from scsi_sd_probe_domain) to maximize the benefit of
94 * asynchronous system resume operations. It is marked 'exclusive' to avoid 94 * asynchronous system resume operations. It is marked 'exclusive' to avoid
95 * being included in the async_synchronize_full() that is invoked by 95 * being included in the async_synchronize_full() that is invoked by
96 * dpm_resume() 96 * dpm_resume()
97 */ 97 */
98 ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain); 98 ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain);
99 EXPORT_SYMBOL(scsi_sd_pm_domain); 99 EXPORT_SYMBOL(scsi_sd_pm_domain);
100 100
101 /* NB: These are exposed through /proc/scsi/scsi and form part of the ABI. 101 /* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
102 * You may not alter any existing entry (although adding new ones is 102 * You may not alter any existing entry (although adding new ones is
103 * encouraged once assigned by ANSI/INCITS T10 103 * encouraged once assigned by ANSI/INCITS T10
104 */ 104 */
105 static const char *const scsi_device_types[] = { 105 static const char *const scsi_device_types[] = {
106 "Direct-Access ", 106 "Direct-Access ",
107 "Sequential-Access", 107 "Sequential-Access",
108 "Printer ", 108 "Printer ",
109 "Processor ", 109 "Processor ",
110 "WORM ", 110 "WORM ",
111 "CD-ROM ", 111 "CD-ROM ",
112 "Scanner ", 112 "Scanner ",
113 "Optical Device ", 113 "Optical Device ",
114 "Medium Changer ", 114 "Medium Changer ",
115 "Communications ", 115 "Communications ",
116 "ASC IT8 ", 116 "ASC IT8 ",
117 "ASC IT8 ", 117 "ASC IT8 ",
118 "RAID ", 118 "RAID ",
119 "Enclosure ", 119 "Enclosure ",
120 "Direct-Access-RBC", 120 "Direct-Access-RBC",
121 "Optical card ", 121 "Optical card ",
122 "Bridge controller", 122 "Bridge controller",
123 "Object storage ", 123 "Object storage ",
124 "Automation/Drive ", 124 "Automation/Drive ",
125 }; 125 };
126 126
127 /** 127 /**
128 * scsi_device_type - Return 17 char string indicating device type. 128 * scsi_device_type - Return 17 char string indicating device type.
129 * @type: type number to look up 129 * @type: type number to look up
130 */ 130 */
131 131
132 const char * scsi_device_type(unsigned type) 132 const char * scsi_device_type(unsigned type)
133 { 133 {
134 if (type == 0x1e) 134 if (type == 0x1e)
135 return "Well-known LUN "; 135 return "Well-known LUN ";
136 if (type == 0x1f) 136 if (type == 0x1f)
137 return "No Device "; 137 return "No Device ";
138 if (type >= ARRAY_SIZE(scsi_device_types)) 138 if (type >= ARRAY_SIZE(scsi_device_types))
139 return "Unknown "; 139 return "Unknown ";
140 return scsi_device_types[type]; 140 return scsi_device_types[type];
141 } 141 }
142 142
143 EXPORT_SYMBOL(scsi_device_type); 143 EXPORT_SYMBOL(scsi_device_type);
144 144
145 struct scsi_host_cmd_pool { 145 struct scsi_host_cmd_pool {
146 struct kmem_cache *cmd_slab; 146 struct kmem_cache *cmd_slab;
147 struct kmem_cache *sense_slab; 147 struct kmem_cache *sense_slab;
148 unsigned int users; 148 unsigned int users;
149 char *cmd_name; 149 char *cmd_name;
150 char *sense_name; 150 char *sense_name;
151 unsigned int slab_flags; 151 unsigned int slab_flags;
152 gfp_t gfp_mask; 152 gfp_t gfp_mask;
153 }; 153 };
154 154
155 static struct scsi_host_cmd_pool scsi_cmd_pool = { 155 static struct scsi_host_cmd_pool scsi_cmd_pool = {
156 .cmd_name = "scsi_cmd_cache", 156 .cmd_name = "scsi_cmd_cache",
157 .sense_name = "scsi_sense_cache", 157 .sense_name = "scsi_sense_cache",
158 .slab_flags = SLAB_HWCACHE_ALIGN, 158 .slab_flags = SLAB_HWCACHE_ALIGN,
159 }; 159 };
160 160
161 static struct scsi_host_cmd_pool scsi_cmd_dma_pool = { 161 static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
162 .cmd_name = "scsi_cmd_cache(DMA)", 162 .cmd_name = "scsi_cmd_cache(DMA)",
163 .sense_name = "scsi_sense_cache(DMA)", 163 .sense_name = "scsi_sense_cache(DMA)",
164 .slab_flags = SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA, 164 .slab_flags = SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
165 .gfp_mask = __GFP_DMA, 165 .gfp_mask = __GFP_DMA,
166 }; 166 };
167 167
168 static DEFINE_MUTEX(host_cmd_pool_mutex); 168 static DEFINE_MUTEX(host_cmd_pool_mutex);
169 169
170 /** 170 /**
171 * scsi_host_free_command - internal function to release a command 171 * scsi_host_free_command - internal function to release a command
172 * @shost: host to free the command for 172 * @shost: host to free the command for
173 * @cmd: command to release 173 * @cmd: command to release
174 * 174 *
175 * the command must previously have been allocated by 175 * the command must previously have been allocated by
176 * scsi_host_alloc_command. 176 * scsi_host_alloc_command.
177 */ 177 */
178 static void 178 static void
179 scsi_host_free_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd) 179 scsi_host_free_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
180 { 180 {
181 struct scsi_host_cmd_pool *pool = shost->cmd_pool; 181 struct scsi_host_cmd_pool *pool = shost->cmd_pool;
182 182
183 if (cmd->prot_sdb) 183 if (cmd->prot_sdb)
184 kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb); 184 kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
185 kmem_cache_free(pool->sense_slab, cmd->sense_buffer); 185 kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
186 kmem_cache_free(pool->cmd_slab, cmd); 186 kmem_cache_free(pool->cmd_slab, cmd);
187 } 187 }
188 188
189 /** 189 /**
190 * scsi_host_alloc_command - internal function to allocate command 190 * scsi_host_alloc_command - internal function to allocate command
191 * @shost: SCSI host whose pool to allocate from 191 * @shost: SCSI host whose pool to allocate from
192 * @gfp_mask: mask for the allocation 192 * @gfp_mask: mask for the allocation
193 * 193 *
194 * Returns a fully allocated command with sense buffer and protection 194 * Returns a fully allocated command with sense buffer and protection
195 * data buffer (where applicable) or NULL on failure 195 * data buffer (where applicable) or NULL on failure
196 */ 196 */
197 static struct scsi_cmnd * 197 static struct scsi_cmnd *
198 scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask) 198 scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask)
199 { 199 {
200 struct scsi_host_cmd_pool *pool = shost->cmd_pool; 200 struct scsi_host_cmd_pool *pool = shost->cmd_pool;
201 struct scsi_cmnd *cmd; 201 struct scsi_cmnd *cmd;
202 202
203 cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask); 203 cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
204 if (!cmd) 204 if (!cmd)
205 goto fail; 205 goto fail;
206 206
207 cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab, 207 cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,
208 gfp_mask | pool->gfp_mask); 208 gfp_mask | pool->gfp_mask);
209 if (!cmd->sense_buffer) 209 if (!cmd->sense_buffer)
210 goto fail_free_cmd; 210 goto fail_free_cmd;
211 211
212 if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) { 212 if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
213 cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask); 213 cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask);
214 if (!cmd->prot_sdb) 214 if (!cmd->prot_sdb)
215 goto fail_free_sense; 215 goto fail_free_sense;
216 } 216 }
217 217
218 return cmd; 218 return cmd;
219 219
220 fail_free_sense: 220 fail_free_sense:
221 kmem_cache_free(pool->sense_slab, cmd->sense_buffer); 221 kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
222 fail_free_cmd: 222 fail_free_cmd:
223 kmem_cache_free(pool->cmd_slab, cmd); 223 kmem_cache_free(pool->cmd_slab, cmd);
224 fail: 224 fail:
225 return NULL; 225 return NULL;
226 } 226 }
227 227
228 /** 228 /**
229 * __scsi_get_command - Allocate a struct scsi_cmnd 229 * __scsi_get_command - Allocate a struct scsi_cmnd
230 * @shost: host to transmit command 230 * @shost: host to transmit command
231 * @gfp_mask: allocation mask 231 * @gfp_mask: allocation mask
232 * 232 *
233 * Description: allocate a struct scsi_cmd from host's slab, recycling from the 233 * Description: allocate a struct scsi_cmd from host's slab, recycling from the
234 * host's free_list if necessary. 234 * host's free_list if necessary.
235 */ 235 */
236 static struct scsi_cmnd * 236 static struct scsi_cmnd *
237 __scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask) 237 __scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
238 { 238 {
239 struct scsi_cmnd *cmd = scsi_host_alloc_command(shost, gfp_mask); 239 struct scsi_cmnd *cmd = scsi_host_alloc_command(shost, gfp_mask);
240 240
241 if (unlikely(!cmd)) { 241 if (unlikely(!cmd)) {
242 unsigned long flags; 242 unsigned long flags;
243 243
244 spin_lock_irqsave(&shost->free_list_lock, flags); 244 spin_lock_irqsave(&shost->free_list_lock, flags);
245 if (likely(!list_empty(&shost->free_list))) { 245 if (likely(!list_empty(&shost->free_list))) {
246 cmd = list_entry(shost->free_list.next, 246 cmd = list_entry(shost->free_list.next,
247 struct scsi_cmnd, list); 247 struct scsi_cmnd, list);
248 list_del_init(&cmd->list); 248 list_del_init(&cmd->list);
249 } 249 }
250 spin_unlock_irqrestore(&shost->free_list_lock, flags); 250 spin_unlock_irqrestore(&shost->free_list_lock, flags);
251 251
252 if (cmd) { 252 if (cmd) {
253 void *buf, *prot; 253 void *buf, *prot;
254 254
255 buf = cmd->sense_buffer; 255 buf = cmd->sense_buffer;
256 prot = cmd->prot_sdb; 256 prot = cmd->prot_sdb;
257 257
258 memset(cmd, 0, sizeof(*cmd)); 258 memset(cmd, 0, sizeof(*cmd));
259 259
260 cmd->sense_buffer = buf; 260 cmd->sense_buffer = buf;
261 cmd->prot_sdb = prot; 261 cmd->prot_sdb = prot;
262 } 262 }
263 } 263 }
264 264
265 return cmd; 265 return cmd;
266 } 266 }
267 267
268 /** 268 /**
269 * scsi_get_command - Allocate and setup a scsi command block 269 * scsi_get_command - Allocate and setup a scsi command block
270 * @dev: parent scsi device 270 * @dev: parent scsi device
271 * @gfp_mask: allocator flags 271 * @gfp_mask: allocator flags
272 * 272 *
273 * Returns: The allocated scsi command structure. 273 * Returns: The allocated scsi command structure.
274 */ 274 */
275 struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask) 275 struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
276 { 276 {
277 struct scsi_cmnd *cmd = __scsi_get_command(dev->host, gfp_mask); 277 struct scsi_cmnd *cmd = __scsi_get_command(dev->host, gfp_mask);
278 unsigned long flags; 278 unsigned long flags;
279 279
280 if (unlikely(cmd == NULL)) 280 if (unlikely(cmd == NULL))
281 return NULL; 281 return NULL;
282 282
283 cmd->device = dev; 283 cmd->device = dev;
284 INIT_LIST_HEAD(&cmd->list); 284 INIT_LIST_HEAD(&cmd->list);
285 INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler); 285 INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
286 spin_lock_irqsave(&dev->list_lock, flags); 286 spin_lock_irqsave(&dev->list_lock, flags);
287 list_add_tail(&cmd->list, &dev->cmd_list); 287 list_add_tail(&cmd->list, &dev->cmd_list);
288 spin_unlock_irqrestore(&dev->list_lock, flags); 288 spin_unlock_irqrestore(&dev->list_lock, flags);
289 cmd->jiffies_at_alloc = jiffies; 289 cmd->jiffies_at_alloc = jiffies;
290 return cmd; 290 return cmd;
291 } 291 }
292 292
293 /** 293 /**
294 * __scsi_put_command - Free a struct scsi_cmnd 294 * __scsi_put_command - Free a struct scsi_cmnd
295 * @shost: dev->host 295 * @shost: dev->host
296 * @cmd: Command to free 296 * @cmd: Command to free
297 */ 297 */
298 static void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd) 298 static void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
299 { 299 {
300 unsigned long flags; 300 unsigned long flags;
301 301
302 if (unlikely(list_empty(&shost->free_list))) { 302 if (unlikely(list_empty(&shost->free_list))) {
303 spin_lock_irqsave(&shost->free_list_lock, flags); 303 spin_lock_irqsave(&shost->free_list_lock, flags);
304 if (list_empty(&shost->free_list)) { 304 if (list_empty(&shost->free_list)) {
305 list_add(&cmd->list, &shost->free_list); 305 list_add(&cmd->list, &shost->free_list);
306 cmd = NULL; 306 cmd = NULL;
307 } 307 }
308 spin_unlock_irqrestore(&shost->free_list_lock, flags); 308 spin_unlock_irqrestore(&shost->free_list_lock, flags);
309 } 309 }
310 310
311 if (likely(cmd != NULL)) 311 if (likely(cmd != NULL))
312 scsi_host_free_command(shost, cmd); 312 scsi_host_free_command(shost, cmd);
313 } 313 }
314 314
315 /** 315 /**
316 * scsi_put_command - Free a scsi command block 316 * scsi_put_command - Free a scsi command block
317 * @cmd: command block to free 317 * @cmd: command block to free
318 * 318 *
319 * Returns: Nothing. 319 * Returns: Nothing.
320 * 320 *
321 * Notes: The command must not belong to any lists. 321 * Notes: The command must not belong to any lists.
322 */ 322 */
323 void scsi_put_command(struct scsi_cmnd *cmd) 323 void scsi_put_command(struct scsi_cmnd *cmd)
324 { 324 {
325 unsigned long flags; 325 unsigned long flags;
326 326
327 /* serious error if the command hasn't come from a device list */ 327 /* serious error if the command hasn't come from a device list */
328 spin_lock_irqsave(&cmd->device->list_lock, flags); 328 spin_lock_irqsave(&cmd->device->list_lock, flags);
329 BUG_ON(list_empty(&cmd->list)); 329 BUG_ON(list_empty(&cmd->list));
330 list_del_init(&cmd->list); 330 list_del_init(&cmd->list);
331 spin_unlock_irqrestore(&cmd->device->list_lock, flags); 331 spin_unlock_irqrestore(&cmd->device->list_lock, flags);
332 332
333 BUG_ON(delayed_work_pending(&cmd->abort_work)); 333 BUG_ON(delayed_work_pending(&cmd->abort_work));
334 334
335 __scsi_put_command(cmd->device->host, cmd); 335 __scsi_put_command(cmd->device->host, cmd);
336 } 336 }
337 337
338 static struct scsi_host_cmd_pool * 338 static struct scsi_host_cmd_pool *
339 scsi_find_host_cmd_pool(struct Scsi_Host *shost) 339 scsi_find_host_cmd_pool(struct Scsi_Host *shost)
340 { 340 {
341 if (shost->hostt->cmd_size) 341 if (shost->hostt->cmd_size)
342 return shost->hostt->cmd_pool; 342 return shost->hostt->cmd_pool;
343 if (shost->unchecked_isa_dma) 343 if (shost->unchecked_isa_dma)
344 return &scsi_cmd_dma_pool; 344 return &scsi_cmd_dma_pool;
345 return &scsi_cmd_pool; 345 return &scsi_cmd_pool;
346 } 346 }
347 347
348 static void 348 static void
349 scsi_free_host_cmd_pool(struct scsi_host_cmd_pool *pool) 349 scsi_free_host_cmd_pool(struct scsi_host_cmd_pool *pool)
350 { 350 {
351 kfree(pool->sense_name); 351 kfree(pool->sense_name);
352 kfree(pool->cmd_name); 352 kfree(pool->cmd_name);
353 kfree(pool); 353 kfree(pool);
354 } 354 }
355 355
356 static struct scsi_host_cmd_pool * 356 static struct scsi_host_cmd_pool *
357 scsi_alloc_host_cmd_pool(struct Scsi_Host *shost) 357 scsi_alloc_host_cmd_pool(struct Scsi_Host *shost)
358 { 358 {
359 struct scsi_host_template *hostt = shost->hostt; 359 struct scsi_host_template *hostt = shost->hostt;
360 struct scsi_host_cmd_pool *pool; 360 struct scsi_host_cmd_pool *pool;
361 361
362 pool = kzalloc(sizeof(*pool), GFP_KERNEL); 362 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
363 if (!pool) 363 if (!pool)
364 return NULL; 364 return NULL;
365 365
366 pool->cmd_name = kasprintf(GFP_KERNEL, "%s_cmd", hostt->name); 366 pool->cmd_name = kasprintf(GFP_KERNEL, "%s_cmd", hostt->name);
367 pool->sense_name = kasprintf(GFP_KERNEL, "%s_sense", hostt->name); 367 pool->sense_name = kasprintf(GFP_KERNEL, "%s_sense", hostt->name);
368 if (!pool->cmd_name || !pool->sense_name) { 368 if (!pool->cmd_name || !pool->sense_name) {
369 scsi_free_host_cmd_pool(pool); 369 scsi_free_host_cmd_pool(pool);
370 return NULL; 370 return NULL;
371 } 371 }
372 372
373 pool->slab_flags = SLAB_HWCACHE_ALIGN; 373 pool->slab_flags = SLAB_HWCACHE_ALIGN;
374 if (shost->unchecked_isa_dma) { 374 if (shost->unchecked_isa_dma) {
375 pool->slab_flags |= SLAB_CACHE_DMA; 375 pool->slab_flags |= SLAB_CACHE_DMA;
376 pool->gfp_mask = __GFP_DMA; 376 pool->gfp_mask = __GFP_DMA;
377 } 377 }
378 return pool; 378 return pool;
379 } 379 }
380 380
381 static struct scsi_host_cmd_pool * 381 static struct scsi_host_cmd_pool *
382 scsi_get_host_cmd_pool(struct Scsi_Host *shost) 382 scsi_get_host_cmd_pool(struct Scsi_Host *shost)
383 { 383 {
384 struct scsi_host_template *hostt = shost->hostt; 384 struct scsi_host_template *hostt = shost->hostt;
385 struct scsi_host_cmd_pool *retval = NULL, *pool; 385 struct scsi_host_cmd_pool *retval = NULL, *pool;
386 size_t cmd_size = sizeof(struct scsi_cmnd) + hostt->cmd_size; 386 size_t cmd_size = sizeof(struct scsi_cmnd) + hostt->cmd_size;
387 387
388 /* 388 /*
389 * Select a command slab for this host and create it if not 389 * Select a command slab for this host and create it if not
390 * yet existent. 390 * yet existent.
391 */ 391 */
392 mutex_lock(&host_cmd_pool_mutex); 392 mutex_lock(&host_cmd_pool_mutex);
393 pool = scsi_find_host_cmd_pool(shost); 393 pool = scsi_find_host_cmd_pool(shost);
394 if (!pool) { 394 if (!pool) {
395 pool = scsi_alloc_host_cmd_pool(shost); 395 pool = scsi_alloc_host_cmd_pool(shost);
396 if (!pool) 396 if (!pool)
397 goto out; 397 goto out;
398 } 398 }
399 399
400 if (!pool->users) { 400 if (!pool->users) {
401 pool->cmd_slab = kmem_cache_create(pool->cmd_name, cmd_size, 0, 401 pool->cmd_slab = kmem_cache_create(pool->cmd_name, cmd_size, 0,
402 pool->slab_flags, NULL); 402 pool->slab_flags, NULL);
403 if (!pool->cmd_slab) 403 if (!pool->cmd_slab)
404 goto out_free_pool; 404 goto out_free_pool;
405 405
406 pool->sense_slab = kmem_cache_create(pool->sense_name, 406 pool->sense_slab = kmem_cache_create(pool->sense_name,
407 SCSI_SENSE_BUFFERSIZE, 0, 407 SCSI_SENSE_BUFFERSIZE, 0,
408 pool->slab_flags, NULL); 408 pool->slab_flags, NULL);
409 if (!pool->sense_slab) 409 if (!pool->sense_slab)
410 goto out_free_slab; 410 goto out_free_slab;
411 } 411 }
412 412
413 pool->users++; 413 pool->users++;
414 retval = pool; 414 retval = pool;
415 out: 415 out:
416 mutex_unlock(&host_cmd_pool_mutex); 416 mutex_unlock(&host_cmd_pool_mutex);
417 return retval; 417 return retval;
418 418
419 out_free_slab: 419 out_free_slab:
420 kmem_cache_destroy(pool->cmd_slab); 420 kmem_cache_destroy(pool->cmd_slab);
421 out_free_pool: 421 out_free_pool:
422 if (hostt->cmd_size) 422 if (hostt->cmd_size)
423 scsi_free_host_cmd_pool(pool); 423 scsi_free_host_cmd_pool(pool);
424 goto out; 424 goto out;
425 } 425 }
426 426
427 static void scsi_put_host_cmd_pool(struct Scsi_Host *shost) 427 static void scsi_put_host_cmd_pool(struct Scsi_Host *shost)
428 { 428 {
429 struct scsi_host_template *hostt = shost->hostt; 429 struct scsi_host_template *hostt = shost->hostt;
430 struct scsi_host_cmd_pool *pool; 430 struct scsi_host_cmd_pool *pool;
431 431
432 mutex_lock(&host_cmd_pool_mutex); 432 mutex_lock(&host_cmd_pool_mutex);
433 pool = scsi_find_host_cmd_pool(shost); 433 pool = scsi_find_host_cmd_pool(shost);
434 434
435 /* 435 /*
436 * This may happen if a driver has a mismatched get and put 436 * This may happen if a driver has a mismatched get and put
437 * of the command pool; the driver should be implicated in 437 * of the command pool; the driver should be implicated in
438 * the stack trace 438 * the stack trace
439 */ 439 */
440 BUG_ON(pool->users == 0); 440 BUG_ON(pool->users == 0);
441 441
442 if (!--pool->users) { 442 if (!--pool->users) {
443 kmem_cache_destroy(pool->cmd_slab); 443 kmem_cache_destroy(pool->cmd_slab);
444 kmem_cache_destroy(pool->sense_slab); 444 kmem_cache_destroy(pool->sense_slab);
445 if (hostt->cmd_size) 445 if (hostt->cmd_size)
446 scsi_free_host_cmd_pool(pool); 446 scsi_free_host_cmd_pool(pool);
447 } 447 }
448 mutex_unlock(&host_cmd_pool_mutex); 448 mutex_unlock(&host_cmd_pool_mutex);
449 } 449 }
450 450
451 /** 451 /**
452 * scsi_setup_command_freelist - Setup the command freelist for a scsi host. 452 * scsi_setup_command_freelist - Setup the command freelist for a scsi host.
453 * @shost: host to allocate the freelist for. 453 * @shost: host to allocate the freelist for.
454 * 454 *
455 * Description: The command freelist protects against system-wide out of memory 455 * Description: The command freelist protects against system-wide out of memory
456 * deadlock by preallocating one SCSI command structure for each host, so the 456 * deadlock by preallocating one SCSI command structure for each host, so the
457 * system can always write to a swap file on a device associated with that host. 457 * system can always write to a swap file on a device associated with that host.
458 * 458 *
459 * Returns: Nothing. 459 * Returns: Nothing.
460 */ 460 */
461 int scsi_setup_command_freelist(struct Scsi_Host *shost) 461 int scsi_setup_command_freelist(struct Scsi_Host *shost)
462 { 462 {
463 const gfp_t gfp_mask = shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL; 463 const gfp_t gfp_mask = shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL;
464 struct scsi_cmnd *cmd; 464 struct scsi_cmnd *cmd;
465 465
466 spin_lock_init(&shost->free_list_lock); 466 spin_lock_init(&shost->free_list_lock);
467 INIT_LIST_HEAD(&shost->free_list); 467 INIT_LIST_HEAD(&shost->free_list);
468 468
469 shost->cmd_pool = scsi_get_host_cmd_pool(shost); 469 shost->cmd_pool = scsi_get_host_cmd_pool(shost);
470 if (!shost->cmd_pool) 470 if (!shost->cmd_pool)
471 return -ENOMEM; 471 return -ENOMEM;
472 472
473 /* 473 /*
474 * Get one backup command for this host. 474 * Get one backup command for this host.
475 */ 475 */
476 cmd = scsi_host_alloc_command(shost, gfp_mask); 476 cmd = scsi_host_alloc_command(shost, gfp_mask);
477 if (!cmd) { 477 if (!cmd) {
478 scsi_put_host_cmd_pool(shost); 478 scsi_put_host_cmd_pool(shost);
479 shost->cmd_pool = NULL; 479 shost->cmd_pool = NULL;
480 return -ENOMEM; 480 return -ENOMEM;
481 } 481 }
482 list_add(&cmd->list, &shost->free_list); 482 list_add(&cmd->list, &shost->free_list);
483 return 0; 483 return 0;
484 } 484 }
485 485
486 /** 486 /**
487 * scsi_destroy_command_freelist - Release the command freelist for a scsi host. 487 * scsi_destroy_command_freelist - Release the command freelist for a scsi host.
488 * @shost: host whose freelist is going to be destroyed 488 * @shost: host whose freelist is going to be destroyed
489 */ 489 */
490 void scsi_destroy_command_freelist(struct Scsi_Host *shost) 490 void scsi_destroy_command_freelist(struct Scsi_Host *shost)
491 { 491 {
492 /* 492 /*
493 * If cmd_pool is NULL the free list was not initialized, so 493 * If cmd_pool is NULL the free list was not initialized, so
494 * do not attempt to release resources. 494 * do not attempt to release resources.
495 */ 495 */
496 if (!shost->cmd_pool) 496 if (!shost->cmd_pool)
497 return; 497 return;
498 498
499 while (!list_empty(&shost->free_list)) { 499 while (!list_empty(&shost->free_list)) {
500 struct scsi_cmnd *cmd; 500 struct scsi_cmnd *cmd;
501 501
502 cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list); 502 cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
503 list_del_init(&cmd->list); 503 list_del_init(&cmd->list);
504 scsi_host_free_command(shost, cmd); 504 scsi_host_free_command(shost, cmd);
505 } 505 }
506 shost->cmd_pool = NULL; 506 shost->cmd_pool = NULL;
507 scsi_put_host_cmd_pool(shost); 507 scsi_put_host_cmd_pool(shost);
508 } 508 }
509 509
510 #ifdef CONFIG_SCSI_LOGGING 510 #ifdef CONFIG_SCSI_LOGGING
511 void scsi_log_send(struct scsi_cmnd *cmd) 511 void scsi_log_send(struct scsi_cmnd *cmd)
512 { 512 {
513 unsigned int level; 513 unsigned int level;
514 514
515 /* 515 /*
516 * If ML QUEUE log level is greater than or equal to: 516 * If ML QUEUE log level is greater than or equal to:
517 * 517 *
518 * 1: nothing (match completion) 518 * 1: nothing (match completion)
519 * 519 *
520 * 2: log opcode + command of all commands 520 * 2: log opcode + command of all commands
521 * 521 *
522 * 3: same as 2 plus dump cmd address 522 * 3: same as 2 plus dump cmd address
523 * 523 *
524 * 4: same as 3 plus dump extra junk 524 * 4: same as 3 plus dump extra junk
525 */ 525 */
526 if (unlikely(scsi_logging_level)) { 526 if (unlikely(scsi_logging_level)) {
527 level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT, 527 level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
528 SCSI_LOG_MLQUEUE_BITS); 528 SCSI_LOG_MLQUEUE_BITS);
529 if (level > 1) { 529 if (level > 1) {
530 scmd_printk(KERN_INFO, cmd, "Send: "); 530 scmd_printk(KERN_INFO, cmd, "Send: ");
531 if (level > 2) 531 if (level > 2)
532 printk("0x%p ", cmd); 532 printk("0x%p ", cmd);
533 printk("\n"); 533 printk("\n");
534 scsi_print_command(cmd); 534 scsi_print_command(cmd);
535 if (level > 3) { 535 if (level > 3) {
536 printk(KERN_INFO "buffer = 0x%p, bufflen = %d," 536 printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
537 " queuecommand 0x%p\n", 537 " queuecommand 0x%p\n",
538 scsi_sglist(cmd), scsi_bufflen(cmd), 538 scsi_sglist(cmd), scsi_bufflen(cmd),
539 cmd->device->host->hostt->queuecommand); 539 cmd->device->host->hostt->queuecommand);
540 540
541 } 541 }
542 } 542 }
543 } 543 }
544 } 544 }
545 545
546 void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) 546 void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
547 { 547 {
548 unsigned int level; 548 unsigned int level;
549 549
550 /* 550 /*
551 * If ML COMPLETE log level is greater than or equal to: 551 * If ML COMPLETE log level is greater than or equal to:
552 * 552 *
553 * 1: log disposition, result, opcode + command, and conditionally 553 * 1: log disposition, result, opcode + command, and conditionally
554 * sense data for failures or non SUCCESS dispositions. 554 * sense data for failures or non SUCCESS dispositions.
555 * 555 *
556 * 2: same as 1 but for all command completions. 556 * 2: same as 1 but for all command completions.
557 * 557 *
558 * 3: same as 2 plus dump cmd address 558 * 3: same as 2 plus dump cmd address
559 * 559 *
560 * 4: same as 3 plus dump extra junk 560 * 4: same as 3 plus dump extra junk
561 */ 561 */
562 if (unlikely(scsi_logging_level)) { 562 if (unlikely(scsi_logging_level)) {
563 level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT, 563 level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
564 SCSI_LOG_MLCOMPLETE_BITS); 564 SCSI_LOG_MLCOMPLETE_BITS);
565 if (((level > 0) && (cmd->result || disposition != SUCCESS)) || 565 if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
566 (level > 1)) { 566 (level > 1)) {
567 scmd_printk(KERN_INFO, cmd, "Done: "); 567 scmd_printk(KERN_INFO, cmd, "Done: ");
568 if (level > 2) 568 if (level > 2)
569 printk("0x%p ", cmd); 569 printk("0x%p ", cmd);
570 /* 570 /*
571 * Dump truncated values, so we usually fit within 571 * Dump truncated values, so we usually fit within
572 * 80 chars. 572 * 80 chars.
573 */ 573 */
574 switch (disposition) { 574 switch (disposition) {
575 case SUCCESS: 575 case SUCCESS:
576 printk("SUCCESS\n"); 576 printk("SUCCESS\n");
577 break; 577 break;
578 case NEEDS_RETRY: 578 case NEEDS_RETRY:
579 printk("RETRY\n"); 579 printk("RETRY\n");
580 break; 580 break;
581 case ADD_TO_MLQUEUE: 581 case ADD_TO_MLQUEUE:
582 printk("MLQUEUE\n"); 582 printk("MLQUEUE\n");
583 break; 583 break;
584 case FAILED: 584 case FAILED:
585 printk("FAILED\n"); 585 printk("FAILED\n");
586 break; 586 break;
587 case TIMEOUT_ERROR: 587 case TIMEOUT_ERROR:
588 /* 588 /*
589 * If called via scsi_times_out. 589 * If called via scsi_times_out.
590 */ 590 */
591 printk("TIMEOUT\n"); 591 printk("TIMEOUT\n");
592 break; 592 break;
593 default: 593 default:
594 printk("UNKNOWN\n"); 594 printk("UNKNOWN\n");
595 } 595 }
596 scsi_print_result(cmd); 596 scsi_print_result(cmd);
597 scsi_print_command(cmd); 597 scsi_print_command(cmd);
598 if (status_byte(cmd->result) & CHECK_CONDITION) 598 if (status_byte(cmd->result) & CHECK_CONDITION)
599 scsi_print_sense("", cmd); 599 scsi_print_sense("", cmd);
600 if (level > 3) 600 if (level > 3)
601 scmd_printk(KERN_INFO, cmd, 601 scmd_printk(KERN_INFO, cmd,
602 "scsi host busy %d failed %d\n", 602 "scsi host busy %d failed %d\n",
603 atomic_read(&cmd->device->host->host_busy), 603 atomic_read(&cmd->device->host->host_busy),
604 cmd->device->host->host_failed); 604 cmd->device->host->host_failed);
605 } 605 }
606 } 606 }
607 } 607 }
608 #endif 608 #endif
609 609
610 /** 610 /**
611 * scsi_cmd_get_serial - Assign a serial number to a command 611 * scsi_cmd_get_serial - Assign a serial number to a command
612 * @host: the scsi host 612 * @host: the scsi host
613 * @cmd: command to assign serial number to 613 * @cmd: command to assign serial number to
614 * 614 *
615 * Description: a serial number identifies a request for error recovery 615 * Description: a serial number identifies a request for error recovery
616 * and debugging purposes. Protected by the Host_Lock of host. 616 * and debugging purposes. Protected by the Host_Lock of host.
617 */ 617 */
618 void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd) 618 void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
619 { 619 {
620 cmd->serial_number = host->cmd_serial_number++; 620 cmd->serial_number = host->cmd_serial_number++;
621 if (cmd->serial_number == 0) 621 if (cmd->serial_number == 0)
622 cmd->serial_number = host->cmd_serial_number++; 622 cmd->serial_number = host->cmd_serial_number++;
623 } 623 }
624 EXPORT_SYMBOL(scsi_cmd_get_serial); 624 EXPORT_SYMBOL(scsi_cmd_get_serial);
625 625
626 /** 626 /**
627 * scsi_dispatch_command - Dispatch a command to the low-level driver. 627 * scsi_dispatch_command - Dispatch a command to the low-level driver.
628 * @cmd: command block we are dispatching. 628 * @cmd: command block we are dispatching.
629 * 629 *
630 * Return: nonzero return request was rejected and device's queue needs to be 630 * Return: nonzero return request was rejected and device's queue needs to be
631 * plugged. 631 * plugged.
632 */ 632 */
633 int scsi_dispatch_cmd(struct scsi_cmnd *cmd) 633 int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
634 { 634 {
635 struct Scsi_Host *host = cmd->device->host; 635 struct Scsi_Host *host = cmd->device->host;
636 int rtn = 0; 636 int rtn = 0;
637 637
638 atomic_inc(&cmd->device->iorequest_cnt); 638 atomic_inc(&cmd->device->iorequest_cnt);
639 639
640 /* check if the device is still usable */ 640 /* check if the device is still usable */
641 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) { 641 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
642 /* in SDEV_DEL we error all commands. DID_NO_CONNECT 642 /* in SDEV_DEL we error all commands. DID_NO_CONNECT
643 * returns an immediate error upwards, and signals 643 * returns an immediate error upwards, and signals
644 * that the device is no longer present */ 644 * that the device is no longer present */
645 cmd->result = DID_NO_CONNECT << 16; 645 cmd->result = DID_NO_CONNECT << 16;
646 goto done; 646 goto done;
647 } 647 }
648 648
649 /* Check to see if the scsi lld made this device blocked. */ 649 /* Check to see if the scsi lld made this device blocked. */
650 if (unlikely(scsi_device_blocked(cmd->device))) { 650 if (unlikely(scsi_device_blocked(cmd->device))) {
651 /* 651 /*
652 * in blocked state, the command is just put back on 652 * in blocked state, the command is just put back on
653 * the device queue. The suspend state has already 653 * the device queue. The suspend state has already
654 * blocked the queue so future requests should not 654 * blocked the queue so future requests should not
655 * occur until the device transitions out of the 655 * occur until the device transitions out of the
656 * suspend state. 656 * suspend state.
657 */ 657 */
658 SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd, 658 SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
659 "queuecommand : device blocked\n")); 659 "queuecommand : device blocked\n"));
660 return SCSI_MLQUEUE_DEVICE_BUSY; 660 return SCSI_MLQUEUE_DEVICE_BUSY;
661 } 661 }
662 662
663 /* 663 /*
664 * If SCSI-2 or lower, store the LUN value in cmnd. 664 * If SCSI-2 or lower, store the LUN value in cmnd.
665 */ 665 */
666 if (cmd->device->scsi_level <= SCSI_2 && 666 if (cmd->device->scsi_level <= SCSI_2 &&
667 cmd->device->scsi_level != SCSI_UNKNOWN) { 667 cmd->device->scsi_level != SCSI_UNKNOWN) {
668 cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) | 668 cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
669 (cmd->device->lun << 5 & 0xe0); 669 (cmd->device->lun << 5 & 0xe0);
670 } 670 }
671 671
672 scsi_log_send(cmd); 672 scsi_log_send(cmd);
673 673
674 /* 674 /*
675 * Before we queue this command, check if the command 675 * Before we queue this command, check if the command
676 * length exceeds what the host adapter can handle. 676 * length exceeds what the host adapter can handle.
677 */ 677 */
678 if (cmd->cmd_len > cmd->device->host->max_cmd_len) { 678 if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
679 SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd, 679 SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
680 "queuecommand : command too long. " 680 "queuecommand : command too long. "
681 "cdb_size=%d host->max_cmd_len=%d\n", 681 "cdb_size=%d host->max_cmd_len=%d\n",
682 cmd->cmd_len, cmd->device->host->max_cmd_len)); 682 cmd->cmd_len, cmd->device->host->max_cmd_len));
683 cmd->result = (DID_ABORT << 16); 683 cmd->result = (DID_ABORT << 16);
684 goto done; 684 goto done;
685 } 685 }
686 686
687 if (unlikely(host->shost_state == SHOST_DEL)) { 687 if (unlikely(host->shost_state == SHOST_DEL)) {
688 cmd->result = (DID_NO_CONNECT << 16); 688 cmd->result = (DID_NO_CONNECT << 16);
689 goto done; 689 goto done;
690 690
691 } 691 }
692 692
693 trace_scsi_dispatch_cmd_start(cmd); 693 trace_scsi_dispatch_cmd_start(cmd);
694 rtn = host->hostt->queuecommand(host, cmd); 694 rtn = host->hostt->queuecommand(host, cmd);
695 if (rtn) { 695 if (rtn) {
696 trace_scsi_dispatch_cmd_error(cmd, rtn); 696 trace_scsi_dispatch_cmd_error(cmd, rtn);
697 if (rtn != SCSI_MLQUEUE_DEVICE_BUSY && 697 if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
698 rtn != SCSI_MLQUEUE_TARGET_BUSY) 698 rtn != SCSI_MLQUEUE_TARGET_BUSY)
699 rtn = SCSI_MLQUEUE_HOST_BUSY; 699 rtn = SCSI_MLQUEUE_HOST_BUSY;
700 700
701 SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd, 701 SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
702 "queuecommand : request rejected\n")); 702 "queuecommand : request rejected\n"));
703 } 703 }
704 704
705 return rtn; 705 return rtn;
706 done: 706 done:
707 cmd->scsi_done(cmd); 707 cmd->scsi_done(cmd);
708 return 0; 708 return 0;
709 } 709 }
710 710
711 /** 711 /**
712 * scsi_finish_command - cleanup and pass command back to upper layer 712 * scsi_finish_command - cleanup and pass command back to upper layer
713 * @cmd: the command 713 * @cmd: the command
714 * 714 *
715 * Description: Pass command off to upper layer for finishing of I/O 715 * Description: Pass command off to upper layer for finishing of I/O
716 * request, waking processes that are waiting on results, 716 * request, waking processes that are waiting on results,
717 * etc. 717 * etc.
718 */ 718 */
719 void scsi_finish_command(struct scsi_cmnd *cmd) 719 void scsi_finish_command(struct scsi_cmnd *cmd)
720 { 720 {
721 struct scsi_device *sdev = cmd->device; 721 struct scsi_device *sdev = cmd->device;
722 struct scsi_target *starget = scsi_target(sdev); 722 struct scsi_target *starget = scsi_target(sdev);
723 struct Scsi_Host *shost = sdev->host; 723 struct Scsi_Host *shost = sdev->host;
724 struct scsi_driver *drv; 724 struct scsi_driver *drv;
725 unsigned int good_bytes; 725 unsigned int good_bytes;
726 726
727 scsi_device_unbusy(sdev); 727 scsi_device_unbusy(sdev);
728 728
729 /* 729 /*
730 * Clear the flags which say that the device/host is no longer 730 * Clear the flags that say that the device/target/host is no longer
731 * capable of accepting new commands. These are set in scsi_queue.c 731 * capable of accepting new commands.
732 * for both the queue full condition on a device, and for a 732 */
733 * host full condition on the host. 733 if (atomic_read(&shost->host_blocked))
734 * 734 atomic_set(&shost->host_blocked, 0);
735 * XXX(hch): What about locking? 735 if (atomic_read(&starget->target_blocked))
736 */ 736 atomic_set(&starget->target_blocked, 0);
737 shost->host_blocked = 0; 737 if (atomic_read(&sdev->device_blocked))
738 starget->target_blocked = 0; 738 atomic_set(&sdev->device_blocked, 0);
739 sdev->device_blocked = 0;
740 739
741 /* 740 /*
742 * If we have valid sense information, then some kind of recovery 741 * If we have valid sense information, then some kind of recovery
743 * must have taken place. Make a note of this. 742 * must have taken place. Make a note of this.
744 */ 743 */
745 if (SCSI_SENSE_VALID(cmd)) 744 if (SCSI_SENSE_VALID(cmd))
746 cmd->result |= (DRIVER_SENSE << 24); 745 cmd->result |= (DRIVER_SENSE << 24);
747 746
748 SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev, 747 SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
749 "Notifying upper driver of completion " 748 "Notifying upper driver of completion "
750 "(result %x)\n", cmd->result)); 749 "(result %x)\n", cmd->result));
751 750
752 good_bytes = scsi_bufflen(cmd); 751 good_bytes = scsi_bufflen(cmd);
753 if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) { 752 if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
754 int old_good_bytes = good_bytes; 753 int old_good_bytes = good_bytes;
755 drv = scsi_cmd_to_driver(cmd); 754 drv = scsi_cmd_to_driver(cmd);
756 if (drv->done) 755 if (drv->done)
757 good_bytes = drv->done(cmd); 756 good_bytes = drv->done(cmd);
758 /* 757 /*
759 * USB may not give sense identifying bad sector and 758 * USB may not give sense identifying bad sector and
760 * simply return a residue instead, so subtract off the 759 * simply return a residue instead, so subtract off the
761 * residue if drv->done() error processing indicates no 760 * residue if drv->done() error processing indicates no
762 * change to the completion length. 761 * change to the completion length.
763 */ 762 */
764 if (good_bytes == old_good_bytes) 763 if (good_bytes == old_good_bytes)
765 good_bytes -= scsi_get_resid(cmd); 764 good_bytes -= scsi_get_resid(cmd);
766 } 765 }
767 scsi_io_completion(cmd, good_bytes); 766 scsi_io_completion(cmd, good_bytes);
768 } 767 }
769 768
770 /** 769 /**
771 * scsi_adjust_queue_depth - Let low level drivers change a device's queue depth 770 * scsi_adjust_queue_depth - Let low level drivers change a device's queue depth
772 * @sdev: SCSI Device in question 771 * @sdev: SCSI Device in question
773 * @tagged: Do we use tagged queueing (non-0) or do we treat 772 * @tagged: Do we use tagged queueing (non-0) or do we treat
774 * this device as an untagged device (0) 773 * this device as an untagged device (0)
775 * @tags: Number of tags allowed if tagged queueing enabled, 774 * @tags: Number of tags allowed if tagged queueing enabled,
776 * or number of commands the low level driver can 775 * or number of commands the low level driver can
777 * queue up in non-tagged mode (as per cmd_per_lun). 776 * queue up in non-tagged mode (as per cmd_per_lun).
778 * 777 *
779 * Returns: Nothing 778 * Returns: Nothing
780 * 779 *
781 * Lock Status: None held on entry 780 * Lock Status: None held on entry
782 * 781 *
783 * Notes: Low level drivers may call this at any time and we will do 782 * Notes: Low level drivers may call this at any time and we will do
784 * the right thing depending on whether or not the device is 783 * the right thing depending on whether or not the device is
785 * currently active and whether or not it even has the 784 * currently active and whether or not it even has the
786 * command blocks built yet. 785 * command blocks built yet.
787 */ 786 */
788 void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags) 787 void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
789 { 788 {
790 unsigned long flags; 789 unsigned long flags;
791 790
792 /* 791 /*
793 * refuse to set tagged depth to an unworkable size 792 * refuse to set tagged depth to an unworkable size
794 */ 793 */
795 if (tags <= 0) 794 if (tags <= 0)
796 return; 795 return;
797 796
798 spin_lock_irqsave(sdev->request_queue->queue_lock, flags); 797 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
799 798
800 /* 799 /*
801 * Check to see if the queue is managed by the block layer. 800 * Check to see if the queue is managed by the block layer.
802 * If it is, and we fail to adjust the depth, exit. 801 * If it is, and we fail to adjust the depth, exit.
803 * 802 *
804 * Do not resize the tag map if it is a host wide share bqt, 803 * Do not resize the tag map if it is a host wide share bqt,
805 * because the size should be the hosts's can_queue. If there 804 * because the size should be the hosts's can_queue. If there
806 * is more IO than the LLD's can_queue (so there are not enuogh 805 * is more IO than the LLD's can_queue (so there are not enuogh
807 * tags) request_fn's host queue ready check will handle it. 806 * tags) request_fn's host queue ready check will handle it.
808 */ 807 */
809 if (!sdev->host->bqt) { 808 if (!sdev->host->bqt) {
810 if (blk_queue_tagged(sdev->request_queue) && 809 if (blk_queue_tagged(sdev->request_queue) &&
811 blk_queue_resize_tags(sdev->request_queue, tags) != 0) 810 blk_queue_resize_tags(sdev->request_queue, tags) != 0)
812 goto out; 811 goto out;
813 } 812 }
814 813
815 sdev->queue_depth = tags; 814 sdev->queue_depth = tags;
816 switch (tagged) { 815 switch (tagged) {
817 case 0: 816 case 0:
818 sdev->ordered_tags = 0; 817 sdev->ordered_tags = 0;
819 sdev->simple_tags = 0; 818 sdev->simple_tags = 0;
820 break; 819 break;
821 case MSG_ORDERED_TAG: 820 case MSG_ORDERED_TAG:
822 sdev->ordered_tags = 1; 821 sdev->ordered_tags = 1;
823 sdev->simple_tags = 1; 822 sdev->simple_tags = 1;
824 break; 823 break;
825 case MSG_SIMPLE_TAG: 824 case MSG_SIMPLE_TAG:
826 sdev->ordered_tags = 0; 825 sdev->ordered_tags = 0;
827 sdev->simple_tags = 1; 826 sdev->simple_tags = 1;
828 break; 827 break;
829 default: 828 default:
830 sdev->ordered_tags = 0; 829 sdev->ordered_tags = 0;
831 sdev->simple_tags = 0; 830 sdev->simple_tags = 0;
832 sdev_printk(KERN_WARNING, sdev, 831 sdev_printk(KERN_WARNING, sdev,
833 "scsi_adjust_queue_depth, bad queue type, " 832 "scsi_adjust_queue_depth, bad queue type, "
834 "disabled\n"); 833 "disabled\n");
835 } 834 }
836 out: 835 out:
837 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); 836 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
838 } 837 }
839 EXPORT_SYMBOL(scsi_adjust_queue_depth); 838 EXPORT_SYMBOL(scsi_adjust_queue_depth);
840 839
841 /** 840 /**
842 * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth 841 * scsi_track_queue_full - track QUEUE_FULL events to adjust queue depth
843 * @sdev: SCSI Device in question 842 * @sdev: SCSI Device in question
844 * @depth: Current number of outstanding SCSI commands on this device, 843 * @depth: Current number of outstanding SCSI commands on this device,
845 * not counting the one returned as QUEUE_FULL. 844 * not counting the one returned as QUEUE_FULL.
846 * 845 *
847 * Description: This function will track successive QUEUE_FULL events on a 846 * Description: This function will track successive QUEUE_FULL events on a
848 * specific SCSI device to determine if and when there is a 847 * specific SCSI device to determine if and when there is a
849 * need to adjust the queue depth on the device. 848 * need to adjust the queue depth on the device.
850 * 849 *
851 * Returns: 0 - No change needed, >0 - Adjust queue depth to this new depth, 850 * Returns: 0 - No change needed, >0 - Adjust queue depth to this new depth,
852 * -1 - Drop back to untagged operation using host->cmd_per_lun 851 * -1 - Drop back to untagged operation using host->cmd_per_lun
853 * as the untagged command depth 852 * as the untagged command depth
854 * 853 *
855 * Lock Status: None held on entry 854 * Lock Status: None held on entry
856 * 855 *
857 * Notes: Low level drivers may call this at any time and we will do 856 * Notes: Low level drivers may call this at any time and we will do
858 * "The Right Thing." We are interrupt context safe. 857 * "The Right Thing." We are interrupt context safe.
859 */ 858 */
860 int scsi_track_queue_full(struct scsi_device *sdev, int depth) 859 int scsi_track_queue_full(struct scsi_device *sdev, int depth)
861 { 860 {
862 861
863 /* 862 /*
864 * Don't let QUEUE_FULLs on the same 863 * Don't let QUEUE_FULLs on the same
865 * jiffies count, they could all be from 864 * jiffies count, they could all be from
866 * same event. 865 * same event.
867 */ 866 */
868 if ((jiffies >> 4) == (sdev->last_queue_full_time >> 4)) 867 if ((jiffies >> 4) == (sdev->last_queue_full_time >> 4))
869 return 0; 868 return 0;
870 869
871 sdev->last_queue_full_time = jiffies; 870 sdev->last_queue_full_time = jiffies;
872 if (sdev->last_queue_full_depth != depth) { 871 if (sdev->last_queue_full_depth != depth) {
873 sdev->last_queue_full_count = 1; 872 sdev->last_queue_full_count = 1;
874 sdev->last_queue_full_depth = depth; 873 sdev->last_queue_full_depth = depth;
875 } else { 874 } else {
876 sdev->last_queue_full_count++; 875 sdev->last_queue_full_count++;
877 } 876 }
878 877
879 if (sdev->last_queue_full_count <= 10) 878 if (sdev->last_queue_full_count <= 10)
880 return 0; 879 return 0;
881 if (sdev->last_queue_full_depth < 8) { 880 if (sdev->last_queue_full_depth < 8) {
882 /* Drop back to untagged */ 881 /* Drop back to untagged */
883 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); 882 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
884 return -1; 883 return -1;
885 } 884 }
886 885
887 if (sdev->ordered_tags) 886 if (sdev->ordered_tags)
888 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth); 887 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
889 else 888 else
890 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth); 889 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
891 return depth; 890 return depth;
892 } 891 }
893 EXPORT_SYMBOL(scsi_track_queue_full); 892 EXPORT_SYMBOL(scsi_track_queue_full);
894 893
895 /** 894 /**
896 * scsi_vpd_inquiry - Request a device provide us with a VPD page 895 * scsi_vpd_inquiry - Request a device provide us with a VPD page
897 * @sdev: The device to ask 896 * @sdev: The device to ask
898 * @buffer: Where to put the result 897 * @buffer: Where to put the result
899 * @page: Which Vital Product Data to return 898 * @page: Which Vital Product Data to return
900 * @len: The length of the buffer 899 * @len: The length of the buffer
901 * 900 *
902 * This is an internal helper function. You probably want to use 901 * This is an internal helper function. You probably want to use
903 * scsi_get_vpd_page instead. 902 * scsi_get_vpd_page instead.
904 * 903 *
905 * Returns size of the vpd page on success or a negative error number. 904 * Returns size of the vpd page on success or a negative error number.
906 */ 905 */
907 static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer, 906 static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
908 u8 page, unsigned len) 907 u8 page, unsigned len)
909 { 908 {
910 int result; 909 int result;
911 unsigned char cmd[16]; 910 unsigned char cmd[16];
912 911
913 if (len < 4) 912 if (len < 4)
914 return -EINVAL; 913 return -EINVAL;
915 914
916 cmd[0] = INQUIRY; 915 cmd[0] = INQUIRY;
917 cmd[1] = 1; /* EVPD */ 916 cmd[1] = 1; /* EVPD */
918 cmd[2] = page; 917 cmd[2] = page;
919 cmd[3] = len >> 8; 918 cmd[3] = len >> 8;
920 cmd[4] = len & 0xff; 919 cmd[4] = len & 0xff;
921 cmd[5] = 0; /* Control byte */ 920 cmd[5] = 0; /* Control byte */
922 921
923 /* 922 /*
924 * I'm not convinced we need to try quite this hard to get VPD, but 923 * I'm not convinced we need to try quite this hard to get VPD, but
925 * all the existing users tried this hard. 924 * all the existing users tried this hard.
926 */ 925 */
927 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, 926 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
928 len, NULL, 30 * HZ, 3, NULL); 927 len, NULL, 30 * HZ, 3, NULL);
929 if (result) 928 if (result)
930 return -EIO; 929 return -EIO;
931 930
932 /* Sanity check that we got the page back that we asked for */ 931 /* Sanity check that we got the page back that we asked for */
933 if (buffer[1] != page) 932 if (buffer[1] != page)
934 return -EIO; 933 return -EIO;
935 934
936 return get_unaligned_be16(&buffer[2]) + 4; 935 return get_unaligned_be16(&buffer[2]) + 4;
937 } 936 }
938 937
939 /** 938 /**
940 * scsi_get_vpd_page - Get Vital Product Data from a SCSI device 939 * scsi_get_vpd_page - Get Vital Product Data from a SCSI device
941 * @sdev: The device to ask 940 * @sdev: The device to ask
942 * @page: Which Vital Product Data to return 941 * @page: Which Vital Product Data to return
943 * @buf: where to store the VPD 942 * @buf: where to store the VPD
944 * @buf_len: number of bytes in the VPD buffer area 943 * @buf_len: number of bytes in the VPD buffer area
945 * 944 *
946 * SCSI devices may optionally supply Vital Product Data. Each 'page' 945 * SCSI devices may optionally supply Vital Product Data. Each 'page'
947 * of VPD is defined in the appropriate SCSI document (eg SPC, SBC). 946 * of VPD is defined in the appropriate SCSI document (eg SPC, SBC).
948 * If the device supports this VPD page, this routine returns a pointer 947 * If the device supports this VPD page, this routine returns a pointer
949 * to a buffer containing the data from that page. The caller is 948 * to a buffer containing the data from that page. The caller is
950 * responsible for calling kfree() on this pointer when it is no longer 949 * responsible for calling kfree() on this pointer when it is no longer
951 * needed. If we cannot retrieve the VPD page this routine returns %NULL. 950 * needed. If we cannot retrieve the VPD page this routine returns %NULL.
952 */ 951 */
953 int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf, 952 int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
954 int buf_len) 953 int buf_len)
955 { 954 {
956 int i, result; 955 int i, result;
957 956
958 if (sdev->skip_vpd_pages) 957 if (sdev->skip_vpd_pages)
959 goto fail; 958 goto fail;
960 959
961 /* Ask for all the pages supported by this device */ 960 /* Ask for all the pages supported by this device */
962 result = scsi_vpd_inquiry(sdev, buf, 0, buf_len); 961 result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
963 if (result < 4) 962 if (result < 4)
964 goto fail; 963 goto fail;
965 964
966 /* If the user actually wanted this page, we can skip the rest */ 965 /* If the user actually wanted this page, we can skip the rest */
967 if (page == 0) 966 if (page == 0)
968 return 0; 967 return 0;
969 968
970 for (i = 4; i < min(result, buf_len); i++) 969 for (i = 4; i < min(result, buf_len); i++)
971 if (buf[i] == page) 970 if (buf[i] == page)
972 goto found; 971 goto found;
973 972
974 if (i < result && i >= buf_len) 973 if (i < result && i >= buf_len)
975 /* ran off the end of the buffer, give us benefit of doubt */ 974 /* ran off the end of the buffer, give us benefit of doubt */
976 goto found; 975 goto found;
977 /* The device claims it doesn't support the requested page */ 976 /* The device claims it doesn't support the requested page */
978 goto fail; 977 goto fail;
979 978
980 found: 979 found:
981 result = scsi_vpd_inquiry(sdev, buf, page, buf_len); 980 result = scsi_vpd_inquiry(sdev, buf, page, buf_len);
982 if (result < 0) 981 if (result < 0)
983 goto fail; 982 goto fail;
984 983
985 return 0; 984 return 0;
986 985
987 fail: 986 fail:
988 return -EINVAL; 987 return -EINVAL;
989 } 988 }
990 EXPORT_SYMBOL_GPL(scsi_get_vpd_page); 989 EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
991 990
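A hedged usage sketch for the exported helper above: the caller supplies the buffer, a return of 0 means the page was copied into it, and -EINVAL means it could not be retrieved. The buffer size below is an arbitrary choice, not taken from this commit.

/* Sketch only: fetch the Unit Serial Number VPD page (0x80). Assumes
 * a valid, referenced scsi_device and process context (GFP_KERNEL). */
static void example_show_pg80(struct scsi_device *sdev)
{
	unsigned char *buf = kmalloc(255, GFP_KERNEL);

	if (!buf)
		return;
	if (scsi_get_vpd_page(sdev, 0x80, buf, 255) == 0)
		sdev_printk(KERN_INFO, sdev, "unit serial number page present\n");
	kfree(buf);
}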
992 /** 991 /**
993 * scsi_attach_vpd - Attach Vital Product Data to a SCSI device structure 992 * scsi_attach_vpd - Attach Vital Product Data to a SCSI device structure
994 * @sdev: The device to ask 993 * @sdev: The device to ask
995 * 994 *
996 * Attach the 'Device Identification' VPD page (0x83) and the 995 * Attach the 'Device Identification' VPD page (0x83) and the
997 * 'Unit Serial Number' VPD page (0x80) to a SCSI device 996 * 'Unit Serial Number' VPD page (0x80) to a SCSI device
998 * structure. This information can be used to identify the device 997 * structure. This information can be used to identify the device
999 * uniquely. 998 * uniquely.
1000 */ 999 */
1001 void scsi_attach_vpd(struct scsi_device *sdev) 1000 void scsi_attach_vpd(struct scsi_device *sdev)
1002 { 1001 {
1003 int result, i; 1002 int result, i;
1004 int vpd_len = SCSI_VPD_PG_LEN; 1003 int vpd_len = SCSI_VPD_PG_LEN;
1005 int pg80_supported = 0; 1004 int pg80_supported = 0;
1006 int pg83_supported = 0; 1005 int pg83_supported = 0;
1007 unsigned char *vpd_buf; 1006 unsigned char *vpd_buf;
1008 1007
1009 if (sdev->skip_vpd_pages) 1008 if (sdev->skip_vpd_pages)
1010 return; 1009 return;
1011 retry_pg0: 1010 retry_pg0:
1012 vpd_buf = kmalloc(vpd_len, GFP_KERNEL); 1011 vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
1013 if (!vpd_buf) 1012 if (!vpd_buf)
1014 return; 1013 return;
1015 1014
1016 /* Ask for all the pages supported by this device */ 1015 /* Ask for all the pages supported by this device */
1017 result = scsi_vpd_inquiry(sdev, vpd_buf, 0, vpd_len); 1016 result = scsi_vpd_inquiry(sdev, vpd_buf, 0, vpd_len);
1018 if (result < 0) { 1017 if (result < 0) {
1019 kfree(vpd_buf); 1018 kfree(vpd_buf);
1020 return; 1019 return;
1021 } 1020 }
1022 if (result > vpd_len) { 1021 if (result > vpd_len) {
1023 vpd_len = result; 1022 vpd_len = result;
1024 kfree(vpd_buf); 1023 kfree(vpd_buf);
1025 goto retry_pg0; 1024 goto retry_pg0;
1026 } 1025 }
1027 1026
1028 for (i = 4; i < result; i++) { 1027 for (i = 4; i < result; i++) {
1029 if (vpd_buf[i] == 0x80) 1028 if (vpd_buf[i] == 0x80)
1030 pg80_supported = 1; 1029 pg80_supported = 1;
1031 if (vpd_buf[i] == 0x83) 1030 if (vpd_buf[i] == 0x83)
1032 pg83_supported = 1; 1031 pg83_supported = 1;
1033 } 1032 }
1034 kfree(vpd_buf); 1033 kfree(vpd_buf);
1035 vpd_len = SCSI_VPD_PG_LEN; 1034 vpd_len = SCSI_VPD_PG_LEN;
1036 1035
1037 if (pg80_supported) { 1036 if (pg80_supported) {
1038 retry_pg80: 1037 retry_pg80:
1039 vpd_buf = kmalloc(vpd_len, GFP_KERNEL); 1038 vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
1040 if (!vpd_buf) 1039 if (!vpd_buf)
1041 return; 1040 return;
1042 1041
1043 result = scsi_vpd_inquiry(sdev, vpd_buf, 0x80, vpd_len); 1042 result = scsi_vpd_inquiry(sdev, vpd_buf, 0x80, vpd_len);
1044 if (result < 0) { 1043 if (result < 0) {
1045 kfree(vpd_buf); 1044 kfree(vpd_buf);
1046 return; 1045 return;
1047 } 1046 }
1048 if (result > vpd_len) { 1047 if (result > vpd_len) {
1049 vpd_len = result; 1048 vpd_len = result;
1050 kfree(vpd_buf); 1049 kfree(vpd_buf);
1051 goto retry_pg80; 1050 goto retry_pg80;
1052 } 1051 }
1053 sdev->vpd_pg80_len = result; 1052 sdev->vpd_pg80_len = result;
1054 sdev->vpd_pg80 = vpd_buf; 1053 sdev->vpd_pg80 = vpd_buf;
1055 vpd_len = SCSI_VPD_PG_LEN; 1054 vpd_len = SCSI_VPD_PG_LEN;
1056 } 1055 }
1057 1056
1058 if (pg83_supported) { 1057 if (pg83_supported) {
1059 retry_pg83: 1058 retry_pg83:
1060 vpd_buf = kmalloc(vpd_len, GFP_KERNEL); 1059 vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
1061 if (!vpd_buf) 1060 if (!vpd_buf)
1062 return; 1061 return;
1063 1062
1064 result = scsi_vpd_inquiry(sdev, vpd_buf, 0x83, vpd_len); 1063 result = scsi_vpd_inquiry(sdev, vpd_buf, 0x83, vpd_len);
1065 if (result < 0) { 1064 if (result < 0) {
1066 kfree(vpd_buf); 1065 kfree(vpd_buf);
1067 return; 1066 return;
1068 } 1067 }
1069 if (result > vpd_len) { 1068 if (result > vpd_len) {
1070 vpd_len = result; 1069 vpd_len = result;
1071 kfree(vpd_buf); 1070 kfree(vpd_buf);
1072 goto retry_pg83; 1071 goto retry_pg83;
1073 } 1072 }
1074 sdev->vpd_pg83_len = result; 1073 sdev->vpd_pg83_len = result;
1075 sdev->vpd_pg83 = vpd_buf; 1074 sdev->vpd_pg83 = vpd_buf;
1076 } 1075 }
1077 } 1076 }
1078 1077
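The three retry_pg* loops above share one pattern: allocate SCSI_VPD_PG_LEN bytes, issue the inquiry, and if the device reports a larger page, reallocate with the reported size and try again. A hedged, stand-alone distillation of that pattern (the helper name is made up, and it assumes the same file scope since scsi_vpd_inquiry() is static):

/* Illustrative only: fetch a whole VPD page, growing the buffer as
 * needed. Mirrors the retry_pg80/retry_pg83 logic above. */
static unsigned char *example_fetch_vpd(struct scsi_device *sdev, u8 page,
					int *lenp)
{
	int len = SCSI_VPD_PG_LEN;
	unsigned char *buf;
	int result;

retry:
	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return NULL;
	result = scsi_vpd_inquiry(sdev, buf, page, len);
	if (result < 0) {
		kfree(buf);
		return NULL;
	}
	if (result > len) {
		/* device reports a bigger page than we allocated */
		len = result;
		kfree(buf);
		goto retry;
	}
	*lenp = result;
	return buf;
}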
1079 /** 1078 /**
1080 * scsi_report_opcode - Find out if a given command opcode is supported 1079 * scsi_report_opcode - Find out if a given command opcode is supported
1081 * @sdev: scsi device to query 1080 * @sdev: scsi device to query
1082 * @buffer: scratch buffer (must be at least 20 bytes long) 1081 * @buffer: scratch buffer (must be at least 20 bytes long)
1083 * @len: length of buffer 1082 * @len: length of buffer
1084 * @opcode: opcode for command to look up 1083 * @opcode: opcode for command to look up
1085 * 1084 *
1086 * Uses the REPORT SUPPORTED OPERATION CODES command to look up the given 1085 * Uses the REPORT SUPPORTED OPERATION CODES command to look up the given
1087 * opcode. Returns -EINVAL if RSOC fails, 0 if the command opcode is 1086 * opcode. Returns -EINVAL if RSOC fails, 0 if the command opcode is
1088 * unsupported and 1 if the device claims to support the command. 1087 * unsupported and 1 if the device claims to support the command.
1089 */ 1088 */
1090 int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer, 1089 int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
1091 unsigned int len, unsigned char opcode) 1090 unsigned int len, unsigned char opcode)
1092 { 1091 {
1093 unsigned char cmd[16]; 1092 unsigned char cmd[16];
1094 struct scsi_sense_hdr sshdr; 1093 struct scsi_sense_hdr sshdr;
1095 int result; 1094 int result;
1096 1095
1097 if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3) 1096 if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3)
1098 return -EINVAL; 1097 return -EINVAL;
1099 1098
1100 memset(cmd, 0, 16); 1099 memset(cmd, 0, 16);
1101 cmd[0] = MAINTENANCE_IN; 1100 cmd[0] = MAINTENANCE_IN;
1102 cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES; 1101 cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES;
1103 cmd[2] = 1; /* One command format */ 1102 cmd[2] = 1; /* One command format */
1104 cmd[3] = opcode; 1103 cmd[3] = opcode;
1105 put_unaligned_be32(len, &cmd[6]); 1104 put_unaligned_be32(len, &cmd[6]);
1106 memset(buffer, 0, len); 1105 memset(buffer, 0, len);
1107 1106
1108 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len, 1107 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
1109 &sshdr, 30 * HZ, 3, NULL); 1108 &sshdr, 30 * HZ, 3, NULL);
1110 1109
1111 if (result && scsi_sense_valid(&sshdr) && 1110 if (result && scsi_sense_valid(&sshdr) &&
1112 sshdr.sense_key == ILLEGAL_REQUEST && 1111 sshdr.sense_key == ILLEGAL_REQUEST &&
1113 (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00) 1112 (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00)
1114 return -EINVAL; 1113 return -EINVAL;
1115 1114
1116 if ((buffer[1] & 3) == 3) /* Command supported */ 1115 if ((buffer[1] & 3) == 3) /* Command supported */
1117 return 1; 1116 return 1;
1118 1117
1119 return 0; 1118 return 0;
1120 } 1119 }
1121 EXPORT_SYMBOL(scsi_report_opcode); 1120 EXPORT_SYMBOL(scsi_report_opcode);
1122 1121
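A hedged usage sketch for scsi_report_opcode(): only a return value of 1 means the device claims support, 0 means unsupported, and -EINVAL means RSOC itself is unavailable. The opcode and buffer size below are illustrative choices (0x93 is WRITE SAME(16) in SBC), not taken from this commit.

/* Sketch only: probe WRITE SAME(16) support. @scratch must be at
 * least 20 bytes; 512 gives comfortable headroom. */
static bool example_supports_write_same_16(struct scsi_device *sdev,
					   unsigned char *scratch)
{
	return scsi_report_opcode(sdev, scratch, 512, 0x93) == 1;
}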
1123 /** 1122 /**
1124 * scsi_device_get - get an additional reference to a scsi_device 1123 * scsi_device_get - get an additional reference to a scsi_device
1125 * @sdev: device to get a reference to 1124 * @sdev: device to get a reference to
1126 * 1125 *
1127 * Description: Gets a reference to the scsi_device and increments the use count 1126 * Description: Gets a reference to the scsi_device and increments the use count
1128 * of the underlying LLDD module. You must hold host_lock of the 1127 * of the underlying LLDD module. You must hold host_lock of the
1129 * parent Scsi_Host or already have a reference when calling this. 1128 * parent Scsi_Host or already have a reference when calling this.
1130 */ 1129 */
1131 int scsi_device_get(struct scsi_device *sdev) 1130 int scsi_device_get(struct scsi_device *sdev)
1132 { 1131 {
1133 if (sdev->sdev_state == SDEV_DEL) 1132 if (sdev->sdev_state == SDEV_DEL)
1134 return -ENXIO; 1133 return -ENXIO;
1135 if (!get_device(&sdev->sdev_gendev)) 1134 if (!get_device(&sdev->sdev_gendev))
1136 return -ENXIO; 1135 return -ENXIO;
1137 /* We can fail this if we're doing SCSI operations 1136 /* We can fail this if we're doing SCSI operations
1138 * from module exit (like cache flush) */ 1137 * from module exit (like cache flush) */
1139 try_module_get(sdev->host->hostt->module); 1138 try_module_get(sdev->host->hostt->module);
1140 1139
1141 return 0; 1140 return 0;
1142 } 1141 }
1143 EXPORT_SYMBOL(scsi_device_get); 1142 EXPORT_SYMBOL(scsi_device_get);
1144 1143
1145 /** 1144 /**
1146 * scsi_device_put - release a reference to a scsi_device 1145 * scsi_device_put - release a reference to a scsi_device
1147 * @sdev: device to release a reference on. 1146 * @sdev: device to release a reference on.
1148 * 1147 *
1149 * Description: Release a reference to the scsi_device and decrements the use 1148 * Description: Release a reference to the scsi_device and decrements the use
1150 * count of the underlying LLDD module. The device is freed once the last 1149 * count of the underlying LLDD module. The device is freed once the last
1151 * user vanishes. 1150 * user vanishes.
1152 */ 1151 */
1153 void scsi_device_put(struct scsi_device *sdev) 1152 void scsi_device_put(struct scsi_device *sdev)
1154 { 1153 {
1155 #ifdef CONFIG_MODULE_UNLOAD 1154 #ifdef CONFIG_MODULE_UNLOAD
1156 struct module *module = sdev->host->hostt->module; 1155 struct module *module = sdev->host->hostt->module;
1157 1156
1158 /* The module refcount will be zero if scsi_device_get() 1157 /* The module refcount will be zero if scsi_device_get()
1159 * was called from a module removal routine */ 1158 * was called from a module removal routine */
1160 if (module && module_refcount(module) != 0) 1159 if (module && module_refcount(module) != 0)
1161 module_put(module); 1160 module_put(module);
1162 #endif 1161 #endif
1163 put_device(&sdev->sdev_gendev); 1162 put_device(&sdev->sdev_gendev);
1164 } 1163 }
1165 EXPORT_SYMBOL(scsi_device_put); 1164 EXPORT_SYMBOL(scsi_device_put);
1166 1165
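A hedged sketch of the pairing implied by the two helpers above: take the reference while the device is known to be valid (for example under the parent host's host_lock), then drop it once the caller is done. The called helper is a placeholder, not a kernel function.

/* Sketch only: keep an sdev alive across work done outside host_lock. */
if (scsi_device_get(sdev) == 0) {
	/* sdev and its LLDD module are pinned here */
	example_do_work(sdev);		/* placeholder */
	scsi_device_put(sdev);
}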
1167 /* helper for shost_for_each_device, see that for documentation */ 1166 /* helper for shost_for_each_device, see that for documentation */
1168 struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost, 1167 struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
1169 struct scsi_device *prev) 1168 struct scsi_device *prev)
1170 { 1169 {
1171 struct list_head *list = (prev ? &prev->siblings : &shost->__devices); 1170 struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
1172 struct scsi_device *next = NULL; 1171 struct scsi_device *next = NULL;
1173 unsigned long flags; 1172 unsigned long flags;
1174 1173
1175 spin_lock_irqsave(shost->host_lock, flags); 1174 spin_lock_irqsave(shost->host_lock, flags);
1176 while (list->next != &shost->__devices) { 1175 while (list->next != &shost->__devices) {
1177 next = list_entry(list->next, struct scsi_device, siblings); 1176 next = list_entry(list->next, struct scsi_device, siblings);
1178 /* skip devices that we can't get a reference to */ 1177 /* skip devices that we can't get a reference to */
1179 if (!scsi_device_get(next)) 1178 if (!scsi_device_get(next))
1180 break; 1179 break;
1181 next = NULL; 1180 next = NULL;
1182 list = list->next; 1181 list = list->next;
1183 } 1182 }
1184 spin_unlock_irqrestore(shost->host_lock, flags); 1183 spin_unlock_irqrestore(shost->host_lock, flags);
1185 1184
1186 if (prev) 1185 if (prev)
1187 scsi_device_put(prev); 1186 scsi_device_put(prev);
1188 return next; 1187 return next;
1189 } 1188 }
1190 EXPORT_SYMBOL(__scsi_iterate_devices); 1189 EXPORT_SYMBOL(__scsi_iterate_devices);
1191 1190
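The helper above backs the shost_for_each_device() iterator used throughout the SCSI core. A hedged reminder of its reference semantics: each iteration hands the body a referenced sdev, the previous device's reference is dropped automatically, and only an early break leaves a reference for the caller to release.

/* Sketch only: walk every device on a host. On early break, the
 * current sdev still holds a reference and needs scsi_device_put(). */
struct scsi_device *sdev;

shost_for_each_device(sdev, shost) {
	if (sdev->sdev_state == SDEV_DEL)
		continue;
	/* use sdev; the iterator manages get/put across iterations */
}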
1192 /** 1191 /**
1193 * starget_for_each_device - helper to walk all devices of a target 1192 * starget_for_each_device - helper to walk all devices of a target
1194 * @starget: target whose devices we want to iterate over. 1193 * @starget: target whose devices we want to iterate over.
1195 * @data: Opaque passed to each function call. 1194 * @data: Opaque passed to each function call.
1196 * @fn: Function to call on each device 1195 * @fn: Function to call on each device
1197 * 1196 *
1198 * This traverses over each device of @starget. The devices have 1197 * This traverses over each device of @starget. The devices have
1199 * a reference that must be released by scsi_device_put when breaking 1198 * a reference that must be released by scsi_device_put when breaking
1200 * out of the loop. 1199 * out of the loop.
1201 */ 1200 */
1202 void starget_for_each_device(struct scsi_target *starget, void *data, 1201 void starget_for_each_device(struct scsi_target *starget, void *data,
1203 void (*fn)(struct scsi_device *, void *)) 1202 void (*fn)(struct scsi_device *, void *))
1204 { 1203 {
1205 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 1204 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1206 struct scsi_device *sdev; 1205 struct scsi_device *sdev;
1207 1206
1208 shost_for_each_device(sdev, shost) { 1207 shost_for_each_device(sdev, shost) {
1209 if ((sdev->channel == starget->channel) && 1208 if ((sdev->channel == starget->channel) &&
1210 (sdev->id == starget->id)) 1209 (sdev->id == starget->id))
1211 fn(sdev, data); 1210 fn(sdev, data);
1212 } 1211 }
1213 } 1212 }
1214 EXPORT_SYMBOL(starget_for_each_device); 1213 EXPORT_SYMBOL(starget_for_each_device);
1215 1214
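A hedged sketch of a callback suitable for starget_for_each_device(): the iterator supplies each matching, referenced sdev together with the opaque @data pointer. The counting example is made up.

/* Sketch only: count the LUNs behind a target. */
static void example_count_lun(struct scsi_device *sdev, void *data)
{
	(*(unsigned int *)data)++;
}

/* Caller side (illustrative):
 *	unsigned int count = 0;
 *	starget_for_each_device(starget, &count, example_count_lun);
 */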
1216 /** 1215 /**
1217 * __starget_for_each_device - helper to walk all devices of a target (UNLOCKED) 1216 * __starget_for_each_device - helper to walk all devices of a target (UNLOCKED)
1218 * @starget: target whose devices we want to iterate over. 1217 * @starget: target whose devices we want to iterate over.
1219 * @data: parameter for callback @fn() 1218 * @data: parameter for callback @fn()
1220 * @fn: callback function that is invoked for each device 1219 * @fn: callback function that is invoked for each device
1221 * 1220 *
1222 * This traverses over each device of @starget. It does _not_ 1221 * This traverses over each device of @starget. It does _not_
1223 * take a reference on the scsi_device, so the whole loop must be 1222 * take a reference on the scsi_device, so the whole loop must be
1224 * protected by shost->host_lock. 1223 * protected by shost->host_lock.
1225 * 1224 *
1226 * Note: The only reason why drivers would want to use this is because 1225 * Note: The only reason why drivers would want to use this is because
1227 * they need to access the device list in irq context. Otherwise you 1226 * they need to access the device list in irq context. Otherwise you
1228 * really want to use starget_for_each_device instead. 1227 * really want to use starget_for_each_device instead.
1229 **/ 1228 **/
1230 void __starget_for_each_device(struct scsi_target *starget, void *data, 1229 void __starget_for_each_device(struct scsi_target *starget, void *data,
1231 void (*fn)(struct scsi_device *, void *)) 1230 void (*fn)(struct scsi_device *, void *))
1232 { 1231 {
1233 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 1232 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1234 struct scsi_device *sdev; 1233 struct scsi_device *sdev;
1235 1234
1236 __shost_for_each_device(sdev, shost) { 1235 __shost_for_each_device(sdev, shost) {
1237 if ((sdev->channel == starget->channel) && 1236 if ((sdev->channel == starget->channel) &&
1238 (sdev->id == starget->id)) 1237 (sdev->id == starget->id))
1239 fn(sdev, data); 1238 fn(sdev, data);
1240 } 1239 }
1241 } 1240 }
1242 EXPORT_SYMBOL(__starget_for_each_device); 1241 EXPORT_SYMBOL(__starget_for_each_device);
1243 1242
1244 /** 1243 /**
1245 * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED) 1244 * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED)
1246 * @starget: SCSI target pointer 1245 * @starget: SCSI target pointer
1247 * @lun: SCSI Logical Unit Number 1246 * @lun: SCSI Logical Unit Number
1248 * 1247 *
1249 * Description: Looks up the scsi_device with the specified @lun for a given 1248 * Description: Looks up the scsi_device with the specified @lun for a given
1250 * @starget. The returned scsi_device does not have an additional 1249 * @starget. The returned scsi_device does not have an additional
1251 * reference. You must hold the host's host_lock over this call and 1250 * reference. You must hold the host's host_lock over this call and
1252 * any access to the returned scsi_device. A scsi_device in state 1251 * any access to the returned scsi_device. A scsi_device in state
1253 * SDEV_DEL is skipped. 1252 * SDEV_DEL is skipped.
1254 * 1253 *
1255 * Note: The only reason why drivers should use this is because 1254 * Note: The only reason why drivers should use this is because
1256 * they need to access the device list in irq context. Otherwise you 1255 * they need to access the device list in irq context. Otherwise you
1257 * really want to use scsi_device_lookup_by_target instead. 1256 * really want to use scsi_device_lookup_by_target instead.
1258 **/ 1257 **/
1259 struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget, 1258 struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
1260 u64 lun) 1259 u64 lun)
1261 { 1260 {
1262 struct scsi_device *sdev; 1261 struct scsi_device *sdev;
1263 1262
1264 list_for_each_entry(sdev, &starget->devices, same_target_siblings) { 1263 list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
1265 if (sdev->sdev_state == SDEV_DEL) 1264 if (sdev->sdev_state == SDEV_DEL)
1266 continue; 1265 continue;
1267 if (sdev->lun == lun) 1266 if (sdev->lun == lun)
1268 return sdev; 1267 return sdev;
1269 } 1268 }
1270 1269
1271 return NULL; 1270 return NULL;
1272 } 1271 }
1273 EXPORT_SYMBOL(__scsi_device_lookup_by_target); 1272 EXPORT_SYMBOL(__scsi_device_lookup_by_target);
1274 1273
1275 /** 1274 /**
1276 * scsi_device_lookup_by_target - find a device given the target 1275 * scsi_device_lookup_by_target - find a device given the target
1277 * @starget: SCSI target pointer 1276 * @starget: SCSI target pointer
1278 * @lun: SCSI Logical Unit Number 1277 * @lun: SCSI Logical Unit Number
1279 * 1278 *
1280 * Description: Looks up the scsi_device with the specified @lun for a given 1279 * Description: Looks up the scsi_device with the specified @lun for a given
1281 * @starget. The returned scsi_device has an additional reference that 1280 * @starget. The returned scsi_device has an additional reference that
1282 * needs to be released with scsi_device_put once you're done with it. 1281 * needs to be released with scsi_device_put once you're done with it.
1283 **/ 1282 **/
1284 struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget, 1283 struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
1285 u64 lun) 1284 u64 lun)
1286 { 1285 {
1287 struct scsi_device *sdev; 1286 struct scsi_device *sdev;
1288 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 1287 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1289 unsigned long flags; 1288 unsigned long flags;
1290 1289
1291 spin_lock_irqsave(shost->host_lock, flags); 1290 spin_lock_irqsave(shost->host_lock, flags);
1292 sdev = __scsi_device_lookup_by_target(starget, lun); 1291 sdev = __scsi_device_lookup_by_target(starget, lun);
1293 if (sdev && scsi_device_get(sdev)) 1292 if (sdev && scsi_device_get(sdev))
1294 sdev = NULL; 1293 sdev = NULL;
1295 spin_unlock_irqrestore(shost->host_lock, flags); 1294 spin_unlock_irqrestore(shost->host_lock, flags);
1296 1295
1297 return sdev; 1296 return sdev;
1298 } 1297 }
1299 EXPORT_SYMBOL(scsi_device_lookup_by_target); 1298 EXPORT_SYMBOL(scsi_device_lookup_by_target);
1300 1299
1301 /** 1300 /**
1302 * __scsi_device_lookup - find a device given the host (UNLOCKED) 1301 * __scsi_device_lookup - find a device given the host (UNLOCKED)
1303 * @shost: SCSI host pointer 1302 * @shost: SCSI host pointer
1304 * @channel: SCSI channel (zero if only one channel) 1303 * @channel: SCSI channel (zero if only one channel)
1305 * @id: SCSI target number (physical unit number) 1304 * @id: SCSI target number (physical unit number)
1306 * @lun: SCSI Logical Unit Number 1305 * @lun: SCSI Logical Unit Number
1307 * 1306 *
1308 * Description: Looks up the scsi_device with the specified @channel, @id, @lun 1307 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
1309 * for a given host. The returned scsi_device does not have an additional 1308 * for a given host. The returned scsi_device does not have an additional
1310 * reference. You must hold the host's host_lock over this call and any access 1309 * reference. You must hold the host's host_lock over this call and any access
1311 * to the returned scsi_device. 1310 * to the returned scsi_device.
1312 * 1311 *
1313 * Note: The only reason why drivers would want to use this is because 1312 * Note: The only reason why drivers would want to use this is because
1314 * they need to access the device list in irq context. Otherwise you 1313 * they need to access the device list in irq context. Otherwise you
1315 * really want to use scsi_device_lookup instead. 1314 * really want to use scsi_device_lookup instead.
1316 **/ 1315 **/
1317 struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost, 1316 struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
1318 uint channel, uint id, u64 lun) 1317 uint channel, uint id, u64 lun)
1319 { 1318 {
1320 struct scsi_device *sdev; 1319 struct scsi_device *sdev;
1321 1320
1322 list_for_each_entry(sdev, &shost->__devices, siblings) { 1321 list_for_each_entry(sdev, &shost->__devices, siblings) {
1323 if (sdev->channel == channel && sdev->id == id && 1322 if (sdev->channel == channel && sdev->id == id &&
1324 sdev->lun == lun) 1323 sdev->lun == lun)
1325 return sdev; 1324 return sdev;
1326 } 1325 }
1327 1326
1328 return NULL; 1327 return NULL;
1329 } 1328 }
1330 EXPORT_SYMBOL(__scsi_device_lookup); 1329 EXPORT_SYMBOL(__scsi_device_lookup);
1331 1330
1332 /** 1331 /**
1333 * scsi_device_lookup - find a device given the host 1332 * scsi_device_lookup - find a device given the host
1334 * @shost: SCSI host pointer 1333 * @shost: SCSI host pointer
1335 * @channel: SCSI channel (zero if only one channel) 1334 * @channel: SCSI channel (zero if only one channel)
1336 * @id: SCSI target number (physical unit number) 1335 * @id: SCSI target number (physical unit number)
1337 * @lun: SCSI Logical Unit Number 1336 * @lun: SCSI Logical Unit Number
1338 * 1337 *
1339 * Description: Looks up the scsi_device with the specified @channel, @id, @lun 1338 * Description: Looks up the scsi_device with the specified @channel, @id, @lun
1340 * for a given host. The returned scsi_device has an additional reference that 1339 * for a given host. The returned scsi_device has an additional reference that
1341 * needs to be released with scsi_device_put once you're done with it. 1340 * needs to be released with scsi_device_put once you're done with it.
1342 **/ 1341 **/
1343 struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost, 1342 struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
1344 uint channel, uint id, u64 lun) 1343 uint channel, uint id, u64 lun)
1345 { 1344 {
1346 struct scsi_device *sdev; 1345 struct scsi_device *sdev;
1347 unsigned long flags; 1346 unsigned long flags;
1348 1347
1349 spin_lock_irqsave(shost->host_lock, flags); 1348 spin_lock_irqsave(shost->host_lock, flags);
1350 sdev = __scsi_device_lookup(shost, channel, id, lun); 1349 sdev = __scsi_device_lookup(shost, channel, id, lun);
1351 if (sdev && scsi_device_get(sdev)) 1350 if (sdev && scsi_device_get(sdev))
1352 sdev = NULL; 1351 sdev = NULL;
1353 spin_unlock_irqrestore(shost->host_lock, flags); 1352 spin_unlock_irqrestore(shost->host_lock, flags);
1354 1353
1355 return sdev; 1354 return sdev;
1356 } 1355 }
1357 EXPORT_SYMBOL(scsi_device_lookup); 1356 EXPORT_SYMBOL(scsi_device_lookup);
1358 1357
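A hedged usage sketch for the locked lookup above: the returned device, if any, carries an extra reference that must be dropped with scsi_device_put(). The channel/id/lun values are placeholders.

/* Sketch only: look up channel 0, target 1, LUN 0 on @shost. */
struct scsi_device *sdev = scsi_device_lookup(shost, 0, 1, 0);

if (sdev) {
	sdev_printk(KERN_INFO, sdev, "device found\n");
	scsi_device_put(sdev);
}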
1359 MODULE_DESCRIPTION("SCSI core"); 1358 MODULE_DESCRIPTION("SCSI core");
1360 MODULE_LICENSE("GPL"); 1359 MODULE_LICENSE("GPL");
1361 1360
1362 module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR); 1361 module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
1363 MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels"); 1362 MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
1364 1363
1365 static int __init init_scsi(void) 1364 static int __init init_scsi(void)
1366 { 1365 {
1367 int error; 1366 int error;
1368 1367
1369 error = scsi_init_queue(); 1368 error = scsi_init_queue();
1370 if (error) 1369 if (error)
1371 return error; 1370 return error;
1372 error = scsi_init_procfs(); 1371 error = scsi_init_procfs();
1373 if (error) 1372 if (error)
1374 goto cleanup_queue; 1373 goto cleanup_queue;
1375 error = scsi_init_devinfo(); 1374 error = scsi_init_devinfo();
1376 if (error) 1375 if (error)
1377 goto cleanup_procfs; 1376 goto cleanup_procfs;
1378 error = scsi_init_hosts(); 1377 error = scsi_init_hosts();
1379 if (error) 1378 if (error)
1380 goto cleanup_devlist; 1379 goto cleanup_devlist;
1381 error = scsi_init_sysctl(); 1380 error = scsi_init_sysctl();
1382 if (error) 1381 if (error)
1383 goto cleanup_hosts; 1382 goto cleanup_hosts;
1384 error = scsi_sysfs_register(); 1383 error = scsi_sysfs_register();
1385 if (error) 1384 if (error)
1386 goto cleanup_sysctl; 1385 goto cleanup_sysctl;
1387 1386
1388 scsi_netlink_init(); 1387 scsi_netlink_init();
1389 1388
1390 printk(KERN_NOTICE "SCSI subsystem initialized\n"); 1389 printk(KERN_NOTICE "SCSI subsystem initialized\n");
1391 return 0; 1390 return 0;
1392 1391
1393 cleanup_sysctl: 1392 cleanup_sysctl:
1394 scsi_exit_sysctl(); 1393 scsi_exit_sysctl();
1395 cleanup_hosts: 1394 cleanup_hosts:
1396 scsi_exit_hosts(); 1395 scsi_exit_hosts();
1397 cleanup_devlist: 1396 cleanup_devlist:
1398 scsi_exit_devinfo(); 1397 scsi_exit_devinfo();
1399 cleanup_procfs: 1398 cleanup_procfs:
1400 scsi_exit_procfs(); 1399 scsi_exit_procfs();
1401 cleanup_queue: 1400 cleanup_queue:
1402 scsi_exit_queue(); 1401 scsi_exit_queue();
1403 printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n", 1402 printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
1404 -error); 1403 -error);
1405 return error; 1404 return error;
1406 } 1405 }
1407 1406
1408 static void __exit exit_scsi(void) 1407 static void __exit exit_scsi(void)
1409 { 1408 {
1410 scsi_netlink_exit(); 1409 scsi_netlink_exit();
1411 scsi_sysfs_unregister(); 1410 scsi_sysfs_unregister();
1412 scsi_exit_sysctl(); 1411 scsi_exit_sysctl();
1413 scsi_exit_hosts(); 1412 scsi_exit_hosts();
1414 scsi_exit_devinfo(); 1413 scsi_exit_devinfo();
1415 scsi_exit_procfs(); 1414 scsi_exit_procfs();
1416 scsi_exit_queue(); 1415 scsi_exit_queue();
1417 async_unregister_domain(&scsi_sd_probe_domain); 1416 async_unregister_domain(&scsi_sd_probe_domain);
1418 } 1417 }
1419 1418
1420 subsys_initcall(init_scsi); 1419 subsys_initcall(init_scsi);
1421 module_exit(exit_scsi); 1420 module_exit(exit_scsi);
1422 1421
drivers/scsi/scsi_lib.c
1 /* 1 /*
2 * scsi_lib.c Copyright (C) 1999 Eric Youngdale 2 * scsi_lib.c Copyright (C) 1999 Eric Youngdale
3 * 3 *
4 * SCSI queueing library. 4 * SCSI queueing library.
5 * Initial versions: Eric Youngdale (eric@andante.org). 5 * Initial versions: Eric Youngdale (eric@andante.org).
6 * Based upon conversations with large numbers 6 * Based upon conversations with large numbers
7 * of people at Linux Expo. 7 * of people at Linux Expo.
8 */ 8 */
9 9
10 #include <linux/bio.h> 10 #include <linux/bio.h>
11 #include <linux/bitops.h> 11 #include <linux/bitops.h>
12 #include <linux/blkdev.h> 12 #include <linux/blkdev.h>
13 #include <linux/completion.h> 13 #include <linux/completion.h>
14 #include <linux/kernel.h> 14 #include <linux/kernel.h>
15 #include <linux/export.h> 15 #include <linux/export.h>
16 #include <linux/mempool.h> 16 #include <linux/mempool.h>
17 #include <linux/slab.h> 17 #include <linux/slab.h>
18 #include <linux/init.h> 18 #include <linux/init.h>
19 #include <linux/pci.h> 19 #include <linux/pci.h>
20 #include <linux/delay.h> 20 #include <linux/delay.h>
21 #include <linux/hardirq.h> 21 #include <linux/hardirq.h>
22 #include <linux/scatterlist.h> 22 #include <linux/scatterlist.h>
23 23
24 #include <scsi/scsi.h> 24 #include <scsi/scsi.h>
25 #include <scsi/scsi_cmnd.h> 25 #include <scsi/scsi_cmnd.h>
26 #include <scsi/scsi_dbg.h> 26 #include <scsi/scsi_dbg.h>
27 #include <scsi/scsi_device.h> 27 #include <scsi/scsi_device.h>
28 #include <scsi/scsi_driver.h> 28 #include <scsi/scsi_driver.h>
29 #include <scsi/scsi_eh.h> 29 #include <scsi/scsi_eh.h>
30 #include <scsi/scsi_host.h> 30 #include <scsi/scsi_host.h>
31 31
32 #include <trace/events/scsi.h> 32 #include <trace/events/scsi.h>
33 33
34 #include "scsi_priv.h" 34 #include "scsi_priv.h"
35 #include "scsi_logging.h" 35 #include "scsi_logging.h"
36 36
37 37
38 #define SG_MEMPOOL_NR ARRAY_SIZE(scsi_sg_pools) 38 #define SG_MEMPOOL_NR ARRAY_SIZE(scsi_sg_pools)
39 #define SG_MEMPOOL_SIZE 2 39 #define SG_MEMPOOL_SIZE 2
40 40
41 struct scsi_host_sg_pool { 41 struct scsi_host_sg_pool {
42 size_t size; 42 size_t size;
43 char *name; 43 char *name;
44 struct kmem_cache *slab; 44 struct kmem_cache *slab;
45 mempool_t *pool; 45 mempool_t *pool;
46 }; 46 };
47 47
48 #define SP(x) { x, "sgpool-" __stringify(x) } 48 #define SP(x) { x, "sgpool-" __stringify(x) }
49 #if (SCSI_MAX_SG_SEGMENTS < 32) 49 #if (SCSI_MAX_SG_SEGMENTS < 32)
50 #error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater) 50 #error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
51 #endif 51 #endif
52 static struct scsi_host_sg_pool scsi_sg_pools[] = { 52 static struct scsi_host_sg_pool scsi_sg_pools[] = {
53 SP(8), 53 SP(8),
54 SP(16), 54 SP(16),
55 #if (SCSI_MAX_SG_SEGMENTS > 32) 55 #if (SCSI_MAX_SG_SEGMENTS > 32)
56 SP(32), 56 SP(32),
57 #if (SCSI_MAX_SG_SEGMENTS > 64) 57 #if (SCSI_MAX_SG_SEGMENTS > 64)
58 SP(64), 58 SP(64),
59 #if (SCSI_MAX_SG_SEGMENTS > 128) 59 #if (SCSI_MAX_SG_SEGMENTS > 128)
60 SP(128), 60 SP(128),
61 #if (SCSI_MAX_SG_SEGMENTS > 256) 61 #if (SCSI_MAX_SG_SEGMENTS > 256)
62 #error SCSI_MAX_SG_SEGMENTS is too large (256 MAX) 62 #error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
63 #endif 63 #endif
64 #endif 64 #endif
65 #endif 65 #endif
66 #endif 66 #endif
67 SP(SCSI_MAX_SG_SEGMENTS) 67 SP(SCSI_MAX_SG_SEGMENTS)
68 }; 68 };
69 #undef SP 69 #undef SP
70 70
71 struct kmem_cache *scsi_sdb_cache; 71 struct kmem_cache *scsi_sdb_cache;
72 72
73 /* 73 /*
74 * When to reinvoke queueing after a resource shortage. It's 3 msecs to 74 * When to reinvoke queueing after a resource shortage. It's 3 msecs to
75 * not change behaviour from the previous unplug mechanism, experimentation 75 * not change behaviour from the previous unplug mechanism, experimentation
76 * may prove this needs changing. 76 * may prove this needs changing.
77 */ 77 */
78 #define SCSI_QUEUE_DELAY 3 78 #define SCSI_QUEUE_DELAY 3
79 79
80 static void 80 static void
81 scsi_set_blocked(struct scsi_cmnd *cmd, int reason) 81 scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
82 { 82 {
83 struct Scsi_Host *host = cmd->device->host; 83 struct Scsi_Host *host = cmd->device->host;
84 struct scsi_device *device = cmd->device; 84 struct scsi_device *device = cmd->device;
85 struct scsi_target *starget = scsi_target(device); 85 struct scsi_target *starget = scsi_target(device);
86 86
87 /* 87 /*
88 * Set the appropriate busy bit for the device/host. 88 * Set the appropriate busy bit for the device/host.
89 * 89 *
90 * If the host/device isn't busy, assume that something actually 90 * If the host/device isn't busy, assume that something actually
91 * completed, and that we should be able to queue a command now. 91 * completed, and that we should be able to queue a command now.
92 * 92 *
93 * Note that the prior mid-layer assumption that any host could 93 * Note that the prior mid-layer assumption that any host could
94 * always queue at least one command is now broken. The mid-layer 94 * always queue at least one command is now broken. The mid-layer
95 * will implement a user specifiable stall (see 95 * will implement a user specifiable stall (see
96 * scsi_host.max_host_blocked and scsi_device.max_device_blocked) 96 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
97 * if a command is requeued with no other commands outstanding 97 * if a command is requeued with no other commands outstanding
98 * either for the device or for the host. 98 * either for the device or for the host.
99 */ 99 */
100 switch (reason) { 100 switch (reason) {
101 case SCSI_MLQUEUE_HOST_BUSY: 101 case SCSI_MLQUEUE_HOST_BUSY:
102 host->host_blocked = host->max_host_blocked; 102 atomic_set(&host->host_blocked, host->max_host_blocked);
103 break; 103 break;
104 case SCSI_MLQUEUE_DEVICE_BUSY: 104 case SCSI_MLQUEUE_DEVICE_BUSY:
105 case SCSI_MLQUEUE_EH_RETRY: 105 case SCSI_MLQUEUE_EH_RETRY:
106 device->device_blocked = device->max_device_blocked; 106 atomic_set(&device->device_blocked,
107 device->max_device_blocked);
107 break; 108 break;
108 case SCSI_MLQUEUE_TARGET_BUSY: 109 case SCSI_MLQUEUE_TARGET_BUSY:
109 starget->target_blocked = starget->max_target_blocked; 110 atomic_set(&starget->target_blocked,
111 starget->max_target_blocked);
110 break; 112 break;
111 } 113 }
112 } 114 }
113 115
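The atomic_set() calls above are the producer side of the reworked _blocked counters; the consumer side appears in the *_is_busy() helpers further down, which treat only strictly positive values as blocked. A hedged sketch of one plausible count-down step on a dispatch path, not necessarily the exact code used elsewhere in this series:

/* Illustrative only: one plausible way a dispatch path could consume
 * the blocked window. Transiently negative values are tolerated; only
 * strictly positive counts mean "still blocked". */
if (atomic_read(&sdev->device_blocked) > 0) {
	if (atomic_dec_return(&sdev->device_blocked) > 0)
		return false;	/* still blocked, do not dispatch yet */
}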
114 /** 116 /**
115 * __scsi_queue_insert - private queue insertion 117 * __scsi_queue_insert - private queue insertion
116 * @cmd: The SCSI command being requeued 118 * @cmd: The SCSI command being requeued
117 * @reason: The reason for the requeue 119 * @reason: The reason for the requeue
118 * @unbusy: Whether the queue should be unbusied 120 * @unbusy: Whether the queue should be unbusied
119 * 121 *
120 * This is a private queue insertion. The public interface 122 * This is a private queue insertion. The public interface
121 * scsi_queue_insert() always assumes the queue should be unbusied 123 * scsi_queue_insert() always assumes the queue should be unbusied
122 * because it's always called before the completion. This function is 124 * because it's always called before the completion. This function is
123 * for a requeue after completion, which should only occur in this 125 * for a requeue after completion, which should only occur in this
124 * file. 126 * file.
125 */ 127 */
126 static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) 128 static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
127 { 129 {
128 struct scsi_device *device = cmd->device; 130 struct scsi_device *device = cmd->device;
129 struct request_queue *q = device->request_queue; 131 struct request_queue *q = device->request_queue;
130 unsigned long flags; 132 unsigned long flags;
131 133
132 SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd, 134 SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
133 "Inserting command %p into mlqueue\n", cmd)); 135 "Inserting command %p into mlqueue\n", cmd));
134 136
135 scsi_set_blocked(cmd, reason); 137 scsi_set_blocked(cmd, reason);
136 138
137 /* 139 /*
138 * Decrement the counters, since these commands are no longer 140 * Decrement the counters, since these commands are no longer
139 * active on the host/device. 141 * active on the host/device.
140 */ 142 */
141 if (unbusy) 143 if (unbusy)
142 scsi_device_unbusy(device); 144 scsi_device_unbusy(device);
143 145
144 /* 146 /*
145 * Requeue this command. It will go before all other commands 147 * Requeue this command. It will go before all other commands
146 * that are already in the queue. Schedule requeue work under 148 * that are already in the queue. Schedule requeue work under
147 * lock such that the kblockd_schedule_work() call happens 149 * lock such that the kblockd_schedule_work() call happens
148 * before blk_cleanup_queue() finishes. 150 * before blk_cleanup_queue() finishes.
149 */ 151 */
150 cmd->result = 0; 152 cmd->result = 0;
151 spin_lock_irqsave(q->queue_lock, flags); 153 spin_lock_irqsave(q->queue_lock, flags);
152 blk_requeue_request(q, cmd->request); 154 blk_requeue_request(q, cmd->request);
153 kblockd_schedule_work(&device->requeue_work); 155 kblockd_schedule_work(&device->requeue_work);
154 spin_unlock_irqrestore(q->queue_lock, flags); 156 spin_unlock_irqrestore(q->queue_lock, flags);
155 } 157 }
156 158
157 /* 159 /*
158 * Function: scsi_queue_insert() 160 * Function: scsi_queue_insert()
159 * 161 *
160 * Purpose: Insert a command in the midlevel queue. 162 * Purpose: Insert a command in the midlevel queue.
161 * 163 *
162 * Arguments: cmd - command that we are adding to queue. 164 * Arguments: cmd - command that we are adding to queue.
163 * reason - why we are inserting command to queue. 165 * reason - why we are inserting command to queue.
164 * 166 *
165 * Lock status: Assumed that lock is not held upon entry. 167 * Lock status: Assumed that lock is not held upon entry.
166 * 168 *
167 * Returns: Nothing. 169 * Returns: Nothing.
168 * 170 *
169 * Notes: We do this for one of two cases. Either the host is busy 171 * Notes: We do this for one of two cases. Either the host is busy
170 * and it cannot accept any more commands for the time being, 172 * and it cannot accept any more commands for the time being,
171 * or the device returned QUEUE_FULL and can accept no more 173 * or the device returned QUEUE_FULL and can accept no more
172 * commands. 174 * commands.
173 * Notes: This could be called either from an interrupt context or a 175 * Notes: This could be called either from an interrupt context or a
174 * normal process context. 176 * normal process context.
175 */ 177 */
176 void scsi_queue_insert(struct scsi_cmnd *cmd, int reason) 178 void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
177 { 179 {
178 __scsi_queue_insert(cmd, reason, 1); 180 __scsi_queue_insert(cmd, reason, 1);
179 } 181 }
180 /** 182 /**
181 * scsi_execute - insert request and wait for the result 183 * scsi_execute - insert request and wait for the result
182 * @sdev: scsi device 184 * @sdev: scsi device
183 * @cmd: scsi command 185 * @cmd: scsi command
184 * @data_direction: data direction 186 * @data_direction: data direction
185 * @buffer: data buffer 187 * @buffer: data buffer
186 * @bufflen: len of buffer 188 * @bufflen: len of buffer
187 * @sense: optional sense buffer 189 * @sense: optional sense buffer
188 * @timeout: request timeout in jiffies 190 * @timeout: request timeout in jiffies
189 * @retries: number of times to retry request 191 * @retries: number of times to retry request
190 * @flags: flags to be OR'd into the request's cmd_flags 192 * @flags: flags to be OR'd into the request's cmd_flags
191 * @resid: optional residual length 193 * @resid: optional residual length
192 * 194 *
193 * returns the req->errors value which is the scsi_cmnd result 195 * returns the req->errors value which is the scsi_cmnd result
194 * field. 196 * field.
195 */ 197 */
196 int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, 198 int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
197 int data_direction, void *buffer, unsigned bufflen, 199 int data_direction, void *buffer, unsigned bufflen,
198 unsigned char *sense, int timeout, int retries, u64 flags, 200 unsigned char *sense, int timeout, int retries, u64 flags,
199 int *resid) 201 int *resid)
200 { 202 {
201 struct request *req; 203 struct request *req;
202 int write = (data_direction == DMA_TO_DEVICE); 204 int write = (data_direction == DMA_TO_DEVICE);
203 int ret = DRIVER_ERROR << 24; 205 int ret = DRIVER_ERROR << 24;
204 206
205 req = blk_get_request(sdev->request_queue, write, __GFP_WAIT); 207 req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
206 if (!req) 208 if (!req)
207 return ret; 209 return ret;
208 blk_rq_set_block_pc(req); 210 blk_rq_set_block_pc(req);
209 211
210 if (bufflen && blk_rq_map_kern(sdev->request_queue, req, 212 if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
211 buffer, bufflen, __GFP_WAIT)) 213 buffer, bufflen, __GFP_WAIT))
212 goto out; 214 goto out;
213 215
214 req->cmd_len = COMMAND_SIZE(cmd[0]); 216 req->cmd_len = COMMAND_SIZE(cmd[0]);
215 memcpy(req->cmd, cmd, req->cmd_len); 217 memcpy(req->cmd, cmd, req->cmd_len);
216 req->sense = sense; 218 req->sense = sense;
217 req->sense_len = 0; 219 req->sense_len = 0;
218 req->retries = retries; 220 req->retries = retries;
219 req->timeout = timeout; 221 req->timeout = timeout;
220 req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT; 222 req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;
221 223
222 /* 224 /*
223 * head injection *required* here otherwise quiesce won't work 225 * head injection *required* here otherwise quiesce won't work
224 */ 226 */
225 blk_execute_rq(req->q, NULL, req, 1); 227 blk_execute_rq(req->q, NULL, req, 1);
226 228
227 /* 229 /*
228 * Some devices (USB mass-storage in particular) may transfer 230 * Some devices (USB mass-storage in particular) may transfer
229 * garbage data together with a residue indicating that the data 231 * garbage data together with a residue indicating that the data
230 * is invalid. Prevent the garbage from being misinterpreted 232 * is invalid. Prevent the garbage from being misinterpreted
231 * and prevent security leaks by zeroing out the excess data. 233 * and prevent security leaks by zeroing out the excess data.
232 */ 234 */
233 if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen)) 235 if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
234 memset(buffer + (bufflen - req->resid_len), 0, req->resid_len); 236 memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
235 237
236 if (resid) 238 if (resid)
237 *resid = req->resid_len; 239 *resid = req->resid_len;
238 ret = req->errors; 240 ret = req->errors;
239 out: 241 out:
240 blk_put_request(req); 242 blk_put_request(req);
241 243
242 return ret; 244 return ret;
243 } 245 }
244 EXPORT_SYMBOL(scsi_execute); 246 EXPORT_SYMBOL(scsi_execute);
245 247
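A hedged usage sketch for scsi_execute(): a simple command with no data phase. The timeout, retry count, and omission of a sense buffer are placeholder choices, not taken from this commit.

/* Sketch only: issue TEST UNIT READY and return the raw result. */
static int example_test_unit_ready(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { TEST_UNIT_READY };

	return scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL,
			    10 * HZ, 1, 0, NULL);
}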
246 int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd, 248 int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
247 int data_direction, void *buffer, unsigned bufflen, 249 int data_direction, void *buffer, unsigned bufflen,
248 struct scsi_sense_hdr *sshdr, int timeout, int retries, 250 struct scsi_sense_hdr *sshdr, int timeout, int retries,
249 int *resid, u64 flags) 251 int *resid, u64 flags)
250 { 252 {
251 char *sense = NULL; 253 char *sense = NULL;
252 int result; 254 int result;
253 255
254 if (sshdr) { 256 if (sshdr) {
255 sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO); 257 sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
256 if (!sense) 258 if (!sense)
257 return DRIVER_ERROR << 24; 259 return DRIVER_ERROR << 24;
258 } 260 }
259 result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen, 261 result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
260 sense, timeout, retries, flags, resid); 262 sense, timeout, retries, flags, resid);
261 if (sshdr) 263 if (sshdr)
262 scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr); 264 scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
263 265
264 kfree(sense); 266 kfree(sense);
265 return result; 267 return result;
266 } 268 }
267 EXPORT_SYMBOL(scsi_execute_req_flags); 269 EXPORT_SYMBOL(scsi_execute_req_flags);
268 270
269 /* 271 /*
270 * Function: scsi_init_cmd_errh() 272 * Function: scsi_init_cmd_errh()
271 * 273 *
272 * Purpose: Initialize cmd fields related to error handling. 274 * Purpose: Initialize cmd fields related to error handling.
273 * 275 *
274 * Arguments: cmd - command that is ready to be queued. 276 * Arguments: cmd - command that is ready to be queued.
275 * 277 *
276 * Notes: This function has the job of initializing a number of 278 * Notes: This function has the job of initializing a number of
277 * fields related to error handling. Typically this will 279 * fields related to error handling. Typically this will
278 * be called once for each command, as required. 280 * be called once for each command, as required.
279 */ 281 */
280 static void scsi_init_cmd_errh(struct scsi_cmnd *cmd) 282 static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
281 { 283 {
282 cmd->serial_number = 0; 284 cmd->serial_number = 0;
283 scsi_set_resid(cmd, 0); 285 scsi_set_resid(cmd, 0);
284 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 286 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
285 if (cmd->cmd_len == 0) 287 if (cmd->cmd_len == 0)
286 cmd->cmd_len = scsi_command_size(cmd->cmnd); 288 cmd->cmd_len = scsi_command_size(cmd->cmnd);
287 } 289 }
288 290
289 void scsi_device_unbusy(struct scsi_device *sdev) 291 void scsi_device_unbusy(struct scsi_device *sdev)
290 { 292 {
291 struct Scsi_Host *shost = sdev->host; 293 struct Scsi_Host *shost = sdev->host;
292 struct scsi_target *starget = scsi_target(sdev); 294 struct scsi_target *starget = scsi_target(sdev);
293 unsigned long flags; 295 unsigned long flags;
294 296
295 atomic_dec(&shost->host_busy); 297 atomic_dec(&shost->host_busy);
296 atomic_dec(&starget->target_busy); 298 atomic_dec(&starget->target_busy);
297 299
298 if (unlikely(scsi_host_in_recovery(shost) && 300 if (unlikely(scsi_host_in_recovery(shost) &&
299 (shost->host_failed || shost->host_eh_scheduled))) { 301 (shost->host_failed || shost->host_eh_scheduled))) {
300 spin_lock_irqsave(shost->host_lock, flags); 302 spin_lock_irqsave(shost->host_lock, flags);
301 scsi_eh_wakeup(shost); 303 scsi_eh_wakeup(shost);
302 spin_unlock_irqrestore(shost->host_lock, flags); 304 spin_unlock_irqrestore(shost->host_lock, flags);
303 } 305 }
304 306
305 atomic_dec(&sdev->device_busy); 307 atomic_dec(&sdev->device_busy);
306 } 308 }
307 309
308 /* 310 /*
309 * Called for single_lun devices on IO completion. Clear starget_sdev_user, 311 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
310 * and call blk_run_queue for all the scsi_devices on the target - 312 * and call blk_run_queue for all the scsi_devices on the target -
311 * including current_sdev first. 313 * including current_sdev first.
312 * 314 *
313 * Called with *no* scsi locks held. 315 * Called with *no* scsi locks held.
314 */ 316 */
315 static void scsi_single_lun_run(struct scsi_device *current_sdev) 317 static void scsi_single_lun_run(struct scsi_device *current_sdev)
316 { 318 {
317 struct Scsi_Host *shost = current_sdev->host; 319 struct Scsi_Host *shost = current_sdev->host;
318 struct scsi_device *sdev, *tmp; 320 struct scsi_device *sdev, *tmp;
319 struct scsi_target *starget = scsi_target(current_sdev); 321 struct scsi_target *starget = scsi_target(current_sdev);
320 unsigned long flags; 322 unsigned long flags;
321 323
322 spin_lock_irqsave(shost->host_lock, flags); 324 spin_lock_irqsave(shost->host_lock, flags);
323 starget->starget_sdev_user = NULL; 325 starget->starget_sdev_user = NULL;
324 spin_unlock_irqrestore(shost->host_lock, flags); 326 spin_unlock_irqrestore(shost->host_lock, flags);
325 327
326 /* 328 /*
327 * Call blk_run_queue for all LUNs on the target, starting with 329 * Call blk_run_queue for all LUNs on the target, starting with
328 * current_sdev. We race with others (to set starget_sdev_user), 330 * current_sdev. We race with others (to set starget_sdev_user),
329 * but in most cases, we will be first. Ideally, each LU on the 331 * but in most cases, we will be first. Ideally, each LU on the
330 * target would get some limited time or requests on the target. 332 * target would get some limited time or requests on the target.
331 */ 333 */
332 blk_run_queue(current_sdev->request_queue); 334 blk_run_queue(current_sdev->request_queue);
333 335
334 spin_lock_irqsave(shost->host_lock, flags); 336 spin_lock_irqsave(shost->host_lock, flags);
335 if (starget->starget_sdev_user) 337 if (starget->starget_sdev_user)
336 goto out; 338 goto out;
337 list_for_each_entry_safe(sdev, tmp, &starget->devices, 339 list_for_each_entry_safe(sdev, tmp, &starget->devices,
338 same_target_siblings) { 340 same_target_siblings) {
339 if (sdev == current_sdev) 341 if (sdev == current_sdev)
340 continue; 342 continue;
341 if (scsi_device_get(sdev)) 343 if (scsi_device_get(sdev))
342 continue; 344 continue;
343 345
344 spin_unlock_irqrestore(shost->host_lock, flags); 346 spin_unlock_irqrestore(shost->host_lock, flags);
345 blk_run_queue(sdev->request_queue); 347 blk_run_queue(sdev->request_queue);
346 spin_lock_irqsave(shost->host_lock, flags); 348 spin_lock_irqsave(shost->host_lock, flags);
347 349
348 scsi_device_put(sdev); 350 scsi_device_put(sdev);
349 } 351 }
350 out: 352 out:
351 spin_unlock_irqrestore(shost->host_lock, flags); 353 spin_unlock_irqrestore(shost->host_lock, flags);
352 } 354 }
353 355
354 static inline int scsi_device_is_busy(struct scsi_device *sdev) 356 static inline bool scsi_device_is_busy(struct scsi_device *sdev)
355 { 357 {
356 if (atomic_read(&sdev->device_busy) >= sdev->queue_depth || 358 if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
357 sdev->device_blocked) 359 return true;
358 return 1; 360 if (atomic_read(&sdev->device_blocked) > 0)
359 return 0; 361 return true;
362 return false;
360 } 363 }
361 364
362 static inline int scsi_target_is_busy(struct scsi_target *starget) 365 static inline bool scsi_target_is_busy(struct scsi_target *starget)
363 { 366 {
364 return ((starget->can_queue > 0 && 367 if (starget->can_queue > 0 &&
365 atomic_read(&starget->target_busy) >= starget->can_queue) || 368 atomic_read(&starget->target_busy) >= starget->can_queue)
366 starget->target_blocked); 369 return true;
370 if (atomic_read(&starget->target_blocked) > 0)
371 return true;
372 return false;
367 } 373 }
368 374
369 static inline int scsi_host_is_busy(struct Scsi_Host *shost) 375 static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
370 { 376 {
371 if ((shost->can_queue > 0 && 377 if (shost->can_queue > 0 &&
372 atomic_read(&shost->host_busy) >= shost->can_queue) || 378 atomic_read(&shost->host_busy) >= shost->can_queue)
373 shost->host_blocked || shost->host_self_blocked) 379 return true;
374 return 1; 380 if (atomic_read(&shost->host_blocked) > 0)
375 381 return true;
376 return 0; 382 if (shost->host_self_blocked)
383 return true;
384 return false;
377 } 385 }
378 386
379 static void scsi_starved_list_run(struct Scsi_Host *shost) 387 static void scsi_starved_list_run(struct Scsi_Host *shost)
380 { 388 {
381 LIST_HEAD(starved_list); 389 LIST_HEAD(starved_list);
382 struct scsi_device *sdev; 390 struct scsi_device *sdev;
383 unsigned long flags; 391 unsigned long flags;
384 392
385 spin_lock_irqsave(shost->host_lock, flags); 393 spin_lock_irqsave(shost->host_lock, flags);
386 list_splice_init(&shost->starved_list, &starved_list); 394 list_splice_init(&shost->starved_list, &starved_list);
387 395
388 while (!list_empty(&starved_list)) { 396 while (!list_empty(&starved_list)) {
389 struct request_queue *slq; 397 struct request_queue *slq;
390 398
391 /* 399 /*
392 * As long as shost is accepting commands and we have 400 * As long as shost is accepting commands and we have
393 * starved queues, call blk_run_queue. scsi_request_fn 401 * starved queues, call blk_run_queue. scsi_request_fn
394 * drops the queue_lock and can add us back to the 402 * drops the queue_lock and can add us back to the
395 * starved_list. 403 * starved_list.
396 * 404 *
397 * host_lock protects the starved_list and starved_entry. 405 * host_lock protects the starved_list and starved_entry.
398 * scsi_request_fn must get the host_lock before checking 406 * scsi_request_fn must get the host_lock before checking
399 * or modifying starved_list or starved_entry. 407 * or modifying starved_list or starved_entry.
400 */ 408 */
401 if (scsi_host_is_busy(shost)) 409 if (scsi_host_is_busy(shost))
402 break; 410 break;
403 411
404 sdev = list_entry(starved_list.next, 412 sdev = list_entry(starved_list.next,
405 struct scsi_device, starved_entry); 413 struct scsi_device, starved_entry);
406 list_del_init(&sdev->starved_entry); 414 list_del_init(&sdev->starved_entry);
407 if (scsi_target_is_busy(scsi_target(sdev))) { 415 if (scsi_target_is_busy(scsi_target(sdev))) {
408 list_move_tail(&sdev->starved_entry, 416 list_move_tail(&sdev->starved_entry,
409 &shost->starved_list); 417 &shost->starved_list);
410 continue; 418 continue;
411 } 419 }
412 420
413 /* 421 /*
414 * Once we drop the host lock, a racing scsi_remove_device() 422 * Once we drop the host lock, a racing scsi_remove_device()
415 * call may remove the sdev from the starved list and destroy 423 * call may remove the sdev from the starved list and destroy
416 * it and the queue. Mitigate by taking a reference to the 424 * it and the queue. Mitigate by taking a reference to the
417 * queue and never touching the sdev again after we drop the 425 * queue and never touching the sdev again after we drop the
418 * host lock. Note: if __scsi_remove_device() invokes 426 * host lock. Note: if __scsi_remove_device() invokes
419 * blk_cleanup_queue() before the queue is run from this 427 * blk_cleanup_queue() before the queue is run from this
420 * function then blk_run_queue() will return immediately since 428 * function then blk_run_queue() will return immediately since
421 * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING. 429 * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
422 */ 430 */
423 slq = sdev->request_queue; 431 slq = sdev->request_queue;
424 if (!blk_get_queue(slq)) 432 if (!blk_get_queue(slq))
425 continue; 433 continue;
426 spin_unlock_irqrestore(shost->host_lock, flags); 434 spin_unlock_irqrestore(shost->host_lock, flags);
427 435
428 blk_run_queue(slq); 436 blk_run_queue(slq);
429 blk_put_queue(slq); 437 blk_put_queue(slq);
430 438
431 spin_lock_irqsave(shost->host_lock, flags); 439 spin_lock_irqsave(shost->host_lock, flags);
432 } 440 }
433 /* put any unprocessed entries back */ 441 /* put any unprocessed entries back */
434 list_splice(&starved_list, &shost->starved_list); 442 list_splice(&starved_list, &shost->starved_list);
435 spin_unlock_irqrestore(shost->host_lock, flags); 443 spin_unlock_irqrestore(shost->host_lock, flags);
436 } 444 }
437 445
438 /* 446 /*
439 * Function: scsi_run_queue() 447 * Function: scsi_run_queue()
440 * 448 *
441 * Purpose: Select a proper request queue to serve next 449 * Purpose: Select a proper request queue to serve next
442 * 450 *
443 * Arguments: q - last request's queue 451 * Arguments: q - last request's queue
444 * 452 *
445 * Returns: Nothing 453 * Returns: Nothing
446 * 454 *
447 * Notes: The previous command was completely finished, start 455 * Notes: The previous command was completely finished, start
448 * a new one if possible. 456 * a new one if possible.
449 */ 457 */
450 static void scsi_run_queue(struct request_queue *q) 458 static void scsi_run_queue(struct request_queue *q)
451 { 459 {
452 struct scsi_device *sdev = q->queuedata; 460 struct scsi_device *sdev = q->queuedata;
453 461
454 if (scsi_target(sdev)->single_lun) 462 if (scsi_target(sdev)->single_lun)
455 scsi_single_lun_run(sdev); 463 scsi_single_lun_run(sdev);
456 if (!list_empty(&sdev->host->starved_list)) 464 if (!list_empty(&sdev->host->starved_list))
457 scsi_starved_list_run(sdev->host); 465 scsi_starved_list_run(sdev->host);
458 466
459 blk_run_queue(q); 467 blk_run_queue(q);
460 } 468 }
461 469
462 void scsi_requeue_run_queue(struct work_struct *work) 470 void scsi_requeue_run_queue(struct work_struct *work)
463 { 471 {
464 struct scsi_device *sdev; 472 struct scsi_device *sdev;
465 struct request_queue *q; 473 struct request_queue *q;
466 474
467 sdev = container_of(work, struct scsi_device, requeue_work); 475 sdev = container_of(work, struct scsi_device, requeue_work);
468 q = sdev->request_queue; 476 q = sdev->request_queue;
469 scsi_run_queue(q); 477 scsi_run_queue(q);
470 } 478 }
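scsi_requeue_run_queue() recovers its scsi_device from the embedded work item with container_of(). The init and schedule sites are not part of this hunk, so the sketch below shows the usual wiring for such an embedded work item with a hypothetical container type; it is illustrative only.

#include <linux/workqueue.h>

struct my_dev {				/* hypothetical container */
	struct work_struct requeue_work;
	/* ... */
};

static void my_requeue_fn(struct work_struct *work)
{
	/* map the work item back to the object that embeds it */
	struct my_dev *dev = container_of(work, struct my_dev, requeue_work);

	/* kick dev's queue here, as scsi_requeue_run_queue() does above */
}

static void my_dev_init(struct my_dev *dev)
{
	INIT_WORK(&dev->requeue_work, my_requeue_fn);
}

/* From atomic or locked context, defer the queue run to process context: */
/*	schedule_work(&dev->requeue_work);				     */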
471 479
472 /* 480 /*
473 * Function: scsi_requeue_command() 481 * Function: scsi_requeue_command()
474 * 482 *
475 * Purpose: Handle post-processing of completed commands. 483 * Purpose: Handle post-processing of completed commands.
476 * 484 *
477 * Arguments: q - queue to operate on 485 * Arguments: q - queue to operate on
478 * cmd - command that may need to be requeued. 486 * cmd - command that may need to be requeued.
479 * 487 *
480 * Returns: Nothing 488 * Returns: Nothing
481 * 489 *
482 * Notes: After command completion, there may be blocks left 490 * Notes: After command completion, there may be blocks left
483 * over which weren't finished by the previous command. 491 * over which weren't finished by the previous command.
484 * This can be for a number of reasons - the main one is 492 * This can be for a number of reasons - the main one is
485 * I/O errors in the middle of the request, in which case 493 * I/O errors in the middle of the request, in which case
486 * we need to request the blocks that come after the bad 494 * we need to request the blocks that come after the bad
487 * sector. 495 * sector.
488 * Notes: Upon return, cmd is a stale pointer. 496 * Notes: Upon return, cmd is a stale pointer.
489 */ 497 */
490 static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd) 498 static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
491 { 499 {
492 struct scsi_device *sdev = cmd->device; 500 struct scsi_device *sdev = cmd->device;
493 struct request *req = cmd->request; 501 struct request *req = cmd->request;
494 unsigned long flags; 502 unsigned long flags;
495 503
496 spin_lock_irqsave(q->queue_lock, flags); 504 spin_lock_irqsave(q->queue_lock, flags);
497 blk_unprep_request(req); 505 blk_unprep_request(req);
498 req->special = NULL; 506 req->special = NULL;
499 scsi_put_command(cmd); 507 scsi_put_command(cmd);
500 blk_requeue_request(q, req); 508 blk_requeue_request(q, req);
501 spin_unlock_irqrestore(q->queue_lock, flags); 509 spin_unlock_irqrestore(q->queue_lock, flags);
502 510
503 scsi_run_queue(q); 511 scsi_run_queue(q);
504 512
505 put_device(&sdev->sdev_gendev); 513 put_device(&sdev->sdev_gendev);
506 } 514 }
507 515
508 void scsi_next_command(struct scsi_cmnd *cmd) 516 void scsi_next_command(struct scsi_cmnd *cmd)
509 { 517 {
510 struct scsi_device *sdev = cmd->device; 518 struct scsi_device *sdev = cmd->device;
511 struct request_queue *q = sdev->request_queue; 519 struct request_queue *q = sdev->request_queue;
512 520
513 scsi_put_command(cmd); 521 scsi_put_command(cmd);
514 scsi_run_queue(q); 522 scsi_run_queue(q);
515 523
516 put_device(&sdev->sdev_gendev); 524 put_device(&sdev->sdev_gendev);
517 } 525 }
518 526
519 void scsi_run_host_queues(struct Scsi_Host *shost) 527 void scsi_run_host_queues(struct Scsi_Host *shost)
520 { 528 {
521 struct scsi_device *sdev; 529 struct scsi_device *sdev;
522 530
523 shost_for_each_device(sdev, shost) 531 shost_for_each_device(sdev, shost)
524 scsi_run_queue(sdev->request_queue); 532 scsi_run_queue(sdev->request_queue);
525 } 533 }
526 534
527 static inline unsigned int scsi_sgtable_index(unsigned short nents) 535 static inline unsigned int scsi_sgtable_index(unsigned short nents)
528 { 536 {
529 unsigned int index; 537 unsigned int index;
530 538
531 BUG_ON(nents > SCSI_MAX_SG_SEGMENTS); 539 BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);
532 540
533 if (nents <= 8) 541 if (nents <= 8)
534 index = 0; 542 index = 0;
535 else 543 else
536 index = get_count_order(nents) - 3; 544 index = get_count_order(nents) - 3;
537 545
538 return index; 546 return index;
539 } 547 }
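The index computed above selects a scatterlist mempool by rounding nents up to the next power of two: requests for 1-8 entries share pool 0, 9-16 use pool 1, 17-32 pool 2, and so on up to SCSI_MAX_SG_SEGMENTS. A small userspace demo of the same mapping (reimplementing get_count_order() by hand, since it is a kernel helper):

#include <stdio.h>

static unsigned int count_order(unsigned int n)
{
	unsigned int order = 0;

	while ((1u << order) < n)	/* order of next power of two >= n */
		order++;
	return order;
}

static unsigned int sgtable_index(unsigned int nents)
{
	return nents <= 8 ? 0 : count_order(nents) - 3;
}

int main(void)
{
	unsigned int samples[] = { 1, 8, 9, 16, 17, 64, 65, 128 };
	unsigned int i;

	/* prints pool indexes 0, 0, 1, 1, 2, 3, 4, 4 */
	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("nents %3u -> pool index %u\n",
		       samples[i], sgtable_index(samples[i]));
	return 0;
}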
540 548
541 static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents) 549 static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
542 { 550 {
543 struct scsi_host_sg_pool *sgp; 551 struct scsi_host_sg_pool *sgp;
544 552
545 sgp = scsi_sg_pools + scsi_sgtable_index(nents); 553 sgp = scsi_sg_pools + scsi_sgtable_index(nents);
546 mempool_free(sgl, sgp->pool); 554 mempool_free(sgl, sgp->pool);
547 } 555 }
548 556
549 static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask) 557 static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
550 { 558 {
551 struct scsi_host_sg_pool *sgp; 559 struct scsi_host_sg_pool *sgp;
552 560
553 sgp = scsi_sg_pools + scsi_sgtable_index(nents); 561 sgp = scsi_sg_pools + scsi_sgtable_index(nents);
554 return mempool_alloc(sgp->pool, gfp_mask); 562 return mempool_alloc(sgp->pool, gfp_mask);
555 } 563 }
556 564
557 static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, 565 static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
558 gfp_t gfp_mask) 566 gfp_t gfp_mask)
559 { 567 {
560 int ret; 568 int ret;
561 569
562 BUG_ON(!nents); 570 BUG_ON(!nents);
563 571
564 ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS, 572 ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
565 gfp_mask, scsi_sg_alloc); 573 gfp_mask, scsi_sg_alloc);
566 if (unlikely(ret)) 574 if (unlikely(ret))
567 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, 575 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
568 scsi_sg_free); 576 scsi_sg_free);
569 577
570 return ret; 578 return ret;
571 } 579 }
572 580
573 static void scsi_free_sgtable(struct scsi_data_buffer *sdb) 581 static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
574 { 582 {
575 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free); 583 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
576 } 584 }
577 585
578 /* 586 /*
579 * Function: scsi_release_buffers() 587 * Function: scsi_release_buffers()
580 * 588 *
581 * Purpose: Free resources allocated for a scsi_command. 589 * Purpose: Free resources allocated for a scsi_command.
582 * 590 *
583 * Arguments: cmd - command that we are bailing. 591 * Arguments: cmd - command that we are bailing.
584 * 592 *
585 * Lock status: Assumed that no lock is held upon entry. 593 * Lock status: Assumed that no lock is held upon entry.
586 * 594 *
587 * Returns: Nothing 595 * Returns: Nothing
588 * 596 *
589 * Notes: In the event that an upper level driver rejects a 597 * Notes: In the event that an upper level driver rejects a
590 * command, we must release resources allocated during 598 * command, we must release resources allocated during
591 * the __init_io() function. Primarily this would involve 599 * the __init_io() function. Primarily this would involve
592 * the scatter-gather table. 600 * the scatter-gather table.
593 */ 601 */
594 static void scsi_release_buffers(struct scsi_cmnd *cmd) 602 static void scsi_release_buffers(struct scsi_cmnd *cmd)
595 { 603 {
596 if (cmd->sdb.table.nents) 604 if (cmd->sdb.table.nents)
597 scsi_free_sgtable(&cmd->sdb); 605 scsi_free_sgtable(&cmd->sdb);
598 606
599 memset(&cmd->sdb, 0, sizeof(cmd->sdb)); 607 memset(&cmd->sdb, 0, sizeof(cmd->sdb));
600 608
601 if (scsi_prot_sg_count(cmd)) 609 if (scsi_prot_sg_count(cmd))
602 scsi_free_sgtable(cmd->prot_sdb); 610 scsi_free_sgtable(cmd->prot_sdb);
603 } 611 }
604 612
605 static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd) 613 static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
606 { 614 {
607 struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special; 615 struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;
608 616
609 scsi_free_sgtable(bidi_sdb); 617 scsi_free_sgtable(bidi_sdb);
610 kmem_cache_free(scsi_sdb_cache, bidi_sdb); 618 kmem_cache_free(scsi_sdb_cache, bidi_sdb);
611 cmd->request->next_rq->special = NULL; 619 cmd->request->next_rq->special = NULL;
612 } 620 }
613 621
614 /** 622 /**
615 * __scsi_error_from_host_byte - translate SCSI error code into errno 623 * __scsi_error_from_host_byte - translate SCSI error code into errno
616 * @cmd: SCSI command (unused) 624 * @cmd: SCSI command (unused)
617 * @result: scsi error code 625 * @result: scsi error code
618 * 626 *
619 * Translate SCSI error code into standard UNIX errno. 627 * Translate SCSI error code into standard UNIX errno.
620 * Return values: 628 * Return values:
621 * -ENOLINK temporary transport failure 629 * -ENOLINK temporary transport failure
622 * -EREMOTEIO permanent target failure, do not retry 630 * -EREMOTEIO permanent target failure, do not retry
623 * -EBADE permanent nexus failure, retry on other path 631 * -EBADE permanent nexus failure, retry on other path
624 * -ENOSPC No write space available 632 * -ENOSPC No write space available
625 * -ENODATA Medium error 633 * -ENODATA Medium error
626 * -EIO unspecified I/O error 634 * -EIO unspecified I/O error
627 */ 635 */
628 static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result) 636 static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
629 { 637 {
630 int error = 0; 638 int error = 0;
631 639
632 switch(host_byte(result)) { 640 switch(host_byte(result)) {
633 case DID_TRANSPORT_FAILFAST: 641 case DID_TRANSPORT_FAILFAST:
634 error = -ENOLINK; 642 error = -ENOLINK;
635 break; 643 break;
636 case DID_TARGET_FAILURE: 644 case DID_TARGET_FAILURE:
637 set_host_byte(cmd, DID_OK); 645 set_host_byte(cmd, DID_OK);
638 error = -EREMOTEIO; 646 error = -EREMOTEIO;
639 break; 647 break;
640 case DID_NEXUS_FAILURE: 648 case DID_NEXUS_FAILURE:
641 set_host_byte(cmd, DID_OK); 649 set_host_byte(cmd, DID_OK);
642 error = -EBADE; 650 error = -EBADE;
643 break; 651 break;
644 case DID_ALLOC_FAILURE: 652 case DID_ALLOC_FAILURE:
645 set_host_byte(cmd, DID_OK); 653 set_host_byte(cmd, DID_OK);
646 error = -ENOSPC; 654 error = -ENOSPC;
647 break; 655 break;
648 case DID_MEDIUM_ERROR: 656 case DID_MEDIUM_ERROR:
649 set_host_byte(cmd, DID_OK); 657 set_host_byte(cmd, DID_OK);
650 error = -ENODATA; 658 error = -ENODATA;
651 break; 659 break;
652 default: 660 default:
653 error = -EIO; 661 error = -EIO;
654 break; 662 break;
655 } 663 }
656 664
657 return error; 665 return error;
658 } 666 }
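The host byte examined above lives in bits 16-23 of the result word, which is why results are composed as "DID_xxx << 16" elsewhere in this file (e.g. the DID_NO_CONNECT assignments further down). A hedged, illustrative-only helper showing the round trip; it is not part of the original file:

/* Illustrative only: exercises the mapping implemented above. */
static void show_host_byte_mapping(struct scsi_cmnd *cmd)
{
	int result = DID_TRANSPORT_FAILFAST << 16;	/* composed as elsewhere */

	/* host_byte() recovers the DID_xxx value from bits 16-23 ... */
	WARN_ON(host_byte(result) != DID_TRANSPORT_FAILFAST);

	/* ... so this command would complete with -ENOLINK */
	WARN_ON(__scsi_error_from_host_byte(cmd, result) != -ENOLINK);
}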
659 667
660 /* 668 /*
661 * Function: scsi_io_completion() 669 * Function: scsi_io_completion()
662 * 670 *
663 * Purpose: Completion processing for block device I/O requests. 671 * Purpose: Completion processing for block device I/O requests.
664 * 672 *
665 * Arguments: cmd - command that is finished. 673 * Arguments: cmd - command that is finished.
666 * 674 *
667 * Lock status: Assumed that no lock is held upon entry. 675 * Lock status: Assumed that no lock is held upon entry.
668 * 676 *
669 * Returns: Nothing 677 * Returns: Nothing
670 * 678 *
671 * Notes: We will finish off the specified number of sectors. If we 679 * Notes: We will finish off the specified number of sectors. If we
672 * are done, the command block will be released and the queue 680 * are done, the command block will be released and the queue
673 * function will be goosed. If we are not done then we have to 681 * function will be goosed. If we are not done then we have to
674 * figure out what to do next: 682 * figure out what to do next:
675 * 683 *
676 * a) We can call scsi_requeue_command(). The request 684 * a) We can call scsi_requeue_command(). The request
677 * will be unprepared and put back on the queue. Then 685 * will be unprepared and put back on the queue. Then
678 * a new command will be created for it. This should 686 * a new command will be created for it. This should
679 * be used if we made forward progress, or if we want 687 * be used if we made forward progress, or if we want
680 * to switch from READ(10) to READ(6) for example. 688 * to switch from READ(10) to READ(6) for example.
681 * 689 *
682 * b) We can call __scsi_queue_insert(). The request will 690 * b) We can call __scsi_queue_insert(). The request will
683 * be put back on the queue and retried using the same 691 * be put back on the queue and retried using the same
684 * command as before, possibly after a delay. 692 * command as before, possibly after a delay.
685 * 693 *
686 * c) We can call blk_end_request() with -EIO to fail 694 * c) We can call blk_end_request() with -EIO to fail
687 * the remainder of the request. 695 * the remainder of the request.
688 */ 696 */
689 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) 697 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
690 { 698 {
691 int result = cmd->result; 699 int result = cmd->result;
692 struct request_queue *q = cmd->device->request_queue; 700 struct request_queue *q = cmd->device->request_queue;
693 struct request *req = cmd->request; 701 struct request *req = cmd->request;
694 int error = 0; 702 int error = 0;
695 struct scsi_sense_hdr sshdr; 703 struct scsi_sense_hdr sshdr;
696 int sense_valid = 0; 704 int sense_valid = 0;
697 int sense_deferred = 0; 705 int sense_deferred = 0;
698 enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY, 706 enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
699 ACTION_DELAYED_RETRY} action; 707 ACTION_DELAYED_RETRY} action;
700 unsigned long wait_for = (cmd->allowed + 1) * req->timeout; 708 unsigned long wait_for = (cmd->allowed + 1) * req->timeout;
701 709
702 if (result) { 710 if (result) {
703 sense_valid = scsi_command_normalize_sense(cmd, &sshdr); 711 sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
704 if (sense_valid) 712 if (sense_valid)
705 sense_deferred = scsi_sense_is_deferred(&sshdr); 713 sense_deferred = scsi_sense_is_deferred(&sshdr);
706 } 714 }
707 715
708 if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */ 716 if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
709 if (result) { 717 if (result) {
710 if (sense_valid && req->sense) { 718 if (sense_valid && req->sense) {
711 /* 719 /*
712 * SG_IO wants current and deferred errors 720 * SG_IO wants current and deferred errors
713 */ 721 */
714 int len = 8 + cmd->sense_buffer[7]; 722 int len = 8 + cmd->sense_buffer[7];
715 723
716 if (len > SCSI_SENSE_BUFFERSIZE) 724 if (len > SCSI_SENSE_BUFFERSIZE)
717 len = SCSI_SENSE_BUFFERSIZE; 725 len = SCSI_SENSE_BUFFERSIZE;
718 memcpy(req->sense, cmd->sense_buffer, len); 726 memcpy(req->sense, cmd->sense_buffer, len);
719 req->sense_len = len; 727 req->sense_len = len;
720 } 728 }
721 if (!sense_deferred) 729 if (!sense_deferred)
722 error = __scsi_error_from_host_byte(cmd, result); 730 error = __scsi_error_from_host_byte(cmd, result);
723 } 731 }
724 /* 732 /*
725 * __scsi_error_from_host_byte may have reset the host_byte 733 * __scsi_error_from_host_byte may have reset the host_byte
726 */ 734 */
727 req->errors = cmd->result; 735 req->errors = cmd->result;
728 736
729 req->resid_len = scsi_get_resid(cmd); 737 req->resid_len = scsi_get_resid(cmd);
730 738
731 if (scsi_bidi_cmnd(cmd)) { 739 if (scsi_bidi_cmnd(cmd)) {
732 /* 740 /*
733 * Bidi commands must be completed as a whole, 741 * Bidi commands must be completed as a whole,
734 * both sides at once. 742 * both sides at once.
735 */ 743 */
736 req->next_rq->resid_len = scsi_in(cmd)->resid; 744 req->next_rq->resid_len = scsi_in(cmd)->resid;
737 745
738 scsi_release_buffers(cmd); 746 scsi_release_buffers(cmd);
739 scsi_release_bidi_buffers(cmd); 747 scsi_release_bidi_buffers(cmd);
740 748
741 blk_end_request_all(req, 0); 749 blk_end_request_all(req, 0);
742 750
743 scsi_next_command(cmd); 751 scsi_next_command(cmd);
744 return; 752 return;
745 } 753 }
746 } else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) { 754 } else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
747 /* 755 /*
748 * Certain non BLOCK_PC requests are commands that don't 756 * Certain non BLOCK_PC requests are commands that don't
749 * actually transfer anything (FLUSH), so cannot use 757 * actually transfer anything (FLUSH), so cannot use
750 * good_bytes != blk_rq_bytes(req) as the signal for an error. 758 * good_bytes != blk_rq_bytes(req) as the signal for an error.
751 * This sets the error explicitly for the problem case. 759 * This sets the error explicitly for the problem case.
752 */ 760 */
753 error = __scsi_error_from_host_byte(cmd, result); 761 error = __scsi_error_from_host_byte(cmd, result);
754 } 762 }
755 763
756 /* no bidi support for !REQ_TYPE_BLOCK_PC yet */ 764 /* no bidi support for !REQ_TYPE_BLOCK_PC yet */
757 BUG_ON(blk_bidi_rq(req)); 765 BUG_ON(blk_bidi_rq(req));
758 766
759 /* 767 /*
760 * Next deal with any sectors which we were able to correctly 768 * Next deal with any sectors which we were able to correctly
761 * handle. 769 * handle.
762 */ 770 */
763 SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd, 771 SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
764 "%u sectors total, %d bytes done.\n", 772 "%u sectors total, %d bytes done.\n",
765 blk_rq_sectors(req), good_bytes)); 773 blk_rq_sectors(req), good_bytes));
766 774
767 /* 775 /*
768 * Recovered errors need reporting, but they're always treated 776 * Recovered errors need reporting, but they're always treated
769 * as success, so fiddle the result code here. For BLOCK_PC 777 * as success, so fiddle the result code here. For BLOCK_PC
770 * we already took a copy of the original into rq->errors which 778 * we already took a copy of the original into rq->errors which
771 * is what gets returned to the user 779 * is what gets returned to the user
772 */ 780 */
773 if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) { 781 if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
774 /* if ATA PASS-THROUGH INFORMATION AVAILABLE skip 782 /* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
775 * print since caller wants ATA registers. Only occurs on 783 * print since caller wants ATA registers. Only occurs on
776 * SCSI ATA PASS_THROUGH commands when CK_COND=1 784 * SCSI ATA PASS_THROUGH commands when CK_COND=1
777 */ 785 */
778 if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d)) 786 if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
779 ; 787 ;
780 else if (!(req->cmd_flags & REQ_QUIET)) 788 else if (!(req->cmd_flags & REQ_QUIET))
781 scsi_print_sense("", cmd); 789 scsi_print_sense("", cmd);
782 result = 0; 790 result = 0;
783 /* BLOCK_PC may have set error */ 791 /* BLOCK_PC may have set error */
784 error = 0; 792 error = 0;
785 } 793 }
786 794
787 /* 795 /*
788 * If we finished all bytes in the request we are done now. 796 * If we finished all bytes in the request we are done now.
789 */ 797 */
790 if (!blk_end_request(req, error, good_bytes)) 798 if (!blk_end_request(req, error, good_bytes))
791 goto next_command; 799 goto next_command;
792 800
793 /* 801 /*
794 * Kill remainder if no retries. 802 * Kill remainder if no retries.
795 */ 803 */
796 if (error && scsi_noretry_cmd(cmd)) { 804 if (error && scsi_noretry_cmd(cmd)) {
797 blk_end_request_all(req, error); 805 blk_end_request_all(req, error);
798 goto next_command; 806 goto next_command;
799 } 807 }
800 808
801 /* 809 /*
802 * If there had been no error, but we have leftover bytes in the 810 * If there had been no error, but we have leftover bytes in the
803 * request, just queue the command up again. 811 * request, just queue the command up again.
804 */ 812 */
805 if (result == 0) 813 if (result == 0)
806 goto requeue; 814 goto requeue;
807 815
808 error = __scsi_error_from_host_byte(cmd, result); 816 error = __scsi_error_from_host_byte(cmd, result);
809 817
810 if (host_byte(result) == DID_RESET) { 818 if (host_byte(result) == DID_RESET) {
811 /* Third party bus reset or reset for error recovery 819 /* Third party bus reset or reset for error recovery
812 * reasons. Just retry the command and see what 820 * reasons. Just retry the command and see what
813 * happens. 821 * happens.
814 */ 822 */
815 action = ACTION_RETRY; 823 action = ACTION_RETRY;
816 } else if (sense_valid && !sense_deferred) { 824 } else if (sense_valid && !sense_deferred) {
817 switch (sshdr.sense_key) { 825 switch (sshdr.sense_key) {
818 case UNIT_ATTENTION: 826 case UNIT_ATTENTION:
819 if (cmd->device->removable) { 827 if (cmd->device->removable) {
820 /* Detected disc change. Set a bit 828 /* Detected disc change. Set a bit
821 * and quietly refuse further access. 829 * and quietly refuse further access.
822 */ 830 */
823 cmd->device->changed = 1; 831 cmd->device->changed = 1;
824 action = ACTION_FAIL; 832 action = ACTION_FAIL;
825 } else { 833 } else {
826 /* Must have been a power glitch, or a 834 /* Must have been a power glitch, or a
827 * bus reset. Could not have been a 835 * bus reset. Could not have been a
828 * media change, so we just retry the 836 * media change, so we just retry the
829 * command and see what happens. 837 * command and see what happens.
830 */ 838 */
831 action = ACTION_RETRY; 839 action = ACTION_RETRY;
832 } 840 }
833 break; 841 break;
834 case ILLEGAL_REQUEST: 842 case ILLEGAL_REQUEST:
835 /* If we had an ILLEGAL REQUEST returned, then 843 /* If we had an ILLEGAL REQUEST returned, then
836 * we may have performed an unsupported 844 * we may have performed an unsupported
837 * command. The only thing this should be 845 * command. The only thing this should be
838 * would be a ten byte read where only a six 846 * would be a ten byte read where only a six
839 * byte read was supported. Also, on a system 847 * byte read was supported. Also, on a system
840 * where READ CAPACITY failed, we may have 848 * where READ CAPACITY failed, we may have
841 * read past the end of the disk. 849 * read past the end of the disk.
842 */ 850 */
843 if ((cmd->device->use_10_for_rw && 851 if ((cmd->device->use_10_for_rw &&
844 sshdr.asc == 0x20 && sshdr.ascq == 0x00) && 852 sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
845 (cmd->cmnd[0] == READ_10 || 853 (cmd->cmnd[0] == READ_10 ||
846 cmd->cmnd[0] == WRITE_10)) { 854 cmd->cmnd[0] == WRITE_10)) {
847 /* This will issue a new 6-byte command. */ 855 /* This will issue a new 6-byte command. */
848 cmd->device->use_10_for_rw = 0; 856 cmd->device->use_10_for_rw = 0;
849 action = ACTION_REPREP; 857 action = ACTION_REPREP;
850 } else if (sshdr.asc == 0x10) /* DIX */ { 858 } else if (sshdr.asc == 0x10) /* DIX */ {
851 action = ACTION_FAIL; 859 action = ACTION_FAIL;
852 error = -EILSEQ; 860 error = -EILSEQ;
853 /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */ 861 /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
854 } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) { 862 } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
855 action = ACTION_FAIL; 863 action = ACTION_FAIL;
856 error = -EREMOTEIO; 864 error = -EREMOTEIO;
857 } else 865 } else
858 action = ACTION_FAIL; 866 action = ACTION_FAIL;
859 break; 867 break;
860 case ABORTED_COMMAND: 868 case ABORTED_COMMAND:
861 action = ACTION_FAIL; 869 action = ACTION_FAIL;
862 if (sshdr.asc == 0x10) /* DIF */ 870 if (sshdr.asc == 0x10) /* DIF */
863 error = -EILSEQ; 871 error = -EILSEQ;
864 break; 872 break;
865 case NOT_READY: 873 case NOT_READY:
866 /* If the device is in the process of becoming 874 /* If the device is in the process of becoming
867 * ready, or has a temporary blockage, retry. 875 * ready, or has a temporary blockage, retry.
868 */ 876 */
869 if (sshdr.asc == 0x04) { 877 if (sshdr.asc == 0x04) {
870 switch (sshdr.ascq) { 878 switch (sshdr.ascq) {
871 case 0x01: /* becoming ready */ 879 case 0x01: /* becoming ready */
872 case 0x04: /* format in progress */ 880 case 0x04: /* format in progress */
873 case 0x05: /* rebuild in progress */ 881 case 0x05: /* rebuild in progress */
874 case 0x06: /* recalculation in progress */ 882 case 0x06: /* recalculation in progress */
875 case 0x07: /* operation in progress */ 883 case 0x07: /* operation in progress */
876 case 0x08: /* Long write in progress */ 884 case 0x08: /* Long write in progress */
877 case 0x09: /* self test in progress */ 885 case 0x09: /* self test in progress */
878 case 0x14: /* space allocation in progress */ 886 case 0x14: /* space allocation in progress */
879 action = ACTION_DELAYED_RETRY; 887 action = ACTION_DELAYED_RETRY;
880 break; 888 break;
881 default: 889 default:
882 action = ACTION_FAIL; 890 action = ACTION_FAIL;
883 break; 891 break;
884 } 892 }
885 } else 893 } else
886 action = ACTION_FAIL; 894 action = ACTION_FAIL;
887 break; 895 break;
888 case VOLUME_OVERFLOW: 896 case VOLUME_OVERFLOW:
889 /* See SSC3rXX or current. */ 897 /* See SSC3rXX or current. */
890 action = ACTION_FAIL; 898 action = ACTION_FAIL;
891 break; 899 break;
892 default: 900 default:
893 action = ACTION_FAIL; 901 action = ACTION_FAIL;
894 break; 902 break;
895 } 903 }
896 } else 904 } else
897 action = ACTION_FAIL; 905 action = ACTION_FAIL;
898 906
899 if (action != ACTION_FAIL && 907 if (action != ACTION_FAIL &&
900 time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) 908 time_before(cmd->jiffies_at_alloc + wait_for, jiffies))
901 action = ACTION_FAIL; 909 action = ACTION_FAIL;
902 910
903 switch (action) { 911 switch (action) {
904 case ACTION_FAIL: 912 case ACTION_FAIL:
905 /* Give up and fail the remainder of the request */ 913 /* Give up and fail the remainder of the request */
906 if (!(req->cmd_flags & REQ_QUIET)) { 914 if (!(req->cmd_flags & REQ_QUIET)) {
907 scsi_print_result(cmd); 915 scsi_print_result(cmd);
908 if (driver_byte(result) & DRIVER_SENSE) 916 if (driver_byte(result) & DRIVER_SENSE)
909 scsi_print_sense("", cmd); 917 scsi_print_sense("", cmd);
910 scsi_print_command(cmd); 918 scsi_print_command(cmd);
911 } 919 }
912 if (!blk_end_request_err(req, error)) 920 if (!blk_end_request_err(req, error))
913 goto next_command; 921 goto next_command;
914 /*FALLTHRU*/ 922 /*FALLTHRU*/
915 case ACTION_REPREP: 923 case ACTION_REPREP:
916 requeue: 924 requeue:
917 /* Unprep the request and put it back at the head of the queue. 925 /* Unprep the request and put it back at the head of the queue.
918 * A new command will be prepared and issued. 926 * A new command will be prepared and issued.
919 */ 927 */
920 scsi_release_buffers(cmd); 928 scsi_release_buffers(cmd);
921 scsi_requeue_command(q, cmd); 929 scsi_requeue_command(q, cmd);
922 break; 930 break;
923 case ACTION_RETRY: 931 case ACTION_RETRY:
924 /* Retry the same command immediately */ 932 /* Retry the same command immediately */
925 __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0); 933 __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
926 break; 934 break;
927 case ACTION_DELAYED_RETRY: 935 case ACTION_DELAYED_RETRY:
928 /* Retry the same command after a delay */ 936 /* Retry the same command after a delay */
929 __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0); 937 __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
930 break; 938 break;
931 } 939 }
932 return; 940 return;
933 941
934 next_command: 942 next_command:
935 scsi_release_buffers(cmd); 943 scsi_release_buffers(cmd);
936 scsi_next_command(cmd); 944 scsi_next_command(cmd);
937 } 945 }
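The structure of scsi_io_completion() leans on blk_end_request() returning false once every byte of the request has been accounted for: complete the part that succeeded first, and only if something is left over choose between requeue, retry and failure. A hedged sketch of that partial-completion idiom in a hypothetical driver context ("give_up" is a stand-in for the no-retry decision):

static void complete_or_retry(struct request *req, unsigned int good_bytes,
			      bool give_up)
{
	/* finish the bytes that succeeded; false means nothing is left */
	if (!blk_end_request(req, 0, good_bytes))
		return;

	if (give_up) {
		/* fail whatever remains in one go */
		blk_end_request_all(req, -EIO);
		return;
	}

	/* otherwise only the unfinished tail is requeued or retried */
}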
938 946
939 static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, 947 static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
940 gfp_t gfp_mask) 948 gfp_t gfp_mask)
941 { 949 {
942 int count; 950 int count;
943 951
944 /* 952 /*
945 * If sg table allocation fails, requeue request later. 953 * If sg table allocation fails, requeue request later.
946 */ 954 */
947 if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments, 955 if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
948 gfp_mask))) { 956 gfp_mask))) {
949 return BLKPREP_DEFER; 957 return BLKPREP_DEFER;
950 } 958 }
951 959
952 /* 960 /*
953 * Next, walk the list, and fill in the addresses and sizes of 961 * Next, walk the list, and fill in the addresses and sizes of
954 * each segment. 962 * each segment.
955 */ 963 */
956 count = blk_rq_map_sg(req->q, req, sdb->table.sgl); 964 count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
957 BUG_ON(count > sdb->table.nents); 965 BUG_ON(count > sdb->table.nents);
958 sdb->table.nents = count; 966 sdb->table.nents = count;
959 sdb->length = blk_rq_bytes(req); 967 sdb->length = blk_rq_bytes(req);
960 return BLKPREP_OK; 968 return BLKPREP_OK;
961 } 969 }
962 970
963 /* 971 /*
964 * Function: scsi_init_io() 972 * Function: scsi_init_io()
965 * 973 *
966 * Purpose: SCSI I/O initialize function. 974 * Purpose: SCSI I/O initialize function.
967 * 975 *
968 * Arguments: cmd - Command descriptor we wish to initialize 976 * Arguments: cmd - Command descriptor we wish to initialize
969 * 977 *
970 * Returns: 0 on success 978 * Returns: 0 on success
971 * BLKPREP_DEFER if the failure is retryable 979 * BLKPREP_DEFER if the failure is retryable
972 * BLKPREP_KILL if the failure is fatal 980 * BLKPREP_KILL if the failure is fatal
973 */ 981 */
974 int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask) 982 int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
975 { 983 {
976 struct scsi_device *sdev = cmd->device; 984 struct scsi_device *sdev = cmd->device;
977 struct request *rq = cmd->request; 985 struct request *rq = cmd->request;
978 int error; 986 int error;
979 987
980 BUG_ON(!rq->nr_phys_segments); 988 BUG_ON(!rq->nr_phys_segments);
981 989
982 error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask); 990 error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
983 if (error) 991 if (error)
984 goto err_exit; 992 goto err_exit;
985 993
986 if (blk_bidi_rq(rq)) { 994 if (blk_bidi_rq(rq)) {
987 struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc( 995 struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
988 scsi_sdb_cache, GFP_ATOMIC); 996 scsi_sdb_cache, GFP_ATOMIC);
989 if (!bidi_sdb) { 997 if (!bidi_sdb) {
990 error = BLKPREP_DEFER; 998 error = BLKPREP_DEFER;
991 goto err_exit; 999 goto err_exit;
992 } 1000 }
993 1001
994 rq->next_rq->special = bidi_sdb; 1002 rq->next_rq->special = bidi_sdb;
995 error = scsi_init_sgtable(rq->next_rq, bidi_sdb, GFP_ATOMIC); 1003 error = scsi_init_sgtable(rq->next_rq, bidi_sdb, GFP_ATOMIC);
996 if (error) 1004 if (error)
997 goto err_exit; 1005 goto err_exit;
998 } 1006 }
999 1007
1000 if (blk_integrity_rq(rq)) { 1008 if (blk_integrity_rq(rq)) {
1001 struct scsi_data_buffer *prot_sdb = cmd->prot_sdb; 1009 struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
1002 int ivecs, count; 1010 int ivecs, count;
1003 1011
1004 BUG_ON(prot_sdb == NULL); 1012 BUG_ON(prot_sdb == NULL);
1005 ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio); 1013 ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
1006 1014
1007 if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) { 1015 if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
1008 error = BLKPREP_DEFER; 1016 error = BLKPREP_DEFER;
1009 goto err_exit; 1017 goto err_exit;
1010 } 1018 }
1011 1019
1012 count = blk_rq_map_integrity_sg(rq->q, rq->bio, 1020 count = blk_rq_map_integrity_sg(rq->q, rq->bio,
1013 prot_sdb->table.sgl); 1021 prot_sdb->table.sgl);
1014 BUG_ON(unlikely(count > ivecs)); 1022 BUG_ON(unlikely(count > ivecs));
1015 BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q))); 1023 BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));
1016 1024
1017 cmd->prot_sdb = prot_sdb; 1025 cmd->prot_sdb = prot_sdb;
1018 cmd->prot_sdb->table.nents = count; 1026 cmd->prot_sdb->table.nents = count;
1019 } 1027 }
1020 1028
1021 return BLKPREP_OK ; 1029 return BLKPREP_OK ;
1022 1030
1023 err_exit: 1031 err_exit:
1024 scsi_release_buffers(cmd); 1032 scsi_release_buffers(cmd);
1025 cmd->request->special = NULL; 1033 cmd->request->special = NULL;
1026 scsi_put_command(cmd); 1034 scsi_put_command(cmd);
1027 put_device(&sdev->sdev_gendev); 1035 put_device(&sdev->sdev_gendev);
1028 return error; 1036 return error;
1029 } 1037 }
1030 EXPORT_SYMBOL(scsi_init_io); 1038 EXPORT_SYMBOL(scsi_init_io);
1031 1039
1032 static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev, 1040 static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
1033 struct request *req) 1041 struct request *req)
1034 { 1042 {
1035 struct scsi_cmnd *cmd; 1043 struct scsi_cmnd *cmd;
1036 1044
1037 if (!req->special) { 1045 if (!req->special) {
1038 /* Bail if we can't get a reference to the device */ 1046 /* Bail if we can't get a reference to the device */
1039 if (!get_device(&sdev->sdev_gendev)) 1047 if (!get_device(&sdev->sdev_gendev))
1040 return NULL; 1048 return NULL;
1041 1049
1042 cmd = scsi_get_command(sdev, GFP_ATOMIC); 1050 cmd = scsi_get_command(sdev, GFP_ATOMIC);
1043 if (unlikely(!cmd)) { 1051 if (unlikely(!cmd)) {
1044 put_device(&sdev->sdev_gendev); 1052 put_device(&sdev->sdev_gendev);
1045 return NULL; 1053 return NULL;
1046 } 1054 }
1047 req->special = cmd; 1055 req->special = cmd;
1048 } else { 1056 } else {
1049 cmd = req->special; 1057 cmd = req->special;
1050 } 1058 }
1051 1059
1052 /* pull a tag out of the request if we have one */ 1060 /* pull a tag out of the request if we have one */
1053 cmd->tag = req->tag; 1061 cmd->tag = req->tag;
1054 cmd->request = req; 1062 cmd->request = req;
1055 1063
1056 cmd->cmnd = req->cmd; 1064 cmd->cmnd = req->cmd;
1057 cmd->prot_op = SCSI_PROT_NORMAL; 1065 cmd->prot_op = SCSI_PROT_NORMAL;
1058 1066
1059 return cmd; 1067 return cmd;
1060 } 1068 }
1061 1069
1062 static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) 1070 static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
1063 { 1071 {
1064 struct scsi_cmnd *cmd = req->special; 1072 struct scsi_cmnd *cmd = req->special;
1065 1073
1066 /* 1074 /*
1067 * BLOCK_PC requests may transfer data, in which case they must 1075 * BLOCK_PC requests may transfer data, in which case they must
1068 * have a bio attached to them. Or they might contain a SCSI command 1076 * have a bio attached to them. Or they might contain a SCSI command
1069 * that does not transfer data, in which case they may optionally 1077 * that does not transfer data, in which case they may optionally
1070 * submit a request without an attached bio. 1078 * submit a request without an attached bio.
1071 */ 1079 */
1072 if (req->bio) { 1080 if (req->bio) {
1073 int ret = scsi_init_io(cmd, GFP_ATOMIC); 1081 int ret = scsi_init_io(cmd, GFP_ATOMIC);
1074 if (unlikely(ret)) 1082 if (unlikely(ret))
1075 return ret; 1083 return ret;
1076 } else { 1084 } else {
1077 BUG_ON(blk_rq_bytes(req)); 1085 BUG_ON(blk_rq_bytes(req));
1078 1086
1079 memset(&cmd->sdb, 0, sizeof(cmd->sdb)); 1087 memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1080 } 1088 }
1081 1089
1082 cmd->cmd_len = req->cmd_len; 1090 cmd->cmd_len = req->cmd_len;
1083 cmd->transfersize = blk_rq_bytes(req); 1091 cmd->transfersize = blk_rq_bytes(req);
1084 cmd->allowed = req->retries; 1092 cmd->allowed = req->retries;
1085 return BLKPREP_OK; 1093 return BLKPREP_OK;
1086 } 1094 }
1087 1095
1088 /* 1096 /*
1089 * Setup a REQ_TYPE_FS command. These are simple requests from filesystems 1097 * Setup a REQ_TYPE_FS command. These are simple requests from filesystems
1090 * that still need to be translated to SCSI CDBs from the ULD. 1098 * that still need to be translated to SCSI CDBs from the ULD.
1091 */ 1099 */
1092 static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req) 1100 static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
1093 { 1101 {
1094 struct scsi_cmnd *cmd = req->special; 1102 struct scsi_cmnd *cmd = req->special;
1095 1103
1096 if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh 1104 if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
1097 && sdev->scsi_dh_data->scsi_dh->prep_fn)) { 1105 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
1098 int ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req); 1106 int ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
1099 if (ret != BLKPREP_OK) 1107 if (ret != BLKPREP_OK)
1100 return ret; 1108 return ret;
1101 } 1109 }
1102 1110
1103 memset(cmd->cmnd, 0, BLK_MAX_CDB); 1111 memset(cmd->cmnd, 0, BLK_MAX_CDB);
1104 return scsi_cmd_to_driver(cmd)->init_command(cmd); 1112 return scsi_cmd_to_driver(cmd)->init_command(cmd);
1105 } 1113 }
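scsi_setup_fs_cmnd() hands the request to the upper-level driver's ->init_command() hook, which is expected to map the data (typically via scsi_init_io()) and fill in the CDB. The sketch below is a hedged example for a hypothetical "mydisk" driver issuing a READ(10), assuming 512-byte logical blocks; it is not the real sd.c implementation.

static int mydisk_init_command(struct scsi_cmnd *cmd)
{
	struct request *rq = cmd->request;
	sector_t lba = blk_rq_pos(rq);		/* assumes 512-byte blocks */
	unsigned int nr = blk_rq_sectors(rq);
	int ret;

	/* map the request's data into cmd->sdb; may defer under memory pressure */
	ret = scsi_init_io(cmd, GFP_ATOMIC);
	if (ret != BLKPREP_OK)
		return ret;

	/* build a READ(10) CDB; the caller above already zeroed cmd->cmnd */
	cmd->cmd_len = 10;
	cmd->cmnd[0] = READ_10;
	cmd->cmnd[2] = (lba >> 24) & 0xff;
	cmd->cmnd[3] = (lba >> 16) & 0xff;
	cmd->cmnd[4] = (lba >> 8) & 0xff;
	cmd->cmnd[5] = lba & 0xff;
	cmd->cmnd[7] = (nr >> 8) & 0xff;
	cmd->cmnd[8] = nr & 0xff;

	cmd->transfersize = 512;	/* bytes per logical block (assumed) */
	cmd->allowed = 3;		/* arbitrary retry budget for the sketch */
	return BLKPREP_OK;
}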
1106 1114
1107 static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req) 1115 static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req)
1108 { 1116 {
1109 struct scsi_cmnd *cmd = req->special; 1117 struct scsi_cmnd *cmd = req->special;
1110 1118
1111 if (!blk_rq_bytes(req)) 1119 if (!blk_rq_bytes(req))
1112 cmd->sc_data_direction = DMA_NONE; 1120 cmd->sc_data_direction = DMA_NONE;
1113 else if (rq_data_dir(req) == WRITE) 1121 else if (rq_data_dir(req) == WRITE)
1114 cmd->sc_data_direction = DMA_TO_DEVICE; 1122 cmd->sc_data_direction = DMA_TO_DEVICE;
1115 else 1123 else
1116 cmd->sc_data_direction = DMA_FROM_DEVICE; 1124 cmd->sc_data_direction = DMA_FROM_DEVICE;
1117 1125
1118 switch (req->cmd_type) { 1126 switch (req->cmd_type) {
1119 case REQ_TYPE_FS: 1127 case REQ_TYPE_FS:
1120 return scsi_setup_fs_cmnd(sdev, req); 1128 return scsi_setup_fs_cmnd(sdev, req);
1121 case REQ_TYPE_BLOCK_PC: 1129 case REQ_TYPE_BLOCK_PC:
1122 return scsi_setup_blk_pc_cmnd(sdev, req); 1130 return scsi_setup_blk_pc_cmnd(sdev, req);
1123 default: 1131 default:
1124 return BLKPREP_KILL; 1132 return BLKPREP_KILL;
1125 } 1133 }
1126 } 1134 }
1127 1135
1128 static int 1136 static int
1129 scsi_prep_state_check(struct scsi_device *sdev, struct request *req) 1137 scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
1130 { 1138 {
1131 int ret = BLKPREP_OK; 1139 int ret = BLKPREP_OK;
1132 1140
1133 /* 1141 /*
1134 * If the device is not in running state we will reject some 1142 * If the device is not in running state we will reject some
1135 * or all commands. 1143 * or all commands.
1136 */ 1144 */
1137 if (unlikely(sdev->sdev_state != SDEV_RUNNING)) { 1145 if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
1138 switch (sdev->sdev_state) { 1146 switch (sdev->sdev_state) {
1139 case SDEV_OFFLINE: 1147 case SDEV_OFFLINE:
1140 case SDEV_TRANSPORT_OFFLINE: 1148 case SDEV_TRANSPORT_OFFLINE:
1141 /* 1149 /*
1142 * If the device is offline we refuse to process any 1150 * If the device is offline we refuse to process any
1143 * commands. The device must be brought online 1151 * commands. The device must be brought online
1144 * before trying any recovery commands. 1152 * before trying any recovery commands.
1145 */ 1153 */
1146 sdev_printk(KERN_ERR, sdev, 1154 sdev_printk(KERN_ERR, sdev,
1147 "rejecting I/O to offline device\n"); 1155 "rejecting I/O to offline device\n");
1148 ret = BLKPREP_KILL; 1156 ret = BLKPREP_KILL;
1149 break; 1157 break;
1150 case SDEV_DEL: 1158 case SDEV_DEL:
1151 /* 1159 /*
1152 * If the device is fully deleted, we refuse to 1160 * If the device is fully deleted, we refuse to
1153 * process any commands as well. 1161 * process any commands as well.
1154 */ 1162 */
1155 sdev_printk(KERN_ERR, sdev, 1163 sdev_printk(KERN_ERR, sdev,
1156 "rejecting I/O to dead device\n"); 1164 "rejecting I/O to dead device\n");
1157 ret = BLKPREP_KILL; 1165 ret = BLKPREP_KILL;
1158 break; 1166 break;
1159 case SDEV_QUIESCE: 1167 case SDEV_QUIESCE:
1160 case SDEV_BLOCK: 1168 case SDEV_BLOCK:
1161 case SDEV_CREATED_BLOCK: 1169 case SDEV_CREATED_BLOCK:
1162 /* 1170 /*
1163 * If the device is blocked we defer normal commands. 1171 * If the device is blocked we defer normal commands.
1164 */ 1172 */
1165 if (!(req->cmd_flags & REQ_PREEMPT)) 1173 if (!(req->cmd_flags & REQ_PREEMPT))
1166 ret = BLKPREP_DEFER; 1174 ret = BLKPREP_DEFER;
1167 break; 1175 break;
1168 default: 1176 default:
1169 /* 1177 /*
1170 * For any other not fully online state we only allow 1178 * For any other not fully online state we only allow
1171 * special commands. In particular any user initiated 1179 * special commands. In particular any user initiated
1172 * command is not allowed. 1180 * command is not allowed.
1173 */ 1181 */
1174 if (!(req->cmd_flags & REQ_PREEMPT)) 1182 if (!(req->cmd_flags & REQ_PREEMPT))
1175 ret = BLKPREP_KILL; 1183 ret = BLKPREP_KILL;
1176 break; 1184 break;
1177 } 1185 }
1178 } 1186 }
1179 return ret; 1187 return ret;
1180 } 1188 }
1181 1189
1182 static int 1190 static int
1183 scsi_prep_return(struct request_queue *q, struct request *req, int ret) 1191 scsi_prep_return(struct request_queue *q, struct request *req, int ret)
1184 { 1192 {
1185 struct scsi_device *sdev = q->queuedata; 1193 struct scsi_device *sdev = q->queuedata;
1186 1194
1187 switch (ret) { 1195 switch (ret) {
1188 case BLKPREP_KILL: 1196 case BLKPREP_KILL:
1189 req->errors = DID_NO_CONNECT << 16; 1197 req->errors = DID_NO_CONNECT << 16;
1190 /* release the command and kill it */ 1198 /* release the command and kill it */
1191 if (req->special) { 1199 if (req->special) {
1192 struct scsi_cmnd *cmd = req->special; 1200 struct scsi_cmnd *cmd = req->special;
1193 scsi_release_buffers(cmd); 1201 scsi_release_buffers(cmd);
1194 scsi_put_command(cmd); 1202 scsi_put_command(cmd);
1195 put_device(&sdev->sdev_gendev); 1203 put_device(&sdev->sdev_gendev);
1196 req->special = NULL; 1204 req->special = NULL;
1197 } 1205 }
1198 break; 1206 break;
1199 case BLKPREP_DEFER: 1207 case BLKPREP_DEFER:
1200 /* 1208 /*
1201 * If we defer, the blk_peek_request() returns NULL, but the 1209 * If we defer, the blk_peek_request() returns NULL, but the
1202 * queue must be restarted, so we schedule a callback to happen 1210 * queue must be restarted, so we schedule a callback to happen
1203 * shortly. 1211 * shortly.
1204 */ 1212 */
1205 if (atomic_read(&sdev->device_busy) == 0) 1213 if (atomic_read(&sdev->device_busy) == 0)
1206 blk_delay_queue(q, SCSI_QUEUE_DELAY); 1214 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1207 break; 1215 break;
1208 default: 1216 default:
1209 req->cmd_flags |= REQ_DONTPREP; 1217 req->cmd_flags |= REQ_DONTPREP;
1210 } 1218 }
1211 1219
1212 return ret; 1220 return ret;
1213 } 1221 }
1214 1222
1215 static int scsi_prep_fn(struct request_queue *q, struct request *req) 1223 static int scsi_prep_fn(struct request_queue *q, struct request *req)
1216 { 1224 {
1217 struct scsi_device *sdev = q->queuedata; 1225 struct scsi_device *sdev = q->queuedata;
1218 struct scsi_cmnd *cmd; 1226 struct scsi_cmnd *cmd;
1219 int ret; 1227 int ret;
1220 1228
1221 ret = scsi_prep_state_check(sdev, req); 1229 ret = scsi_prep_state_check(sdev, req);
1222 if (ret != BLKPREP_OK) 1230 if (ret != BLKPREP_OK)
1223 goto out; 1231 goto out;
1224 1232
1225 cmd = scsi_get_cmd_from_req(sdev, req); 1233 cmd = scsi_get_cmd_from_req(sdev, req);
1226 if (unlikely(!cmd)) { 1234 if (unlikely(!cmd)) {
1227 ret = BLKPREP_DEFER; 1235 ret = BLKPREP_DEFER;
1228 goto out; 1236 goto out;
1229 } 1237 }
1230 1238
1231 ret = scsi_setup_cmnd(sdev, req); 1239 ret = scsi_setup_cmnd(sdev, req);
1232 out: 1240 out:
1233 return scsi_prep_return(q, req, ret); 1241 return scsi_prep_return(q, req, ret);
1234 } 1242 }
1235 1243
1236 static void scsi_unprep_fn(struct request_queue *q, struct request *req) 1244 static void scsi_unprep_fn(struct request_queue *q, struct request *req)
1237 { 1245 {
1238 if (req->cmd_type == REQ_TYPE_FS) { 1246 if (req->cmd_type == REQ_TYPE_FS) {
1239 struct scsi_cmnd *cmd = req->special; 1247 struct scsi_cmnd *cmd = req->special;
1240 struct scsi_driver *drv = scsi_cmd_to_driver(cmd); 1248 struct scsi_driver *drv = scsi_cmd_to_driver(cmd);
1241 1249
1242 if (drv->uninit_command) 1250 if (drv->uninit_command)
1243 drv->uninit_command(cmd); 1251 drv->uninit_command(cmd);
1244 } 1252 }
1245 } 1253 }
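scsi_prep_fn() and scsi_unprep_fn() are block-layer hooks; they only take effect once attached to the device's request queue. The registration happens in the queue-allocation path, which is outside this hunk, so the following is a hedged sketch of how such a legacy request_fn queue is typically wired up with the functions appearing in this file:

static struct request_queue *my_alloc_scsi_queue(void)
{
	struct request_queue *q;

	q = blk_init_queue(scsi_request_fn, NULL);	/* legacy request_fn queue */
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);		/* prepare before dispatch */
	blk_queue_unprep_rq(q, scsi_unprep_fn);		/* undo prep on requeue/kill */
	blk_queue_softirq_done(q, scsi_softirq_done);	/* completion bottom half */
	blk_queue_lld_busy(q, scsi_lld_busy);		/* busy hint for stacking drivers */
	return q;
}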
1246 1254
1247 /* 1255 /*
1248 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else 1256 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
1249 * return 0. 1257 * return 0.
1250 * 1258 *
1251 * Called with the queue_lock held. 1259 * Called with the queue_lock held.
1252 */ 1260 */
1253 static inline int scsi_dev_queue_ready(struct request_queue *q, 1261 static inline int scsi_dev_queue_ready(struct request_queue *q,
1254 struct scsi_device *sdev) 1262 struct scsi_device *sdev)
1255 { 1263 {
1256 unsigned int busy; 1264 unsigned int busy;
1257 1265
1258 busy = atomic_inc_return(&sdev->device_busy) - 1; 1266 busy = atomic_inc_return(&sdev->device_busy) - 1;
1259 if (sdev->device_blocked) { 1267 if (atomic_read(&sdev->device_blocked)) {
1260 if (busy) 1268 if (busy)
1261 goto out_dec; 1269 goto out_dec;
1262 1270
1263 /* 1271 /*
1264 * unblock after device_blocked iterates to zero 1272 * unblock after device_blocked iterates to zero
1265 */ 1273 */
1266 if (--sdev->device_blocked != 0) { 1274 if (atomic_dec_return(&sdev->device_blocked) > 0) {
1267 blk_delay_queue(q, SCSI_QUEUE_DELAY); 1275 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1268 goto out_dec; 1276 goto out_dec;
1269 } 1277 }
1270 SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev, 1278 SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
1271 "unblocking device at zero depth\n")); 1279 "unblocking device at zero depth\n"));
1272 } 1280 }
1273 1281
1274 if (busy >= sdev->queue_depth) 1282 if (busy >= sdev->queue_depth)
1275 goto out_dec; 1283 goto out_dec;
1276 1284
1277 return 1; 1285 return 1;
1278 out_dec: 1286 out_dec:
1279 atomic_dec(&sdev->device_busy); 1287 atomic_dec(&sdev->device_busy);
1280 return 0; 1288 return 0;
1281 } 1289 }
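The new-column side of this hunk shows the pattern the busy accounting follows with atomic counters: optimistically take a slot with atomic_inc_return(), check the blocked counter and the depth limit, and undo the increment on any bail-out. A hedged distillation of that pattern, with "ctr" and "blocked" standing in for the device/target/host counters (the blk_delay_queue() side effect is omitted):

static bool try_take_slot(atomic_t *ctr, atomic_t *blocked, unsigned int depth)
{
	unsigned int busy = atomic_inc_return(ctr) - 1;	/* slots in use before us */

	if (atomic_read(blocked) > 0) {
		if (busy)				/* let in-flight I/O drain first */
			goto out_undo;
		if (atomic_dec_return(blocked) > 0)	/* still blocked */
			goto out_undo;
		/* blocked just reached zero: we may proceed */
	}

	if (busy >= depth)				/* queue already full */
		goto out_undo;

	return true;

out_undo:
	atomic_dec(ctr);				/* give the slot back */
	return false;
}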
1282 1290
1283 /* 1291 /*
1284 * scsi_target_queue_ready: checks if we can send commands to the target 1292 * scsi_target_queue_ready: checks if we can send commands to the target
1285 * @sdev: scsi device on starget to check. 1293 * @sdev: scsi device on starget to check.
1286 */ 1294 */
1287 static inline int scsi_target_queue_ready(struct Scsi_Host *shost, 1295 static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
1288 struct scsi_device *sdev) 1296 struct scsi_device *sdev)
1289 { 1297 {
1290 struct scsi_target *starget = scsi_target(sdev); 1298 struct scsi_target *starget = scsi_target(sdev);
1291 unsigned int busy; 1299 unsigned int busy;
1292 1300
1293 if (starget->single_lun) { 1301 if (starget->single_lun) {
1294 spin_lock_irq(shost->host_lock); 1302 spin_lock_irq(shost->host_lock);
1295 if (starget->starget_sdev_user && 1303 if (starget->starget_sdev_user &&
1296 starget->starget_sdev_user != sdev) { 1304 starget->starget_sdev_user != sdev) {
1297 spin_unlock_irq(shost->host_lock); 1305 spin_unlock_irq(shost->host_lock);
1298 return 0; 1306 return 0;
1299 } 1307 }
1300 starget->starget_sdev_user = sdev; 1308 starget->starget_sdev_user = sdev;
1301 spin_unlock_irq(shost->host_lock); 1309 spin_unlock_irq(shost->host_lock);
1302 } 1310 }
1303 1311
1304 busy = atomic_inc_return(&starget->target_busy) - 1; 1312 busy = atomic_inc_return(&starget->target_busy) - 1;
1305 if (starget->target_blocked) { 1313 if (atomic_read(&starget->target_blocked) > 0) {
1306 if (busy) 1314 if (busy)
1307 goto starved; 1315 goto starved;
1308 1316
1309 /* 1317 /*
1310 * unblock after target_blocked iterates to zero 1318 * unblock after target_blocked iterates to zero
1311 */ 1319 */
1312 spin_lock_irq(shost->host_lock); 1320 if (atomic_dec_return(&starget->target_blocked) > 0)
1313 if (--starget->target_blocked != 0) {
1314 spin_unlock_irq(shost->host_lock);
1315 goto out_dec; 1321 goto out_dec;
1316 }
1317 spin_unlock_irq(shost->host_lock);
1318 1322
1319 SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget, 1323 SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
1320 "unblocking target at zero depth\n")); 1324 "unblocking target at zero depth\n"));
1321 } 1325 }
1322 1326
1323 if (starget->can_queue > 0 && busy >= starget->can_queue) 1327 if (starget->can_queue > 0 && busy >= starget->can_queue)
1324 goto starved; 1328 goto starved;
1325 1329
1326 return 1; 1330 return 1;
1327 1331
1328 starved: 1332 starved:
1329 spin_lock_irq(shost->host_lock); 1333 spin_lock_irq(shost->host_lock);
1330 list_move_tail(&sdev->starved_entry, &shost->starved_list); 1334 list_move_tail(&sdev->starved_entry, &shost->starved_list);
1331 spin_unlock_irq(shost->host_lock); 1335 spin_unlock_irq(shost->host_lock);
1332 out_dec: 1336 out_dec:
1333 atomic_dec(&starget->target_busy); 1337 atomic_dec(&starget->target_busy);
1334 return 0; 1338 return 0;
1335 } 1339 }
1336 1340
1337 /* 1341 /*
1338 * scsi_host_queue_ready: if we can send requests to shost, return 1 else 1342 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1339 * return 0. We must end up running the queue again whenever 0 is 1343 * return 0. We must end up running the queue again whenever 0 is
1340 * returned, else IO can hang. 1344 * returned, else IO can hang.
1341 */ 1345 */
1342 static inline int scsi_host_queue_ready(struct request_queue *q, 1346 static inline int scsi_host_queue_ready(struct request_queue *q,
1343 struct Scsi_Host *shost, 1347 struct Scsi_Host *shost,
1344 struct scsi_device *sdev) 1348 struct scsi_device *sdev)
1345 { 1349 {
1346 unsigned int busy; 1350 unsigned int busy;
1347 1351
1348 if (scsi_host_in_recovery(shost)) 1352 if (scsi_host_in_recovery(shost))
1349 return 0; 1353 return 0;
1350 1354
1351 busy = atomic_inc_return(&shost->host_busy) - 1; 1355 busy = atomic_inc_return(&shost->host_busy) - 1;
1352 if (shost->host_blocked) { 1356 if (atomic_read(&shost->host_blocked) > 0) {
1353 if (busy) 1357 if (busy)
1354 goto starved; 1358 goto starved;
1355 1359
1356 /* 1360 /*
1357 * unblock after host_blocked iterates to zero 1361 * unblock after host_blocked iterates to zero
1358 */ 1362 */
1359 spin_lock_irq(shost->host_lock); 1363 if (atomic_dec_return(&shost->host_blocked) > 0)
1360 if (--shost->host_blocked != 0) {
1361 spin_unlock_irq(shost->host_lock);
1362 goto out_dec; 1364 goto out_dec;
1363 }
1364 spin_unlock_irq(shost->host_lock);
1365 1365
1366 SCSI_LOG_MLQUEUE(3, 1366 SCSI_LOG_MLQUEUE(3,
1367 shost_printk(KERN_INFO, shost, 1367 shost_printk(KERN_INFO, shost,
1368 "unblocking host at zero depth\n")); 1368 "unblocking host at zero depth\n"));
1369 } 1369 }
1370 1370
1371 if (shost->can_queue > 0 && busy >= shost->can_queue) 1371 if (shost->can_queue > 0 && busy >= shost->can_queue)
1372 goto starved; 1372 goto starved;
1373 if (shost->host_self_blocked) 1373 if (shost->host_self_blocked)
1374 goto starved; 1374 goto starved;
1375 1375
1376 /* We're OK to process the command, so we can't be starved */ 1376 /* We're OK to process the command, so we can't be starved */
1377 if (!list_empty(&sdev->starved_entry)) { 1377 if (!list_empty(&sdev->starved_entry)) {
1378 spin_lock_irq(shost->host_lock); 1378 spin_lock_irq(shost->host_lock);
1379 if (!list_empty(&sdev->starved_entry)) 1379 if (!list_empty(&sdev->starved_entry))
1380 list_del_init(&sdev->starved_entry); 1380 list_del_init(&sdev->starved_entry);
1381 spin_unlock_irq(shost->host_lock); 1381 spin_unlock_irq(shost->host_lock);
1382 } 1382 }
1383 1383
1384 return 1; 1384 return 1;
1385 1385
1386 starved: 1386 starved:
1387 spin_lock_irq(shost->host_lock); 1387 spin_lock_irq(shost->host_lock);
1388 if (list_empty(&sdev->starved_entry)) 1388 if (list_empty(&sdev->starved_entry))
1389 list_add_tail(&sdev->starved_entry, &shost->starved_list); 1389 list_add_tail(&sdev->starved_entry, &shost->starved_list);
1390 spin_unlock_irq(shost->host_lock); 1390 spin_unlock_irq(shost->host_lock);
1391 out_dec: 1391 out_dec:
1392 atomic_dec(&shost->host_busy); 1392 atomic_dec(&shost->host_busy);
1393 return 0; 1393 return 0;
1394 } 1394 }
1395 1395
1396 /* 1396 /*
1397 * Busy state exporting function for request stacking drivers. 1397 * Busy state exporting function for request stacking drivers.
1398 * 1398 *
1399 * For efficiency, no lock is taken to check the busy state of 1399 * For efficiency, no lock is taken to check the busy state of
1400 * shost/starget/sdev, since the returned value is not guaranteed and 1400 * shost/starget/sdev, since the returned value is not guaranteed and
1401 * may be changed after request stacking drivers call the function, 1401 * may be changed after request stacking drivers call the function,
1402 * regardless of taking lock or not. 1402 * regardless of taking lock or not.
1403 * 1403 *
1404 * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi 1404 * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi
1405 * needs to return 'not busy'. Otherwise, request stacking drivers 1405 * needs to return 'not busy'. Otherwise, request stacking drivers
1406 * may hold requests forever. 1406 * may hold requests forever.
1407 */ 1407 */
1408 static int scsi_lld_busy(struct request_queue *q) 1408 static int scsi_lld_busy(struct request_queue *q)
1409 { 1409 {
1410 struct scsi_device *sdev = q->queuedata; 1410 struct scsi_device *sdev = q->queuedata;
1411 struct Scsi_Host *shost; 1411 struct Scsi_Host *shost;
1412 1412
1413 if (blk_queue_dying(q)) 1413 if (blk_queue_dying(q))
1414 return 0; 1414 return 0;
1415 1415
1416 shost = sdev->host; 1416 shost = sdev->host;
1417 1417
1418 /* 1418 /*
1419 * Ignore host/starget busy state. 1419 * Ignore host/starget busy state.
1420 * Since block layer does not have a concept of fairness across 1420 * Since block layer does not have a concept of fairness across
1421 * multiple queues, congestion of host/starget needs to be handled 1421 * multiple queues, congestion of host/starget needs to be handled
1422 * in SCSI layer. 1422 * in SCSI layer.
1423 */ 1423 */
1424 if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev)) 1424 if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
1425 return 1; 1425 return 1;
1426 1426
1427 return 0; 1427 return 0;
1428 } 1428 }
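This callback is exported to the block layer so that request-stacking drivers (device-mapper multipath, for example) can ask whether the lower device looks congested before dispatching to it. A hedged sketch of the consumer side, assuming scsi_lld_busy() has been registered on the queue with blk_queue_lld_busy():

static bool lower_path_busy(struct request_queue *lower_q)
{
	/*
	 * blk_lld_busy() invokes the queue's lld_busy hook (scsi_lld_busy()
	 * here) without locking, so the answer is only a hint, as the
	 * comment above explains.
	 */
	return blk_lld_busy(lower_q);
}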
1429 1429
1430 /* 1430 /*
1431 * Kill a request for a dead device 1431 * Kill a request for a dead device
1432 */ 1432 */
1433 static void scsi_kill_request(struct request *req, struct request_queue *q) 1433 static void scsi_kill_request(struct request *req, struct request_queue *q)
1434 { 1434 {
1435 struct scsi_cmnd *cmd = req->special; 1435 struct scsi_cmnd *cmd = req->special;
1436 struct scsi_device *sdev; 1436 struct scsi_device *sdev;
1437 struct scsi_target *starget; 1437 struct scsi_target *starget;
1438 struct Scsi_Host *shost; 1438 struct Scsi_Host *shost;
1439 1439
1440 blk_start_request(req); 1440 blk_start_request(req);
1441 1441
1442 scmd_printk(KERN_INFO, cmd, "killing request\n"); 1442 scmd_printk(KERN_INFO, cmd, "killing request\n");
1443 1443
1444 sdev = cmd->device; 1444 sdev = cmd->device;
1445 starget = scsi_target(sdev); 1445 starget = scsi_target(sdev);
1446 shost = sdev->host; 1446 shost = sdev->host;
1447 scsi_init_cmd_errh(cmd); 1447 scsi_init_cmd_errh(cmd);
1448 cmd->result = DID_NO_CONNECT << 16; 1448 cmd->result = DID_NO_CONNECT << 16;
1449 atomic_inc(&cmd->device->iorequest_cnt); 1449 atomic_inc(&cmd->device->iorequest_cnt);
1450 1450
1451 /* 1451 /*
1452 * SCSI request completion path will do scsi_device_unbusy(), 1452 * SCSI request completion path will do scsi_device_unbusy(),
1453 * bump busy counts. To bump the counters, we need to dance 1453 * bump busy counts. To bump the counters, we need to dance
1454 * with the locks as normal issue path does. 1454 * with the locks as normal issue path does.
1455 */ 1455 */
1456 atomic_inc(&sdev->device_busy); 1456 atomic_inc(&sdev->device_busy);
1457 atomic_inc(&shost->host_busy); 1457 atomic_inc(&shost->host_busy);
1458 atomic_inc(&starget->target_busy); 1458 atomic_inc(&starget->target_busy);
1459 1459
1460 blk_complete_request(req); 1460 blk_complete_request(req);
1461 } 1461 }
1462 1462
1463 static void scsi_softirq_done(struct request *rq) 1463 static void scsi_softirq_done(struct request *rq)
1464 { 1464 {
1465 struct scsi_cmnd *cmd = rq->special; 1465 struct scsi_cmnd *cmd = rq->special;
1466 unsigned long wait_for = (cmd->allowed + 1) * rq->timeout; 1466 unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
1467 int disposition; 1467 int disposition;
1468 1468
1469 INIT_LIST_HEAD(&cmd->eh_entry); 1469 INIT_LIST_HEAD(&cmd->eh_entry);
1470 1470
1471 atomic_inc(&cmd->device->iodone_cnt); 1471 atomic_inc(&cmd->device->iodone_cnt);
1472 if (cmd->result) 1472 if (cmd->result)
1473 atomic_inc(&cmd->device->ioerr_cnt); 1473 atomic_inc(&cmd->device->ioerr_cnt);
1474 1474
1475 disposition = scsi_decide_disposition(cmd); 1475 disposition = scsi_decide_disposition(cmd);
1476 if (disposition != SUCCESS && 1476 if (disposition != SUCCESS &&
1477 time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) { 1477 time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
1478 sdev_printk(KERN_ERR, cmd->device, 1478 sdev_printk(KERN_ERR, cmd->device,
1479 "timing out command, waited %lus\n", 1479 "timing out command, waited %lus\n",
1480 wait_for/HZ); 1480 wait_for/HZ);
1481 disposition = SUCCESS; 1481 disposition = SUCCESS;
1482 } 1482 }
1483 1483
1484 scsi_log_completion(cmd, disposition); 1484 scsi_log_completion(cmd, disposition);
1485 1485
1486 switch (disposition) { 1486 switch (disposition) {
1487 case SUCCESS: 1487 case SUCCESS:
1488 scsi_finish_command(cmd); 1488 scsi_finish_command(cmd);
1489 break; 1489 break;
1490 case NEEDS_RETRY: 1490 case NEEDS_RETRY:
1491 scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY); 1491 scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
1492 break; 1492 break;
1493 case ADD_TO_MLQUEUE: 1493 case ADD_TO_MLQUEUE:
1494 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); 1494 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
1495 break; 1495 break;
1496 default: 1496 default:
1497 if (!scsi_eh_scmd_add(cmd, 0)) 1497 if (!scsi_eh_scmd_add(cmd, 0))
1498 scsi_finish_command(cmd); 1498 scsi_finish_command(cmd);
1499 } 1499 }
1500 } 1500 }
1501 1501
1502 /** 1502 /**
1503 * scsi_done - Invoke completion on finished SCSI command. 1503 * scsi_done - Invoke completion on finished SCSI command.
1504 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives 1504 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
1505 * ownership back to SCSI Core -- i.e. the LLDD has finished with it. 1505 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
1506 * 1506 *
1507 * Description: This function is the mid-level's (SCSI Core) interrupt routine, 1507 * Description: This function is the mid-level's (SCSI Core) interrupt routine,
1508 * which regains ownership of the SCSI command (de facto) from a LLDD, and 1508 * which regains ownership of the SCSI command (de facto) from a LLDD, and
1509 * calls blk_complete_request() for further processing. 1509 * calls blk_complete_request() for further processing.
1510 * 1510 *
1511 * This function is interrupt context safe. 1511 * This function is interrupt context safe.
1512 */ 1512 */
1513 static void scsi_done(struct scsi_cmnd *cmd) 1513 static void scsi_done(struct scsi_cmnd *cmd)
1514 { 1514 {
1515 trace_scsi_dispatch_cmd_done(cmd); 1515 trace_scsi_dispatch_cmd_done(cmd);
1516 blk_complete_request(cmd->request); 1516 blk_complete_request(cmd->request);
1517 } 1517 }
1518 1518
1519 /* 1519 /*
1520 * Function: scsi_request_fn() 1520 * Function: scsi_request_fn()
1521 * 1521 *
1522 * Purpose: Main strategy routine for SCSI. 1522 * Purpose: Main strategy routine for SCSI.
1523 * 1523 *
1524 * Arguments: q - Pointer to actual queue. 1524 * Arguments: q - Pointer to actual queue.
1525 * 1525 *
1526 * Returns: Nothing 1526 * Returns: Nothing
1527 * 1527 *
1528 * Lock status: IO request lock assumed to be held when called. 1528 * Lock status: IO request lock assumed to be held when called.
1529 */ 1529 */
1530 static void scsi_request_fn(struct request_queue *q) 1530 static void scsi_request_fn(struct request_queue *q)
1531 __releases(q->queue_lock) 1531 __releases(q->queue_lock)
1532 __acquires(q->queue_lock) 1532 __acquires(q->queue_lock)
1533 { 1533 {
1534 struct scsi_device *sdev = q->queuedata; 1534 struct scsi_device *sdev = q->queuedata;
1535 struct Scsi_Host *shost; 1535 struct Scsi_Host *shost;
1536 struct scsi_cmnd *cmd; 1536 struct scsi_cmnd *cmd;
1537 struct request *req; 1537 struct request *req;
1538 1538
1539 /* 1539 /*
1540 * To start with, we keep looping until the queue is empty, or until 1540 * To start with, we keep looping until the queue is empty, or until
1541 * the host is no longer able to accept any more requests. 1541 * the host is no longer able to accept any more requests.
1542 */ 1542 */
1543 shost = sdev->host; 1543 shost = sdev->host;
1544 for (;;) { 1544 for (;;) {
1545 int rtn; 1545 int rtn;
1546 /* 1546 /*
1547 * get next queueable request. We do this early to make sure 1547 * get next queueable request. We do this early to make sure
1548 * that the request is fully prepared even if we cannot 1548 * that the request is fully prepared even if we cannot
1549 * accept it. 1549 * accept it.
1550 */ 1550 */
1551 req = blk_peek_request(q); 1551 req = blk_peek_request(q);
1552 if (!req) 1552 if (!req)
1553 break; 1553 break;
1554 1554
1555 if (unlikely(!scsi_device_online(sdev))) { 1555 if (unlikely(!scsi_device_online(sdev))) {
1556 sdev_printk(KERN_ERR, sdev, 1556 sdev_printk(KERN_ERR, sdev,
1557 "rejecting I/O to offline device\n"); 1557 "rejecting I/O to offline device\n");
1558 scsi_kill_request(req, q); 1558 scsi_kill_request(req, q);
1559 continue; 1559 continue;
1560 } 1560 }
1561 1561
1562 if (!scsi_dev_queue_ready(q, sdev)) 1562 if (!scsi_dev_queue_ready(q, sdev))
1563 break; 1563 break;
1564 1564
1565 /* 1565 /*
1566 * Remove the request from the request list. 1566 * Remove the request from the request list.
1567 */ 1567 */
1568 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req))) 1568 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1569 blk_start_request(req); 1569 blk_start_request(req);
1570 1570
1571 spin_unlock_irq(q->queue_lock); 1571 spin_unlock_irq(q->queue_lock);
1572 cmd = req->special; 1572 cmd = req->special;
1573 if (unlikely(cmd == NULL)) { 1573 if (unlikely(cmd == NULL)) {
1574 printk(KERN_CRIT "impossible request in %s.\n" 1574 printk(KERN_CRIT "impossible request in %s.\n"
1575 "please mail a stack trace to " 1575 "please mail a stack trace to "
1576 "linux-scsi@vger.kernel.org\n", 1576 "linux-scsi@vger.kernel.org\n",
1577 __func__); 1577 __func__);
1578 blk_dump_rq_flags(req, "foo"); 1578 blk_dump_rq_flags(req, "foo");
1579 BUG(); 1579 BUG();
1580 } 1580 }
1581 1581
1582 /* 1582 /*
1583 * We hit this when the driver is using a host wide 1583 * We hit this when the driver is using a host wide
1584 * tag map. For device level tag maps the queue_depth check 1584 * tag map. For device level tag maps the queue_depth check
1585 * in the device ready fn would prevent us from trying 1585 * in the device ready fn would prevent us from trying
1586 * to allocate a tag. Since the map is a shared host resource 1586 * to allocate a tag. Since the map is a shared host resource
1587 * we add the dev to the starved list so it eventually gets 1587 * we add the dev to the starved list so it eventually gets
1588 * a run when a tag is freed. 1588 * a run when a tag is freed.
1589 */ 1589 */
1590 if (blk_queue_tagged(q) && !blk_rq_tagged(req)) { 1590 if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
1591 spin_lock_irq(shost->host_lock); 1591 spin_lock_irq(shost->host_lock);
1592 if (list_empty(&sdev->starved_entry)) 1592 if (list_empty(&sdev->starved_entry))
1593 list_add_tail(&sdev->starved_entry, 1593 list_add_tail(&sdev->starved_entry,
1594 &shost->starved_list); 1594 &shost->starved_list);
1595 spin_unlock_irq(shost->host_lock); 1595 spin_unlock_irq(shost->host_lock);
1596 goto not_ready; 1596 goto not_ready;
1597 } 1597 }
1598 1598
1599 if (!scsi_target_queue_ready(shost, sdev)) 1599 if (!scsi_target_queue_ready(shost, sdev))
1600 goto not_ready; 1600 goto not_ready;
1601 1601
1602 if (!scsi_host_queue_ready(q, shost, sdev)) 1602 if (!scsi_host_queue_ready(q, shost, sdev))
1603 goto host_not_ready; 1603 goto host_not_ready;
1604 1604
1605 /* 1605 /*
1606 * Finally, initialize any error handling parameters, and set up 1606 * Finally, initialize any error handling parameters, and set up
1607 * the timers for timeouts. 1607 * the timers for timeouts.
1608 */ 1608 */
1609 scsi_init_cmd_errh(cmd); 1609 scsi_init_cmd_errh(cmd);
1610 1610
1611 /* 1611 /*
1612 * Dispatch the command to the low-level driver. 1612 * Dispatch the command to the low-level driver.
1613 */ 1613 */
1614 cmd->scsi_done = scsi_done; 1614 cmd->scsi_done = scsi_done;
1615 rtn = scsi_dispatch_cmd(cmd); 1615 rtn = scsi_dispatch_cmd(cmd);
1616 if (rtn) { 1616 if (rtn) {
1617 scsi_queue_insert(cmd, rtn); 1617 scsi_queue_insert(cmd, rtn);
1618 spin_lock_irq(q->queue_lock); 1618 spin_lock_irq(q->queue_lock);
1619 goto out_delay; 1619 goto out_delay;
1620 } 1620 }
1621 spin_lock_irq(q->queue_lock); 1621 spin_lock_irq(q->queue_lock);
1622 } 1622 }
1623 1623
1624 return; 1624 return;
1625 1625
1626 host_not_ready: 1626 host_not_ready:
1627 atomic_dec(&scsi_target(sdev)->target_busy); 1627 atomic_dec(&scsi_target(sdev)->target_busy);
1628 not_ready: 1628 not_ready:
1629 /* 1629 /*
1630 * lock q, handle tag, requeue req, and decrement device_busy. We 1630 * lock q, handle tag, requeue req, and decrement device_busy. We
1631 * must return with queue_lock held. 1631 * must return with queue_lock held.
1632 * 1632 *
1633 * Decrementing device_busy without checking it is OK, as all such 1633 * Decrementing device_busy without checking it is OK, as all such
1634 * cases (host limits or settings) should run the queue at some 1634 * cases (host limits or settings) should run the queue at some
1635 * later time. 1635 * later time.
1636 */ 1636 */
1637 spin_lock_irq(q->queue_lock); 1637 spin_lock_irq(q->queue_lock);
1638 blk_requeue_request(q, req); 1638 blk_requeue_request(q, req);
1639 atomic_dec(&sdev->device_busy); 1639 atomic_dec(&sdev->device_busy);
1640 out_delay: 1640 out_delay:
1641 if (atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev)) 1641 if (atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
1642 blk_delay_queue(q, SCSI_QUEUE_DELAY); 1642 blk_delay_queue(q, SCSI_QUEUE_DELAY);
1643 } 1643 }
1644 1644
1645 static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) 1645 static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1646 { 1646 {
1647 struct device *host_dev; 1647 struct device *host_dev;
1648 u64 bounce_limit = 0xffffffff; 1648 u64 bounce_limit = 0xffffffff;
1649 1649
1650 if (shost->unchecked_isa_dma) 1650 if (shost->unchecked_isa_dma)
1651 return BLK_BOUNCE_ISA; 1651 return BLK_BOUNCE_ISA;
1652 /* 1652 /*
1653 * Platforms with virtual-DMA translation 1653 * Platforms with virtual-DMA translation
1654 * hardware have no practical limit. 1654 * hardware have no practical limit.
1655 */ 1655 */
1656 if (!PCI_DMA_BUS_IS_PHYS) 1656 if (!PCI_DMA_BUS_IS_PHYS)
1657 return BLK_BOUNCE_ANY; 1657 return BLK_BOUNCE_ANY;
1658 1658
1659 host_dev = scsi_get_device(shost); 1659 host_dev = scsi_get_device(shost);
1660 if (host_dev && host_dev->dma_mask) 1660 if (host_dev && host_dev->dma_mask)
1661 bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT; 1661 bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;
1662 1662
1663 return bounce_limit; 1663 return bounce_limit;
1664 } 1664 }
1665 1665
1666 struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, 1666 struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
1667 request_fn_proc *request_fn) 1667 request_fn_proc *request_fn)
1668 { 1668 {
1669 struct request_queue *q; 1669 struct request_queue *q;
1670 struct device *dev = shost->dma_dev; 1670 struct device *dev = shost->dma_dev;
1671 1671
1672 q = blk_init_queue(request_fn, NULL); 1672 q = blk_init_queue(request_fn, NULL);
1673 if (!q) 1673 if (!q)
1674 return NULL; 1674 return NULL;
1675 1675
1676 /* 1676 /*
1677 * this limit is imposed by hardware restrictions 1677 * this limit is imposed by hardware restrictions
1678 */ 1678 */
1679 blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize, 1679 blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
1680 SCSI_MAX_SG_CHAIN_SEGMENTS)); 1680 SCSI_MAX_SG_CHAIN_SEGMENTS));
1681 1681
1682 if (scsi_host_prot_dma(shost)) { 1682 if (scsi_host_prot_dma(shost)) {
1683 shost->sg_prot_tablesize = 1683 shost->sg_prot_tablesize =
1684 min_not_zero(shost->sg_prot_tablesize, 1684 min_not_zero(shost->sg_prot_tablesize,
1685 (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS); 1685 (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
1686 BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize); 1686 BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
1687 blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize); 1687 blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
1688 } 1688 }
1689 1689
1690 blk_queue_max_hw_sectors(q, shost->max_sectors); 1690 blk_queue_max_hw_sectors(q, shost->max_sectors);
1691 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); 1691 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1692 blk_queue_segment_boundary(q, shost->dma_boundary); 1692 blk_queue_segment_boundary(q, shost->dma_boundary);
1693 dma_set_seg_boundary(dev, shost->dma_boundary); 1693 dma_set_seg_boundary(dev, shost->dma_boundary);
1694 1694
1695 blk_queue_max_segment_size(q, dma_get_max_seg_size(dev)); 1695 blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
1696 1696
1697 if (!shost->use_clustering) 1697 if (!shost->use_clustering)
1698 q->limits.cluster = 0; 1698 q->limits.cluster = 0;
1699 1699
1700 /* 1700 /*
1701 * set a reasonable default alignment on word boundaries: the 1701 * set a reasonable default alignment on word boundaries: the
1702 * host and device may alter it using 1702 * host and device may alter it using
1703 * blk_queue_update_dma_alignment() later. 1703 * blk_queue_update_dma_alignment() later.
1704 */ 1704 */
1705 blk_queue_dma_alignment(q, 0x03); 1705 blk_queue_dma_alignment(q, 0x03);
1706 1706
1707 return q; 1707 return q;
1708 } 1708 }
1709 EXPORT_SYMBOL(__scsi_alloc_queue); 1709 EXPORT_SYMBOL(__scsi_alloc_queue);
1710 1710
1711 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) 1711 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1712 { 1712 {
1713 struct request_queue *q; 1713 struct request_queue *q;
1714 1714
1715 q = __scsi_alloc_queue(sdev->host, scsi_request_fn); 1715 q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
1716 if (!q) 1716 if (!q)
1717 return NULL; 1717 return NULL;
1718 1718
1719 blk_queue_prep_rq(q, scsi_prep_fn); 1719 blk_queue_prep_rq(q, scsi_prep_fn);
1720 blk_queue_unprep_rq(q, scsi_unprep_fn); 1720 blk_queue_unprep_rq(q, scsi_unprep_fn);
1721 blk_queue_softirq_done(q, scsi_softirq_done); 1721 blk_queue_softirq_done(q, scsi_softirq_done);
1722 blk_queue_rq_timed_out(q, scsi_times_out); 1722 blk_queue_rq_timed_out(q, scsi_times_out);
1723 blk_queue_lld_busy(q, scsi_lld_busy); 1723 blk_queue_lld_busy(q, scsi_lld_busy);
1724 return q; 1724 return q;
1725 } 1725 }
1726 1726
1727 /* 1727 /*
1728 * Function: scsi_block_requests() 1728 * Function: scsi_block_requests()
1729 * 1729 *
1730 * Purpose: Utility function used by low-level drivers to prevent further 1730 * Purpose: Utility function used by low-level drivers to prevent further
1731 * commands from being queued to the device. 1731 * commands from being queued to the device.
1732 * 1732 *
1733 * Arguments: shost - Host in question 1733 * Arguments: shost - Host in question
1734 * 1734 *
1735 * Returns: Nothing 1735 * Returns: Nothing
1736 * 1736 *
1737 * Lock status: No locks are assumed held. 1737 * Lock status: No locks are assumed held.
1738 * 1738 *
1739 * Notes: There is no timer nor any other means by which the requests 1739 * Notes: There is no timer nor any other means by which the requests
1740 * get unblocked other than the low-level driver calling 1740 * get unblocked other than the low-level driver calling
1741 * scsi_unblock_requests(). 1741 * scsi_unblock_requests().
1742 */ 1742 */
1743 void scsi_block_requests(struct Scsi_Host *shost) 1743 void scsi_block_requests(struct Scsi_Host *shost)
1744 { 1744 {
1745 shost->host_self_blocked = 1; 1745 shost->host_self_blocked = 1;
1746 } 1746 }
1747 EXPORT_SYMBOL(scsi_block_requests); 1747 EXPORT_SYMBOL(scsi_block_requests);
1748 1748
1749 /* 1749 /*
1750 * Function: scsi_unblock_requests() 1750 * Function: scsi_unblock_requests()
1751 * 1751 *
1752 * Purpose: Utility function used by low-level drivers to allow further 1752 * Purpose: Utility function used by low-level drivers to allow further
1753 * commands to be queued to the device. 1753 * commands to be queued to the device.
1754 * 1754 *
1755 * Arguments: shost - Host in question 1755 * Arguments: shost - Host in question
1756 * 1756 *
1757 * Returns: Nothing 1757 * Returns: Nothing
1758 * 1758 *
1759 * Lock status: No locks are assumed held. 1759 * Lock status: No locks are assumed held.
1760 * 1760 *
1761 * Notes: There is no timer nor any other means by which the requests 1761 * Notes: There is no timer nor any other means by which the requests
1762 * get unblocked other than the low-level driver calling 1762 * get unblocked other than the low-level driver calling
1763 * scsi_unblock_requests(). 1763 * scsi_unblock_requests().
1764 * 1764 *
1765 * This is done as an API function so that changes to the 1765 * This is done as an API function so that changes to the
1766 * internals of the scsi mid-layer won't require wholesale 1766 * internals of the scsi mid-layer won't require wholesale
1767 * changes to drivers that use this feature. 1767 * changes to drivers that use this feature.
1768 */ 1768 */
1769 void scsi_unblock_requests(struct Scsi_Host *shost) 1769 void scsi_unblock_requests(struct Scsi_Host *shost)
1770 { 1770 {
1771 shost->host_self_blocked = 0; 1771 shost->host_self_blocked = 0;
1772 scsi_run_host_queues(shost); 1772 scsi_run_host_queues(shost);
1773 } 1773 }
1774 EXPORT_SYMBOL(scsi_unblock_requests); 1774 EXPORT_SYMBOL(scsi_unblock_requests);
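As a minimal illustrative sketch (not part of this commit), a low-level driver typically brackets a hardware reset or reconfiguration with this pair so the midlayer stops feeding it commands; the driver name and reset helper below are hypothetical, and the declarations are assumed to come from <scsi/scsi_host.h>:

        /* Hypothetical LLDD helper: fence the midlayer around an HBA reset. */
        #include <scsi/scsi_host.h>        /* assumed header location */

        static int mydrv_reset_adapter(struct Scsi_Host *shost)
        {
                int ret;

                scsi_block_requests(shost);     /* no new commands are queued */
                ret = mydrv_hw_reset(shost);    /* hypothetical hardware reset */
                scsi_unblock_requests(shost);   /* re-run the host queues */

                return ret;
        }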
1775 1775
1776 int __init scsi_init_queue(void) 1776 int __init scsi_init_queue(void)
1777 { 1777 {
1778 int i; 1778 int i;
1779 1779
1780 scsi_sdb_cache = kmem_cache_create("scsi_data_buffer", 1780 scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
1781 sizeof(struct scsi_data_buffer), 1781 sizeof(struct scsi_data_buffer),
1782 0, 0, NULL); 1782 0, 0, NULL);
1783 if (!scsi_sdb_cache) { 1783 if (!scsi_sdb_cache) {
1784 printk(KERN_ERR "SCSI: can't init scsi sdb cache\n"); 1784 printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
1785 return -ENOMEM; 1785 return -ENOMEM;
1786 } 1786 }
1787 1787
1788 for (i = 0; i < SG_MEMPOOL_NR; i++) { 1788 for (i = 0; i < SG_MEMPOOL_NR; i++) {
1789 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 1789 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1790 int size = sgp->size * sizeof(struct scatterlist); 1790 int size = sgp->size * sizeof(struct scatterlist);
1791 1791
1792 sgp->slab = kmem_cache_create(sgp->name, size, 0, 1792 sgp->slab = kmem_cache_create(sgp->name, size, 0,
1793 SLAB_HWCACHE_ALIGN, NULL); 1793 SLAB_HWCACHE_ALIGN, NULL);
1794 if (!sgp->slab) { 1794 if (!sgp->slab) {
1795 printk(KERN_ERR "SCSI: can't init sg slab %s\n", 1795 printk(KERN_ERR "SCSI: can't init sg slab %s\n",
1796 sgp->name); 1796 sgp->name);
1797 goto cleanup_sdb; 1797 goto cleanup_sdb;
1798 } 1798 }
1799 1799
1800 sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE, 1800 sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
1801 sgp->slab); 1801 sgp->slab);
1802 if (!sgp->pool) { 1802 if (!sgp->pool) {
1803 printk(KERN_ERR "SCSI: can't init sg mempool %s\n", 1803 printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
1804 sgp->name); 1804 sgp->name);
1805 goto cleanup_sdb; 1805 goto cleanup_sdb;
1806 } 1806 }
1807 } 1807 }
1808 1808
1809 return 0; 1809 return 0;
1810 1810
1811 cleanup_sdb: 1811 cleanup_sdb:
1812 for (i = 0; i < SG_MEMPOOL_NR; i++) { 1812 for (i = 0; i < SG_MEMPOOL_NR; i++) {
1813 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 1813 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1814 if (sgp->pool) 1814 if (sgp->pool)
1815 mempool_destroy(sgp->pool); 1815 mempool_destroy(sgp->pool);
1816 if (sgp->slab) 1816 if (sgp->slab)
1817 kmem_cache_destroy(sgp->slab); 1817 kmem_cache_destroy(sgp->slab);
1818 } 1818 }
1819 kmem_cache_destroy(scsi_sdb_cache); 1819 kmem_cache_destroy(scsi_sdb_cache);
1820 1820
1821 return -ENOMEM; 1821 return -ENOMEM;
1822 } 1822 }
1823 1823
1824 void scsi_exit_queue(void) 1824 void scsi_exit_queue(void)
1825 { 1825 {
1826 int i; 1826 int i;
1827 1827
1828 kmem_cache_destroy(scsi_sdb_cache); 1828 kmem_cache_destroy(scsi_sdb_cache);
1829 1829
1830 for (i = 0; i < SG_MEMPOOL_NR; i++) { 1830 for (i = 0; i < SG_MEMPOOL_NR; i++) {
1831 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 1831 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1832 mempool_destroy(sgp->pool); 1832 mempool_destroy(sgp->pool);
1833 kmem_cache_destroy(sgp->slab); 1833 kmem_cache_destroy(sgp->slab);
1834 } 1834 }
1835 } 1835 }
1836 1836
1837 /** 1837 /**
1838 * scsi_mode_select - issue a mode select 1838 * scsi_mode_select - issue a mode select
1839 * @sdev: SCSI device to be queried 1839 * @sdev: SCSI device to be queried
1840 * @pf: Page format bit (1 == standard, 0 == vendor specific) 1840 * @pf: Page format bit (1 == standard, 0 == vendor specific)
1841 * @sp: Save page bit (0 == don't save, 1 == save) 1841 * @sp: Save page bit (0 == don't save, 1 == save)
1842 * @modepage: mode page being requested 1842 * @modepage: mode page being requested
1843 * @buffer: request buffer (may not be smaller than eight bytes) 1843 * @buffer: request buffer (may not be smaller than eight bytes)
1844 * @len: length of request buffer. 1844 * @len: length of request buffer.
1845 * @timeout: command timeout 1845 * @timeout: command timeout
1846 * @retries: number of retries before failing 1846 * @retries: number of retries before failing
1847 * @data: returns a structure abstracting the mode header data 1847 * @data: returns a structure abstracting the mode header data
1848 * @sshdr: place to put sense data (or NULL if no sense to be collected). 1848 * @sshdr: place to put sense data (or NULL if no sense to be collected).
1849 * must be SCSI_SENSE_BUFFERSIZE big. 1849 * must be SCSI_SENSE_BUFFERSIZE big.
1850 * 1850 *
1851 * Returns zero if successful; negative error number or scsi 1851 * Returns zero if successful; negative error number or scsi
1852 * status on error 1852 * status on error
1853 * 1853 *
1854 */ 1854 */
1855 int 1855 int
1856 scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage, 1856 scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
1857 unsigned char *buffer, int len, int timeout, int retries, 1857 unsigned char *buffer, int len, int timeout, int retries,
1858 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) 1858 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
1859 { 1859 {
1860 unsigned char cmd[10]; 1860 unsigned char cmd[10];
1861 unsigned char *real_buffer; 1861 unsigned char *real_buffer;
1862 int ret; 1862 int ret;
1863 1863
1864 memset(cmd, 0, sizeof(cmd)); 1864 memset(cmd, 0, sizeof(cmd));
1865 cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0); 1865 cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);
1866 1866
1867 if (sdev->use_10_for_ms) { 1867 if (sdev->use_10_for_ms) {
1868 if (len > 65535) 1868 if (len > 65535)
1869 return -EINVAL; 1869 return -EINVAL;
1870 real_buffer = kmalloc(8 + len, GFP_KERNEL); 1870 real_buffer = kmalloc(8 + len, GFP_KERNEL);
1871 if (!real_buffer) 1871 if (!real_buffer)
1872 return -ENOMEM; 1872 return -ENOMEM;
1873 memcpy(real_buffer + 8, buffer, len); 1873 memcpy(real_buffer + 8, buffer, len);
1874 len += 8; 1874 len += 8;
1875 real_buffer[0] = 0; 1875 real_buffer[0] = 0;
1876 real_buffer[1] = 0; 1876 real_buffer[1] = 0;
1877 real_buffer[2] = data->medium_type; 1877 real_buffer[2] = data->medium_type;
1878 real_buffer[3] = data->device_specific; 1878 real_buffer[3] = data->device_specific;
1879 real_buffer[4] = data->longlba ? 0x01 : 0; 1879 real_buffer[4] = data->longlba ? 0x01 : 0;
1880 real_buffer[5] = 0; 1880 real_buffer[5] = 0;
1881 real_buffer[6] = data->block_descriptor_length >> 8; 1881 real_buffer[6] = data->block_descriptor_length >> 8;
1882 real_buffer[7] = data->block_descriptor_length; 1882 real_buffer[7] = data->block_descriptor_length;
1883 1883
1884 cmd[0] = MODE_SELECT_10; 1884 cmd[0] = MODE_SELECT_10;
1885 cmd[7] = len >> 8; 1885 cmd[7] = len >> 8;
1886 cmd[8] = len; 1886 cmd[8] = len;
1887 } else { 1887 } else {
1888 if (len > 255 || data->block_descriptor_length > 255 || 1888 if (len > 255 || data->block_descriptor_length > 255 ||
1889 data->longlba) 1889 data->longlba)
1890 return -EINVAL; 1890 return -EINVAL;
1891 1891
1892 real_buffer = kmalloc(4 + len, GFP_KERNEL); 1892 real_buffer = kmalloc(4 + len, GFP_KERNEL);
1893 if (!real_buffer) 1893 if (!real_buffer)
1894 return -ENOMEM; 1894 return -ENOMEM;
1895 memcpy(real_buffer + 4, buffer, len); 1895 memcpy(real_buffer + 4, buffer, len);
1896 len += 4; 1896 len += 4;
1897 real_buffer[0] = 0; 1897 real_buffer[0] = 0;
1898 real_buffer[1] = data->medium_type; 1898 real_buffer[1] = data->medium_type;
1899 real_buffer[2] = data->device_specific; 1899 real_buffer[2] = data->device_specific;
1900 real_buffer[3] = data->block_descriptor_length; 1900 real_buffer[3] = data->block_descriptor_length;
1901 1901
1902 1902
1903 cmd[0] = MODE_SELECT; 1903 cmd[0] = MODE_SELECT;
1904 cmd[4] = len; 1904 cmd[4] = len;
1905 } 1905 }
1906 1906
1907 ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len, 1907 ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
1908 sshdr, timeout, retries, NULL); 1908 sshdr, timeout, retries, NULL);
1909 kfree(real_buffer); 1909 kfree(real_buffer);
1910 return ret; 1910 return ret;
1911 } 1911 }
1912 EXPORT_SYMBOL_GPL(scsi_mode_select); 1912 EXPORT_SYMBOL_GPL(scsi_mode_select);
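A rough usage sketch (illustrative only, not from this commit): the caller supplies the raw mode page bytes in @buffer (scsi_mode_select() prepends the mode parameter header itself) and reuses the scsi_mode_data header obtained from an earlier scsi_mode_sense(). The helper name, timeout, and retry count are arbitrary; headers are assumed to be <scsi/scsi_device.h> and <scsi/scsi_eh.h>:

        /* Hypothetical: write back a mode page previously read into page_buf. */
        static int mydrv_write_mode_page(struct scsi_device *sdev,
                                         unsigned char *page_buf, int page_len,
                                         struct scsi_mode_data *hdr_data)
        {
                struct scsi_sense_hdr sshdr;
                int res;

                res = scsi_mode_select(sdev, 1 /* PF */, 0 /* don't save */,
                                       page_buf[0] & 0x3f, page_buf, page_len,
                                       10 * HZ, 3, hdr_data, &sshdr);
                if (res)
                        sdev_printk(KERN_WARNING, sdev,
                                    "MODE SELECT failed, result 0x%x\n", res);
                return res;
        }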
1913 1913
1914 /** 1914 /**
1915 * scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary. 1915 * scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary.
1916 * @sdev: SCSI device to be queried 1916 * @sdev: SCSI device to be queried
1917 * @dbd: set if mode sense will allow block descriptors to be returned 1917 * @dbd: set if mode sense will allow block descriptors to be returned
1918 * @modepage: mode page being requested 1918 * @modepage: mode page being requested
1919 * @buffer: request buffer (may not be smaller than eight bytes) 1919 * @buffer: request buffer (may not be smaller than eight bytes)
1920 * @len: length of request buffer. 1920 * @len: length of request buffer.
1921 * @timeout: command timeout 1921 * @timeout: command timeout
1922 * @retries: number of retries before failing 1922 * @retries: number of retries before failing
1923 * @data: returns a structure abstracting the mode header data 1923 * @data: returns a structure abstracting the mode header data
1924 * @sshdr: place to put sense data (or NULL if no sense to be collected). 1924 * @sshdr: place to put sense data (or NULL if no sense to be collected).
1925 * must be SCSI_SENSE_BUFFERSIZE big. 1925 * must be SCSI_SENSE_BUFFERSIZE big.
1926 * 1926 *
1927 * Returns zero if unsuccessful, or the header offset (either 4 1927 * Returns zero if unsuccessful, or the header offset (either 4
1928 * or 8 depending on whether a six or ten byte command was 1928 * or 8 depending on whether a six or ten byte command was
1929 * issued) if successful. 1929 * issued) if successful.
1930 */ 1930 */
1931 int 1931 int
1932 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, 1932 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1933 unsigned char *buffer, int len, int timeout, int retries, 1933 unsigned char *buffer, int len, int timeout, int retries,
1934 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) 1934 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
1935 { 1935 {
1936 unsigned char cmd[12]; 1936 unsigned char cmd[12];
1937 int use_10_for_ms; 1937 int use_10_for_ms;
1938 int header_length; 1938 int header_length;
1939 int result; 1939 int result;
1940 struct scsi_sense_hdr my_sshdr; 1940 struct scsi_sense_hdr my_sshdr;
1941 1941
1942 memset(data, 0, sizeof(*data)); 1942 memset(data, 0, sizeof(*data));
1943 memset(&cmd[0], 0, 12); 1943 memset(&cmd[0], 0, 12);
1944 cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */ 1944 cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */
1945 cmd[2] = modepage; 1945 cmd[2] = modepage;
1946 1946
1947 /* caller might not be interested in sense, but we need it */ 1947 /* caller might not be interested in sense, but we need it */
1948 if (!sshdr) 1948 if (!sshdr)
1949 sshdr = &my_sshdr; 1949 sshdr = &my_sshdr;
1950 1950
1951 retry: 1951 retry:
1952 use_10_for_ms = sdev->use_10_for_ms; 1952 use_10_for_ms = sdev->use_10_for_ms;
1953 1953
1954 if (use_10_for_ms) { 1954 if (use_10_for_ms) {
1955 if (len < 8) 1955 if (len < 8)
1956 len = 8; 1956 len = 8;
1957 1957
1958 cmd[0] = MODE_SENSE_10; 1958 cmd[0] = MODE_SENSE_10;
1959 cmd[8] = len; 1959 cmd[8] = len;
1960 header_length = 8; 1960 header_length = 8;
1961 } else { 1961 } else {
1962 if (len < 4) 1962 if (len < 4)
1963 len = 4; 1963 len = 4;
1964 1964
1965 cmd[0] = MODE_SENSE; 1965 cmd[0] = MODE_SENSE;
1966 cmd[4] = len; 1966 cmd[4] = len;
1967 header_length = 4; 1967 header_length = 4;
1968 } 1968 }
1969 1969
1970 memset(buffer, 0, len); 1970 memset(buffer, 0, len);
1971 1971
1972 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len, 1972 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
1973 sshdr, timeout, retries, NULL); 1973 sshdr, timeout, retries, NULL);
1974 1974
1975 /* This code looks awful: what it's doing is making sure an 1975 /* This code looks awful: what it's doing is making sure an
1976 * ILLEGAL REQUEST sense return identifies the actual command 1976 * ILLEGAL REQUEST sense return identifies the actual command
1977 * byte as the problem. MODE_SENSE commands can return 1977 * byte as the problem. MODE_SENSE commands can return
1978 * ILLEGAL REQUEST if the code page isn't supported */ 1978 * ILLEGAL REQUEST if the code page isn't supported */
1979 1979
1980 if (use_10_for_ms && !scsi_status_is_good(result) && 1980 if (use_10_for_ms && !scsi_status_is_good(result) &&
1981 (driver_byte(result) & DRIVER_SENSE)) { 1981 (driver_byte(result) & DRIVER_SENSE)) {
1982 if (scsi_sense_valid(sshdr)) { 1982 if (scsi_sense_valid(sshdr)) {
1983 if ((sshdr->sense_key == ILLEGAL_REQUEST) && 1983 if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
1984 (sshdr->asc == 0x20) && (sshdr->ascq == 0)) { 1984 (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
1985 /* 1985 /*
1986 * Invalid command operation code 1986 * Invalid command operation code
1987 */ 1987 */
1988 sdev->use_10_for_ms = 0; 1988 sdev->use_10_for_ms = 0;
1989 goto retry; 1989 goto retry;
1990 } 1990 }
1991 } 1991 }
1992 } 1992 }
1993 1993
1994 if(scsi_status_is_good(result)) { 1994 if(scsi_status_is_good(result)) {
1995 if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b && 1995 if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
1996 (modepage == 6 || modepage == 8))) { 1996 (modepage == 6 || modepage == 8))) {
1997 /* Initio breakage? */ 1997 /* Initio breakage? */
1998 header_length = 0; 1998 header_length = 0;
1999 data->length = 13; 1999 data->length = 13;
2000 data->medium_type = 0; 2000 data->medium_type = 0;
2001 data->device_specific = 0; 2001 data->device_specific = 0;
2002 data->longlba = 0; 2002 data->longlba = 0;
2003 data->block_descriptor_length = 0; 2003 data->block_descriptor_length = 0;
2004 } else if(use_10_for_ms) { 2004 } else if(use_10_for_ms) {
2005 data->length = buffer[0]*256 + buffer[1] + 2; 2005 data->length = buffer[0]*256 + buffer[1] + 2;
2006 data->medium_type = buffer[2]; 2006 data->medium_type = buffer[2];
2007 data->device_specific = buffer[3]; 2007 data->device_specific = buffer[3];
2008 data->longlba = buffer[4] & 0x01; 2008 data->longlba = buffer[4] & 0x01;
2009 data->block_descriptor_length = buffer[6]*256 2009 data->block_descriptor_length = buffer[6]*256
2010 + buffer[7]; 2010 + buffer[7];
2011 } else { 2011 } else {
2012 data->length = buffer[0] + 1; 2012 data->length = buffer[0] + 1;
2013 data->medium_type = buffer[1]; 2013 data->medium_type = buffer[1];
2014 data->device_specific = buffer[2]; 2014 data->device_specific = buffer[2];
2015 data->block_descriptor_length = buffer[3]; 2015 data->block_descriptor_length = buffer[3];
2016 } 2016 }
2017 data->header_length = header_length; 2017 data->header_length = header_length;
2018 } 2018 }
2019 2019
2020 return result; 2020 return result;
2021 } 2021 }
2022 EXPORT_SYMBOL(scsi_mode_sense); 2022 EXPORT_SYMBOL(scsi_mode_sense);
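A hedged example of how a caller might read a mode page with the routine above (illustrative only; the page code, timeout, and retries are arbitrary choices, and the headers are assumed to be <scsi/scsi.h>, <scsi/scsi_device.h>, and <scsi/scsi_eh.h>):

        /* Hypothetical: read mode page 0x08 (caching) and report its layout. */
        static void mydrv_read_caching_page(struct scsi_device *sdev)
        {
                unsigned char buf[128];
                struct scsi_mode_data data;
                struct scsi_sense_hdr sshdr;
                int res;

                res = scsi_mode_sense(sdev, 0 /* dbd */, 0x08, buf, sizeof(buf),
                                      10 * HZ, 3, &data, &sshdr);
                if (!scsi_status_is_good(res)) {
                        sdev_printk(KERN_WARNING, sdev,
                                    "MODE SENSE failed, result 0x%x\n", res);
                        return;
                }
                sdev_printk(KERN_INFO, sdev,
                            "mode data length %d, header %d, block desc %d\n",
                            data.length, data.header_length,
                            data.block_descriptor_length);
        }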
2023 2023
2024 /** 2024 /**
2025 * scsi_test_unit_ready - test if unit is ready 2025 * scsi_test_unit_ready - test if unit is ready
2026 * @sdev: scsi device to change the state of. 2026 * @sdev: scsi device to change the state of.
2027 * @timeout: command timeout 2027 * @timeout: command timeout
2028 * @retries: number of retries before failing 2028 * @retries: number of retries before failing
2029 * @sshdr_external: Optional pointer to struct scsi_sense_hdr for 2029 * @sshdr_external: Optional pointer to struct scsi_sense_hdr for
2030 * returning sense. Make sure that this is cleared before passing 2030 * returning sense. Make sure that this is cleared before passing
2031 * in. 2031 * in.
2032 * 2032 *
2033 * Returns zero if successful or an error if the TUR failed. For 2033 * Returns zero if successful or an error if the TUR failed. For
2034 * removable media, UNIT_ATTENTION sets ->changed flag. 2034 * removable media, UNIT_ATTENTION sets ->changed flag.
2035 **/ 2035 **/
2036 int 2036 int
2037 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries, 2037 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
2038 struct scsi_sense_hdr *sshdr_external) 2038 struct scsi_sense_hdr *sshdr_external)
2039 { 2039 {
2040 char cmd[] = { 2040 char cmd[] = {
2041 TEST_UNIT_READY, 0, 0, 0, 0, 0, 2041 TEST_UNIT_READY, 0, 0, 0, 0, 0,
2042 }; 2042 };
2043 struct scsi_sense_hdr *sshdr; 2043 struct scsi_sense_hdr *sshdr;
2044 int result; 2044 int result;
2045 2045
2046 if (!sshdr_external) 2046 if (!sshdr_external)
2047 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL); 2047 sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
2048 else 2048 else
2049 sshdr = sshdr_external; 2049 sshdr = sshdr_external;
2050 2050
2051 /* try to eat the UNIT_ATTENTION if there are enough retries */ 2051 /* try to eat the UNIT_ATTENTION if there are enough retries */
2052 do { 2052 do {
2053 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr, 2053 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
2054 timeout, retries, NULL); 2054 timeout, retries, NULL);
2055 if (sdev->removable && scsi_sense_valid(sshdr) && 2055 if (sdev->removable && scsi_sense_valid(sshdr) &&
2056 sshdr->sense_key == UNIT_ATTENTION) 2056 sshdr->sense_key == UNIT_ATTENTION)
2057 sdev->changed = 1; 2057 sdev->changed = 1;
2058 } while (scsi_sense_valid(sshdr) && 2058 } while (scsi_sense_valid(sshdr) &&
2059 sshdr->sense_key == UNIT_ATTENTION && --retries); 2059 sshdr->sense_key == UNIT_ATTENTION && --retries);
2060 2060
2061 if (!sshdr_external) 2061 if (!sshdr_external)
2062 kfree(sshdr); 2062 kfree(sshdr);
2063 return result; 2063 return result;
2064 } 2064 }
2065 EXPORT_SYMBOL(scsi_test_unit_ready); 2065 EXPORT_SYMBOL(scsi_test_unit_ready);
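For illustration (not part of the commit), a caller might poll readiness as below; the timeout and retry counts are arbitrary, the sense header is cleared before use as the kerneldoc requires, and the headers are assumed to be <scsi/scsi_device.h> and <scsi/scsi_eh.h>:

        /* Hypothetical: check whether a (possibly removable) device is ready. */
        static bool mydrv_device_ready(struct scsi_device *sdev)
        {
                struct scsi_sense_hdr sshdr = { };
                int res;

                res = scsi_test_unit_ready(sdev, 10 * HZ, 5, &sshdr);
                if (res) {
                        if (scsi_sense_valid(&sshdr))
                                sdev_printk(KERN_INFO, sdev,
                                            "not ready: sense %x/%x/%x\n",
                                            sshdr.sense_key, sshdr.asc,
                                            sshdr.ascq);
                        return false;
                }
                return true;
        }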
2066 2066
2067 /** 2067 /**
2068 * scsi_device_set_state - Take the given device through the device state model. 2068 * scsi_device_set_state - Take the given device through the device state model.
2069 * @sdev: scsi device to change the state of. 2069 * @sdev: scsi device to change the state of.
2070 * @state: state to change to. 2070 * @state: state to change to.
2071 * 2071 *
2072 * Returns zero if successful or an error if the requested 2072 * Returns zero if successful or an error if the requested
2073 * transition is illegal. 2073 * transition is illegal.
2074 */ 2074 */
2075 int 2075 int
2076 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) 2076 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2077 { 2077 {
2078 enum scsi_device_state oldstate = sdev->sdev_state; 2078 enum scsi_device_state oldstate = sdev->sdev_state;
2079 2079
2080 if (state == oldstate) 2080 if (state == oldstate)
2081 return 0; 2081 return 0;
2082 2082
2083 switch (state) { 2083 switch (state) {
2084 case SDEV_CREATED: 2084 case SDEV_CREATED:
2085 switch (oldstate) { 2085 switch (oldstate) {
2086 case SDEV_CREATED_BLOCK: 2086 case SDEV_CREATED_BLOCK:
2087 break; 2087 break;
2088 default: 2088 default:
2089 goto illegal; 2089 goto illegal;
2090 } 2090 }
2091 break; 2091 break;
2092 2092
2093 case SDEV_RUNNING: 2093 case SDEV_RUNNING:
2094 switch (oldstate) { 2094 switch (oldstate) {
2095 case SDEV_CREATED: 2095 case SDEV_CREATED:
2096 case SDEV_OFFLINE: 2096 case SDEV_OFFLINE:
2097 case SDEV_TRANSPORT_OFFLINE: 2097 case SDEV_TRANSPORT_OFFLINE:
2098 case SDEV_QUIESCE: 2098 case SDEV_QUIESCE:
2099 case SDEV_BLOCK: 2099 case SDEV_BLOCK:
2100 break; 2100 break;
2101 default: 2101 default:
2102 goto illegal; 2102 goto illegal;
2103 } 2103 }
2104 break; 2104 break;
2105 2105
2106 case SDEV_QUIESCE: 2106 case SDEV_QUIESCE:
2107 switch (oldstate) { 2107 switch (oldstate) {
2108 case SDEV_RUNNING: 2108 case SDEV_RUNNING:
2109 case SDEV_OFFLINE: 2109 case SDEV_OFFLINE:
2110 case SDEV_TRANSPORT_OFFLINE: 2110 case SDEV_TRANSPORT_OFFLINE:
2111 break; 2111 break;
2112 default: 2112 default:
2113 goto illegal; 2113 goto illegal;
2114 } 2114 }
2115 break; 2115 break;
2116 2116
2117 case SDEV_OFFLINE: 2117 case SDEV_OFFLINE:
2118 case SDEV_TRANSPORT_OFFLINE: 2118 case SDEV_TRANSPORT_OFFLINE:
2119 switch (oldstate) { 2119 switch (oldstate) {
2120 case SDEV_CREATED: 2120 case SDEV_CREATED:
2121 case SDEV_RUNNING: 2121 case SDEV_RUNNING:
2122 case SDEV_QUIESCE: 2122 case SDEV_QUIESCE:
2123 case SDEV_BLOCK: 2123 case SDEV_BLOCK:
2124 break; 2124 break;
2125 default: 2125 default:
2126 goto illegal; 2126 goto illegal;
2127 } 2127 }
2128 break; 2128 break;
2129 2129
2130 case SDEV_BLOCK: 2130 case SDEV_BLOCK:
2131 switch (oldstate) { 2131 switch (oldstate) {
2132 case SDEV_RUNNING: 2132 case SDEV_RUNNING:
2133 case SDEV_CREATED_BLOCK: 2133 case SDEV_CREATED_BLOCK:
2134 break; 2134 break;
2135 default: 2135 default:
2136 goto illegal; 2136 goto illegal;
2137 } 2137 }
2138 break; 2138 break;
2139 2139
2140 case SDEV_CREATED_BLOCK: 2140 case SDEV_CREATED_BLOCK:
2141 switch (oldstate) { 2141 switch (oldstate) {
2142 case SDEV_CREATED: 2142 case SDEV_CREATED:
2143 break; 2143 break;
2144 default: 2144 default:
2145 goto illegal; 2145 goto illegal;
2146 } 2146 }
2147 break; 2147 break;
2148 2148
2149 case SDEV_CANCEL: 2149 case SDEV_CANCEL:
2150 switch (oldstate) { 2150 switch (oldstate) {
2151 case SDEV_CREATED: 2151 case SDEV_CREATED:
2152 case SDEV_RUNNING: 2152 case SDEV_RUNNING:
2153 case SDEV_QUIESCE: 2153 case SDEV_QUIESCE:
2154 case SDEV_OFFLINE: 2154 case SDEV_OFFLINE:
2155 case SDEV_TRANSPORT_OFFLINE: 2155 case SDEV_TRANSPORT_OFFLINE:
2156 case SDEV_BLOCK: 2156 case SDEV_BLOCK:
2157 break; 2157 break;
2158 default: 2158 default:
2159 goto illegal; 2159 goto illegal;
2160 } 2160 }
2161 break; 2161 break;
2162 2162
2163 case SDEV_DEL: 2163 case SDEV_DEL:
2164 switch (oldstate) { 2164 switch (oldstate) {
2165 case SDEV_CREATED: 2165 case SDEV_CREATED:
2166 case SDEV_RUNNING: 2166 case SDEV_RUNNING:
2167 case SDEV_OFFLINE: 2167 case SDEV_OFFLINE:
2168 case SDEV_TRANSPORT_OFFLINE: 2168 case SDEV_TRANSPORT_OFFLINE:
2169 case SDEV_CANCEL: 2169 case SDEV_CANCEL:
2170 case SDEV_CREATED_BLOCK: 2170 case SDEV_CREATED_BLOCK:
2171 break; 2171 break;
2172 default: 2172 default:
2173 goto illegal; 2173 goto illegal;
2174 } 2174 }
2175 break; 2175 break;
2176 2176
2177 } 2177 }
2178 sdev->sdev_state = state; 2178 sdev->sdev_state = state;
2179 return 0; 2179 return 0;
2180 2180
2181 illegal: 2181 illegal:
2182 SCSI_LOG_ERROR_RECOVERY(1, 2182 SCSI_LOG_ERROR_RECOVERY(1,
2183 sdev_printk(KERN_ERR, sdev, 2183 sdev_printk(KERN_ERR, sdev,
2184 "Illegal state transition %s->%s", 2184 "Illegal state transition %s->%s",
2185 scsi_device_state_name(oldstate), 2185 scsi_device_state_name(oldstate),
2186 scsi_device_state_name(state)) 2186 scsi_device_state_name(state))
2187 ); 2187 );
2188 return -EINVAL; 2188 return -EINVAL;
2189 } 2189 }
2190 EXPORT_SYMBOL(scsi_device_set_state); 2190 EXPORT_SYMBOL(scsi_device_set_state);
2191 2191
2192 /** 2192 /**
2193 * scsi_evt_emit - emit a single SCSI device uevent 2193 * scsi_evt_emit - emit a single SCSI device uevent
2194 * @sdev: associated SCSI device 2194 * @sdev: associated SCSI device
2195 * @evt: event to emit 2195 * @evt: event to emit
2196 * 2196 *
2197 * Send a single uevent (scsi_event) to the associated scsi_device. 2197 * Send a single uevent (scsi_event) to the associated scsi_device.
2198 */ 2198 */
2199 static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt) 2199 static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
2200 { 2200 {
2201 int idx = 0; 2201 int idx = 0;
2202 char *envp[3]; 2202 char *envp[3];
2203 2203
2204 switch (evt->evt_type) { 2204 switch (evt->evt_type) {
2205 case SDEV_EVT_MEDIA_CHANGE: 2205 case SDEV_EVT_MEDIA_CHANGE:
2206 envp[idx++] = "SDEV_MEDIA_CHANGE=1"; 2206 envp[idx++] = "SDEV_MEDIA_CHANGE=1";
2207 break; 2207 break;
2208 case SDEV_EVT_INQUIRY_CHANGE_REPORTED: 2208 case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
2209 envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED"; 2209 envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
2210 break; 2210 break;
2211 case SDEV_EVT_CAPACITY_CHANGE_REPORTED: 2211 case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
2212 envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED"; 2212 envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED";
2213 break; 2213 break;
2214 case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED: 2214 case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
2215 envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED"; 2215 envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED";
2216 break; 2216 break;
2217 case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED: 2217 case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
2218 envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED"; 2218 envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED";
2219 break; 2219 break;
2220 case SDEV_EVT_LUN_CHANGE_REPORTED: 2220 case SDEV_EVT_LUN_CHANGE_REPORTED:
2221 envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED"; 2221 envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
2222 break; 2222 break;
2223 default: 2223 default:
2224 /* do nothing */ 2224 /* do nothing */
2225 break; 2225 break;
2226 } 2226 }
2227 2227
2228 envp[idx++] = NULL; 2228 envp[idx++] = NULL;
2229 2229
2230 kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp); 2230 kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
2231 } 2231 }
2232 2232
2233 /** 2233 /**
2234 * scsi_evt_thread - send a uevent for each scsi event 2234 * scsi_evt_thread - send a uevent for each scsi event
2235 * @work: work struct for scsi_device 2235 * @work: work struct for scsi_device
2236 * 2236 *
2237 * Dispatch queued events to their associated scsi_device kobjects 2237 * Dispatch queued events to their associated scsi_device kobjects
2238 * as uevents. 2238 * as uevents.
2239 */ 2239 */
2240 void scsi_evt_thread(struct work_struct *work) 2240 void scsi_evt_thread(struct work_struct *work)
2241 { 2241 {
2242 struct scsi_device *sdev; 2242 struct scsi_device *sdev;
2243 enum scsi_device_event evt_type; 2243 enum scsi_device_event evt_type;
2244 LIST_HEAD(event_list); 2244 LIST_HEAD(event_list);
2245 2245
2246 sdev = container_of(work, struct scsi_device, event_work); 2246 sdev = container_of(work, struct scsi_device, event_work);
2247 2247
2248 for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++) 2248 for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++)
2249 if (test_and_clear_bit(evt_type, sdev->pending_events)) 2249 if (test_and_clear_bit(evt_type, sdev->pending_events))
2250 sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL); 2250 sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL);
2251 2251
2252 while (1) { 2252 while (1) {
2253 struct scsi_event *evt; 2253 struct scsi_event *evt;
2254 struct list_head *this, *tmp; 2254 struct list_head *this, *tmp;
2255 unsigned long flags; 2255 unsigned long flags;
2256 2256
2257 spin_lock_irqsave(&sdev->list_lock, flags); 2257 spin_lock_irqsave(&sdev->list_lock, flags);
2258 list_splice_init(&sdev->event_list, &event_list); 2258 list_splice_init(&sdev->event_list, &event_list);
2259 spin_unlock_irqrestore(&sdev->list_lock, flags); 2259 spin_unlock_irqrestore(&sdev->list_lock, flags);
2260 2260
2261 if (list_empty(&event_list)) 2261 if (list_empty(&event_list))
2262 break; 2262 break;
2263 2263
2264 list_for_each_safe(this, tmp, &event_list) { 2264 list_for_each_safe(this, tmp, &event_list) {
2265 evt = list_entry(this, struct scsi_event, node); 2265 evt = list_entry(this, struct scsi_event, node);
2266 list_del(&evt->node); 2266 list_del(&evt->node);
2267 scsi_evt_emit(sdev, evt); 2267 scsi_evt_emit(sdev, evt);
2268 kfree(evt); 2268 kfree(evt);
2269 } 2269 }
2270 } 2270 }
2271 } 2271 }
2272 2272
2273 /** 2273 /**
2274 * sdev_evt_send - send asserted event to uevent thread 2274 * sdev_evt_send - send asserted event to uevent thread
2275 * @sdev: scsi_device event occurred on 2275 * @sdev: scsi_device event occurred on
2276 * @evt: event to send 2276 * @evt: event to send
2277 * 2277 *
2278 * Assert scsi device event asynchronously. 2278 * Assert scsi device event asynchronously.
2279 */ 2279 */
2280 void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt) 2280 void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
2281 { 2281 {
2282 unsigned long flags; 2282 unsigned long flags;
2283 2283
2284 #if 0 2284 #if 0
2285 /* FIXME: currently this check eliminates all media change events 2285 /* FIXME: currently this check eliminates all media change events
2286 * for polled devices. Need to update to discriminate between AN 2286 * for polled devices. Need to update to discriminate between AN
2287 * and polled events */ 2287 * and polled events */
2288 if (!test_bit(evt->evt_type, sdev->supported_events)) { 2288 if (!test_bit(evt->evt_type, sdev->supported_events)) {
2289 kfree(evt); 2289 kfree(evt);
2290 return; 2290 return;
2291 } 2291 }
2292 #endif 2292 #endif
2293 2293
2294 spin_lock_irqsave(&sdev->list_lock, flags); 2294 spin_lock_irqsave(&sdev->list_lock, flags);
2295 list_add_tail(&evt->node, &sdev->event_list); 2295 list_add_tail(&evt->node, &sdev->event_list);
2296 schedule_work(&sdev->event_work); 2296 schedule_work(&sdev->event_work);
2297 spin_unlock_irqrestore(&sdev->list_lock, flags); 2297 spin_unlock_irqrestore(&sdev->list_lock, flags);
2298 } 2298 }
2299 EXPORT_SYMBOL_GPL(sdev_evt_send); 2299 EXPORT_SYMBOL_GPL(sdev_evt_send);
2300 2300
2301 /** 2301 /**
2302 * sdev_evt_alloc - allocate a new scsi event 2302 * sdev_evt_alloc - allocate a new scsi event
2303 * @evt_type: type of event to allocate 2303 * @evt_type: type of event to allocate
2304 * @gfpflags: GFP flags for allocation 2304 * @gfpflags: GFP flags for allocation
2305 * 2305 *
2306 * Allocates and returns a new scsi_event. 2306 * Allocates and returns a new scsi_event.
2307 */ 2307 */
2308 struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type, 2308 struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
2309 gfp_t gfpflags) 2309 gfp_t gfpflags)
2310 { 2310 {
2311 struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags); 2311 struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
2312 if (!evt) 2312 if (!evt)
2313 return NULL; 2313 return NULL;
2314 2314
2315 evt->evt_type = evt_type; 2315 evt->evt_type = evt_type;
2316 INIT_LIST_HEAD(&evt->node); 2316 INIT_LIST_HEAD(&evt->node);
2317 2317
2318 /* evt_type-specific initialization, if any */ 2318 /* evt_type-specific initialization, if any */
2319 switch (evt_type) { 2319 switch (evt_type) {
2320 case SDEV_EVT_MEDIA_CHANGE: 2320 case SDEV_EVT_MEDIA_CHANGE:
2321 case SDEV_EVT_INQUIRY_CHANGE_REPORTED: 2321 case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
2322 case SDEV_EVT_CAPACITY_CHANGE_REPORTED: 2322 case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
2323 case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED: 2323 case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
2324 case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED: 2324 case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
2325 case SDEV_EVT_LUN_CHANGE_REPORTED: 2325 case SDEV_EVT_LUN_CHANGE_REPORTED:
2326 default: 2326 default:
2327 /* do nothing */ 2327 /* do nothing */
2328 break; 2328 break;
2329 } 2329 }
2330 2330
2331 return evt; 2331 return evt;
2332 } 2332 }
2333 EXPORT_SYMBOL_GPL(sdev_evt_alloc); 2333 EXPORT_SYMBOL_GPL(sdev_evt_alloc);
2334 2334
2335 /** 2335 /**
2336 * sdev_evt_send_simple - send asserted event to uevent thread 2336 * sdev_evt_send_simple - send asserted event to uevent thread
2337 * @sdev: scsi_device event occurred on 2337 * @sdev: scsi_device event occurred on
2338 * @evt_type: type of event to send 2338 * @evt_type: type of event to send
2339 * @gfpflags: GFP flags for allocation 2339 * @gfpflags: GFP flags for allocation
2340 * 2340 *
2341 * Assert scsi device event asynchronously, given an event type. 2341 * Assert scsi device event asynchronously, given an event type.
2342 */ 2342 */
2343 void sdev_evt_send_simple(struct scsi_device *sdev, 2343 void sdev_evt_send_simple(struct scsi_device *sdev,
2344 enum scsi_device_event evt_type, gfp_t gfpflags) 2344 enum scsi_device_event evt_type, gfp_t gfpflags)
2345 { 2345 {
2346 struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags); 2346 struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
2347 if (!evt) { 2347 if (!evt) {
2348 sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n", 2348 sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
2349 evt_type); 2349 evt_type);
2350 return; 2350 return;
2351 } 2351 }
2352 2352
2353 sdev_evt_send(sdev, evt); 2353 sdev_evt_send(sdev, evt);
2354 } 2354 }
2355 EXPORT_SYMBOL_GPL(sdev_evt_send_simple); 2355 EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
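As a minimal sketch (illustrative, not from this commit) of how an LLDD or upper-level driver might assert one of the events listed above, either in one step or via the alloc/send pair; the helper names are hypothetical and <scsi/scsi_device.h> is the assumed header:

        /* Hypothetical: report a media change detected outside of I/O. */
        static void mydrv_report_media_change(struct scsi_device *sdev)
        {
                sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
        }

        /* Equivalent two-step form, usable from atomic context. */
        static void mydrv_report_media_change_atomic(struct scsi_device *sdev)
        {
                struct scsi_event *evt;

                evt = sdev_evt_alloc(SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
                if (evt)
                        sdev_evt_send(sdev, evt);
        }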
2356 2356
2357 /** 2357 /**
2358 * scsi_device_quiesce - Block user issued commands. 2358 * scsi_device_quiesce - Block user issued commands.
2359 * @sdev: scsi device to quiesce. 2359 * @sdev: scsi device to quiesce.
2360 * 2360 *
2361 * This works by trying to transition to the SDEV_QUIESCE state 2361 * This works by trying to transition to the SDEV_QUIESCE state
2362 * (which must be a legal transition). When the device is in this 2362 * (which must be a legal transition). When the device is in this
2363 * state, only special requests will be accepted, all others will 2363 * state, only special requests will be accepted, all others will
2364 * be deferred. Since special requests may also be requeued requests, 2364 * be deferred. Since special requests may also be requeued requests,
2365 * a successful return doesn't guarantee the device will be 2365 * a successful return doesn't guarantee the device will be
2366 * totally quiescent. 2366 * totally quiescent.
2367 * 2367 *
2368 * Must be called with user context, may sleep. 2368 * Must be called with user context, may sleep.
2369 * 2369 *
2370 * Returns zero if successful or an error if not. 2370 * Returns zero if successful or an error if not.
2371 */ 2371 */
2372 int 2372 int
2373 scsi_device_quiesce(struct scsi_device *sdev) 2373 scsi_device_quiesce(struct scsi_device *sdev)
2374 { 2374 {
2375 int err = scsi_device_set_state(sdev, SDEV_QUIESCE); 2375 int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2376 if (err) 2376 if (err)
2377 return err; 2377 return err;
2378 2378
2379 scsi_run_queue(sdev->request_queue); 2379 scsi_run_queue(sdev->request_queue);
2380 while (atomic_read(&sdev->device_busy)) { 2380 while (atomic_read(&sdev->device_busy)) {
2381 msleep_interruptible(200); 2381 msleep_interruptible(200);
2382 scsi_run_queue(sdev->request_queue); 2382 scsi_run_queue(sdev->request_queue);
2383 } 2383 }
2384 return 0; 2384 return 0;
2385 } 2385 }
2386 EXPORT_SYMBOL(scsi_device_quiesce); 2386 EXPORT_SYMBOL(scsi_device_quiesce);
2387 2387
2388 /** 2388 /**
2389 * scsi_device_resume - Restart user issued commands to a quiesced device. 2389 * scsi_device_resume - Restart user issued commands to a quiesced device.
2390 * @sdev: scsi device to resume. 2390 * @sdev: scsi device to resume.
2391 * 2391 *
2392 * Moves the device from quiesced back to running and restarts the 2392 * Moves the device from quiesced back to running and restarts the
2393 * queues. 2393 * queues.
2394 * 2394 *
2395 * Must be called with user context, may sleep. 2395 * Must be called with user context, may sleep.
2396 */ 2396 */
2397 void scsi_device_resume(struct scsi_device *sdev) 2397 void scsi_device_resume(struct scsi_device *sdev)
2398 { 2398 {
2399 /* check if the device state was mutated prior to resume, and if 2399 /* check if the device state was mutated prior to resume, and if
2400 * so assume the state is being managed elsewhere (for example 2400 * so assume the state is being managed elsewhere (for example
2401 * device deleted during suspend) 2401 * device deleted during suspend)
2402 */ 2402 */
2403 if (sdev->sdev_state != SDEV_QUIESCE || 2403 if (sdev->sdev_state != SDEV_QUIESCE ||
2404 scsi_device_set_state(sdev, SDEV_RUNNING)) 2404 scsi_device_set_state(sdev, SDEV_RUNNING))
2405 return; 2405 return;
2406 scsi_run_queue(sdev->request_queue); 2406 scsi_run_queue(sdev->request_queue);
2407 } 2407 }
2408 EXPORT_SYMBOL(scsi_device_resume); 2408 EXPORT_SYMBOL(scsi_device_resume);
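A hedged sketch of the typical quiesce/resume bracket around a maintenance operation (not part of this commit; the maintenance helper is hypothetical and <scsi/scsi_device.h> is the assumed header):

        /* Hypothetical: quiesce a device around a maintenance operation. */
        static int mydrv_do_maintenance(struct scsi_device *sdev)
        {
                int err;

                err = scsi_device_quiesce(sdev);        /* may sleep */
                if (err)
                        return err;

                err = mydrv_maintenance_op(sdev);       /* hypothetical helper */

                scsi_device_resume(sdev);               /* restart the queue */
                return err;
        }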
2409 2409
2410 static void 2410 static void
2411 device_quiesce_fn(struct scsi_device *sdev, void *data) 2411 device_quiesce_fn(struct scsi_device *sdev, void *data)
2412 { 2412 {
2413 scsi_device_quiesce(sdev); 2413 scsi_device_quiesce(sdev);
2414 } 2414 }
2415 2415
2416 void 2416 void
2417 scsi_target_quiesce(struct scsi_target *starget) 2417 scsi_target_quiesce(struct scsi_target *starget)
2418 { 2418 {
2419 starget_for_each_device(starget, NULL, device_quiesce_fn); 2419 starget_for_each_device(starget, NULL, device_quiesce_fn);
2420 } 2420 }
2421 EXPORT_SYMBOL(scsi_target_quiesce); 2421 EXPORT_SYMBOL(scsi_target_quiesce);
2422 2422
2423 static void 2423 static void
2424 device_resume_fn(struct scsi_device *sdev, void *data) 2424 device_resume_fn(struct scsi_device *sdev, void *data)
2425 { 2425 {
2426 scsi_device_resume(sdev); 2426 scsi_device_resume(sdev);
2427 } 2427 }
2428 2428
2429 void 2429 void
2430 scsi_target_resume(struct scsi_target *starget) 2430 scsi_target_resume(struct scsi_target *starget)
2431 { 2431 {
2432 starget_for_each_device(starget, NULL, device_resume_fn); 2432 starget_for_each_device(starget, NULL, device_resume_fn);
2433 } 2433 }
2434 EXPORT_SYMBOL(scsi_target_resume); 2434 EXPORT_SYMBOL(scsi_target_resume);
2435 2435
2436 /** 2436 /**
2437 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state 2437 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
2438 * @sdev: device to block 2438 * @sdev: device to block
2439 * 2439 *
2440 * Block request made by scsi lld's to temporarily stop all 2440 * Block request made by scsi lld's to temporarily stop all
2441 * scsi commands on the specified device. Called from interrupt 2441 * scsi commands on the specified device. Called from interrupt
2442 * or normal process context. 2442 * or normal process context.
2443 * 2443 *
2444 * Returns zero if successful or error if not 2444 * Returns zero if successful or error if not
2445 * 2445 *
2446 * Notes: 2446 * Notes:
2447 * This routine transitions the device to the SDEV_BLOCK state 2447 * This routine transitions the device to the SDEV_BLOCK state
2448 * (which must be a legal transition). When the device is in this 2448 * (which must be a legal transition). When the device is in this
2449 * state, all commands are deferred until the scsi lld reenables 2449 * state, all commands are deferred until the scsi lld reenables
2450 * the device with scsi_device_unblock or device_block_tmo fires. 2450 * the device with scsi_device_unblock or device_block_tmo fires.
2451 */ 2451 */
2452 int 2452 int
2453 scsi_internal_device_block(struct scsi_device *sdev) 2453 scsi_internal_device_block(struct scsi_device *sdev)
2454 { 2454 {
2455 struct request_queue *q = sdev->request_queue; 2455 struct request_queue *q = sdev->request_queue;
2456 unsigned long flags; 2456 unsigned long flags;
2457 int err = 0; 2457 int err = 0;
2458 2458
2459 err = scsi_device_set_state(sdev, SDEV_BLOCK); 2459 err = scsi_device_set_state(sdev, SDEV_BLOCK);
2460 if (err) { 2460 if (err) {
2461 err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK); 2461 err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
2462 2462
2463 if (err) 2463 if (err)
2464 return err; 2464 return err;
2465 } 2465 }
2466 2466
2467 /* 2467 /*
2468 * The device has transitioned to SDEV_BLOCK. Stop the 2468 * The device has transitioned to SDEV_BLOCK. Stop the
2469 * block layer from calling the midlayer with this device's 2469 * block layer from calling the midlayer with this device's
2470 * request queue. 2470 * request queue.
2471 */ 2471 */
2472 spin_lock_irqsave(q->queue_lock, flags); 2472 spin_lock_irqsave(q->queue_lock, flags);
2473 blk_stop_queue(q); 2473 blk_stop_queue(q);
2474 spin_unlock_irqrestore(q->queue_lock, flags); 2474 spin_unlock_irqrestore(q->queue_lock, flags);
2475 2475
2476 return 0; 2476 return 0;
2477 } 2477 }
2478 EXPORT_SYMBOL_GPL(scsi_internal_device_block); 2478 EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2479 2479
2480 /** 2480 /**
2481 * scsi_internal_device_unblock - resume a device after a block request 2481 * scsi_internal_device_unblock - resume a device after a block request
2482 * @sdev: device to resume 2482 * @sdev: device to resume
2483 * @new_state: state to set devices to after unblocking 2483 * @new_state: state to set devices to after unblocking
2484 * 2484 *
2485 * Called by scsi lld's or the midlayer to restart the device queue 2485 * Called by scsi lld's or the midlayer to restart the device queue
2486 * for the previously suspended scsi device. Called from interrupt or 2486 * for the previously suspended scsi device. Called from interrupt or
2487 * normal process context. 2487 * normal process context.
2488 * 2488 *
2489 * Returns zero if successful or error if not. 2489 * Returns zero if successful or error if not.
2490 * 2490 *
2491 * Notes: 2491 * Notes:
2492 * This routine transitions the device to the SDEV_RUNNING state 2492 * This routine transitions the device to the SDEV_RUNNING state
2493 * or to one of the offline states (which must be a legal transition) 2493 * or to one of the offline states (which must be a legal transition)
2494 * allowing the midlayer to goose the queue for this device. 2494 * allowing the midlayer to goose the queue for this device.
2495 */ 2495 */
2496 int 2496 int
2497 scsi_internal_device_unblock(struct scsi_device *sdev, 2497 scsi_internal_device_unblock(struct scsi_device *sdev,
2498 enum scsi_device_state new_state) 2498 enum scsi_device_state new_state)
2499 { 2499 {
2500 struct request_queue *q = sdev->request_queue; 2500 struct request_queue *q = sdev->request_queue;
2501 unsigned long flags; 2501 unsigned long flags;
2502 2502
2503 /* 2503 /*
2504 * Try to transition the scsi device to SDEV_RUNNING or one of the 2504 * Try to transition the scsi device to SDEV_RUNNING or one of the
2505 * offlined states and goose the device queue if successful. 2505 * offlined states and goose the device queue if successful.
2506 */ 2506 */
2507 if ((sdev->sdev_state == SDEV_BLOCK) || 2507 if ((sdev->sdev_state == SDEV_BLOCK) ||
2508 (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE)) 2508 (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE))
2509 sdev->sdev_state = new_state; 2509 sdev->sdev_state = new_state;
2510 else if (sdev->sdev_state == SDEV_CREATED_BLOCK) { 2510 else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
2511 if (new_state == SDEV_TRANSPORT_OFFLINE || 2511 if (new_state == SDEV_TRANSPORT_OFFLINE ||
2512 new_state == SDEV_OFFLINE) 2512 new_state == SDEV_OFFLINE)
2513 sdev->sdev_state = new_state; 2513 sdev->sdev_state = new_state;
2514 else 2514 else
2515 sdev->sdev_state = SDEV_CREATED; 2515 sdev->sdev_state = SDEV_CREATED;
2516 } else if (sdev->sdev_state != SDEV_CANCEL && 2516 } else if (sdev->sdev_state != SDEV_CANCEL &&
2517 sdev->sdev_state != SDEV_OFFLINE) 2517 sdev->sdev_state != SDEV_OFFLINE)
2518 return -EINVAL; 2518 return -EINVAL;
2519 2519
2520 spin_lock_irqsave(q->queue_lock, flags); 2520 spin_lock_irqsave(q->queue_lock, flags);
2521 blk_start_queue(q); 2521 blk_start_queue(q);
2522 spin_unlock_irqrestore(q->queue_lock, flags); 2522 spin_unlock_irqrestore(q->queue_lock, flags);
2523 2523
2524 return 0; 2524 return 0;
2525 } 2525 }
2526 EXPORT_SYMBOL_GPL(scsi_internal_device_unblock); 2526 EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
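Sketch only, not part of this patch: a low-level driver might pair these two calls to fence I/O around a controller reset. The lld_reset_controller() helper below is hypothetical.

#include <scsi/scsi_device.h>

/*
 * Sketch only, not part of this patch: fence I/O around a reset.
 * lld_reset_controller() is a hypothetical driver-private helper.
 */
static int example_quiesced_reset(struct scsi_device *sdev)
{
	int err;

	/* Defer all new commands for this device (SDEV_BLOCK). */
	err = scsi_internal_device_block(sdev);
	if (err)
		return err;

	lld_reset_controller(sdev->host);	/* hypothetical */

	/* Restart the queue and return the device to SDEV_RUNNING. */
	return scsi_internal_device_unblock(sdev, SDEV_RUNNING);
}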
2527 2527
2528 static void 2528 static void
2529 device_block(struct scsi_device *sdev, void *data) 2529 device_block(struct scsi_device *sdev, void *data)
2530 { 2530 {
2531 scsi_internal_device_block(sdev); 2531 scsi_internal_device_block(sdev);
2532 } 2532 }
2533 2533
2534 static int 2534 static int
2535 target_block(struct device *dev, void *data) 2535 target_block(struct device *dev, void *data)
2536 { 2536 {
2537 if (scsi_is_target_device(dev)) 2537 if (scsi_is_target_device(dev))
2538 starget_for_each_device(to_scsi_target(dev), NULL, 2538 starget_for_each_device(to_scsi_target(dev), NULL,
2539 device_block); 2539 device_block);
2540 return 0; 2540 return 0;
2541 } 2541 }
2542 2542
2543 void 2543 void
2544 scsi_target_block(struct device *dev) 2544 scsi_target_block(struct device *dev)
2545 { 2545 {
2546 if (scsi_is_target_device(dev)) 2546 if (scsi_is_target_device(dev))
2547 starget_for_each_device(to_scsi_target(dev), NULL, 2547 starget_for_each_device(to_scsi_target(dev), NULL,
2548 device_block); 2548 device_block);
2549 else 2549 else
2550 device_for_each_child(dev, NULL, target_block); 2550 device_for_each_child(dev, NULL, target_block);
2551 } 2551 }
2552 EXPORT_SYMBOL_GPL(scsi_target_block); 2552 EXPORT_SYMBOL_GPL(scsi_target_block);
2553 2553
2554 static void 2554 static void
2555 device_unblock(struct scsi_device *sdev, void *data) 2555 device_unblock(struct scsi_device *sdev, void *data)
2556 { 2556 {
2557 scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data); 2557 scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
2558 } 2558 }
2559 2559
2560 static int 2560 static int
2561 target_unblock(struct device *dev, void *data) 2561 target_unblock(struct device *dev, void *data)
2562 { 2562 {
2563 if (scsi_is_target_device(dev)) 2563 if (scsi_is_target_device(dev))
2564 starget_for_each_device(to_scsi_target(dev), data, 2564 starget_for_each_device(to_scsi_target(dev), data,
2565 device_unblock); 2565 device_unblock);
2566 return 0; 2566 return 0;
2567 } 2567 }
2568 2568
2569 void 2569 void
2570 scsi_target_unblock(struct device *dev, enum scsi_device_state new_state) 2570 scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
2571 { 2571 {
2572 if (scsi_is_target_device(dev)) 2572 if (scsi_is_target_device(dev))
2573 starget_for_each_device(to_scsi_target(dev), &new_state, 2573 starget_for_each_device(to_scsi_target(dev), &new_state,
2574 device_unblock); 2574 device_unblock);
2575 else 2575 else
2576 device_for_each_child(dev, &new_state, target_unblock); 2576 device_for_each_child(dev, &new_state, target_unblock);
2577 } 2577 }
2578 EXPORT_SYMBOL_GPL(scsi_target_unblock); 2578 EXPORT_SYMBOL_GPL(scsi_target_unblock);
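Sketch only, not part of this patch: the target-level variants take either a target device or a parent whose children are targets, so a transport class could park and later resume every device below a remote port. "rport_dev" stands in for the port's struct device and is an assumption of the example.

static void example_port_down(struct device *rport_dev)
{
	/* Every scsi_device below this port moves to SDEV_BLOCK. */
	scsi_target_block(rport_dev);
}

static void example_port_up(struct device *rport_dev)
{
	/* Restart the queues; devices return to SDEV_RUNNING. */
	scsi_target_unblock(rport_dev, SDEV_RUNNING);
}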
2579 2579
2580 /** 2580 /**
2581 * scsi_kmap_atomic_sg - find and atomically map an sg-element 2581 * scsi_kmap_atomic_sg - find and atomically map an sg-element
2582 * @sgl: scatter-gather list 2582 * @sgl: scatter-gather list
2583 * @sg_count: number of segments in sg 2583 * @sg_count: number of segments in sg
2584 * @offset: offset in bytes into sg, on return offset into the mapped area 2584 * @offset: offset in bytes into sg, on return offset into the mapped area
2585 * @len: bytes to map, on return number of bytes mapped 2585 * @len: bytes to map, on return number of bytes mapped
2586 * 2586 *
2587 * Returns virtual address of the start of the mapped page 2587 * Returns virtual address of the start of the mapped page
2588 */ 2588 */
2589 void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count, 2589 void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
2590 size_t *offset, size_t *len) 2590 size_t *offset, size_t *len)
2591 { 2591 {
2592 int i; 2592 int i;
2593 size_t sg_len = 0, len_complete = 0; 2593 size_t sg_len = 0, len_complete = 0;
2594 struct scatterlist *sg; 2594 struct scatterlist *sg;
2595 struct page *page; 2595 struct page *page;
2596 2596
2597 WARN_ON(!irqs_disabled()); 2597 WARN_ON(!irqs_disabled());
2598 2598
2599 for_each_sg(sgl, sg, sg_count, i) { 2599 for_each_sg(sgl, sg, sg_count, i) {
2600 len_complete = sg_len; /* Complete sg-entries */ 2600 len_complete = sg_len; /* Complete sg-entries */
2601 sg_len += sg->length; 2601 sg_len += sg->length;
2602 if (sg_len > *offset) 2602 if (sg_len > *offset)
2603 break; 2603 break;
2604 } 2604 }
2605 2605
2606 if (unlikely(i == sg_count)) { 2606 if (unlikely(i == sg_count)) {
2607 printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, " 2607 printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
2608 "elements %d\n", 2608 "elements %d\n",
2609 __func__, sg_len, *offset, sg_count); 2609 __func__, sg_len, *offset, sg_count);
2610 WARN_ON(1); 2610 WARN_ON(1);
2611 return NULL; 2611 return NULL;
2612 } 2612 }
2613 2613
2614 /* Offset starting from the beginning of first page in this sg-entry */ 2614 /* Offset starting from the beginning of first page in this sg-entry */
2615 *offset = *offset - len_complete + sg->offset; 2615 *offset = *offset - len_complete + sg->offset;
2616 2616
2617 /* Assumption: contiguous pages can be accessed as "page + i" */ 2617 /* Assumption: contiguous pages can be accessed as "page + i" */
2618 page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT)); 2618 page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
2619 *offset &= ~PAGE_MASK; 2619 *offset &= ~PAGE_MASK;
2620 2620
2621 /* Bytes in this sg-entry from *offset to the end of the page */ 2621 /* Bytes in this sg-entry from *offset to the end of the page */
2622 sg_len = PAGE_SIZE - *offset; 2622 sg_len = PAGE_SIZE - *offset;
2623 if (*len > sg_len) 2623 if (*len > sg_len)
2624 *len = sg_len; 2624 *len = sg_len;
2625 2625
2626 return kmap_atomic(page); 2626 return kmap_atomic(page);
2627 } 2627 }
2628 EXPORT_SYMBOL(scsi_kmap_atomic_sg); 2628 EXPORT_SYMBOL(scsi_kmap_atomic_sg);
2629 2629
2630 /** 2630 /**
2631 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg 2631 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
2632 * @virt: virtual address to be unmapped 2632 * @virt: virtual address to be unmapped
2633 */ 2633 */
2634 void scsi_kunmap_atomic_sg(void *virt) 2634 void scsi_kunmap_atomic_sg(void *virt)
2635 { 2635 {
2636 kunmap_atomic(virt); 2636 kunmap_atomic(virt);
2637 } 2637 }
2638 EXPORT_SYMBOL(scsi_kunmap_atomic_sg); 2638 EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
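Sketch only, not part of this patch: @offset and @len are in/out parameters and a mapping never spans a page, so a caller copying through a scatterlist loops over the pair of helpers. Interrupts must stay disabled while a mapping is held (see the WARN_ON above).

static void example_sg_copy_out(struct scatterlist *sgl, int sg_count,
				size_t off, char *dst, size_t resid)
{
	unsigned long flags;

	local_irq_save(flags);
	while (resid) {
		size_t offset = off, len = resid;
		char *vaddr;

		vaddr = scsi_kmap_atomic_sg(sgl, sg_count, &offset, &len);
		if (!vaddr)
			break;
		/* "offset" is now page-relative, "len" clamped to the page */
		memcpy(dst, vaddr + offset, len);
		scsi_kunmap_atomic_sg(vaddr);

		dst += len;
		off += len;
		resid -= len;
	}
	local_irq_restore(flags);
}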
2639 2639
2640 void sdev_disable_disk_events(struct scsi_device *sdev) 2640 void sdev_disable_disk_events(struct scsi_device *sdev)
2641 { 2641 {
2642 atomic_inc(&sdev->disk_events_disable_depth); 2642 atomic_inc(&sdev->disk_events_disable_depth);
2643 } 2643 }
2644 EXPORT_SYMBOL(sdev_disable_disk_events); 2644 EXPORT_SYMBOL(sdev_disable_disk_events);
2645 2645
drivers/scsi/scsi_sysfs.c
1 /* 1 /*
2 * scsi_sysfs.c 2 * scsi_sysfs.c
3 * 3 *
4 * SCSI sysfs interface routines. 4 * SCSI sysfs interface routines.
5 * 5 *
6 * Created to pull SCSI mid layer sysfs routines into one file. 6 * Created to pull SCSI mid layer sysfs routines into one file.
7 */ 7 */
8 8
9 #include <linux/module.h> 9 #include <linux/module.h>
10 #include <linux/slab.h> 10 #include <linux/slab.h>
11 #include <linux/init.h> 11 #include <linux/init.h>
12 #include <linux/blkdev.h> 12 #include <linux/blkdev.h>
13 #include <linux/device.h> 13 #include <linux/device.h>
14 #include <linux/pm_runtime.h> 14 #include <linux/pm_runtime.h>
15 15
16 #include <scsi/scsi.h> 16 #include <scsi/scsi.h>
17 #include <scsi/scsi_device.h> 17 #include <scsi/scsi_device.h>
18 #include <scsi/scsi_host.h> 18 #include <scsi/scsi_host.h>
19 #include <scsi/scsi_tcq.h> 19 #include <scsi/scsi_tcq.h>
20 #include <scsi/scsi_transport.h> 20 #include <scsi/scsi_transport.h>
21 #include <scsi/scsi_driver.h> 21 #include <scsi/scsi_driver.h>
22 22
23 #include "scsi_priv.h" 23 #include "scsi_priv.h"
24 #include "scsi_logging.h" 24 #include "scsi_logging.h"
25 25
26 static struct device_type scsi_dev_type; 26 static struct device_type scsi_dev_type;
27 27
28 static const struct { 28 static const struct {
29 enum scsi_device_state value; 29 enum scsi_device_state value;
30 char *name; 30 char *name;
31 } sdev_states[] = { 31 } sdev_states[] = {
32 { SDEV_CREATED, "created" }, 32 { SDEV_CREATED, "created" },
33 { SDEV_RUNNING, "running" }, 33 { SDEV_RUNNING, "running" },
34 { SDEV_CANCEL, "cancel" }, 34 { SDEV_CANCEL, "cancel" },
35 { SDEV_DEL, "deleted" }, 35 { SDEV_DEL, "deleted" },
36 { SDEV_QUIESCE, "quiesce" }, 36 { SDEV_QUIESCE, "quiesce" },
37 { SDEV_OFFLINE, "offline" }, 37 { SDEV_OFFLINE, "offline" },
38 { SDEV_TRANSPORT_OFFLINE, "transport-offline" }, 38 { SDEV_TRANSPORT_OFFLINE, "transport-offline" },
39 { SDEV_BLOCK, "blocked" }, 39 { SDEV_BLOCK, "blocked" },
40 { SDEV_CREATED_BLOCK, "created-blocked" }, 40 { SDEV_CREATED_BLOCK, "created-blocked" },
41 }; 41 };
42 42
43 const char *scsi_device_state_name(enum scsi_device_state state) 43 const char *scsi_device_state_name(enum scsi_device_state state)
44 { 44 {
45 int i; 45 int i;
46 char *name = NULL; 46 char *name = NULL;
47 47
48 for (i = 0; i < ARRAY_SIZE(sdev_states); i++) { 48 for (i = 0; i < ARRAY_SIZE(sdev_states); i++) {
49 if (sdev_states[i].value == state) { 49 if (sdev_states[i].value == state) {
50 name = sdev_states[i].name; 50 name = sdev_states[i].name;
51 break; 51 break;
52 } 52 }
53 } 53 }
54 return name; 54 return name;
55 } 55 }
56 56
57 static const struct { 57 static const struct {
58 enum scsi_host_state value; 58 enum scsi_host_state value;
59 char *name; 59 char *name;
60 } shost_states[] = { 60 } shost_states[] = {
61 { SHOST_CREATED, "created" }, 61 { SHOST_CREATED, "created" },
62 { SHOST_RUNNING, "running" }, 62 { SHOST_RUNNING, "running" },
63 { SHOST_CANCEL, "cancel" }, 63 { SHOST_CANCEL, "cancel" },
64 { SHOST_DEL, "deleted" }, 64 { SHOST_DEL, "deleted" },
65 { SHOST_RECOVERY, "recovery" }, 65 { SHOST_RECOVERY, "recovery" },
66 { SHOST_CANCEL_RECOVERY, "cancel/recovery" }, 66 { SHOST_CANCEL_RECOVERY, "cancel/recovery" },
67 { SHOST_DEL_RECOVERY, "deleted/recovery", }, 67 { SHOST_DEL_RECOVERY, "deleted/recovery", },
68 }; 68 };
69 const char *scsi_host_state_name(enum scsi_host_state state) 69 const char *scsi_host_state_name(enum scsi_host_state state)
70 { 70 {
71 int i; 71 int i;
72 char *name = NULL; 72 char *name = NULL;
73 73
74 for (i = 0; i < ARRAY_SIZE(shost_states); i++) { 74 for (i = 0; i < ARRAY_SIZE(shost_states); i++) {
75 if (shost_states[i].value == state) { 75 if (shost_states[i].value == state) {
76 name = shost_states[i].name; 76 name = shost_states[i].name;
77 break; 77 break;
78 } 78 }
79 } 79 }
80 return name; 80 return name;
81 } 81 }
82 82
83 static int check_set(unsigned long long *val, char *src) 83 static int check_set(unsigned long long *val, char *src)
84 { 84 {
85 char *last; 85 char *last;
86 86
87 if (strncmp(src, "-", 20) == 0) { 87 if (strncmp(src, "-", 20) == 0) {
88 *val = SCAN_WILD_CARD; 88 *val = SCAN_WILD_CARD;
89 } else { 89 } else {
90 /* 90 /*
91 * Doesn't check for int overflow 91 * Doesn't check for int overflow
92 */ 92 */
93 *val = simple_strtoull(src, &last, 0); 93 *val = simple_strtoull(src, &last, 0);
94 if (*last != '\0') 94 if (*last != '\0')
95 return 1; 95 return 1;
96 } 96 }
97 return 0; 97 return 0;
98 } 98 }
99 99
100 static int scsi_scan(struct Scsi_Host *shost, const char *str) 100 static int scsi_scan(struct Scsi_Host *shost, const char *str)
101 { 101 {
102 char s1[15], s2[15], s3[17], junk; 102 char s1[15], s2[15], s3[17], junk;
103 unsigned long long channel, id, lun; 103 unsigned long long channel, id, lun;
104 int res; 104 int res;
105 105
106 res = sscanf(str, "%10s %10s %16s %c", s1, s2, s3, &junk); 106 res = sscanf(str, "%10s %10s %16s %c", s1, s2, s3, &junk);
107 if (res != 3) 107 if (res != 3)
108 return -EINVAL; 108 return -EINVAL;
109 if (check_set(&channel, s1)) 109 if (check_set(&channel, s1))
110 return -EINVAL; 110 return -EINVAL;
111 if (check_set(&id, s2)) 111 if (check_set(&id, s2))
112 return -EINVAL; 112 return -EINVAL;
113 if (check_set(&lun, s3)) 113 if (check_set(&lun, s3))
114 return -EINVAL; 114 return -EINVAL;
115 if (shost->transportt->user_scan) 115 if (shost->transportt->user_scan)
116 res = shost->transportt->user_scan(shost, channel, id, lun); 116 res = shost->transportt->user_scan(shost, channel, id, lun);
117 else 117 else
118 res = scsi_scan_host_selected(shost, channel, id, lun, 1); 118 res = scsi_scan_host_selected(shost, channel, id, lun, 1);
119 return res; 119 return res;
120 } 120 }
121 121
122 /* 122 /*
123 * shost_show_function: macro to create an attr function that can be used to 123 * shost_show_function: macro to create an attr function that can be used to
124 * show a non-bit field. 124 * show a non-bit field.
125 */ 125 */
126 #define shost_show_function(name, field, format_string) \ 126 #define shost_show_function(name, field, format_string) \
127 static ssize_t \ 127 static ssize_t \
128 show_##name (struct device *dev, struct device_attribute *attr, \ 128 show_##name (struct device *dev, struct device_attribute *attr, \
129 char *buf) \ 129 char *buf) \
130 { \ 130 { \
131 struct Scsi_Host *shost = class_to_shost(dev); \ 131 struct Scsi_Host *shost = class_to_shost(dev); \
132 return snprintf (buf, 20, format_string, shost->field); \ 132 return snprintf (buf, 20, format_string, shost->field); \
133 } 133 }
134 134
135 /* 135 /*
136 * shost_rd_attr: macro to create a function and attribute variable for a 136 * shost_rd_attr: macro to create a function and attribute variable for a
137 * read only field. 137 * read only field.
138 */ 138 */
139 #define shost_rd_attr2(name, field, format_string) \ 139 #define shost_rd_attr2(name, field, format_string) \
140 shost_show_function(name, field, format_string) \ 140 shost_show_function(name, field, format_string) \
141 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL); 141 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
142 142
143 #define shost_rd_attr(field, format_string) \ 143 #define shost_rd_attr(field, format_string) \
144 shost_rd_attr2(field, field, format_string) 144 shost_rd_attr2(field, field, format_string)
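As a rough illustration, an invocation such as shost_rd_attr(unique_id, "%u\n") (used further below) expands to approximately:

static ssize_t
show_unique_id(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	return snprintf(buf, 20, "%u\n", shost->unique_id);
}
static DEVICE_ATTR(unique_id, S_IRUGO, show_unique_id, NULL);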
145 145
146 /* 146 /*
147 * Create the actual show/store functions and data structures. 147 * Create the actual show/store functions and data structures.
148 */ 148 */
149 149
150 static ssize_t 150 static ssize_t
151 store_scan(struct device *dev, struct device_attribute *attr, 151 store_scan(struct device *dev, struct device_attribute *attr,
152 const char *buf, size_t count) 152 const char *buf, size_t count)
153 { 153 {
154 struct Scsi_Host *shost = class_to_shost(dev); 154 struct Scsi_Host *shost = class_to_shost(dev);
155 int res; 155 int res;
156 156
157 res = scsi_scan(shost, buf); 157 res = scsi_scan(shost, buf);
158 if (res == 0) 158 if (res == 0)
159 res = count; 159 res = count;
160 return res; 160 return res;
161 }; 161 };
162 static DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan); 162 static DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan);
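The string written to the scan attribute is "channel id lun", with "-" acting as a wildcard (see check_set() and scsi_scan() above). A minimal userspace sketch, assuming a host0 adapter exists:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/class/scsi_host/host0/scan", O_WRONLY);

	if (fd < 0)
		return 1;
	/* "channel id lun"; "-" means SCAN_WILD_CARD for that field. */
	if (write(fd, "- - -", 5) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}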
163 163
164 static ssize_t 164 static ssize_t
165 store_shost_state(struct device *dev, struct device_attribute *attr, 165 store_shost_state(struct device *dev, struct device_attribute *attr,
166 const char *buf, size_t count) 166 const char *buf, size_t count)
167 { 167 {
168 int i; 168 int i;
169 struct Scsi_Host *shost = class_to_shost(dev); 169 struct Scsi_Host *shost = class_to_shost(dev);
170 enum scsi_host_state state = 0; 170 enum scsi_host_state state = 0;
171 171
172 for (i = 0; i < ARRAY_SIZE(shost_states); i++) { 172 for (i = 0; i < ARRAY_SIZE(shost_states); i++) {
173 const int len = strlen(shost_states[i].name); 173 const int len = strlen(shost_states[i].name);
174 if (strncmp(shost_states[i].name, buf, len) == 0 && 174 if (strncmp(shost_states[i].name, buf, len) == 0 &&
175 buf[len] == '\n') { 175 buf[len] == '\n') {
176 state = shost_states[i].value; 176 state = shost_states[i].value;
177 break; 177 break;
178 } 178 }
179 } 179 }
180 if (!state) 180 if (!state)
181 return -EINVAL; 181 return -EINVAL;
182 182
183 if (scsi_host_set_state(shost, state)) 183 if (scsi_host_set_state(shost, state))
184 return -EINVAL; 184 return -EINVAL;
185 return count; 185 return count;
186 } 186 }
187 187
188 static ssize_t 188 static ssize_t
189 show_shost_state(struct device *dev, struct device_attribute *attr, char *buf) 189 show_shost_state(struct device *dev, struct device_attribute *attr, char *buf)
190 { 190 {
191 struct Scsi_Host *shost = class_to_shost(dev); 191 struct Scsi_Host *shost = class_to_shost(dev);
192 const char *name = scsi_host_state_name(shost->shost_state); 192 const char *name = scsi_host_state_name(shost->shost_state);
193 193
194 if (!name) 194 if (!name)
195 return -EINVAL; 195 return -EINVAL;
196 196
197 return snprintf(buf, 20, "%s\n", name); 197 return snprintf(buf, 20, "%s\n", name);
198 } 198 }
199 199
200 /* DEVICE_ATTR(state) clashes with dev_attr_state for sdev */ 200 /* DEVICE_ATTR(state) clashes with dev_attr_state for sdev */
201 struct device_attribute dev_attr_hstate = 201 struct device_attribute dev_attr_hstate =
202 __ATTR(state, S_IRUGO | S_IWUSR, show_shost_state, store_shost_state); 202 __ATTR(state, S_IRUGO | S_IWUSR, show_shost_state, store_shost_state);
203 203
204 static ssize_t 204 static ssize_t
205 show_shost_mode(unsigned int mode, char *buf) 205 show_shost_mode(unsigned int mode, char *buf)
206 { 206 {
207 ssize_t len = 0; 207 ssize_t len = 0;
208 208
209 if (mode & MODE_INITIATOR) 209 if (mode & MODE_INITIATOR)
210 len = sprintf(buf, "%s", "Initiator"); 210 len = sprintf(buf, "%s", "Initiator");
211 211
212 if (mode & MODE_TARGET) 212 if (mode & MODE_TARGET)
213 len += sprintf(buf + len, "%s%s", len ? ", " : "", "Target"); 213 len += sprintf(buf + len, "%s%s", len ? ", " : "", "Target");
214 214
215 len += sprintf(buf + len, "\n"); 215 len += sprintf(buf + len, "\n");
216 216
217 return len; 217 return len;
218 } 218 }
219 219
220 static ssize_t 220 static ssize_t
221 show_shost_supported_mode(struct device *dev, struct device_attribute *attr, 221 show_shost_supported_mode(struct device *dev, struct device_attribute *attr,
222 char *buf) 222 char *buf)
223 { 223 {
224 struct Scsi_Host *shost = class_to_shost(dev); 224 struct Scsi_Host *shost = class_to_shost(dev);
225 unsigned int supported_mode = shost->hostt->supported_mode; 225 unsigned int supported_mode = shost->hostt->supported_mode;
226 226
227 if (supported_mode == MODE_UNKNOWN) 227 if (supported_mode == MODE_UNKNOWN)
228 /* by default this should be initiator */ 228 /* by default this should be initiator */
229 supported_mode = MODE_INITIATOR; 229 supported_mode = MODE_INITIATOR;
230 230
231 return show_shost_mode(supported_mode, buf); 231 return show_shost_mode(supported_mode, buf);
232 } 232 }
233 233
234 static DEVICE_ATTR(supported_mode, S_IRUGO | S_IWUSR, show_shost_supported_mode, NULL); 234 static DEVICE_ATTR(supported_mode, S_IRUGO | S_IWUSR, show_shost_supported_mode, NULL);
235 235
236 static ssize_t 236 static ssize_t
237 show_shost_active_mode(struct device *dev, 237 show_shost_active_mode(struct device *dev,
238 struct device_attribute *attr, char *buf) 238 struct device_attribute *attr, char *buf)
239 { 239 {
240 struct Scsi_Host *shost = class_to_shost(dev); 240 struct Scsi_Host *shost = class_to_shost(dev);
241 241
242 if (shost->active_mode == MODE_UNKNOWN) 242 if (shost->active_mode == MODE_UNKNOWN)
243 return snprintf(buf, 20, "unknown\n"); 243 return snprintf(buf, 20, "unknown\n");
244 else 244 else
245 return show_shost_mode(shost->active_mode, buf); 245 return show_shost_mode(shost->active_mode, buf);
246 } 246 }
247 247
248 static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL); 248 static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL);
249 249
250 static int check_reset_type(const char *str) 250 static int check_reset_type(const char *str)
251 { 251 {
252 if (sysfs_streq(str, "adapter")) 252 if (sysfs_streq(str, "adapter"))
253 return SCSI_ADAPTER_RESET; 253 return SCSI_ADAPTER_RESET;
254 else if (sysfs_streq(str, "firmware")) 254 else if (sysfs_streq(str, "firmware"))
255 return SCSI_FIRMWARE_RESET; 255 return SCSI_FIRMWARE_RESET;
256 else 256 else
257 return 0; 257 return 0;
258 } 258 }
259 259
260 static ssize_t 260 static ssize_t
261 store_host_reset(struct device *dev, struct device_attribute *attr, 261 store_host_reset(struct device *dev, struct device_attribute *attr,
262 const char *buf, size_t count) 262 const char *buf, size_t count)
263 { 263 {
264 struct Scsi_Host *shost = class_to_shost(dev); 264 struct Scsi_Host *shost = class_to_shost(dev);
265 struct scsi_host_template *sht = shost->hostt; 265 struct scsi_host_template *sht = shost->hostt;
266 int ret = -EINVAL; 266 int ret = -EINVAL;
267 int type; 267 int type;
268 268
269 type = check_reset_type(buf); 269 type = check_reset_type(buf);
270 if (!type) 270 if (!type)
271 goto exit_store_host_reset; 271 goto exit_store_host_reset;
272 272
273 if (sht->host_reset) 273 if (sht->host_reset)
274 ret = sht->host_reset(shost, type); 274 ret = sht->host_reset(shost, type);
275 275
276 exit_store_host_reset: 276 exit_store_host_reset:
277 if (ret == 0) 277 if (ret == 0)
278 ret = count; 278 ret = count;
279 return ret; 279 return ret;
280 } 280 }
281 281
282 static DEVICE_ATTR(host_reset, S_IWUSR, NULL, store_host_reset); 282 static DEVICE_ATTR(host_reset, S_IWUSR, NULL, store_host_reset);
283 283
284 static ssize_t 284 static ssize_t
285 show_shost_eh_deadline(struct device *dev, 285 show_shost_eh_deadline(struct device *dev,
286 struct device_attribute *attr, char *buf) 286 struct device_attribute *attr, char *buf)
287 { 287 {
288 struct Scsi_Host *shost = class_to_shost(dev); 288 struct Scsi_Host *shost = class_to_shost(dev);
289 289
290 if (shost->eh_deadline == -1) 290 if (shost->eh_deadline == -1)
291 return snprintf(buf, strlen("off") + 2, "off\n"); 291 return snprintf(buf, strlen("off") + 2, "off\n");
292 return sprintf(buf, "%u\n", shost->eh_deadline / HZ); 292 return sprintf(buf, "%u\n", shost->eh_deadline / HZ);
293 } 293 }
294 294
295 static ssize_t 295 static ssize_t
296 store_shost_eh_deadline(struct device *dev, struct device_attribute *attr, 296 store_shost_eh_deadline(struct device *dev, struct device_attribute *attr,
297 const char *buf, size_t count) 297 const char *buf, size_t count)
298 { 298 {
299 struct Scsi_Host *shost = class_to_shost(dev); 299 struct Scsi_Host *shost = class_to_shost(dev);
300 int ret = -EINVAL; 300 int ret = -EINVAL;
301 unsigned long deadline, flags; 301 unsigned long deadline, flags;
302 302
303 if (shost->transportt && 303 if (shost->transportt &&
304 (shost->transportt->eh_strategy_handler || 304 (shost->transportt->eh_strategy_handler ||
305 !shost->hostt->eh_host_reset_handler)) 305 !shost->hostt->eh_host_reset_handler))
306 return ret; 306 return ret;
307 307
308 if (!strncmp(buf, "off", strlen("off"))) 308 if (!strncmp(buf, "off", strlen("off")))
309 deadline = -1; 309 deadline = -1;
310 else { 310 else {
311 ret = kstrtoul(buf, 10, &deadline); 311 ret = kstrtoul(buf, 10, &deadline);
312 if (ret) 312 if (ret)
313 return ret; 313 return ret;
314 if (deadline * HZ > UINT_MAX) 314 if (deadline * HZ > UINT_MAX)
315 return -EINVAL; 315 return -EINVAL;
316 } 316 }
317 317
318 spin_lock_irqsave(shost->host_lock, flags); 318 spin_lock_irqsave(shost->host_lock, flags);
319 if (scsi_host_in_recovery(shost)) 319 if (scsi_host_in_recovery(shost))
320 ret = -EBUSY; 320 ret = -EBUSY;
321 else { 321 else {
322 if (deadline == -1) 322 if (deadline == -1)
323 shost->eh_deadline = -1; 323 shost->eh_deadline = -1;
324 else 324 else
325 shost->eh_deadline = deadline * HZ; 325 shost->eh_deadline = deadline * HZ;
326 326
327 ret = count; 327 ret = count;
328 } 328 }
329 spin_unlock_irqrestore(shost->host_lock, flags); 329 spin_unlock_irqrestore(shost->host_lock, flags);
330 330
331 return ret; 331 return ret;
332 } 332 }
333 333
334 static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store_shost_eh_deadline); 334 static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store_shost_eh_deadline);
335 335
336 shost_rd_attr(unique_id, "%u\n"); 336 shost_rd_attr(unique_id, "%u\n");
337 shost_rd_attr(cmd_per_lun, "%hd\n"); 337 shost_rd_attr(cmd_per_lun, "%hd\n");
338 shost_rd_attr(can_queue, "%hd\n"); 338 shost_rd_attr(can_queue, "%hd\n");
339 shost_rd_attr(sg_tablesize, "%hu\n"); 339 shost_rd_attr(sg_tablesize, "%hu\n");
340 shost_rd_attr(sg_prot_tablesize, "%hu\n"); 340 shost_rd_attr(sg_prot_tablesize, "%hu\n");
341 shost_rd_attr(unchecked_isa_dma, "%d\n"); 341 shost_rd_attr(unchecked_isa_dma, "%d\n");
342 shost_rd_attr(prot_capabilities, "%u\n"); 342 shost_rd_attr(prot_capabilities, "%u\n");
343 shost_rd_attr(prot_guard_type, "%hd\n"); 343 shost_rd_attr(prot_guard_type, "%hd\n");
344 shost_rd_attr2(proc_name, hostt->proc_name, "%s\n"); 344 shost_rd_attr2(proc_name, hostt->proc_name, "%s\n");
345 345
346 static ssize_t 346 static ssize_t
347 show_host_busy(struct device *dev, struct device_attribute *attr, char *buf) 347 show_host_busy(struct device *dev, struct device_attribute *attr, char *buf)
348 { 348 {
349 struct Scsi_Host *shost = class_to_shost(dev); 349 struct Scsi_Host *shost = class_to_shost(dev);
350 return snprintf(buf, 20, "%d\n", atomic_read(&shost->host_busy)); 350 return snprintf(buf, 20, "%d\n", atomic_read(&shost->host_busy));
351 } 351 }
352 static DEVICE_ATTR(host_busy, S_IRUGO, show_host_busy, NULL); 352 static DEVICE_ATTR(host_busy, S_IRUGO, show_host_busy, NULL);
353 353
354 static struct attribute *scsi_sysfs_shost_attrs[] = { 354 static struct attribute *scsi_sysfs_shost_attrs[] = {
355 &dev_attr_unique_id.attr, 355 &dev_attr_unique_id.attr,
356 &dev_attr_host_busy.attr, 356 &dev_attr_host_busy.attr,
357 &dev_attr_cmd_per_lun.attr, 357 &dev_attr_cmd_per_lun.attr,
358 &dev_attr_can_queue.attr, 358 &dev_attr_can_queue.attr,
359 &dev_attr_sg_tablesize.attr, 359 &dev_attr_sg_tablesize.attr,
360 &dev_attr_sg_prot_tablesize.attr, 360 &dev_attr_sg_prot_tablesize.attr,
361 &dev_attr_unchecked_isa_dma.attr, 361 &dev_attr_unchecked_isa_dma.attr,
362 &dev_attr_proc_name.attr, 362 &dev_attr_proc_name.attr,
363 &dev_attr_scan.attr, 363 &dev_attr_scan.attr,
364 &dev_attr_hstate.attr, 364 &dev_attr_hstate.attr,
365 &dev_attr_supported_mode.attr, 365 &dev_attr_supported_mode.attr,
366 &dev_attr_active_mode.attr, 366 &dev_attr_active_mode.attr,
367 &dev_attr_prot_capabilities.attr, 367 &dev_attr_prot_capabilities.attr,
368 &dev_attr_prot_guard_type.attr, 368 &dev_attr_prot_guard_type.attr,
369 &dev_attr_host_reset.attr, 369 &dev_attr_host_reset.attr,
370 &dev_attr_eh_deadline.attr, 370 &dev_attr_eh_deadline.attr,
371 NULL 371 NULL
372 }; 372 };
373 373
374 struct attribute_group scsi_shost_attr_group = { 374 struct attribute_group scsi_shost_attr_group = {
375 .attrs = scsi_sysfs_shost_attrs, 375 .attrs = scsi_sysfs_shost_attrs,
376 }; 376 };
377 377
378 const struct attribute_group *scsi_sysfs_shost_attr_groups[] = { 378 const struct attribute_group *scsi_sysfs_shost_attr_groups[] = {
379 &scsi_shost_attr_group, 379 &scsi_shost_attr_group,
380 NULL 380 NULL
381 }; 381 };
382 382
383 static void scsi_device_cls_release(struct device *class_dev) 383 static void scsi_device_cls_release(struct device *class_dev)
384 { 384 {
385 struct scsi_device *sdev; 385 struct scsi_device *sdev;
386 386
387 sdev = class_to_sdev(class_dev); 387 sdev = class_to_sdev(class_dev);
388 put_device(&sdev->sdev_gendev); 388 put_device(&sdev->sdev_gendev);
389 } 389 }
390 390
391 static void scsi_device_dev_release_usercontext(struct work_struct *work) 391 static void scsi_device_dev_release_usercontext(struct work_struct *work)
392 { 392 {
393 struct scsi_device *sdev; 393 struct scsi_device *sdev;
394 struct device *parent; 394 struct device *parent;
395 struct list_head *this, *tmp; 395 struct list_head *this, *tmp;
396 unsigned long flags; 396 unsigned long flags;
397 397
398 sdev = container_of(work, struct scsi_device, ew.work); 398 sdev = container_of(work, struct scsi_device, ew.work);
399 399
400 parent = sdev->sdev_gendev.parent; 400 parent = sdev->sdev_gendev.parent;
401 401
402 spin_lock_irqsave(sdev->host->host_lock, flags); 402 spin_lock_irqsave(sdev->host->host_lock, flags);
403 list_del(&sdev->siblings); 403 list_del(&sdev->siblings);
404 list_del(&sdev->same_target_siblings); 404 list_del(&sdev->same_target_siblings);
405 list_del(&sdev->starved_entry); 405 list_del(&sdev->starved_entry);
406 spin_unlock_irqrestore(sdev->host->host_lock, flags); 406 spin_unlock_irqrestore(sdev->host->host_lock, flags);
407 407
408 cancel_work_sync(&sdev->event_work); 408 cancel_work_sync(&sdev->event_work);
409 409
410 list_for_each_safe(this, tmp, &sdev->event_list) { 410 list_for_each_safe(this, tmp, &sdev->event_list) {
411 struct scsi_event *evt; 411 struct scsi_event *evt;
412 412
413 evt = list_entry(this, struct scsi_event, node); 413 evt = list_entry(this, struct scsi_event, node);
414 list_del(&evt->node); 414 list_del(&evt->node);
415 kfree(evt); 415 kfree(evt);
416 } 416 }
417 417
418 blk_put_queue(sdev->request_queue); 418 blk_put_queue(sdev->request_queue);
419 /* NULL queue means the device can't be used */ 419 /* NULL queue means the device can't be used */
420 sdev->request_queue = NULL; 420 sdev->request_queue = NULL;
421 421
422 kfree(sdev->vpd_pg83); 422 kfree(sdev->vpd_pg83);
423 kfree(sdev->vpd_pg80); 423 kfree(sdev->vpd_pg80);
424 kfree(sdev->inquiry); 424 kfree(sdev->inquiry);
425 kfree(sdev); 425 kfree(sdev);
426 426
427 if (parent) 427 if (parent)
428 put_device(parent); 428 put_device(parent);
429 } 429 }
430 430
431 static void scsi_device_dev_release(struct device *dev) 431 static void scsi_device_dev_release(struct device *dev)
432 { 432 {
433 struct scsi_device *sdp = to_scsi_device(dev); 433 struct scsi_device *sdp = to_scsi_device(dev);
434 execute_in_process_context(scsi_device_dev_release_usercontext, 434 execute_in_process_context(scsi_device_dev_release_usercontext,
435 &sdp->ew); 435 &sdp->ew);
436 } 436 }
437 437
438 static struct class sdev_class = { 438 static struct class sdev_class = {
439 .name = "scsi_device", 439 .name = "scsi_device",
440 .dev_release = scsi_device_cls_release, 440 .dev_release = scsi_device_cls_release,
441 }; 441 };
442 442
443 /* all probing is done in the individual ->probe routines */ 443 /* all probing is done in the individual ->probe routines */
444 static int scsi_bus_match(struct device *dev, struct device_driver *gendrv) 444 static int scsi_bus_match(struct device *dev, struct device_driver *gendrv)
445 { 445 {
446 struct scsi_device *sdp; 446 struct scsi_device *sdp;
447 447
448 if (dev->type != &scsi_dev_type) 448 if (dev->type != &scsi_dev_type)
449 return 0; 449 return 0;
450 450
451 sdp = to_scsi_device(dev); 451 sdp = to_scsi_device(dev);
452 if (sdp->no_uld_attach) 452 if (sdp->no_uld_attach)
453 return 0; 453 return 0;
454 return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0; 454 return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0;
455 } 455 }
456 456
457 static int scsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env) 457 static int scsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
458 { 458 {
459 struct scsi_device *sdev; 459 struct scsi_device *sdev;
460 460
461 if (dev->type != &scsi_dev_type) 461 if (dev->type != &scsi_dev_type)
462 return 0; 462 return 0;
463 463
464 sdev = to_scsi_device(dev); 464 sdev = to_scsi_device(dev);
465 465
466 add_uevent_var(env, "MODALIAS=" SCSI_DEVICE_MODALIAS_FMT, sdev->type); 466 add_uevent_var(env, "MODALIAS=" SCSI_DEVICE_MODALIAS_FMT, sdev->type);
467 return 0; 467 return 0;
468 } 468 }
469 469
470 struct bus_type scsi_bus_type = { 470 struct bus_type scsi_bus_type = {
471 .name = "scsi", 471 .name = "scsi",
472 .match = scsi_bus_match, 472 .match = scsi_bus_match,
473 .uevent = scsi_bus_uevent, 473 .uevent = scsi_bus_uevent,
474 #ifdef CONFIG_PM 474 #ifdef CONFIG_PM
475 .pm = &scsi_bus_pm_ops, 475 .pm = &scsi_bus_pm_ops,
476 #endif 476 #endif
477 }; 477 };
478 EXPORT_SYMBOL_GPL(scsi_bus_type); 478 EXPORT_SYMBOL_GPL(scsi_bus_type);
479 479
480 int scsi_sysfs_register(void) 480 int scsi_sysfs_register(void)
481 { 481 {
482 int error; 482 int error;
483 483
484 error = bus_register(&scsi_bus_type); 484 error = bus_register(&scsi_bus_type);
485 if (!error) { 485 if (!error) {
486 error = class_register(&sdev_class); 486 error = class_register(&sdev_class);
487 if (error) 487 if (error)
488 bus_unregister(&scsi_bus_type); 488 bus_unregister(&scsi_bus_type);
489 } 489 }
490 490
491 return error; 491 return error;
492 } 492 }
493 493
494 void scsi_sysfs_unregister(void) 494 void scsi_sysfs_unregister(void)
495 { 495 {
496 class_unregister(&sdev_class); 496 class_unregister(&sdev_class);
497 bus_unregister(&scsi_bus_type); 497 bus_unregister(&scsi_bus_type);
498 } 498 }
499 499
500 /* 500 /*
501 * sdev_show_function: macro to create an attr function that can be used to 501 * sdev_show_function: macro to create an attr function that can be used to
502 * show a non-bit field. 502 * show a non-bit field.
503 */ 503 */
504 #define sdev_show_function(field, format_string) \ 504 #define sdev_show_function(field, format_string) \
505 static ssize_t \ 505 static ssize_t \
506 sdev_show_##field (struct device *dev, struct device_attribute *attr, \ 506 sdev_show_##field (struct device *dev, struct device_attribute *attr, \
507 char *buf) \ 507 char *buf) \
508 { \ 508 { \
509 struct scsi_device *sdev; \ 509 struct scsi_device *sdev; \
510 sdev = to_scsi_device(dev); \ 510 sdev = to_scsi_device(dev); \
511 return snprintf (buf, 20, format_string, sdev->field); \ 511 return snprintf (buf, 20, format_string, sdev->field); \
512 } \ 512 } \
513 513
514 /* 514 /*
515 * sdev_rd_attr: macro to create a function and attribute variable for a 515 * sdev_rd_attr: macro to create a function and attribute variable for a
516 * read only field. 516 * read only field.
517 */ 517 */
518 #define sdev_rd_attr(field, format_string) \ 518 #define sdev_rd_attr(field, format_string) \
519 sdev_show_function(field, format_string) \ 519 sdev_show_function(field, format_string) \
520 static DEVICE_ATTR(field, S_IRUGO, sdev_show_##field, NULL); 520 static DEVICE_ATTR(field, S_IRUGO, sdev_show_##field, NULL);
521 521
522 522
523 /* 523 /*
524 * sdev_rw_attr: create a function and attribute variable for a 524 * sdev_rw_attr: create a function and attribute variable for a
525 * read/write field. 525 * read/write field.
526 */ 526 */
527 #define sdev_rw_attr(field, format_string) \ 527 #define sdev_rw_attr(field, format_string) \
528 sdev_show_function(field, format_string) \ 528 sdev_show_function(field, format_string) \
529 \ 529 \
530 static ssize_t \ 530 static ssize_t \
531 sdev_store_##field (struct device *dev, struct device_attribute *attr, \ 531 sdev_store_##field (struct device *dev, struct device_attribute *attr, \
532 const char *buf, size_t count) \ 532 const char *buf, size_t count) \
533 { \ 533 { \
534 struct scsi_device *sdev; \ 534 struct scsi_device *sdev; \
535 sdev = to_scsi_device(dev); \ 535 sdev = to_scsi_device(dev); \
536 sscanf (buf, format_string, &sdev->field); \ 536 sscanf (buf, format_string, &sdev->field); \
537 return count; \ 537 return count; \
538 } \ 538 } \
539 static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field); 539 static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);
540 540
541 /* Currently we don't export bit fields, but we might in future, 541 /* Currently we don't export bit fields, but we might in future,
542 * so leave this code in */ 542 * so leave this code in */
543 #if 0 543 #if 0
544 /* 544 /*
545 * sdev_rd_attr: create a function and attribute variable for a 545 * sdev_rd_attr: create a function and attribute variable for a
546 * read/write bit field. 546 * read/write bit field.
547 */ 547 */
548 #define sdev_rw_attr_bit(field) \ 548 #define sdev_rw_attr_bit(field) \
549 sdev_show_function(field, "%d\n") \ 549 sdev_show_function(field, "%d\n") \
550 \ 550 \
551 static ssize_t \ 551 static ssize_t \
552 sdev_store_##field (struct device *dev, struct device_attribute *attr, \ 552 sdev_store_##field (struct device *dev, struct device_attribute *attr, \
553 const char *buf, size_t count) \ 553 const char *buf, size_t count) \
554 { \ 554 { \
555 int ret; \ 555 int ret; \
556 struct scsi_device *sdev; \ 556 struct scsi_device *sdev; \
557 ret = scsi_sdev_check_buf_bit(buf); \ 557 ret = scsi_sdev_check_buf_bit(buf); \
558 if (ret >= 0) { \ 558 if (ret >= 0) { \
559 sdev = to_scsi_device(dev); \ 559 sdev = to_scsi_device(dev); \
560 sdev->field = ret; \ 560 sdev->field = ret; \
561 ret = count; \ 561 ret = count; \
562 } \ 562 } \
563 return ret; \ 563 return ret; \
564 } \ 564 } \
565 static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field); 565 static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);
566 566
567 /* 567 /*
568 * scsi_sdev_check_buf_bit: return 0 if buf is "0", return 1 if buf is "1", 568 * scsi_sdev_check_buf_bit: return 0 if buf is "0", return 1 if buf is "1",
569 * else return -EINVAL. 569 * else return -EINVAL.
570 */ 570 */
571 static int scsi_sdev_check_buf_bit(const char *buf) 571 static int scsi_sdev_check_buf_bit(const char *buf)
572 { 572 {
573 if ((buf[1] == '\0') || ((buf[1] == '\n') && (buf[2] == '\0'))) { 573 if ((buf[1] == '\0') || ((buf[1] == '\n') && (buf[2] == '\0'))) {
574 if (buf[0] == '1') 574 if (buf[0] == '1')
575 return 1; 575 return 1;
576 else if (buf[0] == '0') 576 else if (buf[0] == '0')
577 return 0; 577 return 0;
578 else 578 else
579 return -EINVAL; 579 return -EINVAL;
580 } else 580 } else
581 return -EINVAL; 581 return -EINVAL;
582 } 582 }
583 #endif 583 #endif
584 /* 584 /*
585 * Create the actual show/store functions and data structures. 585 * Create the actual show/store functions and data structures.
586 */ 586 */
587 sdev_rd_attr (device_blocked, "%d\n");
588 sdev_rd_attr (type, "%d\n"); 587 sdev_rd_attr (type, "%d\n");
589 sdev_rd_attr (scsi_level, "%d\n"); 588 sdev_rd_attr (scsi_level, "%d\n");
590 sdev_rd_attr (vendor, "%.8s\n"); 589 sdev_rd_attr (vendor, "%.8s\n");
591 sdev_rd_attr (model, "%.16s\n"); 590 sdev_rd_attr (model, "%.16s\n");
592 sdev_rd_attr (rev, "%.4s\n"); 591 sdev_rd_attr (rev, "%.4s\n");
593 592
594 static ssize_t 593 static ssize_t
595 sdev_show_device_busy(struct device *dev, struct device_attribute *attr, 594 sdev_show_device_busy(struct device *dev, struct device_attribute *attr,
596 char *buf) 595 char *buf)
597 { 596 {
598 struct scsi_device *sdev = to_scsi_device(dev); 597 struct scsi_device *sdev = to_scsi_device(dev);
599 return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_busy)); 598 return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_busy));
600 } 599 }
601 static DEVICE_ATTR(device_busy, S_IRUGO, sdev_show_device_busy, NULL); 600 static DEVICE_ATTR(device_busy, S_IRUGO, sdev_show_device_busy, NULL);
601
602 static ssize_t
603 sdev_show_device_blocked(struct device *dev, struct device_attribute *attr,
604 char *buf)
605 {
606 struct scsi_device *sdev = to_scsi_device(dev);
607 return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_blocked));
608 }
609 static DEVICE_ATTR(device_blocked, S_IRUGO, sdev_show_device_blocked, NULL);
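Both device_busy and device_blocked are now plain decimal reads of atomic counters. A minimal userspace sketch that samples them for one device, assuming it is visible as sda:

#include <stdio.h>

static long read_counter(const char *path)
{
	long val = -1;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	printf("busy=%ld blocked=%ld\n",
	       read_counter("/sys/block/sda/device/device_busy"),
	       read_counter("/sys/block/sda/device/device_blocked"));
	return 0;
}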
602 610
603 /* 611 /*
604 * TODO: can we make these symlinks to the block layer ones? 612 * TODO: can we make these symlinks to the block layer ones?
605 */ 613 */
606 static ssize_t 614 static ssize_t
607 sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf) 615 sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf)
608 { 616 {
609 struct scsi_device *sdev; 617 struct scsi_device *sdev;
610 sdev = to_scsi_device(dev); 618 sdev = to_scsi_device(dev);
611 return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ); 619 return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ);
612 } 620 }
613 621
614 static ssize_t 622 static ssize_t
615 sdev_store_timeout (struct device *dev, struct device_attribute *attr, 623 sdev_store_timeout (struct device *dev, struct device_attribute *attr,
616 const char *buf, size_t count) 624 const char *buf, size_t count)
617 { 625 {
618 struct scsi_device *sdev; 626 struct scsi_device *sdev;
619 int timeout; 627 int timeout;
620 sdev = to_scsi_device(dev); 628 sdev = to_scsi_device(dev);
621 sscanf (buf, "%d\n", &timeout); 629 sscanf (buf, "%d\n", &timeout);
622 blk_queue_rq_timeout(sdev->request_queue, timeout * HZ); 630 blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
623 return count; 631 return count;
624 } 632 }
625 static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout); 633 static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout);
626 634
627 static ssize_t 635 static ssize_t
628 sdev_show_eh_timeout(struct device *dev, struct device_attribute *attr, char *buf) 636 sdev_show_eh_timeout(struct device *dev, struct device_attribute *attr, char *buf)
629 { 637 {
630 struct scsi_device *sdev; 638 struct scsi_device *sdev;
631 sdev = to_scsi_device(dev); 639 sdev = to_scsi_device(dev);
632 return snprintf(buf, 20, "%u\n", sdev->eh_timeout / HZ); 640 return snprintf(buf, 20, "%u\n", sdev->eh_timeout / HZ);
633 } 641 }
634 642
635 static ssize_t 643 static ssize_t
636 sdev_store_eh_timeout(struct device *dev, struct device_attribute *attr, 644 sdev_store_eh_timeout(struct device *dev, struct device_attribute *attr,
637 const char *buf, size_t count) 645 const char *buf, size_t count)
638 { 646 {
639 struct scsi_device *sdev; 647 struct scsi_device *sdev;
640 unsigned int eh_timeout; 648 unsigned int eh_timeout;
641 int err; 649 int err;
642 650
643 if (!capable(CAP_SYS_ADMIN)) 651 if (!capable(CAP_SYS_ADMIN))
644 return -EACCES; 652 return -EACCES;
645 653
646 sdev = to_scsi_device(dev); 654 sdev = to_scsi_device(dev);
647 err = kstrtouint(buf, 10, &eh_timeout); 655 err = kstrtouint(buf, 10, &eh_timeout);
648 if (err) 656 if (err)
649 return err; 657 return err;
650 sdev->eh_timeout = eh_timeout * HZ; 658 sdev->eh_timeout = eh_timeout * HZ;
651 659
652 return count; 660 return count;
653 } 661 }
654 static DEVICE_ATTR(eh_timeout, S_IRUGO | S_IWUSR, sdev_show_eh_timeout, sdev_store_eh_timeout); 662 static DEVICE_ATTR(eh_timeout, S_IRUGO | S_IWUSR, sdev_show_eh_timeout, sdev_store_eh_timeout);
655 663
656 static ssize_t 664 static ssize_t
657 store_rescan_field (struct device *dev, struct device_attribute *attr, 665 store_rescan_field (struct device *dev, struct device_attribute *attr,
658 const char *buf, size_t count) 666 const char *buf, size_t count)
659 { 667 {
660 scsi_rescan_device(dev); 668 scsi_rescan_device(dev);
661 return count; 669 return count;
662 } 670 }
663 static DEVICE_ATTR(rescan, S_IWUSR, NULL, store_rescan_field); 671 static DEVICE_ATTR(rescan, S_IWUSR, NULL, store_rescan_field);
664 672
665 static ssize_t 673 static ssize_t
666 sdev_store_delete(struct device *dev, struct device_attribute *attr, 674 sdev_store_delete(struct device *dev, struct device_attribute *attr,
667 const char *buf, size_t count) 675 const char *buf, size_t count)
668 { 676 {
669 if (device_remove_file_self(dev, attr)) 677 if (device_remove_file_self(dev, attr))
670 scsi_remove_device(to_scsi_device(dev)); 678 scsi_remove_device(to_scsi_device(dev));
671 return count; 679 return count;
672 }; 680 };
673 static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete); 681 static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
674 682
675 static ssize_t 683 static ssize_t
676 store_state_field(struct device *dev, struct device_attribute *attr, 684 store_state_field(struct device *dev, struct device_attribute *attr,
677 const char *buf, size_t count) 685 const char *buf, size_t count)
678 { 686 {
679 int i; 687 int i;
680 struct scsi_device *sdev = to_scsi_device(dev); 688 struct scsi_device *sdev = to_scsi_device(dev);
681 enum scsi_device_state state = 0; 689 enum scsi_device_state state = 0;
682 690
683 for (i = 0; i < ARRAY_SIZE(sdev_states); i++) { 691 for (i = 0; i < ARRAY_SIZE(sdev_states); i++) {
684 const int len = strlen(sdev_states[i].name); 692 const int len = strlen(sdev_states[i].name);
685 if (strncmp(sdev_states[i].name, buf, len) == 0 && 693 if (strncmp(sdev_states[i].name, buf, len) == 0 &&
686 buf[len] == '\n') { 694 buf[len] == '\n') {
687 state = sdev_states[i].value; 695 state = sdev_states[i].value;
688 break; 696 break;
689 } 697 }
690 } 698 }
691 if (!state) 699 if (!state)
692 return -EINVAL; 700 return -EINVAL;
693 701
694 if (scsi_device_set_state(sdev, state)) 702 if (scsi_device_set_state(sdev, state))
695 return -EINVAL; 703 return -EINVAL;
696 return count; 704 return count;
697 } 705 }
698 706
699 static ssize_t 707 static ssize_t
700 show_state_field(struct device *dev, struct device_attribute *attr, char *buf) 708 show_state_field(struct device *dev, struct device_attribute *attr, char *buf)
701 { 709 {
702 struct scsi_device *sdev = to_scsi_device(dev); 710 struct scsi_device *sdev = to_scsi_device(dev);
703 const char *name = scsi_device_state_name(sdev->sdev_state); 711 const char *name = scsi_device_state_name(sdev->sdev_state);
704 712
705 if (!name) 713 if (!name)
706 return -EINVAL; 714 return -EINVAL;
707 715
708 return snprintf(buf, 20, "%s\n", name); 716 return snprintf(buf, 20, "%s\n", name);
709 } 717 }
710 718
711 static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, show_state_field, store_state_field); 719 static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, show_state_field, store_state_field);
712 720
713 static ssize_t 721 static ssize_t
714 show_queue_type_field(struct device *dev, struct device_attribute *attr, 722 show_queue_type_field(struct device *dev, struct device_attribute *attr,
715 char *buf) 723 char *buf)
716 { 724 {
717 struct scsi_device *sdev = to_scsi_device(dev); 725 struct scsi_device *sdev = to_scsi_device(dev);
718 const char *name = "none"; 726 const char *name = "none";
719 727
720 if (sdev->ordered_tags) 728 if (sdev->ordered_tags)
721 name = "ordered"; 729 name = "ordered";
722 else if (sdev->simple_tags) 730 else if (sdev->simple_tags)
723 name = "simple"; 731 name = "simple";
724 732
725 return snprintf(buf, 20, "%s\n", name); 733 return snprintf(buf, 20, "%s\n", name);
726 } 734 }
727 735
728 static ssize_t 736 static ssize_t
729 store_queue_type_field(struct device *dev, struct device_attribute *attr, 737 store_queue_type_field(struct device *dev, struct device_attribute *attr,
730 const char *buf, size_t count) 738 const char *buf, size_t count)
731 { 739 {
732 struct scsi_device *sdev = to_scsi_device(dev); 740 struct scsi_device *sdev = to_scsi_device(dev);
733 struct scsi_host_template *sht = sdev->host->hostt; 741 struct scsi_host_template *sht = sdev->host->hostt;
734 int tag_type = 0, retval; 742 int tag_type = 0, retval;
735 int prev_tag_type = scsi_get_tag_type(sdev); 743 int prev_tag_type = scsi_get_tag_type(sdev);
736 744
737 if (!sdev->tagged_supported || !sht->change_queue_type) 745 if (!sdev->tagged_supported || !sht->change_queue_type)
738 return -EINVAL; 746 return -EINVAL;
739 747
740 if (strncmp(buf, "ordered", 7) == 0) 748 if (strncmp(buf, "ordered", 7) == 0)
741 tag_type = MSG_ORDERED_TAG; 749 tag_type = MSG_ORDERED_TAG;
742 else if (strncmp(buf, "simple", 6) == 0) 750 else if (strncmp(buf, "simple", 6) == 0)
743 tag_type = MSG_SIMPLE_TAG; 751 tag_type = MSG_SIMPLE_TAG;
744 else if (strncmp(buf, "none", 4) != 0) 752 else if (strncmp(buf, "none", 4) != 0)
745 return -EINVAL; 753 return -EINVAL;
746 754
747 if (tag_type == prev_tag_type) 755 if (tag_type == prev_tag_type)
748 return count; 756 return count;
749 757
750 retval = sht->change_queue_type(sdev, tag_type); 758 retval = sht->change_queue_type(sdev, tag_type);
751 if (retval < 0) 759 if (retval < 0)
752 return retval; 760 return retval;
753 761
754 return count; 762 return count;
755 } 763 }
756 764
757 static DEVICE_ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field, 765 static DEVICE_ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field,
758 store_queue_type_field); 766 store_queue_type_field);
759 767
760 #define sdev_vpd_pg_attr(_page) \ 768 #define sdev_vpd_pg_attr(_page) \
761 static ssize_t \ 769 static ssize_t \
762 show_vpd_##_page(struct file *filp, struct kobject *kobj, \ 770 show_vpd_##_page(struct file *filp, struct kobject *kobj, \
763 struct bin_attribute *bin_attr, \ 771 struct bin_attribute *bin_attr, \
764 char *buf, loff_t off, size_t count) \ 772 char *buf, loff_t off, size_t count) \
765 { \ 773 { \
766 struct device *dev = container_of(kobj, struct device, kobj); \ 774 struct device *dev = container_of(kobj, struct device, kobj); \
767 struct scsi_device *sdev = to_scsi_device(dev); \ 775 struct scsi_device *sdev = to_scsi_device(dev); \
768 if (!sdev->vpd_##_page) \ 776 if (!sdev->vpd_##_page) \
769 return -EINVAL; \ 777 return -EINVAL; \
770 return memory_read_from_buffer(buf, count, &off, \ 778 return memory_read_from_buffer(buf, count, &off, \
771 sdev->vpd_##_page, \ 779 sdev->vpd_##_page, \
772 sdev->vpd_##_page##_len); \ 780 sdev->vpd_##_page##_len); \
773 } \ 781 } \
774 static struct bin_attribute dev_attr_vpd_##_page = { \ 782 static struct bin_attribute dev_attr_vpd_##_page = { \
775 .attr = {.name = __stringify(vpd_##_page), .mode = S_IRUGO }, \ 783 .attr = {.name = __stringify(vpd_##_page), .mode = S_IRUGO }, \
776 .size = 0, \ 784 .size = 0, \
777 .read = show_vpd_##_page, \ 785 .read = show_vpd_##_page, \
778 }; 786 };
779 787
780 sdev_vpd_pg_attr(pg83); 788 sdev_vpd_pg_attr(pg83);
781 sdev_vpd_pg_attr(pg80); 789 sdev_vpd_pg_attr(pg80);
782 790
783 static ssize_t 791 static ssize_t
784 show_iostat_counterbits(struct device *dev, struct device_attribute *attr, 792 show_iostat_counterbits(struct device *dev, struct device_attribute *attr,
785 char *buf) 793 char *buf)
786 { 794 {
787 return snprintf(buf, 20, "%d\n", (int)sizeof(atomic_t) * 8); 795 return snprintf(buf, 20, "%d\n", (int)sizeof(atomic_t) * 8);
788 } 796 }
789 797
790 static DEVICE_ATTR(iocounterbits, S_IRUGO, show_iostat_counterbits, NULL); 798 static DEVICE_ATTR(iocounterbits, S_IRUGO, show_iostat_counterbits, NULL);
791 799
792 #define show_sdev_iostat(field) \ 800 #define show_sdev_iostat(field) \
793 static ssize_t \ 801 static ssize_t \
794 show_iostat_##field(struct device *dev, struct device_attribute *attr, \ 802 show_iostat_##field(struct device *dev, struct device_attribute *attr, \
795 char *buf) \ 803 char *buf) \
796 { \ 804 { \
797 struct scsi_device *sdev = to_scsi_device(dev); \ 805 struct scsi_device *sdev = to_scsi_device(dev); \
798 unsigned long long count = atomic_read(&sdev->field); \ 806 unsigned long long count = atomic_read(&sdev->field); \
799 return snprintf(buf, 20, "0x%llx\n", count); \ 807 return snprintf(buf, 20, "0x%llx\n", count); \
800 } \ 808 } \
801 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL) 809 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
802 810
803 show_sdev_iostat(iorequest_cnt); 811 show_sdev_iostat(iorequest_cnt);
804 show_sdev_iostat(iodone_cnt); 812 show_sdev_iostat(iodone_cnt);
805 show_sdev_iostat(ioerr_cnt); 813 show_sdev_iostat(ioerr_cnt);
806 814
807 static ssize_t 815 static ssize_t
808 sdev_show_modalias(struct device *dev, struct device_attribute *attr, char *buf) 816 sdev_show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
809 { 817 {
810 struct scsi_device *sdev; 818 struct scsi_device *sdev;
811 sdev = to_scsi_device(dev); 819 sdev = to_scsi_device(dev);
812 return snprintf (buf, 20, SCSI_DEVICE_MODALIAS_FMT "\n", sdev->type); 820 return snprintf (buf, 20, SCSI_DEVICE_MODALIAS_FMT "\n", sdev->type);
813 } 821 }
814 static DEVICE_ATTR(modalias, S_IRUGO, sdev_show_modalias, NULL); 822 static DEVICE_ATTR(modalias, S_IRUGO, sdev_show_modalias, NULL);
815 823
816 #define DECLARE_EVT_SHOW(name, Cap_name) \ 824 #define DECLARE_EVT_SHOW(name, Cap_name) \
817 static ssize_t \ 825 static ssize_t \
818 sdev_show_evt_##name(struct device *dev, struct device_attribute *attr, \ 826 sdev_show_evt_##name(struct device *dev, struct device_attribute *attr, \
819 char *buf) \ 827 char *buf) \
820 { \ 828 { \
821 struct scsi_device *sdev = to_scsi_device(dev); \ 829 struct scsi_device *sdev = to_scsi_device(dev); \
822 int val = test_bit(SDEV_EVT_##Cap_name, sdev->supported_events);\ 830 int val = test_bit(SDEV_EVT_##Cap_name, sdev->supported_events);\
823 return snprintf(buf, 20, "%d\n", val); \ 831 return snprintf(buf, 20, "%d\n", val); \
824 } 832 }
825 833
826 #define DECLARE_EVT_STORE(name, Cap_name) \ 834 #define DECLARE_EVT_STORE(name, Cap_name) \
827 static ssize_t \ 835 static ssize_t \
828 sdev_store_evt_##name(struct device *dev, struct device_attribute *attr,\ 836 sdev_store_evt_##name(struct device *dev, struct device_attribute *attr,\
829 const char *buf, size_t count) \ 837 const char *buf, size_t count) \
830 { \ 838 { \
831 struct scsi_device *sdev = to_scsi_device(dev); \ 839 struct scsi_device *sdev = to_scsi_device(dev); \
832 int val = simple_strtoul(buf, NULL, 0); \ 840 int val = simple_strtoul(buf, NULL, 0); \
833 if (val == 0) \ 841 if (val == 0) \
834 clear_bit(SDEV_EVT_##Cap_name, sdev->supported_events); \ 842 clear_bit(SDEV_EVT_##Cap_name, sdev->supported_events); \
835 else if (val == 1) \ 843 else if (val == 1) \
836 set_bit(SDEV_EVT_##Cap_name, sdev->supported_events); \ 844 set_bit(SDEV_EVT_##Cap_name, sdev->supported_events); \
837 else \ 845 else \
838 return -EINVAL; \ 846 return -EINVAL; \
839 return count; \ 847 return count; \
840 } 848 }
841 849
842 #define DECLARE_EVT(name, Cap_name) \ 850 #define DECLARE_EVT(name, Cap_name) \
843 DECLARE_EVT_SHOW(name, Cap_name) \ 851 DECLARE_EVT_SHOW(name, Cap_name) \
844 DECLARE_EVT_STORE(name, Cap_name) \ 852 DECLARE_EVT_STORE(name, Cap_name) \
845 static DEVICE_ATTR(evt_##name, S_IRUGO, sdev_show_evt_##name, \ 853 static DEVICE_ATTR(evt_##name, S_IRUGO, sdev_show_evt_##name, \
846 sdev_store_evt_##name); 854 sdev_store_evt_##name);
847 #define REF_EVT(name) &dev_attr_evt_##name.attr 855 #define REF_EVT(name) &dev_attr_evt_##name.attr
848 856
849 DECLARE_EVT(media_change, MEDIA_CHANGE) 857 DECLARE_EVT(media_change, MEDIA_CHANGE)
850 DECLARE_EVT(inquiry_change_reported, INQUIRY_CHANGE_REPORTED) 858 DECLARE_EVT(inquiry_change_reported, INQUIRY_CHANGE_REPORTED)
851 DECLARE_EVT(capacity_change_reported, CAPACITY_CHANGE_REPORTED) 859 DECLARE_EVT(capacity_change_reported, CAPACITY_CHANGE_REPORTED)
852 DECLARE_EVT(soft_threshold_reached, SOFT_THRESHOLD_REACHED_REPORTED) 860 DECLARE_EVT(soft_threshold_reached, SOFT_THRESHOLD_REACHED_REPORTED)
853 DECLARE_EVT(mode_parameter_change_reported, MODE_PARAMETER_CHANGE_REPORTED) 861 DECLARE_EVT(mode_parameter_change_reported, MODE_PARAMETER_CHANGE_REPORTED)
854 DECLARE_EVT(lun_change_reported, LUN_CHANGE_REPORTED) 862 DECLARE_EVT(lun_change_reported, LUN_CHANGE_REPORTED)
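/*
 * Illustrative expansion (not part of this diff): DECLARE_EVT(media_change,
 * MEDIA_CHANGE) above produces a show routine, a store routine, and an
 * "evt_media_change" attribute.  The show side reduces to the test_bit()
 * sketch below; the matching store routine (generated by DECLARE_EVT_STORE)
 * sets or clears the same bit in sdev->supported_events.
 */
static ssize_t
sdev_show_evt_media_change(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	int val = test_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);

	return snprintf(buf, 20, "%d\n", val);
}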
855 863
856 static ssize_t 864 static ssize_t
857 sdev_store_queue_depth(struct device *dev, struct device_attribute *attr, 865 sdev_store_queue_depth(struct device *dev, struct device_attribute *attr,
858 const char *buf, size_t count) 866 const char *buf, size_t count)
859 { 867 {
860 int depth, retval; 868 int depth, retval;
861 struct scsi_device *sdev = to_scsi_device(dev); 869 struct scsi_device *sdev = to_scsi_device(dev);
862 struct scsi_host_template *sht = sdev->host->hostt; 870 struct scsi_host_template *sht = sdev->host->hostt;
863 871
864 if (!sht->change_queue_depth) 872 if (!sht->change_queue_depth)
865 return -EINVAL; 873 return -EINVAL;
866 874
867 depth = simple_strtoul(buf, NULL, 0); 875 depth = simple_strtoul(buf, NULL, 0);
868 876
869 if (depth < 1) 877 if (depth < 1)
870 return -EINVAL; 878 return -EINVAL;
871 879
872 retval = sht->change_queue_depth(sdev, depth, 880 retval = sht->change_queue_depth(sdev, depth,
873 SCSI_QDEPTH_DEFAULT); 881 SCSI_QDEPTH_DEFAULT);
874 if (retval < 0) 882 if (retval < 0)
875 return retval; 883 return retval;
876 884
877 sdev->max_queue_depth = sdev->queue_depth; 885 sdev->max_queue_depth = sdev->queue_depth;
878 886
879 return count; 887 return count;
880 } 888 }
881 sdev_show_function(queue_depth, "%d\n"); 889 sdev_show_function(queue_depth, "%d\n");
882 890
883 static DEVICE_ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth, 891 static DEVICE_ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth,
884 sdev_store_queue_depth); 892 sdev_store_queue_depth);
885 893
886 static ssize_t 894 static ssize_t
887 sdev_show_queue_ramp_up_period(struct device *dev, 895 sdev_show_queue_ramp_up_period(struct device *dev,
888 struct device_attribute *attr, 896 struct device_attribute *attr,
889 char *buf) 897 char *buf)
890 { 898 {
891 struct scsi_device *sdev; 899 struct scsi_device *sdev;
892 sdev = to_scsi_device(dev); 900 sdev = to_scsi_device(dev);
893 return snprintf(buf, 20, "%u\n", 901 return snprintf(buf, 20, "%u\n",
894 jiffies_to_msecs(sdev->queue_ramp_up_period)); 902 jiffies_to_msecs(sdev->queue_ramp_up_period));
895 } 903 }
896 904
897 static ssize_t 905 static ssize_t
898 sdev_store_queue_ramp_up_period(struct device *dev, 906 sdev_store_queue_ramp_up_period(struct device *dev,
899 struct device_attribute *attr, 907 struct device_attribute *attr,
900 const char *buf, size_t count) 908 const char *buf, size_t count)
901 { 909 {
902 struct scsi_device *sdev = to_scsi_device(dev); 910 struct scsi_device *sdev = to_scsi_device(dev);
903 unsigned long period; 911 unsigned long period;
904 912
905 if (strict_strtoul(buf, 10, &period)) 913 if (strict_strtoul(buf, 10, &period))
906 return -EINVAL; 914 return -EINVAL;
907 915
908 sdev->queue_ramp_up_period = msecs_to_jiffies(period); 916 sdev->queue_ramp_up_period = msecs_to_jiffies(period);
909 return period; 917 return period;
910 } 918 }
911 919
912 static DEVICE_ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR, 920 static DEVICE_ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR,
913 sdev_show_queue_ramp_up_period, 921 sdev_show_queue_ramp_up_period,
914 sdev_store_queue_ramp_up_period); 922 sdev_store_queue_ramp_up_period);
915 923
916 static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj, 924 static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj,
917 struct attribute *attr, int i) 925 struct attribute *attr, int i)
918 { 926 {
919 struct device *dev = container_of(kobj, struct device, kobj); 927 struct device *dev = container_of(kobj, struct device, kobj);
920 struct scsi_device *sdev = to_scsi_device(dev); 928 struct scsi_device *sdev = to_scsi_device(dev);
921 929
922 930
923 if (attr == &dev_attr_queue_depth.attr && 931 if (attr == &dev_attr_queue_depth.attr &&
924 !sdev->host->hostt->change_queue_depth) 932 !sdev->host->hostt->change_queue_depth)
925 return S_IRUGO; 933 return S_IRUGO;
926 934
927 if (attr == &dev_attr_queue_ramp_up_period.attr && 935 if (attr == &dev_attr_queue_ramp_up_period.attr &&
928 !sdev->host->hostt->change_queue_depth) 936 !sdev->host->hostt->change_queue_depth)
929 return 0; 937 return 0;
930 938
931 if (attr == &dev_attr_queue_type.attr && 939 if (attr == &dev_attr_queue_type.attr &&
932 !sdev->host->hostt->change_queue_type) 940 !sdev->host->hostt->change_queue_type)
933 return S_IRUGO; 941 return S_IRUGO;
934 942
935 return attr->mode; 943 return attr->mode;
936 } 944 }
937 945
938 /* Default template for device attributes. May NOT be modified */ 946 /* Default template for device attributes. May NOT be modified */
939 static struct attribute *scsi_sdev_attrs[] = { 947 static struct attribute *scsi_sdev_attrs[] = {
940 &dev_attr_device_blocked.attr, 948 &dev_attr_device_blocked.attr,
941 &dev_attr_type.attr, 949 &dev_attr_type.attr,
942 &dev_attr_scsi_level.attr, 950 &dev_attr_scsi_level.attr,
943 &dev_attr_device_busy.attr, 951 &dev_attr_device_busy.attr,
944 &dev_attr_vendor.attr, 952 &dev_attr_vendor.attr,
945 &dev_attr_model.attr, 953 &dev_attr_model.attr,
946 &dev_attr_rev.attr, 954 &dev_attr_rev.attr,
947 &dev_attr_rescan.attr, 955 &dev_attr_rescan.attr,
948 &dev_attr_delete.attr, 956 &dev_attr_delete.attr,
949 &dev_attr_state.attr, 957 &dev_attr_state.attr,
950 &dev_attr_timeout.attr, 958 &dev_attr_timeout.attr,
951 &dev_attr_eh_timeout.attr, 959 &dev_attr_eh_timeout.attr,
952 &dev_attr_iocounterbits.attr, 960 &dev_attr_iocounterbits.attr,
953 &dev_attr_iorequest_cnt.attr, 961 &dev_attr_iorequest_cnt.attr,
954 &dev_attr_iodone_cnt.attr, 962 &dev_attr_iodone_cnt.attr,
955 &dev_attr_ioerr_cnt.attr, 963 &dev_attr_ioerr_cnt.attr,
956 &dev_attr_modalias.attr, 964 &dev_attr_modalias.attr,
957 &dev_attr_queue_depth.attr, 965 &dev_attr_queue_depth.attr,
958 &dev_attr_queue_type.attr, 966 &dev_attr_queue_type.attr,
959 &dev_attr_queue_ramp_up_period.attr, 967 &dev_attr_queue_ramp_up_period.attr,
960 REF_EVT(media_change), 968 REF_EVT(media_change),
961 REF_EVT(inquiry_change_reported), 969 REF_EVT(inquiry_change_reported),
962 REF_EVT(capacity_change_reported), 970 REF_EVT(capacity_change_reported),
963 REF_EVT(soft_threshold_reached), 971 REF_EVT(soft_threshold_reached),
964 REF_EVT(mode_parameter_change_reported), 972 REF_EVT(mode_parameter_change_reported),
965 REF_EVT(lun_change_reported), 973 REF_EVT(lun_change_reported),
966 NULL 974 NULL
967 }; 975 };
968 976
969 static struct bin_attribute *scsi_sdev_bin_attrs[] = { 977 static struct bin_attribute *scsi_sdev_bin_attrs[] = {
970 &dev_attr_vpd_pg83, 978 &dev_attr_vpd_pg83,
971 &dev_attr_vpd_pg80, 979 &dev_attr_vpd_pg80,
972 NULL 980 NULL
973 }; 981 };
974 static struct attribute_group scsi_sdev_attr_group = { 982 static struct attribute_group scsi_sdev_attr_group = {
975 .attrs = scsi_sdev_attrs, 983 .attrs = scsi_sdev_attrs,
976 .bin_attrs = scsi_sdev_bin_attrs, 984 .bin_attrs = scsi_sdev_bin_attrs,
977 .is_visible = scsi_sdev_attr_is_visible, 985 .is_visible = scsi_sdev_attr_is_visible,
978 }; 986 };
979 987
980 static const struct attribute_group *scsi_sdev_attr_groups[] = { 988 static const struct attribute_group *scsi_sdev_attr_groups[] = {
981 &scsi_sdev_attr_group, 989 &scsi_sdev_attr_group,
982 NULL 990 NULL
983 }; 991 };
984 992
985 static int scsi_target_add(struct scsi_target *starget) 993 static int scsi_target_add(struct scsi_target *starget)
986 { 994 {
987 int error; 995 int error;
988 996
989 if (starget->state != STARGET_CREATED) 997 if (starget->state != STARGET_CREATED)
990 return 0; 998 return 0;
991 999
992 error = device_add(&starget->dev); 1000 error = device_add(&starget->dev);
993 if (error) { 1001 if (error) {
994 dev_err(&starget->dev, "target device_add failed, error %d\n", error); 1002 dev_err(&starget->dev, "target device_add failed, error %d\n", error);
995 return error; 1003 return error;
996 } 1004 }
997 transport_add_device(&starget->dev); 1005 transport_add_device(&starget->dev);
998 starget->state = STARGET_RUNNING; 1006 starget->state = STARGET_RUNNING;
999 1007
1000 pm_runtime_set_active(&starget->dev); 1008 pm_runtime_set_active(&starget->dev);
1001 pm_runtime_enable(&starget->dev); 1009 pm_runtime_enable(&starget->dev);
1002 device_enable_async_suspend(&starget->dev); 1010 device_enable_async_suspend(&starget->dev);
1003 1011
1004 return 0; 1012 return 0;
1005 } 1013 }
1006 1014
1007 /** 1015 /**
1008 * scsi_sysfs_add_sdev - add scsi device to sysfs 1016 * scsi_sysfs_add_sdev - add scsi device to sysfs
1009 * @sdev: scsi_device to add 1017 * @sdev: scsi_device to add
1010 * 1018 *
1011 * Return value: 1019 * Return value:
1012 * 0 on Success / non-zero on Failure 1020 * 0 on Success / non-zero on Failure
1013 **/ 1021 **/
1014 int scsi_sysfs_add_sdev(struct scsi_device *sdev) 1022 int scsi_sysfs_add_sdev(struct scsi_device *sdev)
1015 { 1023 {
1016 int error, i; 1024 int error, i;
1017 struct request_queue *rq = sdev->request_queue; 1025 struct request_queue *rq = sdev->request_queue;
1018 struct scsi_target *starget = sdev->sdev_target; 1026 struct scsi_target *starget = sdev->sdev_target;
1019 1027
1020 error = scsi_device_set_state(sdev, SDEV_RUNNING); 1028 error = scsi_device_set_state(sdev, SDEV_RUNNING);
1021 if (error) 1029 if (error)
1022 return error; 1030 return error;
1023 1031
1024 error = scsi_target_add(starget); 1032 error = scsi_target_add(starget);
1025 if (error) 1033 if (error)
1026 return error; 1034 return error;
1027 1035
1028 transport_configure_device(&starget->dev); 1036 transport_configure_device(&starget->dev);
1029 1037
1030 device_enable_async_suspend(&sdev->sdev_gendev); 1038 device_enable_async_suspend(&sdev->sdev_gendev);
1031 scsi_autopm_get_target(starget); 1039 scsi_autopm_get_target(starget);
1032 pm_runtime_set_active(&sdev->sdev_gendev); 1040 pm_runtime_set_active(&sdev->sdev_gendev);
1033 pm_runtime_forbid(&sdev->sdev_gendev); 1041 pm_runtime_forbid(&sdev->sdev_gendev);
1034 pm_runtime_enable(&sdev->sdev_gendev); 1042 pm_runtime_enable(&sdev->sdev_gendev);
1035 scsi_autopm_put_target(starget); 1043 scsi_autopm_put_target(starget);
1036 1044
1037 /* The following call will keep sdev active indefinitely, until 1045 /* The following call will keep sdev active indefinitely, until
1038 * its driver does a corresponding scsi_autopm_put_device(). Only 1046 * its driver does a corresponding scsi_autopm_put_device(). Only
1039 * drivers supporting autosuspend will do this. 1047 * drivers supporting autosuspend will do this.
1040 */ 1048 */
1041 scsi_autopm_get_device(sdev); 1049 scsi_autopm_get_device(sdev);
1042 1050
1043 error = device_add(&sdev->sdev_gendev); 1051 error = device_add(&sdev->sdev_gendev);
1044 if (error) { 1052 if (error) {
1045 sdev_printk(KERN_INFO, sdev, 1053 sdev_printk(KERN_INFO, sdev,
1046 "failed to add device: %d\n", error); 1054 "failed to add device: %d\n", error);
1047 return error; 1055 return error;
1048 } 1056 }
1049 device_enable_async_suspend(&sdev->sdev_dev); 1057 device_enable_async_suspend(&sdev->sdev_dev);
1050 error = device_add(&sdev->sdev_dev); 1058 error = device_add(&sdev->sdev_dev);
1051 if (error) { 1059 if (error) {
1052 sdev_printk(KERN_INFO, sdev, 1060 sdev_printk(KERN_INFO, sdev,
1053 "failed to add class device: %d\n", error); 1061 "failed to add class device: %d\n", error);
1054 device_del(&sdev->sdev_gendev); 1062 device_del(&sdev->sdev_gendev);
1055 return error; 1063 return error;
1056 } 1064 }
1057 transport_add_device(&sdev->sdev_gendev); 1065 transport_add_device(&sdev->sdev_gendev);
1058 sdev->is_visible = 1; 1066 sdev->is_visible = 1;
1059 1067
1060 error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL); 1068 error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL);
1061 1069
1062 if (error) 1070 if (error)
1063 /* we're treating error on bsg register as non-fatal, 1071 /* we're treating error on bsg register as non-fatal,
1064 * so pretend nothing went wrong */ 1072 * so pretend nothing went wrong */
1065 sdev_printk(KERN_INFO, sdev, 1073 sdev_printk(KERN_INFO, sdev,
1066 "Failed to register bsg queue, errno=%d\n", error); 1074 "Failed to register bsg queue, errno=%d\n", error);
1067 1075
1068 /* add additional host specific attributes */ 1076 /* add additional host specific attributes */
1069 if (sdev->host->hostt->sdev_attrs) { 1077 if (sdev->host->hostt->sdev_attrs) {
1070 for (i = 0; sdev->host->hostt->sdev_attrs[i]; i++) { 1078 for (i = 0; sdev->host->hostt->sdev_attrs[i]; i++) {
1071 error = device_create_file(&sdev->sdev_gendev, 1079 error = device_create_file(&sdev->sdev_gendev,
1072 sdev->host->hostt->sdev_attrs[i]); 1080 sdev->host->hostt->sdev_attrs[i]);
1073 if (error) 1081 if (error)
1074 return error; 1082 return error;
1075 } 1083 }
1076 } 1084 }
1077 1085
1078 return error; 1086 return error;
1079 } 1087 }
1080 1088
1081 void __scsi_remove_device(struct scsi_device *sdev) 1089 void __scsi_remove_device(struct scsi_device *sdev)
1082 { 1090 {
1083 struct device *dev = &sdev->sdev_gendev; 1091 struct device *dev = &sdev->sdev_gendev;
1084 1092
1085 if (sdev->is_visible) { 1093 if (sdev->is_visible) {
1086 if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0) 1094 if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0)
1087 return; 1095 return;
1088 1096
1089 bsg_unregister_queue(sdev->request_queue); 1097 bsg_unregister_queue(sdev->request_queue);
1090 device_unregister(&sdev->sdev_dev); 1098 device_unregister(&sdev->sdev_dev);
1091 transport_remove_device(dev); 1099 transport_remove_device(dev);
1092 device_del(dev); 1100 device_del(dev);
1093 } else 1101 } else
1094 put_device(&sdev->sdev_dev); 1102 put_device(&sdev->sdev_dev);
1095 1103
1096 /* 1104 /*
1097 * Stop accepting new requests and wait until all queuecommand() and 1105 * Stop accepting new requests and wait until all queuecommand() and
1098 * scsi_run_queue() invocations have finished before tearing down the 1106 * scsi_run_queue() invocations have finished before tearing down the
1099 * device. 1107 * device.
1100 */ 1108 */
1101 scsi_device_set_state(sdev, SDEV_DEL); 1109 scsi_device_set_state(sdev, SDEV_DEL);
1102 blk_cleanup_queue(sdev->request_queue); 1110 blk_cleanup_queue(sdev->request_queue);
1103 cancel_work_sync(&sdev->requeue_work); 1111 cancel_work_sync(&sdev->requeue_work);
1104 1112
1105 if (sdev->host->hostt->slave_destroy) 1113 if (sdev->host->hostt->slave_destroy)
1106 sdev->host->hostt->slave_destroy(sdev); 1114 sdev->host->hostt->slave_destroy(sdev);
1107 transport_destroy_device(dev); 1115 transport_destroy_device(dev);
1108 1116
1109 /* 1117 /*
1110 * Paired with the kref_get() in scsi_sysfs_initialize(). We have 1118 * Paired with the kref_get() in scsi_sysfs_initialize(). We have
1111 * removed sysfs visibility from the device, so make the target 1119 * removed sysfs visibility from the device, so make the target
1112 * invisible if this was the last device underneath it. 1120 * invisible if this was the last device underneath it.
1113 */ 1121 */
1114 scsi_target_reap(scsi_target(sdev)); 1122 scsi_target_reap(scsi_target(sdev));
1115 1123
1116 put_device(dev); 1124 put_device(dev);
1117 } 1125 }
1118 1126
1119 /** 1127 /**
1120 * scsi_remove_device - unregister a device from the scsi bus 1128 * scsi_remove_device - unregister a device from the scsi bus
1121 * @sdev: scsi_device to unregister 1129 * @sdev: scsi_device to unregister
1122 **/ 1130 **/
1123 void scsi_remove_device(struct scsi_device *sdev) 1131 void scsi_remove_device(struct scsi_device *sdev)
1124 { 1132 {
1125 struct Scsi_Host *shost = sdev->host; 1133 struct Scsi_Host *shost = sdev->host;
1126 1134
1127 mutex_lock(&shost->scan_mutex); 1135 mutex_lock(&shost->scan_mutex);
1128 __scsi_remove_device(sdev); 1136 __scsi_remove_device(sdev);
1129 mutex_unlock(&shost->scan_mutex); 1137 mutex_unlock(&shost->scan_mutex);
1130 } 1138 }
1131 EXPORT_SYMBOL(scsi_remove_device); 1139 EXPORT_SYMBOL(scsi_remove_device);
1132 1140
1133 static void __scsi_remove_target(struct scsi_target *starget) 1141 static void __scsi_remove_target(struct scsi_target *starget)
1134 { 1142 {
1135 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 1143 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1136 unsigned long flags; 1144 unsigned long flags;
1137 struct scsi_device *sdev; 1145 struct scsi_device *sdev;
1138 1146
1139 spin_lock_irqsave(shost->host_lock, flags); 1147 spin_lock_irqsave(shost->host_lock, flags);
1140 restart: 1148 restart:
1141 list_for_each_entry(sdev, &shost->__devices, siblings) { 1149 list_for_each_entry(sdev, &shost->__devices, siblings) {
1142 if (sdev->channel != starget->channel || 1150 if (sdev->channel != starget->channel ||
1143 sdev->id != starget->id || 1151 sdev->id != starget->id ||
1144 scsi_device_get(sdev)) 1152 scsi_device_get(sdev))
1145 continue; 1153 continue;
1146 spin_unlock_irqrestore(shost->host_lock, flags); 1154 spin_unlock_irqrestore(shost->host_lock, flags);
1147 scsi_remove_device(sdev); 1155 scsi_remove_device(sdev);
1148 scsi_device_put(sdev); 1156 scsi_device_put(sdev);
1149 spin_lock_irqsave(shost->host_lock, flags); 1157 spin_lock_irqsave(shost->host_lock, flags);
1150 goto restart; 1158 goto restart;
1151 } 1159 }
1152 spin_unlock_irqrestore(shost->host_lock, flags); 1160 spin_unlock_irqrestore(shost->host_lock, flags);
1153 } 1161 }
1154 1162
1155 /** 1163 /**
1156 * scsi_remove_target - try to remove a target and all its devices 1164 * scsi_remove_target - try to remove a target and all its devices
1157 * @dev: generic starget or parent of generic stargets to be removed 1165 * @dev: generic starget or parent of generic stargets to be removed
1158 * 1166 *
1159 * Note: This is slightly racy. It is possible that if the user 1167 * Note: This is slightly racy. It is possible that if the user
1160 * requests the addition of another device then the target won't be 1168 * requests the addition of another device then the target won't be
1161 * removed. 1169 * removed.
1162 */ 1170 */
1163 void scsi_remove_target(struct device *dev) 1171 void scsi_remove_target(struct device *dev)
1164 { 1172 {
1165 struct Scsi_Host *shost = dev_to_shost(dev->parent); 1173 struct Scsi_Host *shost = dev_to_shost(dev->parent);
1166 struct scsi_target *starget, *last = NULL; 1174 struct scsi_target *starget, *last = NULL;
1167 unsigned long flags; 1175 unsigned long flags;
1168 1176
1169 /* remove targets being careful to lookup next entry before 1177 /* remove targets being careful to lookup next entry before
1170 * deleting the last 1178 * deleting the last
1171 */ 1179 */
1172 spin_lock_irqsave(shost->host_lock, flags); 1180 spin_lock_irqsave(shost->host_lock, flags);
1173 list_for_each_entry(starget, &shost->__targets, siblings) { 1181 list_for_each_entry(starget, &shost->__targets, siblings) {
1174 if (starget->state == STARGET_DEL) 1182 if (starget->state == STARGET_DEL)
1175 continue; 1183 continue;
1176 if (starget->dev.parent == dev || &starget->dev == dev) { 1184 if (starget->dev.parent == dev || &starget->dev == dev) {
1177 /* assuming new targets arrive at the end */ 1185 /* assuming new targets arrive at the end */
1178 kref_get(&starget->reap_ref); 1186 kref_get(&starget->reap_ref);
1179 spin_unlock_irqrestore(shost->host_lock, flags); 1187 spin_unlock_irqrestore(shost->host_lock, flags);
1180 if (last) 1188 if (last)
1181 scsi_target_reap(last); 1189 scsi_target_reap(last);
1182 last = starget; 1190 last = starget;
1183 __scsi_remove_target(starget); 1191 __scsi_remove_target(starget);
1184 spin_lock_irqsave(shost->host_lock, flags); 1192 spin_lock_irqsave(shost->host_lock, flags);
1185 } 1193 }
1186 } 1194 }
1187 spin_unlock_irqrestore(shost->host_lock, flags); 1195 spin_unlock_irqrestore(shost->host_lock, flags);
1188 1196
1189 if (last) 1197 if (last)
1190 scsi_target_reap(last); 1198 scsi_target_reap(last);
1191 } 1199 }
1192 EXPORT_SYMBOL(scsi_remove_target); 1200 EXPORT_SYMBOL(scsi_remove_target);
1193 1201
1194 int scsi_register_driver(struct device_driver *drv) 1202 int scsi_register_driver(struct device_driver *drv)
1195 { 1203 {
1196 drv->bus = &scsi_bus_type; 1204 drv->bus = &scsi_bus_type;
1197 1205
1198 return driver_register(drv); 1206 return driver_register(drv);
1199 } 1207 }
1200 EXPORT_SYMBOL(scsi_register_driver); 1208 EXPORT_SYMBOL(scsi_register_driver);
1201 1209
1202 int scsi_register_interface(struct class_interface *intf) 1210 int scsi_register_interface(struct class_interface *intf)
1203 { 1211 {
1204 intf->class = &sdev_class; 1212 intf->class = &sdev_class;
1205 1213
1206 return class_interface_register(intf); 1214 return class_interface_register(intf);
1207 } 1215 }
1208 EXPORT_SYMBOL(scsi_register_interface); 1216 EXPORT_SYMBOL(scsi_register_interface);
1209 1217
1210 /** 1218 /**
1211 * scsi_sysfs_add_host - add scsi host to subsystem 1219 * scsi_sysfs_add_host - add scsi host to subsystem
1212 * @shost: scsi host struct to add to subsystem 1220 * @shost: scsi host struct to add to subsystem
1213 **/ 1221 **/
1214 int scsi_sysfs_add_host(struct Scsi_Host *shost) 1222 int scsi_sysfs_add_host(struct Scsi_Host *shost)
1215 { 1223 {
1216 int error, i; 1224 int error, i;
1217 1225
1218 /* add host specific attributes */ 1226 /* add host specific attributes */
1219 if (shost->hostt->shost_attrs) { 1227 if (shost->hostt->shost_attrs) {
1220 for (i = 0; shost->hostt->shost_attrs[i]; i++) { 1228 for (i = 0; shost->hostt->shost_attrs[i]; i++) {
1221 error = device_create_file(&shost->shost_dev, 1229 error = device_create_file(&shost->shost_dev,
1222 shost->hostt->shost_attrs[i]); 1230 shost->hostt->shost_attrs[i]);
1223 if (error) 1231 if (error)
1224 return error; 1232 return error;
1225 } 1233 }
1226 } 1234 }
1227 1235
1228 transport_register_device(&shost->shost_gendev); 1236 transport_register_device(&shost->shost_gendev);
1229 transport_configure_device(&shost->shost_gendev); 1237 transport_configure_device(&shost->shost_gendev);
1230 return 0; 1238 return 0;
1231 } 1239 }
1232 1240
1233 static struct device_type scsi_dev_type = { 1241 static struct device_type scsi_dev_type = {
1234 .name = "scsi_device", 1242 .name = "scsi_device",
1235 .release = scsi_device_dev_release, 1243 .release = scsi_device_dev_release,
1236 .groups = scsi_sdev_attr_groups, 1244 .groups = scsi_sdev_attr_groups,
1237 }; 1245 };
1238 1246
1239 void scsi_sysfs_device_initialize(struct scsi_device *sdev) 1247 void scsi_sysfs_device_initialize(struct scsi_device *sdev)
1240 { 1248 {
1241 unsigned long flags; 1249 unsigned long flags;
1242 struct Scsi_Host *shost = sdev->host; 1250 struct Scsi_Host *shost = sdev->host;
1243 struct scsi_target *starget = sdev->sdev_target; 1251 struct scsi_target *starget = sdev->sdev_target;
1244 1252
1245 device_initialize(&sdev->sdev_gendev); 1253 device_initialize(&sdev->sdev_gendev);
1246 sdev->sdev_gendev.bus = &scsi_bus_type; 1254 sdev->sdev_gendev.bus = &scsi_bus_type;
1247 sdev->sdev_gendev.type = &scsi_dev_type; 1255 sdev->sdev_gendev.type = &scsi_dev_type;
1248 dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%llu", 1256 dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%llu",
1249 sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); 1257 sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
1250 1258
1251 device_initialize(&sdev->sdev_dev); 1259 device_initialize(&sdev->sdev_dev);
1252 sdev->sdev_dev.parent = get_device(&sdev->sdev_gendev); 1260 sdev->sdev_dev.parent = get_device(&sdev->sdev_gendev);
1253 sdev->sdev_dev.class = &sdev_class; 1261 sdev->sdev_dev.class = &sdev_class;
1254 dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%llu", 1262 dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%llu",
1255 sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); 1263 sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
1256 sdev->scsi_level = starget->scsi_level; 1264 sdev->scsi_level = starget->scsi_level;
1257 transport_setup_device(&sdev->sdev_gendev); 1265 transport_setup_device(&sdev->sdev_gendev);
1258 spin_lock_irqsave(shost->host_lock, flags); 1266 spin_lock_irqsave(shost->host_lock, flags);
1259 list_add_tail(&sdev->same_target_siblings, &starget->devices); 1267 list_add_tail(&sdev->same_target_siblings, &starget->devices);
1260 list_add_tail(&sdev->siblings, &shost->__devices); 1268 list_add_tail(&sdev->siblings, &shost->__devices);
1261 spin_unlock_irqrestore(shost->host_lock, flags); 1269 spin_unlock_irqrestore(shost->host_lock, flags);
1262 /* 1270 /*
1263 * device can now only be removed via __scsi_remove_device() so hold 1271 * device can now only be removed via __scsi_remove_device() so hold
1264 * the target. Target will be held in CREATED state until something 1272 * the target. Target will be held in CREATED state until something
1265 * beneath it becomes visible (in which case it moves to RUNNING) 1273 * beneath it becomes visible (in which case it moves to RUNNING)
1266 */ 1274 */
1267 kref_get(&starget->reap_ref); 1275 kref_get(&starget->reap_ref);
1268 } 1276 }
1269 1277
1270 int scsi_is_sdev_device(const struct device *dev) 1278 int scsi_is_sdev_device(const struct device *dev)
1271 { 1279 {
1272 return dev->type == &scsi_dev_type; 1280 return dev->type == &scsi_dev_type;
1273 } 1281 }
1274 EXPORT_SYMBOL(scsi_is_sdev_device); 1282 EXPORT_SYMBOL(scsi_is_sdev_device);
1275 1283
1276 /* A blank transport template that is used in drivers that don't 1284 /* A blank transport template that is used in drivers that don't
1277 * yet implement Transport Attributes */ 1285 * yet implement Transport Attributes */
1278 struct scsi_transport_template blank_transport_template = { { { {NULL, }, }, }, }; 1286 struct scsi_transport_template blank_transport_template = { { { {NULL, }, }, }, };
include/scsi/scsi_device.h
1 #ifndef _SCSI_SCSI_DEVICE_H 1 #ifndef _SCSI_SCSI_DEVICE_H
2 #define _SCSI_SCSI_DEVICE_H 2 #define _SCSI_SCSI_DEVICE_H
3 3
4 #include <linux/list.h> 4 #include <linux/list.h>
5 #include <linux/spinlock.h> 5 #include <linux/spinlock.h>
6 #include <linux/workqueue.h> 6 #include <linux/workqueue.h>
7 #include <linux/blkdev.h> 7 #include <linux/blkdev.h>
8 #include <scsi/scsi.h> 8 #include <scsi/scsi.h>
9 #include <linux/atomic.h> 9 #include <linux/atomic.h>
10 10
11 struct device; 11 struct device;
12 struct request_queue; 12 struct request_queue;
13 struct scsi_cmnd; 13 struct scsi_cmnd;
14 struct scsi_lun; 14 struct scsi_lun;
15 struct scsi_sense_hdr; 15 struct scsi_sense_hdr;
16 16
17 struct scsi_mode_data { 17 struct scsi_mode_data {
18 __u32 length; 18 __u32 length;
19 __u16 block_descriptor_length; 19 __u16 block_descriptor_length;
20 __u8 medium_type; 20 __u8 medium_type;
21 __u8 device_specific; 21 __u8 device_specific;
22 __u8 header_length; 22 __u8 header_length;
23 __u8 longlba:1; 23 __u8 longlba:1;
24 }; 24 };
25 25
26 /* 26 /*
27 * sdev state: If you alter this, you also need to alter scsi_sysfs.c 27 * sdev state: If you alter this, you also need to alter scsi_sysfs.c
28 * (for the ascii descriptions) and the state model enforcer: 28 * (for the ascii descriptions) and the state model enforcer:
29 * scsi_lib:scsi_device_set_state(). 29 * scsi_lib:scsi_device_set_state().
30 */ 30 */
31 enum scsi_device_state { 31 enum scsi_device_state {
32 SDEV_CREATED = 1, /* device created but not added to sysfs 32 SDEV_CREATED = 1, /* device created but not added to sysfs
33 * Only internal commands allowed (for inq) */ 33 * Only internal commands allowed (for inq) */
34 SDEV_RUNNING, /* device properly configured 34 SDEV_RUNNING, /* device properly configured
35 * All commands allowed */ 35 * All commands allowed */
36 SDEV_CANCEL, /* beginning to delete device 36 SDEV_CANCEL, /* beginning to delete device
37 * Only error handler commands allowed */ 37 * Only error handler commands allowed */
38 SDEV_DEL, /* device deleted 38 SDEV_DEL, /* device deleted
39 * no commands allowed */ 39 * no commands allowed */
40 SDEV_QUIESCE, /* Device quiescent. No block commands 40 SDEV_QUIESCE, /* Device quiescent. No block commands
41 * will be accepted, only specials (which 41 * will be accepted, only specials (which
42 * originate in the mid-layer) */ 42 * originate in the mid-layer) */
43 SDEV_OFFLINE, /* Device offlined (by error handling or 43 SDEV_OFFLINE, /* Device offlined (by error handling or
44 * user request */ 44 * user request */
45 SDEV_TRANSPORT_OFFLINE, /* Offlined by transport class error handler */ 45 SDEV_TRANSPORT_OFFLINE, /* Offlined by transport class error handler */
46 SDEV_BLOCK, /* Device blocked by scsi lld. No 46 SDEV_BLOCK, /* Device blocked by scsi lld. No
47 * scsi commands from user or midlayer 47 * scsi commands from user or midlayer
48 * should be issued to the scsi 48 * should be issued to the scsi
49 * lld. */ 49 * lld. */
50 SDEV_CREATED_BLOCK, /* same as above but for created devices */ 50 SDEV_CREATED_BLOCK, /* same as above but for created devices */
51 }; 51 };
52 52
53 enum scsi_device_event { 53 enum scsi_device_event {
54 SDEV_EVT_MEDIA_CHANGE = 1, /* media has changed */ 54 SDEV_EVT_MEDIA_CHANGE = 1, /* media has changed */
55 SDEV_EVT_INQUIRY_CHANGE_REPORTED, /* 3F 03 UA reported */ 55 SDEV_EVT_INQUIRY_CHANGE_REPORTED, /* 3F 03 UA reported */
56 SDEV_EVT_CAPACITY_CHANGE_REPORTED, /* 2A 09 UA reported */ 56 SDEV_EVT_CAPACITY_CHANGE_REPORTED, /* 2A 09 UA reported */
57 SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED, /* 38 07 UA reported */ 57 SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED, /* 38 07 UA reported */
58 SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED, /* 2A 01 UA reported */ 58 SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED, /* 2A 01 UA reported */
59 SDEV_EVT_LUN_CHANGE_REPORTED, /* 3F 0E UA reported */ 59 SDEV_EVT_LUN_CHANGE_REPORTED, /* 3F 0E UA reported */
60 60
61 SDEV_EVT_FIRST = SDEV_EVT_MEDIA_CHANGE, 61 SDEV_EVT_FIRST = SDEV_EVT_MEDIA_CHANGE,
62 SDEV_EVT_LAST = SDEV_EVT_LUN_CHANGE_REPORTED, 62 SDEV_EVT_LAST = SDEV_EVT_LUN_CHANGE_REPORTED,
63 63
64 SDEV_EVT_MAXBITS = SDEV_EVT_LAST + 1 64 SDEV_EVT_MAXBITS = SDEV_EVT_LAST + 1
65 }; 65 };
66 66
67 struct scsi_event { 67 struct scsi_event {
68 enum scsi_device_event evt_type; 68 enum scsi_device_event evt_type;
69 struct list_head node; 69 struct list_head node;
70 70
71 /* put union of data structures, for non-simple event types, 71 /* put union of data structures, for non-simple event types,
72 * here 72 * here
73 */ 73 */
74 }; 74 };
75 75
76 struct scsi_device { 76 struct scsi_device {
77 struct Scsi_Host *host; 77 struct Scsi_Host *host;
78 struct request_queue *request_queue; 78 struct request_queue *request_queue;
79 79
80 /* the next two are protected by the host->host_lock */ 80 /* the next two are protected by the host->host_lock */
81 struct list_head siblings; /* list of all devices on this host */ 81 struct list_head siblings; /* list of all devices on this host */
82 struct list_head same_target_siblings; /* just the devices sharing same target id */ 82 struct list_head same_target_siblings; /* just the devices sharing same target id */
83 83
84 atomic_t device_busy; /* commands actually active on LLDD */ 84 atomic_t device_busy; /* commands actually active on LLDD */
85 atomic_t device_blocked; /* Device returned QUEUE_FULL. */
86
85 spinlock_t list_lock; 87 spinlock_t list_lock;
86 struct list_head cmd_list; /* queue of in use SCSI Command structures */ 88 struct list_head cmd_list; /* queue of in use SCSI Command structures */
87 struct list_head starved_entry; 89 struct list_head starved_entry;
88 struct scsi_cmnd *current_cmnd; /* currently active command */ 90 struct scsi_cmnd *current_cmnd; /* currently active command */
89 unsigned short queue_depth; /* How deep of a queue we want */ 91 unsigned short queue_depth; /* How deep of a queue we want */
90 unsigned short max_queue_depth; /* max queue depth */ 92 unsigned short max_queue_depth; /* max queue depth */
91 unsigned short last_queue_full_depth; /* These two are used by */ 93 unsigned short last_queue_full_depth; /* These two are used by */
92 unsigned short last_queue_full_count; /* scsi_track_queue_full() */ 94 unsigned short last_queue_full_count; /* scsi_track_queue_full() */
93 unsigned long last_queue_full_time; /* last queue full time */ 95 unsigned long last_queue_full_time; /* last queue full time */
94 unsigned long queue_ramp_up_period; /* ramp up period in jiffies */ 96 unsigned long queue_ramp_up_period; /* ramp up period in jiffies */
95 #define SCSI_DEFAULT_RAMP_UP_PERIOD (120 * HZ) 97 #define SCSI_DEFAULT_RAMP_UP_PERIOD (120 * HZ)
96 98
97 unsigned long last_queue_ramp_up; /* last queue ramp up time */ 99 unsigned long last_queue_ramp_up; /* last queue ramp up time */
98 100
99 unsigned int id, channel; 101 unsigned int id, channel;
100 u64 lun; 102 u64 lun;
101 unsigned int manufacturer; /* Manufacturer of device, for using 103 unsigned int manufacturer; /* Manufacturer of device, for using
102 * vendor-specific cmd's */ 104 * vendor-specific cmd's */
103 unsigned sector_size; /* size in bytes */ 105 unsigned sector_size; /* size in bytes */
104 106
105 void *hostdata; /* available to low-level driver */ 107 void *hostdata; /* available to low-level driver */
106 char type; 108 char type;
107 char scsi_level; 109 char scsi_level;
108 char inq_periph_qual; /* PQ from INQUIRY data */ 110 char inq_periph_qual; /* PQ from INQUIRY data */
109 unsigned char inquiry_len; /* valid bytes in 'inquiry' */ 111 unsigned char inquiry_len; /* valid bytes in 'inquiry' */
110 unsigned char * inquiry; /* INQUIRY response data */ 112 unsigned char * inquiry; /* INQUIRY response data */
111 const char * vendor; /* [back_compat] point into 'inquiry' ... */ 113 const char * vendor; /* [back_compat] point into 'inquiry' ... */
112 const char * model; /* ... after scan; point to static string */ 114 const char * model; /* ... after scan; point to static string */
113 const char * rev; /* ... "nullnullnullnull" before scan */ 115 const char * rev; /* ... "nullnullnullnull" before scan */
114 116
115 #define SCSI_VPD_PG_LEN 255 117 #define SCSI_VPD_PG_LEN 255
116 int vpd_pg83_len; 118 int vpd_pg83_len;
117 unsigned char *vpd_pg83; 119 unsigned char *vpd_pg83;
118 int vpd_pg80_len; 120 int vpd_pg80_len;
119 unsigned char *vpd_pg80; 121 unsigned char *vpd_pg80;
120 unsigned char current_tag; /* current tag */ 122 unsigned char current_tag; /* current tag */
121 struct scsi_target *sdev_target; /* used only for single_lun */ 123 struct scsi_target *sdev_target; /* used only for single_lun */
122 124
123 unsigned int sdev_bflags; /* black/white flags as also found in 125 unsigned int sdev_bflags; /* black/white flags as also found in
124 * scsi_devinfo.[hc]. For now used only to 126 * scsi_devinfo.[hc]. For now used only to
125 * pass settings from slave_alloc to scsi 127 * pass settings from slave_alloc to scsi
126 * core. */ 128 * core. */
127 unsigned int eh_timeout; /* Error handling timeout */ 129 unsigned int eh_timeout; /* Error handling timeout */
128 unsigned writeable:1; 130 unsigned writeable:1;
129 unsigned removable:1; 131 unsigned removable:1;
130 unsigned changed:1; /* Data invalid due to media change */ 132 unsigned changed:1; /* Data invalid due to media change */
131 unsigned busy:1; /* Used to prevent races */ 133 unsigned busy:1; /* Used to prevent races */
132 unsigned lockable:1; /* Able to prevent media removal */ 134 unsigned lockable:1; /* Able to prevent media removal */
133 unsigned locked:1; /* Media removal disabled */ 135 unsigned locked:1; /* Media removal disabled */
134 unsigned borken:1; /* Tell the Seagate driver to be 136 unsigned borken:1; /* Tell the Seagate driver to be
135 * painfully slow on this device */ 137 * painfully slow on this device */
136 unsigned disconnect:1; /* can disconnect */ 138 unsigned disconnect:1; /* can disconnect */
137 unsigned soft_reset:1; /* Uses soft reset option */ 139 unsigned soft_reset:1; /* Uses soft reset option */
138 unsigned sdtr:1; /* Device supports SDTR messages */ 140 unsigned sdtr:1; /* Device supports SDTR messages */
139 unsigned wdtr:1; /* Device supports WDTR messages */ 141 unsigned wdtr:1; /* Device supports WDTR messages */
140 unsigned ppr:1; /* Device supports PPR messages */ 142 unsigned ppr:1; /* Device supports PPR messages */
141 unsigned tagged_supported:1; /* Supports SCSI-II tagged queuing */ 143 unsigned tagged_supported:1; /* Supports SCSI-II tagged queuing */
142 unsigned simple_tags:1; /* simple queue tag messages are enabled */ 144 unsigned simple_tags:1; /* simple queue tag messages are enabled */
143 unsigned ordered_tags:1;/* ordered queue tag messages are enabled */ 145 unsigned ordered_tags:1;/* ordered queue tag messages are enabled */
144 unsigned was_reset:1; /* There was a bus reset on the bus for 146 unsigned was_reset:1; /* There was a bus reset on the bus for
145 * this device */ 147 * this device */
146 unsigned expecting_cc_ua:1; /* Expecting a CHECK_CONDITION/UNIT_ATTN 148 unsigned expecting_cc_ua:1; /* Expecting a CHECK_CONDITION/UNIT_ATTN
147 * because we did a bus reset. */ 149 * because we did a bus reset. */
148 unsigned use_10_for_rw:1; /* first try 10-byte read / write */ 150 unsigned use_10_for_rw:1; /* first try 10-byte read / write */
149 unsigned use_10_for_ms:1; /* first try 10-byte mode sense/select */ 151 unsigned use_10_for_ms:1; /* first try 10-byte mode sense/select */
150 unsigned no_report_opcodes:1; /* no REPORT SUPPORTED OPERATION CODES */ 152 unsigned no_report_opcodes:1; /* no REPORT SUPPORTED OPERATION CODES */
151 unsigned no_write_same:1; /* no WRITE SAME command */ 153 unsigned no_write_same:1; /* no WRITE SAME command */
152 unsigned use_16_for_rw:1; /* Use read/write(16) over read/write(10) */ 154 unsigned use_16_for_rw:1; /* Use read/write(16) over read/write(10) */
153 unsigned skip_ms_page_8:1; /* do not use MODE SENSE page 0x08 */ 155 unsigned skip_ms_page_8:1; /* do not use MODE SENSE page 0x08 */
154 unsigned skip_ms_page_3f:1; /* do not use MODE SENSE page 0x3f */ 156 unsigned skip_ms_page_3f:1; /* do not use MODE SENSE page 0x3f */
155 unsigned skip_vpd_pages:1; /* do not read VPD pages */ 157 unsigned skip_vpd_pages:1; /* do not read VPD pages */
156 unsigned use_192_bytes_for_3f:1; /* ask for 192 bytes from page 0x3f */ 158 unsigned use_192_bytes_for_3f:1; /* ask for 192 bytes from page 0x3f */
157 unsigned no_start_on_add:1; /* do not issue start on add */ 159 unsigned no_start_on_add:1; /* do not issue start on add */
158 unsigned allow_restart:1; /* issue START_UNIT in error handler */ 160 unsigned allow_restart:1; /* issue START_UNIT in error handler */
159 unsigned manage_start_stop:1; /* Let HLD (sd) manage start/stop */ 161 unsigned manage_start_stop:1; /* Let HLD (sd) manage start/stop */
160 unsigned start_stop_pwr_cond:1; /* Set power cond. in START_STOP_UNIT */ 162 unsigned start_stop_pwr_cond:1; /* Set power cond. in START_STOP_UNIT */
161 unsigned no_uld_attach:1; /* disable connecting to upper level drivers */ 163 unsigned no_uld_attach:1; /* disable connecting to upper level drivers */
162 unsigned select_no_atn:1; 164 unsigned select_no_atn:1;
163 unsigned fix_capacity:1; /* READ_CAPACITY is too high by 1 */ 165 unsigned fix_capacity:1; /* READ_CAPACITY is too high by 1 */
164 unsigned guess_capacity:1; /* READ_CAPACITY might be too high by 1 */ 166 unsigned guess_capacity:1; /* READ_CAPACITY might be too high by 1 */
165 unsigned retry_hwerror:1; /* Retry HARDWARE_ERROR */ 167 unsigned retry_hwerror:1; /* Retry HARDWARE_ERROR */
166 unsigned last_sector_bug:1; /* do not use multisector accesses on 168 unsigned last_sector_bug:1; /* do not use multisector accesses on
167 SD_LAST_BUGGY_SECTORS */ 169 SD_LAST_BUGGY_SECTORS */
168 unsigned no_read_disc_info:1; /* Avoid READ_DISC_INFO cmds */ 170 unsigned no_read_disc_info:1; /* Avoid READ_DISC_INFO cmds */
169 unsigned no_read_capacity_16:1; /* Avoid READ_CAPACITY_16 cmds */ 171 unsigned no_read_capacity_16:1; /* Avoid READ_CAPACITY_16 cmds */
170 unsigned try_rc_10_first:1; /* Try READ_CAPACITY_10 first */ 172 unsigned try_rc_10_first:1; /* Try READ_CAPACITY_10 first */
171 unsigned is_visible:1; /* is the device visible in sysfs */ 173 unsigned is_visible:1; /* is the device visible in sysfs */
172 unsigned wce_default_on:1; /* Cache is ON by default */ 174 unsigned wce_default_on:1; /* Cache is ON by default */
173 unsigned no_dif:1; /* T10 PI (DIF) should be disabled */ 175 unsigned no_dif:1; /* T10 PI (DIF) should be disabled */
174 unsigned broken_fua:1; /* Don't set FUA bit */ 176 unsigned broken_fua:1; /* Don't set FUA bit */
175 177
176 atomic_t disk_events_disable_depth; /* disable depth for disk events */ 178 atomic_t disk_events_disable_depth; /* disable depth for disk events */
177 179
178 DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */ 180 DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */
179 DECLARE_BITMAP(pending_events, SDEV_EVT_MAXBITS); /* pending events */ 181 DECLARE_BITMAP(pending_events, SDEV_EVT_MAXBITS); /* pending events */
180 struct list_head event_list; /* asserted events */ 182 struct list_head event_list; /* asserted events */
181 struct work_struct event_work; 183 struct work_struct event_work;
182 184
183 unsigned int device_blocked; /* Device returned QUEUE_FULL. */
184
185 unsigned int max_device_blocked; /* what device_blocked counts down from */ 185 unsigned int max_device_blocked; /* what device_blocked counts down from */
186 #define SCSI_DEFAULT_DEVICE_BLOCKED 3 186 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
187 187
188 atomic_t iorequest_cnt; 188 atomic_t iorequest_cnt;
189 atomic_t iodone_cnt; 189 atomic_t iodone_cnt;
190 atomic_t ioerr_cnt; 190 atomic_t ioerr_cnt;
191 191
192 struct device sdev_gendev, 192 struct device sdev_gendev,
193 sdev_dev; 193 sdev_dev;
194 194
195 struct execute_work ew; /* used to get process context on put */ 195 struct execute_work ew; /* used to get process context on put */
196 struct work_struct requeue_work; 196 struct work_struct requeue_work;
197 197
198 struct scsi_dh_data *scsi_dh_data; 198 struct scsi_dh_data *scsi_dh_data;
199 enum scsi_device_state sdev_state; 199 enum scsi_device_state sdev_state;
200 unsigned long sdev_data[0]; 200 unsigned long sdev_data[0];
201 } __attribute__((aligned(sizeof(unsigned long)))); 201 } __attribute__((aligned(sizeof(unsigned long))));
202 202
203 struct scsi_dh_devlist { 203 struct scsi_dh_devlist {
204 char *vendor; 204 char *vendor;
205 char *model; 205 char *model;
206 }; 206 };
207 207
208 typedef void (*activate_complete)(void *, int); 208 typedef void (*activate_complete)(void *, int);
209 struct scsi_device_handler { 209 struct scsi_device_handler {
210 /* Used by the infrastructure */ 210 /* Used by the infrastructure */
211 struct list_head list; /* list of scsi_device_handlers */ 211 struct list_head list; /* list of scsi_device_handlers */
212 212
213 /* Filled by the hardware handler */ 213 /* Filled by the hardware handler */
214 struct module *module; 214 struct module *module;
215 const char *name; 215 const char *name;
216 const struct scsi_dh_devlist *devlist; 216 const struct scsi_dh_devlist *devlist;
217 int (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *); 217 int (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *);
218 int (*attach)(struct scsi_device *); 218 int (*attach)(struct scsi_device *);
219 void (*detach)(struct scsi_device *); 219 void (*detach)(struct scsi_device *);
220 int (*activate)(struct scsi_device *, activate_complete, void *); 220 int (*activate)(struct scsi_device *, activate_complete, void *);
221 int (*prep_fn)(struct scsi_device *, struct request *); 221 int (*prep_fn)(struct scsi_device *, struct request *);
222 int (*set_params)(struct scsi_device *, const char *); 222 int (*set_params)(struct scsi_device *, const char *);
223 bool (*match)(struct scsi_device *); 223 bool (*match)(struct scsi_device *);
224 }; 224 };
225 225
226 struct scsi_dh_data { 226 struct scsi_dh_data {
227 struct scsi_device_handler *scsi_dh; 227 struct scsi_device_handler *scsi_dh;
228 struct scsi_device *sdev; 228 struct scsi_device *sdev;
229 struct kref kref; 229 struct kref kref;
230 char buf[0]; 230 char buf[0];
231 }; 231 };
232 232
233 #define to_scsi_device(d) \ 233 #define to_scsi_device(d) \
234 container_of(d, struct scsi_device, sdev_gendev) 234 container_of(d, struct scsi_device, sdev_gendev)
235 #define class_to_sdev(d) \ 235 #define class_to_sdev(d) \
236 container_of(d, struct scsi_device, sdev_dev) 236 container_of(d, struct scsi_device, sdev_dev)
237 #define transport_class_to_sdev(class_dev) \ 237 #define transport_class_to_sdev(class_dev) \
238 to_scsi_device(class_dev->parent) 238 to_scsi_device(class_dev->parent)
239 239
240 #define sdev_printk(prefix, sdev, fmt, a...) \ 240 #define sdev_printk(prefix, sdev, fmt, a...) \
241 dev_printk(prefix, &(sdev)->sdev_gendev, fmt, ##a) 241 dev_printk(prefix, &(sdev)->sdev_gendev, fmt, ##a)
242 242
243 #define sdev_dbg(sdev, fmt, a...) \ 243 #define sdev_dbg(sdev, fmt, a...) \
244 dev_dbg(&(sdev)->sdev_gendev, fmt, ##a) 244 dev_dbg(&(sdev)->sdev_gendev, fmt, ##a)
245 245
246 #define scmd_printk(prefix, scmd, fmt, a...) \ 246 #define scmd_printk(prefix, scmd, fmt, a...) \
247 (scmd)->request->rq_disk ? \ 247 (scmd)->request->rq_disk ? \
248 sdev_printk(prefix, (scmd)->device, "[%s] " fmt, \ 248 sdev_printk(prefix, (scmd)->device, "[%s] " fmt, \
249 (scmd)->request->rq_disk->disk_name, ##a) : \ 249 (scmd)->request->rq_disk->disk_name, ##a) : \
250 sdev_printk(prefix, (scmd)->device, fmt, ##a) 250 sdev_printk(prefix, (scmd)->device, fmt, ##a)
251 251
252 #define scmd_dbg(scmd, fmt, a...) \ 252 #define scmd_dbg(scmd, fmt, a...) \
253 do { \ 253 do { \
254 if ((scmd)->request->rq_disk) \ 254 if ((scmd)->request->rq_disk) \
255 sdev_dbg((scmd)->device, "[%s] " fmt, \ 255 sdev_dbg((scmd)->device, "[%s] " fmt, \
256 (scmd)->request->rq_disk->disk_name, ##a);\ 256 (scmd)->request->rq_disk->disk_name, ##a);\
257 else \ 257 else \
258 sdev_dbg((scmd)->device, fmt, ##a); \ 258 sdev_dbg((scmd)->device, fmt, ##a); \
259 } while (0) 259 } while (0)
260 260
261 enum scsi_target_state { 261 enum scsi_target_state {
262 STARGET_CREATED = 1, 262 STARGET_CREATED = 1,
263 STARGET_RUNNING, 263 STARGET_RUNNING,
264 STARGET_DEL, 264 STARGET_DEL,
265 }; 265 };
266 266
267 /* 267 /*
268 * scsi_target: representation of a scsi target, for now, this is only 268 * scsi_target: representation of a scsi target, for now, this is only
269 * used for single_lun devices. If no one has active IO to the target, 269 * used for single_lun devices. If no one has active IO to the target,
270 * starget_sdev_user is NULL, else it points to the active sdev. 270 * starget_sdev_user is NULL, else it points to the active sdev.
271 */ 271 */
272 struct scsi_target { 272 struct scsi_target {
273 struct scsi_device *starget_sdev_user; 273 struct scsi_device *starget_sdev_user;
274 struct list_head siblings; 274 struct list_head siblings;
275 struct list_head devices; 275 struct list_head devices;
276 struct device dev; 276 struct device dev;
277 struct kref reap_ref; /* last put renders target invisible */ 277 struct kref reap_ref; /* last put renders target invisible */
278 unsigned int channel; 278 unsigned int channel;
279 unsigned int id; /* target id ... replace 279 unsigned int id; /* target id ... replace
280 * scsi_device.id eventually */ 280 * scsi_device.id eventually */
281 unsigned int create:1; /* signal that it needs to be added */ 281 unsigned int create:1; /* signal that it needs to be added */
282 unsigned int single_lun:1; /* Indicates we should only 282 unsigned int single_lun:1; /* Indicates we should only
283 * allow I/O to one of the luns 283 * allow I/O to one of the luns
284 * for the device at a time. */ 284 * for the device at a time. */
285 unsigned int pdt_1f_for_no_lun:1; /* PDT = 0x1f 285 unsigned int pdt_1f_for_no_lun:1; /* PDT = 0x1f
286 * means no lun present. */ 286 * means no lun present. */
287 unsigned int no_report_luns:1; /* Don't use 287 unsigned int no_report_luns:1; /* Don't use
288 * REPORT LUNS for scanning. */ 288 * REPORT LUNS for scanning. */
289 unsigned int expecting_lun_change:1; /* A device has reported 289 unsigned int expecting_lun_change:1; /* A device has reported
290 * a 3F/0E UA, other devices on 290 * a 3F/0E UA, other devices on
291 * the same target will also. */ 291 * the same target will also. */
292 /* commands actually active on LLD. */ 292 /* commands actually active on LLD. */
293 atomic_t target_busy; 293 atomic_t target_busy;
294 atomic_t target_blocked;
295
294 /* 296 /*
295 * LLDs should set this in the slave_alloc host template callout. 297 * LLDs should set this in the slave_alloc host template callout.
296 * If set to zero then there is no limit. 298 * If set to zero then there is no limit.
297 */ 299 */
298 unsigned int can_queue; 300 unsigned int can_queue;
299 unsigned int target_blocked;
300 unsigned int max_target_blocked; 301 unsigned int max_target_blocked;
301 #define SCSI_DEFAULT_TARGET_BLOCKED 3 302 #define SCSI_DEFAULT_TARGET_BLOCKED 3
302 303
303 char scsi_level; 304 char scsi_level;
304 enum scsi_target_state state; 305 enum scsi_target_state state;
305 void *hostdata; /* available to low-level driver */ 306 void *hostdata; /* available to low-level driver */
306 unsigned long starget_data[0]; /* for the transport */ 307 unsigned long starget_data[0]; /* for the transport */
307 /* starget_data must be the last element!!!! */ 308 /* starget_data must be the last element!!!! */
308 } __attribute__((aligned(sizeof(unsigned long)))); 309 } __attribute__((aligned(sizeof(unsigned long))));
309 310
310 #define to_scsi_target(d) container_of(d, struct scsi_target, dev) 311 #define to_scsi_target(d) container_of(d, struct scsi_target, dev)
311 static inline struct scsi_target *scsi_target(struct scsi_device *sdev) 312 static inline struct scsi_target *scsi_target(struct scsi_device *sdev)
312 { 313 {
313 return to_scsi_target(sdev->sdev_gendev.parent); 314 return to_scsi_target(sdev->sdev_gendev.parent);
314 } 315 }
315 #define transport_class_to_starget(class_dev) \ 316 #define transport_class_to_starget(class_dev) \
316 to_scsi_target(class_dev->parent) 317 to_scsi_target(class_dev->parent)
317 318
318 #define starget_printk(prefix, starget, fmt, a...) \ 319 #define starget_printk(prefix, starget, fmt, a...) \
319 dev_printk(prefix, &(starget)->dev, fmt, ##a) 320 dev_printk(prefix, &(starget)->dev, fmt, ##a)
320 321
321 extern struct scsi_device *__scsi_add_device(struct Scsi_Host *, 322 extern struct scsi_device *__scsi_add_device(struct Scsi_Host *,
322 uint, uint, u64, void *hostdata); 323 uint, uint, u64, void *hostdata);
323 extern int scsi_add_device(struct Scsi_Host *host, uint channel, 324 extern int scsi_add_device(struct Scsi_Host *host, uint channel,
324 uint target, u64 lun); 325 uint target, u64 lun);
325 extern int scsi_register_device_handler(struct scsi_device_handler *scsi_dh); 326 extern int scsi_register_device_handler(struct scsi_device_handler *scsi_dh);
326 extern void scsi_remove_device(struct scsi_device *); 327 extern void scsi_remove_device(struct scsi_device *);
327 extern int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh); 328 extern int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh);
328 void scsi_attach_vpd(struct scsi_device *sdev); 329 void scsi_attach_vpd(struct scsi_device *sdev);
329 330
330 extern int scsi_device_get(struct scsi_device *); 331 extern int scsi_device_get(struct scsi_device *);
331 extern void scsi_device_put(struct scsi_device *); 332 extern void scsi_device_put(struct scsi_device *);
332 extern struct scsi_device *scsi_device_lookup(struct Scsi_Host *, 333 extern struct scsi_device *scsi_device_lookup(struct Scsi_Host *,
333 uint, uint, u64); 334 uint, uint, u64);
334 extern struct scsi_device *__scsi_device_lookup(struct Scsi_Host *, 335 extern struct scsi_device *__scsi_device_lookup(struct Scsi_Host *,
335 uint, uint, u64); 336 uint, uint, u64);
336 extern struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *, 337 extern struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *,
337 u64); 338 u64);
338 extern struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *, 339 extern struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *,
339 u64); 340 u64);
340 extern void starget_for_each_device(struct scsi_target *, void *, 341 extern void starget_for_each_device(struct scsi_target *, void *,
341 void (*fn)(struct scsi_device *, void *)); 342 void (*fn)(struct scsi_device *, void *));
342 extern void __starget_for_each_device(struct scsi_target *, void *, 343 extern void __starget_for_each_device(struct scsi_target *, void *,
343 void (*fn)(struct scsi_device *, 344 void (*fn)(struct scsi_device *,
344 void *)); 345 void *));
345 346
346 /* only exposed to implement shost_for_each_device */ 347 /* only exposed to implement shost_for_each_device */
347 extern struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *, 348 extern struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *,
348 struct scsi_device *); 349 struct scsi_device *);
349 350
350 /** 351 /**
351 * shost_for_each_device - iterate over all devices of a host 352 * shost_for_each_device - iterate over all devices of a host
352 * @sdev: the &struct scsi_device to use as a cursor 353 * @sdev: the &struct scsi_device to use as a cursor
353 * @shost: the &struct scsi_host to iterate over 354 * @shost: the &struct scsi_host to iterate over
354 * 355 *
355 * Iterator that returns each device attached to @shost. This loop 356 * Iterator that returns each device attached to @shost. This loop
356 * takes a reference on each device and releases it at the end. If 357 * takes a reference on each device and releases it at the end. If
357 * you break out of the loop, you must call scsi_device_put(sdev). 358 * you break out of the loop, you must call scsi_device_put(sdev).
358 */ 359 */
359 #define shost_for_each_device(sdev, shost) \ 360 #define shost_for_each_device(sdev, shost) \
360 for ((sdev) = __scsi_iterate_devices((shost), NULL); \ 361 for ((sdev) = __scsi_iterate_devices((shost), NULL); \
361 (sdev); \ 362 (sdev); \
362 (sdev) = __scsi_iterate_devices((shost), (sdev))) 363 (sdev) = __scsi_iterate_devices((shost), (sdev)))
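/*
 * Illustrative use (not part of this header): a hypothetical helper that
 * walks a host's devices with shost_for_each_device().  Breaking out of the
 * loop leaves the iterator's reference on the current sdev held, so it is
 * either handed to the caller (as here) or dropped with scsi_device_put(),
 * as noted in the kerneldoc above.
 */
static struct scsi_device *example_find_lun0(struct Scsi_Host *shost)
{
	struct scsi_device *sdev, *found = NULL;

	shost_for_each_device(sdev, shost) {
		if (sdev->lun == 0) {
			found = sdev;	/* keep the reference taken by the loop */
			break;
		}
	}
	return found;	/* caller must scsi_device_put() a non-NULL result */
}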
363 364
364 /** 365 /**
365 * __shost_for_each_device - iterate over all devices of a host (UNLOCKED) 366 * __shost_for_each_device - iterate over all devices of a host (UNLOCKED)
366 * @sdev: the &struct scsi_device to use as a cursor 367 * @sdev: the &struct scsi_device to use as a cursor
367 * @shost: the &struct scsi_host to iterate over 368 * @shost: the &struct scsi_host to iterate over
368 * 369 *
369 * Iterator that returns each device attached to @shost. It does _not_ 370 * Iterator that returns each device attached to @shost. It does _not_
370 * take a reference on the scsi_device, so the whole loop must be 371 * take a reference on the scsi_device, so the whole loop must be
371 * protected by shost->host_lock. 372 * protected by shost->host_lock.
372 * 373 *
373 * Note: The only reason to use this is because you need to access the 374 * Note: The only reason to use this is because you need to access the
374 * device list in interrupt context. Otherwise you really want to use 375 * device list in interrupt context. Otherwise you really want to use
375 * shost_for_each_device instead. 376 * shost_for_each_device instead.
376 */ 377 */
377 #define __shost_for_each_device(sdev, shost) \ 378 #define __shost_for_each_device(sdev, shost) \
378 list_for_each_entry((sdev), &((shost)->__devices), siblings) 379 list_for_each_entry((sdev), &((shost)->__devices), siblings)
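A hedged sketch of the unlocked variant in use; per the comment above it only makes sense with shost->host_lock held, e.g. when the list must be walked from interrupt context (the function name is hypothetical):

static int example_count_devices_atomic(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(shost->host_lock, flags);
	__shost_for_each_device(sdev, shost) {
		/* no reference is taken; the list is only stable while
		 * host_lock is held, so do not sleep or drop the lock here */
		count++;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);
	return count;
}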
379 380
380 extern void scsi_adjust_queue_depth(struct scsi_device *, int, int); 381 extern void scsi_adjust_queue_depth(struct scsi_device *, int, int);
381 extern int scsi_track_queue_full(struct scsi_device *, int); 382 extern int scsi_track_queue_full(struct scsi_device *, int);
382 383
383 extern int scsi_set_medium_removal(struct scsi_device *, char); 384 extern int scsi_set_medium_removal(struct scsi_device *, char);
384 385
385 extern int scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, 386 extern int scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
386 unsigned char *buffer, int len, int timeout, 387 unsigned char *buffer, int len, int timeout,
387 int retries, struct scsi_mode_data *data, 388 int retries, struct scsi_mode_data *data,
388 struct scsi_sense_hdr *); 389 struct scsi_sense_hdr *);
389 extern int scsi_mode_select(struct scsi_device *sdev, int pf, int sp, 390 extern int scsi_mode_select(struct scsi_device *sdev, int pf, int sp,
390 int modepage, unsigned char *buffer, int len, 391 int modepage, unsigned char *buffer, int len,
391 int timeout, int retries, 392 int timeout, int retries,
392 struct scsi_mode_data *data, 393 struct scsi_mode_data *data,
393 struct scsi_sense_hdr *); 394 struct scsi_sense_hdr *);
394 extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout, 395 extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout,
395 int retries, struct scsi_sense_hdr *sshdr); 396 int retries, struct scsi_sense_hdr *sshdr);
396 extern int scsi_get_vpd_page(struct scsi_device *, u8 page, unsigned char *buf, 397 extern int scsi_get_vpd_page(struct scsi_device *, u8 page, unsigned char *buf,
397 int buf_len); 398 int buf_len);
398 extern int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer, 399 extern int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
399 unsigned int len, unsigned char opcode); 400 unsigned int len, unsigned char opcode);
400 extern int scsi_device_set_state(struct scsi_device *sdev, 401 extern int scsi_device_set_state(struct scsi_device *sdev,
401 enum scsi_device_state state); 402 enum scsi_device_state state);
402 extern struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type, 403 extern struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
403 gfp_t gfpflags); 404 gfp_t gfpflags);
404 extern void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt); 405 extern void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt);
405 extern void sdev_evt_send_simple(struct scsi_device *sdev, 406 extern void sdev_evt_send_simple(struct scsi_device *sdev,
406 enum scsi_device_event evt_type, gfp_t gfpflags); 407 enum scsi_device_event evt_type, gfp_t gfpflags);
407 extern int scsi_device_quiesce(struct scsi_device *sdev); 408 extern int scsi_device_quiesce(struct scsi_device *sdev);
408 extern void scsi_device_resume(struct scsi_device *sdev); 409 extern void scsi_device_resume(struct scsi_device *sdev);
409 extern void scsi_target_quiesce(struct scsi_target *); 410 extern void scsi_target_quiesce(struct scsi_target *);
410 extern void scsi_target_resume(struct scsi_target *); 411 extern void scsi_target_resume(struct scsi_target *);
411 extern void scsi_scan_target(struct device *parent, unsigned int channel, 412 extern void scsi_scan_target(struct device *parent, unsigned int channel,
412 unsigned int id, u64 lun, int rescan); 413 unsigned int id, u64 lun, int rescan);
413 extern void scsi_target_reap(struct scsi_target *); 414 extern void scsi_target_reap(struct scsi_target *);
414 extern void scsi_target_block(struct device *); 415 extern void scsi_target_block(struct device *);
415 extern void scsi_target_unblock(struct device *, enum scsi_device_state); 416 extern void scsi_target_unblock(struct device *, enum scsi_device_state);
416 extern void scsi_remove_target(struct device *); 417 extern void scsi_remove_target(struct device *);
417 extern void int_to_scsilun(u64, struct scsi_lun *); 418 extern void int_to_scsilun(u64, struct scsi_lun *);
418 extern u64 scsilun_to_int(struct scsi_lun *); 419 extern u64 scsilun_to_int(struct scsi_lun *);
419 extern const char *scsi_device_state_name(enum scsi_device_state); 420 extern const char *scsi_device_state_name(enum scsi_device_state);
420 extern int scsi_is_sdev_device(const struct device *); 421 extern int scsi_is_sdev_device(const struct device *);
421 extern int scsi_is_target_device(const struct device *); 422 extern int scsi_is_target_device(const struct device *);
422 extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, 423 extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
423 int data_direction, void *buffer, unsigned bufflen, 424 int data_direction, void *buffer, unsigned bufflen,
424 unsigned char *sense, int timeout, int retries, 425 unsigned char *sense, int timeout, int retries,
425 u64 flags, int *resid); 426 u64 flags, int *resid);
426 extern int scsi_execute_req_flags(struct scsi_device *sdev, 427 extern int scsi_execute_req_flags(struct scsi_device *sdev,
427 const unsigned char *cmd, int data_direction, void *buffer, 428 const unsigned char *cmd, int data_direction, void *buffer,
428 unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout, 429 unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
429 int retries, int *resid, u64 flags); 430 int retries, int *resid, u64 flags);
430 static inline int scsi_execute_req(struct scsi_device *sdev, 431 static inline int scsi_execute_req(struct scsi_device *sdev,
431 const unsigned char *cmd, int data_direction, void *buffer, 432 const unsigned char *cmd, int data_direction, void *buffer,
432 unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout, 433 unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
433 int retries, int *resid) 434 int retries, int *resid)
434 { 435 {
435 return scsi_execute_req_flags(sdev, cmd, data_direction, buffer, 436 return scsi_execute_req_flags(sdev, cmd, data_direction, buffer,
436 bufflen, sshdr, timeout, retries, resid, 0); 437 bufflen, sshdr, timeout, retries, resid, 0);
437 } 438 }
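For illustration, a minimal sketch of the wrapper above in use, roughly the pattern behind helpers such as scsi_test_unit_ready(); the timeout and retry values are illustrative only:

static int example_test_unit_ready(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct scsi_sense_hdr sshdr;

	/* no data transfer, so DMA_NONE with a NULL buffer */
	return scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
				30 * HZ, 3, NULL);
}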
438 extern void sdev_disable_disk_events(struct scsi_device *sdev); 439 extern void sdev_disable_disk_events(struct scsi_device *sdev);
439 extern void sdev_enable_disk_events(struct scsi_device *sdev); 440 extern void sdev_enable_disk_events(struct scsi_device *sdev);
440 441
441 #ifdef CONFIG_PM_RUNTIME 442 #ifdef CONFIG_PM_RUNTIME
442 extern int scsi_autopm_get_device(struct scsi_device *); 443 extern int scsi_autopm_get_device(struct scsi_device *);
443 extern void scsi_autopm_put_device(struct scsi_device *); 444 extern void scsi_autopm_put_device(struct scsi_device *);
444 #else 445 #else
445 static inline int scsi_autopm_get_device(struct scsi_device *d) { return 0; } 446 static inline int scsi_autopm_get_device(struct scsi_device *d) { return 0; }
446 static inline void scsi_autopm_put_device(struct scsi_device *d) {} 447 static inline void scsi_autopm_put_device(struct scsi_device *d) {}
447 #endif /* CONFIG_PM_RUNTIME */ 448 #endif /* CONFIG_PM_RUNTIME */
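A hedged sketch of the usual pairing of these helpers around access to a possibly runtime-suspended device (the function name and error handling are illustrative):

static int example_touch_device(struct scsi_device *sdev)
{
	int err;

	err = scsi_autopm_get_device(sdev);	/* resume and pin the device */
	if (err)
		return err;

	/* ... issue commands while the device is guaranteed awake ... */

	scsi_autopm_put_device(sdev);		/* allow it to suspend again */
	return 0;
}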
448 449
449 static inline int __must_check scsi_device_reprobe(struct scsi_device *sdev) 450 static inline int __must_check scsi_device_reprobe(struct scsi_device *sdev)
450 { 451 {
451 return device_reprobe(&sdev->sdev_gendev); 452 return device_reprobe(&sdev->sdev_gendev);
452 } 453 }
453 454
454 static inline unsigned int sdev_channel(struct scsi_device *sdev) 455 static inline unsigned int sdev_channel(struct scsi_device *sdev)
455 { 456 {
456 return sdev->channel; 457 return sdev->channel;
457 } 458 }
458 459
459 static inline unsigned int sdev_id(struct scsi_device *sdev) 460 static inline unsigned int sdev_id(struct scsi_device *sdev)
460 { 461 {
461 return sdev->id; 462 return sdev->id;
462 } 463 }
463 464
464 #define scmd_id(scmd) sdev_id((scmd)->device) 465 #define scmd_id(scmd) sdev_id((scmd)->device)
465 #define scmd_channel(scmd) sdev_channel((scmd)->device) 466 #define scmd_channel(scmd) sdev_channel((scmd)->device)
466 467
467 /* 468 /*
468 * checks for positions of the SCSI state machine 469 * checks for positions of the SCSI state machine
469 */ 470 */
470 static inline int scsi_device_online(struct scsi_device *sdev) 471 static inline int scsi_device_online(struct scsi_device *sdev)
471 { 472 {
472 return (sdev->sdev_state != SDEV_OFFLINE && 473 return (sdev->sdev_state != SDEV_OFFLINE &&
473 sdev->sdev_state != SDEV_TRANSPORT_OFFLINE && 474 sdev->sdev_state != SDEV_TRANSPORT_OFFLINE &&
474 sdev->sdev_state != SDEV_DEL); 475 sdev->sdev_state != SDEV_DEL);
475 } 476 }
476 static inline int scsi_device_blocked(struct scsi_device *sdev) 477 static inline int scsi_device_blocked(struct scsi_device *sdev)
477 { 478 {
478 return sdev->sdev_state == SDEV_BLOCK || 479 return sdev->sdev_state == SDEV_BLOCK ||
479 sdev->sdev_state == SDEV_CREATED_BLOCK; 480 sdev->sdev_state == SDEV_CREATED_BLOCK;
480 } 481 }
481 static inline int scsi_device_created(struct scsi_device *sdev) 482 static inline int scsi_device_created(struct scsi_device *sdev)
482 { 483 {
483 return sdev->sdev_state == SDEV_CREATED || 484 return sdev->sdev_state == SDEV_CREATED ||
484 sdev->sdev_state == SDEV_CREATED_BLOCK; 485 sdev->sdev_state == SDEV_CREATED_BLOCK;
485 } 486 }
486 487
487 /* accessor functions for the SCSI parameters */ 488 /* accessor functions for the SCSI parameters */
488 static inline int scsi_device_sync(struct scsi_device *sdev) 489 static inline int scsi_device_sync(struct scsi_device *sdev)
489 { 490 {
490 return sdev->sdtr; 491 return sdev->sdtr;
491 } 492 }
492 static inline int scsi_device_wide(struct scsi_device *sdev) 493 static inline int scsi_device_wide(struct scsi_device *sdev)
493 { 494 {
494 return sdev->wdtr; 495 return sdev->wdtr;
495 } 496 }
496 static inline int scsi_device_dt(struct scsi_device *sdev) 497 static inline int scsi_device_dt(struct scsi_device *sdev)
497 { 498 {
498 return sdev->ppr; 499 return sdev->ppr;
499 } 500 }
500 static inline int scsi_device_dt_only(struct scsi_device *sdev) 501 static inline int scsi_device_dt_only(struct scsi_device *sdev)
501 { 502 {
502 if (sdev->inquiry_len < 57) 503 if (sdev->inquiry_len < 57)
503 return 0; 504 return 0;
504 return (sdev->inquiry[56] & 0x0c) == 0x04; 505 return (sdev->inquiry[56] & 0x0c) == 0x04;
505 } 506 }
506 static inline int scsi_device_ius(struct scsi_device *sdev) 507 static inline int scsi_device_ius(struct scsi_device *sdev)
507 { 508 {
508 if (sdev->inquiry_len < 57) 509 if (sdev->inquiry_len < 57)
509 return 0; 510 return 0;
510 return sdev->inquiry[56] & 0x01; 511 return sdev->inquiry[56] & 0x01;
511 } 512 }
512 static inline int scsi_device_qas(struct scsi_device *sdev) 513 static inline int scsi_device_qas(struct scsi_device *sdev)
513 { 514 {
514 if (sdev->inquiry_len < 57) 515 if (sdev->inquiry_len < 57)
515 return 0; 516 return 0;
516 return sdev->inquiry[56] & 0x02; 517 return sdev->inquiry[56] & 0x02;
517 } 518 }
518 static inline int scsi_device_enclosure(struct scsi_device *sdev) 519 static inline int scsi_device_enclosure(struct scsi_device *sdev)
519 { 520 {
520 return sdev->inquiry ? (sdev->inquiry[6] & (1<<6)) : 1; 521 return sdev->inquiry ? (sdev->inquiry[6] & (1<<6)) : 1;
521 } 522 }
522 523
523 static inline int scsi_device_protection(struct scsi_device *sdev) 524 static inline int scsi_device_protection(struct scsi_device *sdev)
524 { 525 {
525 if (sdev->no_dif) 526 if (sdev->no_dif)
526 return 0; 527 return 0;
527 528
528 return sdev->scsi_level > SCSI_2 && sdev->inquiry[5] & (1<<0); 529 return sdev->scsi_level > SCSI_2 && sdev->inquiry[5] & (1<<0);
529 } 530 }
530 531
531 static inline int scsi_device_tpgs(struct scsi_device *sdev) 532 static inline int scsi_device_tpgs(struct scsi_device *sdev)
532 { 533 {
533 return sdev->inquiry ? (sdev->inquiry[5] >> 4) & 0x3 : 0; 534 return sdev->inquiry ? (sdev->inquiry[5] >> 4) & 0x3 : 0;
534 } 535 }
535 536
536 #define MODULE_ALIAS_SCSI_DEVICE(type) \ 537 #define MODULE_ALIAS_SCSI_DEVICE(type) \
537 MODULE_ALIAS("scsi:t-" __stringify(type) "*") 538 MODULE_ALIAS("scsi:t-" __stringify(type) "*")
538 #define SCSI_DEVICE_MODALIAS_FMT "scsi:t-0x%02x" 539 #define SCSI_DEVICE_MODALIAS_FMT "scsi:t-0x%02x"
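As a usage note, upper-level drivers use the alias macro above so they are auto-loaded for the peripheral device type they drive; a disk driver such as sd would declare, for example:

MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);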
include/scsi/scsi_host.h
1 #ifndef _SCSI_SCSI_HOST_H 1 #ifndef _SCSI_SCSI_HOST_H
2 #define _SCSI_SCSI_HOST_H 2 #define _SCSI_SCSI_HOST_H
3 3
4 #include <linux/device.h> 4 #include <linux/device.h>
5 #include <linux/list.h> 5 #include <linux/list.h>
6 #include <linux/types.h> 6 #include <linux/types.h>
7 #include <linux/workqueue.h> 7 #include <linux/workqueue.h>
8 #include <linux/mutex.h> 8 #include <linux/mutex.h>
9 #include <linux/seq_file.h> 9 #include <linux/seq_file.h>
10 #include <scsi/scsi.h> 10 #include <scsi/scsi.h>
11 11
12 struct request_queue; 12 struct request_queue;
13 struct block_device; 13 struct block_device;
14 struct completion; 14 struct completion;
15 struct module; 15 struct module;
16 struct scsi_cmnd; 16 struct scsi_cmnd;
17 struct scsi_device; 17 struct scsi_device;
18 struct scsi_host_cmd_pool; 18 struct scsi_host_cmd_pool;
19 struct scsi_target; 19 struct scsi_target;
20 struct Scsi_Host; 20 struct Scsi_Host;
21 struct scsi_host_cmd_pool; 21 struct scsi_host_cmd_pool;
22 struct scsi_transport_template; 22 struct scsi_transport_template;
23 struct blk_queue_tags; 23 struct blk_queue_tags;
24 24
25 25
26 /* 26 /*
27 * The various choices mean: 27 * The various choices mean:
28 * NONE: Self evident. Host adapter is not capable of scatter-gather. 28 * NONE: Self evident. Host adapter is not capable of scatter-gather.
29 * ALL: Means that the host adapter module can do scatter-gather, 29 * ALL: Means that the host adapter module can do scatter-gather,
30 * and that there is no limit to the size of the table to which 30 * and that there is no limit to the size of the table to which
31 * we scatter/gather data. The value we set here is the maximum 31 * we scatter/gather data. The value we set here is the maximum
32 * single element sglist. To use chained sglists, the adapter 32 * single element sglist. To use chained sglists, the adapter
33 * has to set a value beyond ALL (and correctly use the chain 33 * has to set a value beyond ALL (and correctly use the chain
34 * handling API). 34 * handling API).
35 * Anything else: Indicates the maximum number of chains that can be 35 * Anything else: Indicates the maximum number of chains that can be
36 * used in one scatter-gather request. 36 * used in one scatter-gather request.
37 */ 37 */
38 #define SG_NONE 0 38 #define SG_NONE 0
39 #define SG_ALL SCSI_MAX_SG_SEGMENTS 39 #define SG_ALL SCSI_MAX_SG_SEGMENTS
40 40
41 #define MODE_UNKNOWN 0x00 41 #define MODE_UNKNOWN 0x00
42 #define MODE_INITIATOR 0x01 42 #define MODE_INITIATOR 0x01
43 #define MODE_TARGET 0x02 43 #define MODE_TARGET 0x02
44 44
45 #define DISABLE_CLUSTERING 0 45 #define DISABLE_CLUSTERING 0
46 #define ENABLE_CLUSTERING 1 46 #define ENABLE_CLUSTERING 1
47 47
48 enum { 48 enum {
49 SCSI_QDEPTH_DEFAULT, /* default requested change, e.g. from sysfs */ 49 SCSI_QDEPTH_DEFAULT, /* default requested change, e.g. from sysfs */
50 SCSI_QDEPTH_QFULL, /* scsi-ml requested due to queue full */ 50 SCSI_QDEPTH_QFULL, /* scsi-ml requested due to queue full */
51 SCSI_QDEPTH_RAMP_UP, /* scsi-ml requested due to threshold event */ 51 SCSI_QDEPTH_RAMP_UP, /* scsi-ml requested due to threshold event */
52 }; 52 };
53 53
54 struct scsi_host_template { 54 struct scsi_host_template {
55 struct module *module; 55 struct module *module;
56 const char *name; 56 const char *name;
57 57
58 /* 58 /*
59 * Used to initialize old-style drivers. For new-style drivers 59 * Used to initialize old-style drivers. For new-style drivers
60 * just perform all work in your module initialization function. 60 * just perform all work in your module initialization function.
61 * 61 *
62 * Status: OBSOLETE 62 * Status: OBSOLETE
63 */ 63 */
64 int (* detect)(struct scsi_host_template *); 64 int (* detect)(struct scsi_host_template *);
65 65
66 /* 66 /*
67 * Used as unload callback for hosts with old-style drivers. 67 * Used as unload callback for hosts with old-style drivers.
68 * 68 *
69 * Status: OBSOLETE 69 * Status: OBSOLETE
70 */ 70 */
71 int (* release)(struct Scsi_Host *); 71 int (* release)(struct Scsi_Host *);
72 72
73 /* 73 /*
74 * The info function will return whatever useful information the 74 * The info function will return whatever useful information the
75 * developer sees fit. If not provided, then the name field will 75 * developer sees fit. If not provided, then the name field will
76 * be used instead. 76 * be used instead.
77 * 77 *
78 * Status: OPTIONAL 78 * Status: OPTIONAL
79 */ 79 */
80 const char *(* info)(struct Scsi_Host *); 80 const char *(* info)(struct Scsi_Host *);
81 81
82 /* 82 /*
83 * Ioctl interface 83 * Ioctl interface
84 * 84 *
85 * Status: OPTIONAL 85 * Status: OPTIONAL
86 */ 86 */
87 int (* ioctl)(struct scsi_device *dev, int cmd, void __user *arg); 87 int (* ioctl)(struct scsi_device *dev, int cmd, void __user *arg);
88 88
89 89
90 #ifdef CONFIG_COMPAT 90 #ifdef CONFIG_COMPAT
91 /* 91 /*
92 * Compat handler. Handle 32bit ABI. 92 * Compat handler. Handle 32bit ABI.
93 * When unknown ioctl is passed return -ENOIOCTLCMD. 93 * When unknown ioctl is passed return -ENOIOCTLCMD.
94 * 94 *
95 * Status: OPTIONAL 95 * Status: OPTIONAL
96 */ 96 */
97 int (* compat_ioctl)(struct scsi_device *dev, int cmd, void __user *arg); 97 int (* compat_ioctl)(struct scsi_device *dev, int cmd, void __user *arg);
98 #endif 98 #endif
99 99
100 /* 100 /*
101 * The queuecommand function is used to queue up a scsi 101 * The queuecommand function is used to queue up a scsi
102 * command block to the LLDD. When the driver finished 102 * command block to the LLDD. When the driver finished
103 * processing the command the done callback is invoked. 103 * processing the command the done callback is invoked.
104 * 104 *
105 * If queuecommand returns 0, then the HBA has accepted the 105 * If queuecommand returns 0, then the HBA has accepted the
106 * command. The done() function must be called on the command 106 * command. The done() function must be called on the command
107 * when the driver has finished with it. (you may call done on the 107 * when the driver has finished with it. (you may call done on the
108 * command before queuecommand returns, but in this case you 108 * command before queuecommand returns, but in this case you
109 * *must* return 0 from queuecommand). 109 * *must* return 0 from queuecommand).
110 * 110 *
111 * Queuecommand may also reject the command, in which case it may 111 * Queuecommand may also reject the command, in which case it may
112 * not touch the command and must not call done() for it. 112 * not touch the command and must not call done() for it.
113 * 113 *
114 * There are two possible rejection returns: 114 * There are two possible rejection returns:
115 * 115 *
116 * SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but 116 * SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but
117 * allow commands to other devices serviced by this host. 117 * allow commands to other devices serviced by this host.
118 * 118 *
119 * SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this 119 * SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this
120 * host temporarily. 120 * host temporarily.
121 * 121 *
122 * For compatibility, any other non-zero return is treated the 122 * For compatibility, any other non-zero return is treated the
123 * same as SCSI_MLQUEUE_HOST_BUSY. 123 * same as SCSI_MLQUEUE_HOST_BUSY.
124 * 124 *
125 * NOTE: "temporarily" means either until the next command for 125 * NOTE: "temporarily" means either until the next command for
126 * this device/host completes, or a period of time determined by 126 * this device/host completes, or a period of time determined by
127 * I/O pressure in the system if there are no other outstanding 127 * I/O pressure in the system if there are no other outstanding
128 * commands. 128 * commands.
129 * 129 *
130 * STATUS: REQUIRED 130 * STATUS: REQUIRED
131 */ 131 */
132 int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *); 132 int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);
133 133
134 /* 134 /*
135 * This is an error handling strategy routine. You don't need to 135 * This is an error handling strategy routine. You don't need to
136 * define one of these if you don't want to - there is a default 136 * define one of these if you don't want to - there is a default
137 * routine that is present that should work in most cases. For those 137 * routine that is present that should work in most cases. For those
138 * driver authors that have the inclination and ability to write their 138 * driver authors that have the inclination and ability to write their
139 * own strategy routine, this is where it is specified. Note - the 139 * own strategy routine, this is where it is specified. Note - the
140 * strategy routine is *ALWAYS* run in the context of the kernel eh 140 * strategy routine is *ALWAYS* run in the context of the kernel eh
141 * thread. Thus you are guaranteed to *NOT* be in an interrupt 141 * thread. Thus you are guaranteed to *NOT* be in an interrupt
142 * handler when you execute this, and you are also guaranteed to 142 * handler when you execute this, and you are also guaranteed to
143 * *NOT* have any other commands being queued while you are in the 143 * *NOT* have any other commands being queued while you are in the
144 * strategy routine. When you return from this function, operations 144 * strategy routine. When you return from this function, operations
145 * return to normal. 145 * return to normal.
146 * 146 *
147 * See scsi_error.c scsi_unjam_host for additional comments about 147 * See scsi_error.c scsi_unjam_host for additional comments about
148 * what this function should and should not be attempting to do. 148 * what this function should and should not be attempting to do.
149 * 149 *
150 * Status: REQUIRED (at least one of them) 150 * Status: REQUIRED (at least one of them)
151 */ 151 */
152 int (* eh_abort_handler)(struct scsi_cmnd *); 152 int (* eh_abort_handler)(struct scsi_cmnd *);
153 int (* eh_device_reset_handler)(struct scsi_cmnd *); 153 int (* eh_device_reset_handler)(struct scsi_cmnd *);
154 int (* eh_target_reset_handler)(struct scsi_cmnd *); 154 int (* eh_target_reset_handler)(struct scsi_cmnd *);
155 int (* eh_bus_reset_handler)(struct scsi_cmnd *); 155 int (* eh_bus_reset_handler)(struct scsi_cmnd *);
156 int (* eh_host_reset_handler)(struct scsi_cmnd *); 156 int (* eh_host_reset_handler)(struct scsi_cmnd *);
157 157
158 /* 158 /*
159 * Before the mid layer attempts to scan for a new device where none 159 * Before the mid layer attempts to scan for a new device where none
160 * currently exists, it will call this entry in your driver. Should 160 * currently exists, it will call this entry in your driver. Should
161 * your driver need to allocate any structs or perform any other init 161 * your driver need to allocate any structs or perform any other init
162 * items in order to send commands to a currently unused target/lun 162 * items in order to send commands to a currently unused target/lun
163 * combo, then this is where you can perform those allocations. This 163 * combo, then this is where you can perform those allocations. This
164 * is specifically so that drivers won't have to perform any kind of 164 * is specifically so that drivers won't have to perform any kind of
165 * "is this a new device" checks in their queuecommand routine, 165 * "is this a new device" checks in their queuecommand routine,
166 * thereby making the hot path a bit quicker. 166 * thereby making the hot path a bit quicker.
167 * 167 *
168 * Return values: 0 on success, non-0 on failure 168 * Return values: 0 on success, non-0 on failure
169 * 169 *
170 * Deallocation: If we didn't find any devices at this ID, you will 170 * Deallocation: If we didn't find any devices at this ID, you will
171 * get an immediate call to slave_destroy(). If we find something 171 * get an immediate call to slave_destroy(). If we find something
172 * here then you will get a call to slave_configure(), then the 172 * here then you will get a call to slave_configure(), then the
173 * device will be used for however long it is kept around, then when 173 * device will be used for however long it is kept around, then when
174 * the device is removed from the system (or * possibly at reboot 174 * the device is removed from the system (or * possibly at reboot
175 * time), you will then get a call to slave_destroy(). This is 175 * time), you will then get a call to slave_destroy(). This is
176 * assuming you implement slave_configure and slave_destroy. 176 * assuming you implement slave_configure and slave_destroy.
177 * However, if you allocate memory and hang it off the device struct, 177 * However, if you allocate memory and hang it off the device struct,
178 * then you must implement the slave_destroy() routine at a minimum 178 * then you must implement the slave_destroy() routine at a minimum
179 * in order to avoid leaking memory 179 * in order to avoid leaking memory
180 * each time a device is torn down. 180 * each time a device is torn down.
181 * 181 *
182 * Status: OPTIONAL 182 * Status: OPTIONAL
183 */ 183 */
184 int (* slave_alloc)(struct scsi_device *); 184 int (* slave_alloc)(struct scsi_device *);
185 185
186 /* 186 /*
187 * Once the device has responded to an INQUIRY and we know the 187 * Once the device has responded to an INQUIRY and we know the
188 * device is online, we call into the low level driver with the 188 * device is online, we call into the low level driver with the
189 * struct scsi_device *. If the low level device driver implements 189 * struct scsi_device *. If the low level device driver implements
190 * this function, it *must* perform the task of setting the queue 190 * this function, it *must* perform the task of setting the queue
191 * depth on the device. All other tasks are optional and depend 191 * depth on the device. All other tasks are optional and depend
192 * on what the driver supports and various implementation details. 192 * on what the driver supports and various implementation details.
193 * 193 *
194 * Things currently recommended to be handled at this time include: 194 * Things currently recommended to be handled at this time include:
195 * 195 *
196 * 1. Setting the device queue depth. Proper setting of this is 196 * 1. Setting the device queue depth. Proper setting of this is
197 * described in the comments for scsi_adjust_queue_depth. 197 * described in the comments for scsi_adjust_queue_depth.
198 * 2. Determining if the device supports the various synchronous 198 * 2. Determining if the device supports the various synchronous
199 * negotiation protocols. The device struct will already have 199 * negotiation protocols. The device struct will already have
200 * responded to INQUIRY and the results of the standard items 200 * responded to INQUIRY and the results of the standard items
201 * will have been shoved into the various device flag bits, eg. 201 * will have been shoved into the various device flag bits, eg.
202 * device->sdtr will be true if the device supports SDTR messages. 202 * device->sdtr will be true if the device supports SDTR messages.
203 * 3. Allocating command structs that the device will need. 203 * 3. Allocating command structs that the device will need.
204 * 4. Setting the default timeout on this device (if needed). 204 * 4. Setting the default timeout on this device (if needed).
205 * 5. Anything else the low level driver might want to do on a device 205 * 5. Anything else the low level driver might want to do on a device
206 * specific setup basis... 206 * specific setup basis...
207 * 6. Return 0 on success, non-0 on error. The device will be marked 207 * 6. Return 0 on success, non-0 on error. The device will be marked
208 * as offline on error so that no access will occur. If you return 208 * as offline on error so that no access will occur. If you return
209 * non-0, your slave_destroy routine will never get called for this 209 * non-0, your slave_destroy routine will never get called for this
210 * device, so don't leave any loose memory hanging around, clean 210 * device, so don't leave any loose memory hanging around, clean
211 * up after yourself before returning non-0 211 * up after yourself before returning non-0
212 * 212 *
213 * Status: OPTIONAL 213 * Status: OPTIONAL
214 */ 214 */
215 int (* slave_configure)(struct scsi_device *); 215 int (* slave_configure)(struct scsi_device *);
216 216
217 /* 217 /*
218 * Immediately prior to deallocating the device and after all activity 218 * Immediately prior to deallocating the device and after all activity
219 * has ceased the mid layer calls this point so that the low level 219 * has ceased the mid layer calls this point so that the low level
220 * driver may completely detach itself from the scsi device and vice 220 * driver may completely detach itself from the scsi device and vice
221 * versa. The low level driver is responsible for freeing any memory 221 * versa. The low level driver is responsible for freeing any memory
222 * it allocated in the slave_alloc or slave_configure calls. 222 * it allocated in the slave_alloc or slave_configure calls.
223 * 223 *
224 * Status: OPTIONAL 224 * Status: OPTIONAL
225 */ 225 */
226 void (* slave_destroy)(struct scsi_device *); 226 void (* slave_destroy)(struct scsi_device *);
227 227
228 /* 228 /*
229 * Before the mid layer attempts to scan for a new device attached 229 * Before the mid layer attempts to scan for a new device attached
230 * to a target where no target currently exists, it will call this 230 * to a target where no target currently exists, it will call this
231 * entry in your driver. Should your driver need to allocate any 231 * entry in your driver. Should your driver need to allocate any
232 * structs or perform any other init items in order to send commands 232 * structs or perform any other init items in order to send commands
233 * to a currently unused target, then this is where you can perform 233 * to a currently unused target, then this is where you can perform
234 * those allocations. 234 * those allocations.
235 * 235 *
236 * Return values: 0 on success, non-0 on failure 236 * Return values: 0 on success, non-0 on failure
237 * 237 *
238 * Status: OPTIONAL 238 * Status: OPTIONAL
239 */ 239 */
240 int (* target_alloc)(struct scsi_target *); 240 int (* target_alloc)(struct scsi_target *);
241 241
242 /* 242 /*
243 * Immediately prior to deallocating the target structure, and 243 * Immediately prior to deallocating the target structure, and
244 * after all activity to attached scsi devices has ceased, the 244 * after all activity to attached scsi devices has ceased, the
245 * midlayer calls this point so that the driver may deallocate 245 * midlayer calls this point so that the driver may deallocate
246 * and terminate any references to the target. 246 * and terminate any references to the target.
247 * 247 *
248 * Status: OPTIONAL 248 * Status: OPTIONAL
249 */ 249 */
250 void (* target_destroy)(struct scsi_target *); 250 void (* target_destroy)(struct scsi_target *);
251 251
252 /* 252 /*
253 * If a host has the ability to discover targets on its own instead 253 * If a host has the ability to discover targets on its own instead
254 * of scanning the entire bus, it can fill in this function and 254 * of scanning the entire bus, it can fill in this function and
255 * call scsi_scan_host(). This function will be called periodically 255 * call scsi_scan_host(). This function will be called periodically
256 * until it returns 1 with the scsi_host and the elapsed time of 256 * until it returns 1 with the scsi_host and the elapsed time of
257 * the scan in jiffies. 257 * the scan in jiffies.
258 * 258 *
259 * Status: OPTIONAL 259 * Status: OPTIONAL
260 */ 260 */
261 int (* scan_finished)(struct Scsi_Host *, unsigned long); 261 int (* scan_finished)(struct Scsi_Host *, unsigned long);
262 262
263 /* 263 /*
264 * If the host wants to be called before the scan starts, but 264 * If the host wants to be called before the scan starts, but
265 * after the midlayer has set up ready for the scan, it can fill 265 * after the midlayer has set up ready for the scan, it can fill
266 * in this function. 266 * in this function.
267 * 267 *
268 * Status: OPTIONAL 268 * Status: OPTIONAL
269 */ 269 */
270 void (* scan_start)(struct Scsi_Host *); 270 void (* scan_start)(struct Scsi_Host *);
271 271
272 /* 272 /*
273 * Fill in this function to allow the queue depth of this host 273 * Fill in this function to allow the queue depth of this host
274 * to be changeable (on a per device basis). Returns either 274 * to be changeable (on a per device basis). Returns either
275 * the current queue depth setting (may be different from what 275 * the current queue depth setting (may be different from what
276 * was passed in) or an error. An error should only be 276 * was passed in) or an error. An error should only be
277 * returned if the requested depth is legal but the driver was 277 * returned if the requested depth is legal but the driver was
278 * unable to set it. If the requested depth is illegal, the 278 * unable to set it. If the requested depth is illegal, the
279 * driver should set and return the closest legal queue depth. 279 * driver should set and return the closest legal queue depth.
280 * 280 *
281 * Status: OPTIONAL 281 * Status: OPTIONAL
282 */ 282 */
283 int (* change_queue_depth)(struct scsi_device *, int, int); 283 int (* change_queue_depth)(struct scsi_device *, int, int);
284 284
285 /* 285 /*
286 * Fill in this function to allow the changing of tag types 286 * Fill in this function to allow the changing of tag types
287 * (this also allows the enabling/disabling of tag command 287 * (this also allows the enabling/disabling of tag command
288 * queueing). An error should only be returned if something 288 * queueing). An error should only be returned if something
289 * went wrong in the driver while trying to set the tag type. 289 * went wrong in the driver while trying to set the tag type.
290 * If the driver doesn't support the requested tag type, then 290 * If the driver doesn't support the requested tag type, then
291 * it should set the closest type it does support without 291 * it should set the closest type it does support without
292 * returning an error. Returns the actual tag type set. 292 * returning an error. Returns the actual tag type set.
293 * 293 *
294 * Status: OPTIONAL 294 * Status: OPTIONAL
295 */ 295 */
296 int (* change_queue_type)(struct scsi_device *, int); 296 int (* change_queue_type)(struct scsi_device *, int);
297 297
298 /* 298 /*
299 * This function determines the BIOS parameters for a given 299 * This function determines the BIOS parameters for a given
300 * harddisk. These tend to be numbers that are made up by 300 * harddisk. These tend to be numbers that are made up by
301 * the host adapter. Parameters: 301 * the host adapter. Parameters:
302 * size, device, list (heads, sectors, cylinders) 302 * size, device, list (heads, sectors, cylinders)
303 * 303 *
304 * Status: OPTIONAL 304 * Status: OPTIONAL
305 */ 305 */
306 int (* bios_param)(struct scsi_device *, struct block_device *, 306 int (* bios_param)(struct scsi_device *, struct block_device *,
307 sector_t, int []); 307 sector_t, int []);
308 308
309 /* 309 /*
310 * This function is called when one or more partitions on the 310 * This function is called when one or more partitions on the
311 * device reach beyond the end of the device. 311 * device reach beyond the end of the device.
312 * 312 *
313 * Status: OPTIONAL 313 * Status: OPTIONAL
314 */ 314 */
315 void (*unlock_native_capacity)(struct scsi_device *); 315 void (*unlock_native_capacity)(struct scsi_device *);
316 316
317 /* 317 /*
318 * Can be used to export driver statistics and other infos to the 318 * Can be used to export driver statistics and other infos to the
319 * world outside the kernel ie. userspace and it also provides an 319 * world outside the kernel ie. userspace and it also provides an
320 * interface to feed the driver with information. 320 * interface to feed the driver with information.
321 * 321 *
322 * Status: OBSOLETE 322 * Status: OBSOLETE
323 */ 323 */
324 int (*show_info)(struct seq_file *, struct Scsi_Host *); 324 int (*show_info)(struct seq_file *, struct Scsi_Host *);
325 int (*write_info)(struct Scsi_Host *, char *, int); 325 int (*write_info)(struct Scsi_Host *, char *, int);
326 326
327 /* 327 /*
328 * This is an optional routine that allows the transport to become 328 * This is an optional routine that allows the transport to become
329 * involved when a scsi io timer fires. The return value tells the 329 * involved when a scsi io timer fires. The return value tells the
330 * timer routine how to finish the io timeout handling: 330 * timer routine how to finish the io timeout handling:
331 * EH_HANDLED: I fixed the error, please complete the command 331 * EH_HANDLED: I fixed the error, please complete the command
332 * EH_RESET_TIMER: I need more time, reset the timer and 332 * EH_RESET_TIMER: I need more time, reset the timer and
333 * begin counting again 333 * begin counting again
334 * EH_NOT_HANDLED Begin normal error recovery 334 * EH_NOT_HANDLED Begin normal error recovery
335 * 335 *
336 * Status: OPTIONAL 336 * Status: OPTIONAL
337 */ 337 */
338 enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *); 338 enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
339 339
340 /* This is an optional routine that allows transport to initiate 340 /* This is an optional routine that allows transport to initiate
341 * LLD adapter or firmware reset using sysfs attribute. 341 * LLD adapter or firmware reset using sysfs attribute.
342 * 342 *
343 * Return values: 0 on success, -ve value on failure. 343 * Return values: 0 on success, -ve value on failure.
344 * 344 *
345 * Status: OPTIONAL 345 * Status: OPTIONAL
346 */ 346 */
347 347
348 int (*host_reset)(struct Scsi_Host *shost, int reset_type); 348 int (*host_reset)(struct Scsi_Host *shost, int reset_type);
349 #define SCSI_ADAPTER_RESET 1 349 #define SCSI_ADAPTER_RESET 1
350 #define SCSI_FIRMWARE_RESET 2 350 #define SCSI_FIRMWARE_RESET 2
351 351
352 352
353 /* 353 /*
354 * Name of proc directory 354 * Name of proc directory
355 */ 355 */
356 const char *proc_name; 356 const char *proc_name;
357 357
358 /* 358 /*
359 * Used to store the procfs directory if a driver implements the 359 * Used to store the procfs directory if a driver implements the
360 * show_info method. 360 * show_info method.
361 */ 361 */
362 struct proc_dir_entry *proc_dir; 362 struct proc_dir_entry *proc_dir;
363 363
364 /* 364 /*
365 * This determines if we will use a non-interrupt driven 365 * This determines if we will use a non-interrupt driven
366 * or an interrupt driven scheme. It is set to the maximum number 366 * or an interrupt driven scheme. It is set to the maximum number
367 * of simultaneous commands a given host adapter will accept. 367 * of simultaneous commands a given host adapter will accept.
368 */ 368 */
369 int can_queue; 369 int can_queue;
370 370
371 /* 371 /*
372 * In many instances, especially where disconnect / reconnect are 372 * In many instances, especially where disconnect / reconnect are
373 * supported, our host also has an ID on the SCSI bus. If this is 373 * supported, our host also has an ID on the SCSI bus. If this is
374 * the case, then it must be reserved. Please set this_id to -1 if 374 * the case, then it must be reserved. Please set this_id to -1 if
375 * your setup is in single initiator mode, and the host lacks an 375 * your setup is in single initiator mode, and the host lacks an
376 * ID. 376 * ID.
377 */ 377 */
378 int this_id; 378 int this_id;
379 379
380 /* 380 /*
381 * This determines the degree to which the host adapter is capable 381 * This determines the degree to which the host adapter is capable
382 * of scatter-gather. 382 * of scatter-gather.
383 */ 383 */
384 unsigned short sg_tablesize; 384 unsigned short sg_tablesize;
385 unsigned short sg_prot_tablesize; 385 unsigned short sg_prot_tablesize;
386 386
387 /* 387 /*
388 * Set this if the host adapter has limitations beside segment count. 388 * Set this if the host adapter has limitations beside segment count.
389 */ 389 */
390 unsigned int max_sectors; 390 unsigned int max_sectors;
391 391
392 /* 392 /*
393 * DMA scatter gather segment boundary limit. A segment crossing this 393 * DMA scatter gather segment boundary limit. A segment crossing this
394 * boundary will be split in two. 394 * boundary will be split in two.
395 */ 395 */
396 unsigned long dma_boundary; 396 unsigned long dma_boundary;
397 397
398 /* 398 /*
399 * This specifies "machine infinity" for host templates which don't 399 * This specifies "machine infinity" for host templates which don't
400 * limit the transfer size. Note this limit represents an absolute 400 * limit the transfer size. Note this limit represents an absolute
401 * maximum, and may be over the transfer limits allowed for 401 * maximum, and may be over the transfer limits allowed for
402 * individual devices (e.g. 256 for SCSI-1). 402 * individual devices (e.g. 256 for SCSI-1).
403 */ 403 */
404 #define SCSI_DEFAULT_MAX_SECTORS 1024 404 #define SCSI_DEFAULT_MAX_SECTORS 1024
405 405
406 /* 406 /*
407 * True if this host adapter can make good use of linked commands. 407 * True if this host adapter can make good use of linked commands.
408 * This will allow more than one command to be queued to a given 408 * This will allow more than one command to be queued to a given
409 * unit on a given host. Set this to the maximum number of command 409 * unit on a given host. Set this to the maximum number of command
410 * blocks to be provided for each device. Set this to 1 for one 410 * blocks to be provided for each device. Set this to 1 for one
411 * command block per lun, 2 for two, etc. Do not set this to 0. 411 * command block per lun, 2 for two, etc. Do not set this to 0.
412 * You should make sure that the host adapter will do the right thing 412 * You should make sure that the host adapter will do the right thing
413 * before you try setting this above 1. 413 * before you try setting this above 1.
414 */ 414 */
415 short cmd_per_lun; 415 short cmd_per_lun;
416 416
417 /* 417 /*
418 * present contains counter indicating how many boards of this 418 * present contains counter indicating how many boards of this
419 * type were found when we did the scan. 419 * type were found when we did the scan.
420 */ 420 */
421 unsigned char present; 421 unsigned char present;
422 422
423 /* 423 /*
424 * This specifies the mode that a LLD supports. 424 * This specifies the mode that a LLD supports.
425 */ 425 */
426 unsigned supported_mode:2; 426 unsigned supported_mode:2;
427 427
428 /* 428 /*
429 * True if this host adapter uses unchecked DMA onto an ISA bus. 429 * True if this host adapter uses unchecked DMA onto an ISA bus.
430 */ 430 */
431 unsigned unchecked_isa_dma:1; 431 unsigned unchecked_isa_dma:1;
432 432
433 /* 433 /*
434 * True if this host adapter can make good use of clustering. 434 * True if this host adapter can make good use of clustering.
435 * I originally thought that if the tablesize was large that it 435 * I originally thought that if the tablesize was large that it
436 * was a waste of CPU cycles to prepare a cluster list, but 436 * was a waste of CPU cycles to prepare a cluster list, but
437 * it works out that the Buslogic is faster if you use a smaller 437 * it works out that the Buslogic is faster if you use a smaller
438 * number of segments (i.e. use clustering). I guess it is 438 * number of segments (i.e. use clustering). I guess it is
439 * inefficient. 439 * inefficient.
440 */ 440 */
441 unsigned use_clustering:1; 441 unsigned use_clustering:1;
442 442
443 /* 443 /*
444 * True for emulated SCSI host adapters (e.g. ATAPI). 444 * True for emulated SCSI host adapters (e.g. ATAPI).
445 */ 445 */
446 unsigned emulated:1; 446 unsigned emulated:1;
447 447
448 /* 448 /*
449 * True if the low-level driver performs its own reset-settle delays. 449 * True if the low-level driver performs its own reset-settle delays.
450 */ 450 */
451 unsigned skip_settle_delay:1; 451 unsigned skip_settle_delay:1;
452 452
453 /* 453 /*
454 * True if we are using ordered write support. 454 * True if we are using ordered write support.
455 */ 455 */
456 unsigned ordered_tag:1; 456 unsigned ordered_tag:1;
457 457
458 /* True if the controller does not support WRITE SAME */ 458 /* True if the controller does not support WRITE SAME */
459 unsigned no_write_same:1; 459 unsigned no_write_same:1;
460 460
461 /* 461 /*
462 * True if asynchronous aborts are not supported 462 * True if asynchronous aborts are not supported
463 */ 463 */
464 unsigned no_async_abort:1; 464 unsigned no_async_abort:1;
465 465
466 /* 466 /*
467 * Countdown for host blocking with no commands outstanding. 467 * Countdown for host blocking with no commands outstanding.
468 */ 468 */
469 unsigned int max_host_blocked; 469 unsigned int max_host_blocked;
470 470
471 /* 471 /*
472 * Default value for the blocking. If the queue is empty, 472 * Default value for the blocking. If the queue is empty,
473 * host_blocked counts down in the request_fn until it restarts 473 * host_blocked counts down in the request_fn until it restarts
474 * host operations as zero is reached. 474 * host operations as zero is reached.
475 * 475 *
476 * FIXME: This should probably be a value in the template 476 * FIXME: This should probably be a value in the template
477 */ 477 */
478 #define SCSI_DEFAULT_HOST_BLOCKED 7 478 #define SCSI_DEFAULT_HOST_BLOCKED 7
479 479
480 /* 480 /*
481 * Pointer to the sysfs class properties for this host, NULL terminated. 481 * Pointer to the sysfs class properties for this host, NULL terminated.
482 */ 482 */
483 struct device_attribute **shost_attrs; 483 struct device_attribute **shost_attrs;
484 484
485 /* 485 /*
486 * Pointer to the SCSI device properties for this host, NULL terminated. 486 * Pointer to the SCSI device properties for this host, NULL terminated.
487 */ 487 */
488 struct device_attribute **sdev_attrs; 488 struct device_attribute **sdev_attrs;
489 489
490 /* 490 /*
491 * List of hosts per template. 491 * List of hosts per template.
492 * 492 *
493 * This is only for use by scsi_module.c for legacy templates. 493 * This is only for use by scsi_module.c for legacy templates.
494 * For these access to it is synchronized implicitly by 494 * For these access to it is synchronized implicitly by
495 * module_init/module_exit. 495 * module_init/module_exit.
496 */ 496 */
497 struct list_head legacy_hosts; 497 struct list_head legacy_hosts;
498 498
499 /* 499 /*
500 * Vendor Identifier associated with the host 500 * Vendor Identifier associated with the host
501 * 501 *
502 * Note: When specifying vendor_id, be sure to read the 502 * Note: When specifying vendor_id, be sure to read the
503 * Vendor Type and ID formatting requirements specified in 503 * Vendor Type and ID formatting requirements specified in
504 * scsi_netlink.h 504 * scsi_netlink.h
505 */ 505 */
506 u64 vendor_id; 506 u64 vendor_id;
507 507
508 /* 508 /*
509 * Additional per-command data allocated for the driver. 509 * Additional per-command data allocated for the driver.
510 */ 510 */
511 unsigned int cmd_size; 511 unsigned int cmd_size;
512 struct scsi_host_cmd_pool *cmd_pool; 512 struct scsi_host_cmd_pool *cmd_pool;
513 }; 513 };
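To make the queuecommand contract documented in the template above concrete, here is a hedged, minimal sketch of a ->queuecommand implementation; struct example_hba and the hw_*() helpers are hypothetical stand-ins for driver-specific hardware access:

static int example_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct example_hba *hba = shost_priv(shost);	/* hypothetical per-host data */

	if (hw_adapter_full(hba))
		return SCSI_MLQUEUE_HOST_BUSY;	 /* block the whole host temporarily */

	if (hw_device_full(hba, cmd->device))
		return SCSI_MLQUEUE_DEVICE_BUSY; /* block only this device */

	hw_submit(hba, cmd);	/* completion path must call cmd->scsi_done(cmd) */
	return 0;		/* command accepted */
}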
514 514
515 /* 515 /*
516 * Temporary #define for host lock push down. Can be removed when all 516 * Temporary #define for host lock push down. Can be removed when all
517 * drivers have been updated to take advantage of unlocked 517 * drivers have been updated to take advantage of unlocked
518 * queuecommand. 518 * queuecommand.
519 * 519 *
520 */ 520 */
521 #define DEF_SCSI_QCMD(func_name) \ 521 #define DEF_SCSI_QCMD(func_name) \
522 int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd) \ 522 int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd) \
523 { \ 523 { \
524 unsigned long irq_flags; \ 524 unsigned long irq_flags; \
525 int rc; \ 525 int rc; \
526 spin_lock_irqsave(shost->host_lock, irq_flags); \ 526 spin_lock_irqsave(shost->host_lock, irq_flags); \
527 scsi_cmd_get_serial(shost, cmd); \ 527 scsi_cmd_get_serial(shost, cmd); \
528 rc = func_name##_lck (cmd, cmd->scsi_done); \ 528 rc = func_name##_lck (cmd, cmd->scsi_done); \
529 spin_unlock_irqrestore(shost->host_lock, irq_flags); \ 529 spin_unlock_irqrestore(shost->host_lock, irq_flags); \
530 return rc; \ 530 return rc; \
531 } 531 }
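A hedged sketch of how DEF_SCSI_QCMD is typically used by a driver that still expects the host lock: the old-style handler keeps a _lck suffix and the macro generates the unlocked-prototype wrapper around it (example_queuecommand_lck is hypothetical):

static int example_queuecommand_lck(struct scsi_cmnd *cmd,
				    void (*done)(struct scsi_cmnd *))
{
	/* runs with shost->host_lock held, exactly as the wrapper arranges;
	 * done is cmd->scsi_done passed through by the wrapper */
	/* ... hand the command to the hardware ... */
	return 0;
}

static DEF_SCSI_QCMD(example_queuecommand)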
532 532
533 533
534 /* 534 /*
535 * shost state: If you alter this, you also need to alter scsi_sysfs.c 535 * shost state: If you alter this, you also need to alter scsi_sysfs.c
536 * (for the ascii descriptions) and the state model enforcer: 536 * (for the ascii descriptions) and the state model enforcer:
537 * scsi_host_set_state() 537 * scsi_host_set_state()
538 */ 538 */
539 enum scsi_host_state { 539 enum scsi_host_state {
540 SHOST_CREATED = 1, 540 SHOST_CREATED = 1,
541 SHOST_RUNNING, 541 SHOST_RUNNING,
542 SHOST_CANCEL, 542 SHOST_CANCEL,
543 SHOST_DEL, 543 SHOST_DEL,
544 SHOST_RECOVERY, 544 SHOST_RECOVERY,
545 SHOST_CANCEL_RECOVERY, 545 SHOST_CANCEL_RECOVERY,
546 SHOST_DEL_RECOVERY, 546 SHOST_DEL_RECOVERY,
547 }; 547 };
548 548
549 struct Scsi_Host { 549 struct Scsi_Host {
550 /* 550 /*
551 * __devices is protected by the host_lock, but you should 551 * __devices is protected by the host_lock, but you should
552 * usually use scsi_device_lookup / shost_for_each_device 552 * usually use scsi_device_lookup / shost_for_each_device
553 * to access it and don't care about locking yourself. 553 * to access it and don't care about locking yourself.
554 * In the rare case of being in irq context you can use 554 * In the rare case of being in irq context you can use
555 * their __ prefixed variants with the lock held. NEVER 555 * their __ prefixed variants with the lock held. NEVER
556 * access this list directly from a driver. 556 * access this list directly from a driver.
557 */ 557 */
558 struct list_head __devices; 558 struct list_head __devices;
559 struct list_head __targets; 559 struct list_head __targets;
560 560
561 struct scsi_host_cmd_pool *cmd_pool; 561 struct scsi_host_cmd_pool *cmd_pool;
562 spinlock_t free_list_lock; 562 spinlock_t free_list_lock;
563 struct list_head free_list; /* backup store of cmd structs */ 563 struct list_head free_list; /* backup store of cmd structs */
564 struct list_head starved_list; 564 struct list_head starved_list;
565 565
566 spinlock_t default_lock; 566 spinlock_t default_lock;
567 spinlock_t *host_lock; 567 spinlock_t *host_lock;
568 568
569 struct mutex scan_mutex;/* serialize scanning activity */ 569 struct mutex scan_mutex;/* serialize scanning activity */
570 570
571 struct list_head eh_cmd_q; 571 struct list_head eh_cmd_q;
572 struct task_struct * ehandler; /* Error recovery thread. */ 572 struct task_struct * ehandler; /* Error recovery thread. */
573 struct completion * eh_action; /* Wait for specific actions on the 573 struct completion * eh_action; /* Wait for specific actions on the
574 host. */ 574 host. */
575 wait_queue_head_t host_wait; 575 wait_queue_head_t host_wait;
576 struct scsi_host_template *hostt; 576 struct scsi_host_template *hostt;
577 struct scsi_transport_template *transportt; 577 struct scsi_transport_template *transportt;
578 578
579 /* 579 /*
580 * Area to keep a shared tag map (if needed, will be 580 * Area to keep a shared tag map (if needed, will be
581 * NULL if not). 581 * NULL if not).
582 */ 582 */
583 struct blk_queue_tag *bqt; 583 struct blk_queue_tag *bqt;
584 584
585 atomic_t host_busy; /* commands actually active on low-level */ 585 atomic_t host_busy; /* commands actually active on low-level */
586 atomic_t host_blocked;
587
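For context on the hunk above, a hedged sketch of how an atomic blocked counter of this kind is typically consumed; the real call sites live in scsi_lib.c and are not part of this header:

static bool example_host_unblocked(struct Scsi_Host *shost)
{
	/* the counter is armed elsewhere with
	 * atomic_set(&shost->host_blocked, shost->max_host_blocked)
	 * when a driver reports the host busy; here it counts down and
	 * dispatch resumes once it reaches zero */
	if (atomic_read(&shost->host_blocked) > 0 &&
	    atomic_dec_return(&shost->host_blocked) > 0)
		return false;
	return true;
}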
586 unsigned int host_failed; /* commands that failed. 588 unsigned int host_failed; /* commands that failed.
587 protected by host_lock */ 589 protected by host_lock */
588 unsigned int host_eh_scheduled; /* EH scheduled without command */ 590 unsigned int host_eh_scheduled; /* EH scheduled without command */
589 591
590 unsigned int host_no; /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */ 592 unsigned int host_no; /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
591 593
592 /* next two fields are used to bound the time spent in error handling */ 594 /* next two fields are used to bound the time spent in error handling */
593 int eh_deadline; 595 int eh_deadline;
594 unsigned long last_reset; 596 unsigned long last_reset;
595 597
596 598
597 /* 599 /*
598 * These three parameters can be used to allow for wide scsi, 600 * These three parameters can be used to allow for wide scsi,
599 * and for host adapters that support multiple busses 601 * and for host adapters that support multiple busses
600 * The first two should be set to 1 more than the actual max id 602 * The first two should be set to 1 more than the actual max id
601 * or lun (e.g. 8 for SCSI parallel systems). 603 * or lun (e.g. 8 for SCSI parallel systems).
602 */ 604 */
603 unsigned int max_channel; 605 unsigned int max_channel;
604 unsigned int max_id; 606 unsigned int max_id;
605 u64 max_lun; 607 u64 max_lun;
606 608
607 /* 609 /*
608 * This is a unique identifier that must be assigned so that we 610 * This is a unique identifier that must be assigned so that we
609 * have some way of identifying each detected host adapter properly 611 * have some way of identifying each detected host adapter properly
610 * and uniquely. For hosts that do not support more than one card 612 * and uniquely. For hosts that do not support more than one card
611 * in the system at one time, this does not need to be set. It is 613 * in the system at one time, this does not need to be set. It is
612 * initialized to 0 in scsi_register. 614 * initialized to 0 in scsi_register.
613 */ 615 */
614 unsigned int unique_id; 616 unsigned int unique_id;
615 617
616 /* 618 /*
617 * The maximum length of SCSI commands that this host can accept. 619 * The maximum length of SCSI commands that this host can accept.
618 * Probably 12 for most host adapters, but could be 16 for others. 620 * Probably 12 for most host adapters, but could be 16 for others.
619 * or 260 if the driver supports variable length cdbs. 621 * or 260 if the driver supports variable length cdbs.
620 * For drivers that don't set this field, a value of 12 is 622 * For drivers that don't set this field, a value of 12 is
621 * assumed. 623 * assumed.
622 */ 624 */
623 unsigned short max_cmd_len; 625 unsigned short max_cmd_len;
624 626
625 int this_id; 627 int this_id;
626 int can_queue; 628 int can_queue;
627 short cmd_per_lun; 629 short cmd_per_lun;
628 short unsigned int sg_tablesize; 630 short unsigned int sg_tablesize;
629 short unsigned int sg_prot_tablesize; 631 short unsigned int sg_prot_tablesize;
630 unsigned int max_sectors; 632 unsigned int max_sectors;
631 unsigned long dma_boundary; 633 unsigned long dma_boundary;
632 /* 634 /*
633 * Used to assign serial numbers to the cmds. 635 * Used to assign serial numbers to the cmds.
634 * Protected by the host lock. 636 * Protected by the host lock.
635 */ 637 */
636 unsigned long cmd_serial_number; 638 unsigned long cmd_serial_number;
637 639
638 unsigned active_mode:2; 640 unsigned active_mode:2;
639 unsigned unchecked_isa_dma:1; 641 unsigned unchecked_isa_dma:1;
640 unsigned use_clustering:1; 642 unsigned use_clustering:1;
641 unsigned use_blk_tcq:1; 643 unsigned use_blk_tcq:1;
642 644
643 /* 645 /*
644 * Host has requested that no further requests come through for the 646 * Host has requested that no further requests come through for the
645 * time being. 647 * time being.
646 */ 648 */
647 unsigned host_self_blocked:1; 649 unsigned host_self_blocked:1;
648 650
649 /* 651 /*
650 * Host uses correct SCSI ordering not PC ordering. The bit is 652 * Host uses correct SCSI ordering not PC ordering. The bit is
651 * set for the minority of drivers whose authors actually read 653 * set for the minority of drivers whose authors actually read
652 * the spec ;). 654 * the spec ;).
653 */ 655 */
654 unsigned reverse_ordering:1; 656 unsigned reverse_ordering:1;
655 657
656 /* 658 /*
657 * Ordered write support 659 * Ordered write support
658 */ 660 */
659 unsigned ordered_tag:1; 661 unsigned ordered_tag:1;
660 662
661 /* Task mgmt function in progress */ 663 /* Task mgmt function in progress */
662 unsigned tmf_in_progress:1; 664 unsigned tmf_in_progress:1;
663 665
664 /* Asynchronous scan in progress */ 666 /* Asynchronous scan in progress */
665 unsigned async_scan:1; 667 unsigned async_scan:1;
666 668
667 /* Don't resume host in EH */ 669 /* Don't resume host in EH */
668 unsigned eh_noresume:1; 670 unsigned eh_noresume:1;
669 671
670 /* The controller does not support WRITE SAME */ 672 /* The controller does not support WRITE SAME */
671 unsigned no_write_same:1; 673 unsigned no_write_same:1;
672 674
673 /* 675 /*
674 * Optional work queue to be utilized by the transport 676 * Optional work queue to be utilized by the transport
675 */ 677 */
676 char work_q_name[20]; 678 char work_q_name[20];
677 struct workqueue_struct *work_q; 679 struct workqueue_struct *work_q;
678 680
679 /* 681 /*
680 * Task management function work queue 682 * Task management function work queue
681 */ 683 */
682 struct workqueue_struct *tmf_work_q; 684 struct workqueue_struct *tmf_work_q;
683
684 /*
685 * Host has rejected a command because it was busy.
686 */
687 unsigned int host_blocked;
688 685
689 /* 686 /*
690 * Value host_blocked counts down from 687 * Value host_blocked counts down from
691 */ 688 */
692 unsigned int max_host_blocked; 689 unsigned int max_host_blocked;
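The host_blocked counter that used to sit here is kept elsewhere in the structure after this change, as an atomic counter next to the _busy counters; max_host_blocked is only the ceiling it is armed with. As a rough, hypothetical sketch (not the actual scsi_lib.c logic, and assuming the field is re-added as an atomic_t), a queueing path that honours the block would test it roughly like this:

        /* Hypothetical helper: the host counts as blocked while the (atomic)
         * counter is still positive; a successful completion resets it. */
        static inline bool scsi_host_is_blocked(struct Scsi_Host *shost)
        {
                return atomic_read(&shost->host_blocked) > 0;
        }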
693 690
694 /* Protection Information */ 691 /* Protection Information */
695 unsigned int prot_capabilities; 692 unsigned int prot_capabilities;
696 unsigned char prot_guard_type; 693 unsigned char prot_guard_type;
697 694
698 /* 695 /*
699 * q used for scsi_tgt msgs, async events or any other requests that 696 * q used for scsi_tgt msgs, async events or any other requests that
700 * need to be processed in userspace 697 * need to be processed in userspace
701 */ 698 */
702 struct request_queue *uspace_req_q; 699 struct request_queue *uspace_req_q;
703 700
704 /* legacy crap */ 701 /* legacy crap */
705 unsigned long base; 702 unsigned long base;
706 unsigned long io_port; 703 unsigned long io_port;
707 unsigned char n_io_port; 704 unsigned char n_io_port;
708 unsigned char dma_channel; 705 unsigned char dma_channel;
709 unsigned int irq; 706 unsigned int irq;
710 707
711 708
712 enum scsi_host_state shost_state; 709 enum scsi_host_state shost_state;
713 710
714 /* ldm bits */ 711 /* ldm bits */
715 struct device shost_gendev, shost_dev; 712 struct device shost_gendev, shost_dev;
716 713
717 /* 714 /*
718 * List of hosts per template. 715 * List of hosts per template.
719 * 716 *
720 * This is only for use by scsi_module.c for legacy templates. 717 * This is only for use by scsi_module.c for legacy templates.
721 * For these access to it is synchronized implicitly by 718 * For these access to it is synchronized implicitly by
722 * module_init/module_exit. 719 * module_init/module_exit.
723 */ 720 */
724 struct list_head sht_legacy_list; 721 struct list_head sht_legacy_list;
725 722
726 /* 723 /*
727 * Points to the transport data (if any) which is allocated 724 * Points to the transport data (if any) which is allocated
728 * separately 725 * separately
729 */ 726 */
730 void *shost_data; 727 void *shost_data;
731 728
732 /* 729 /*
733 * Points to the physical bus device we'd use to do DMA 730 * Points to the physical bus device we'd use to do DMA
734 * Needed just in case we have virtual hosts. 731 * Needed just in case we have virtual hosts.
735 */ 732 */
736 struct device *dma_dev; 733 struct device *dma_dev;
737 734
738 /* 735 /*
739 * We should ensure that this is aligned, both for better performance 736 * We should ensure that this is aligned, both for better performance
740 * and because some compilers (m68k) don't automatically force 737 * and because some compilers (m68k) don't automatically force
741 * alignment to a long boundary. 738 * alignment to a long boundary.
742 */ 739 */
743 unsigned long hostdata[0] /* Used for storage of host specific stuff */ 740 unsigned long hostdata[0] /* Used for storage of host specific stuff */
744 __attribute__ ((aligned (sizeof(unsigned long)))); 741 __attribute__ ((aligned (sizeof(unsigned long))));
745 }; 742 };
746 743
747 #define class_to_shost(d) \ 744 #define class_to_shost(d) \
748 container_of(d, struct Scsi_Host, shost_dev) 745 container_of(d, struct Scsi_Host, shost_dev)
749 746
750 #define shost_printk(prefix, shost, fmt, a...) \ 747 #define shost_printk(prefix, shost, fmt, a...) \
751 dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a) 748 dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a)
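Typical consumers of these two macros are sysfs attributes and log messages. A minimal, hypothetical attribute (the function name and attribute are illustrative only, not an existing one) might look like:

        static ssize_t unique_id_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
        {
                struct Scsi_Host *shost = class_to_shost(dev);

                shost_printk(KERN_DEBUG, shost, "unique_id queried\n");
                return snprintf(buf, PAGE_SIZE, "%u\n", shost->unique_id);
        }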
752 749
753 static inline void *shost_priv(struct Scsi_Host *shost) 750 static inline void *shost_priv(struct Scsi_Host *shost)
754 { 751 {
755 return (void *)shost->hostdata; 752 return (void *)shost->hostdata;
756 } 753 }
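The usual pattern is to size the hostdata[] tail allocation when the host is allocated and then recover the driver-private structure through shost_priv(). A minimal sketch, with struct my_hba and my_template as hypothetical placeholders:

        /* Hypothetical driver-private state stored in the hostdata[] tail. */
        struct my_hba {
                void __iomem    *regs;
                spinlock_t      lock;
        };

        /* ... in the driver's probe path ... */
        shost = scsi_host_alloc(&my_template, sizeof(struct my_hba));
        if (!shost)
                return -ENOMEM;
        hba = shost_priv(shost);        /* points into shost->hostdata[] */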
757 754
758 int scsi_is_host_device(const struct device *); 755 int scsi_is_host_device(const struct device *);
759 756
760 static inline struct Scsi_Host *dev_to_shost(struct device *dev) 757 static inline struct Scsi_Host *dev_to_shost(struct device *dev)
761 { 758 {
762 while (!scsi_is_host_device(dev)) { 759 while (!scsi_is_host_device(dev)) {
763 if (!dev->parent) 760 if (!dev->parent)
764 return NULL; 761 return NULL;
765 dev = dev->parent; 762 dev = dev->parent;
766 } 763 }
767 return container_of(dev, struct Scsi_Host, shost_gendev); 764 return container_of(dev, struct Scsi_Host, shost_gendev);
768 } 765 }
769 766
770 static inline int scsi_host_in_recovery(struct Scsi_Host *shost) 767 static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
771 { 768 {
772 return shost->shost_state == SHOST_RECOVERY || 769 return shost->shost_state == SHOST_RECOVERY ||
773 shost->shost_state == SHOST_CANCEL_RECOVERY || 770 shost->shost_state == SHOST_CANCEL_RECOVERY ||
774 shost->shost_state == SHOST_DEL_RECOVERY || 771 shost->shost_state == SHOST_DEL_RECOVERY ||
775 shost->tmf_in_progress; 772 shost->tmf_in_progress;
776 } 773 }
777 774
778 extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *); 775 extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
779 extern void scsi_flush_work(struct Scsi_Host *); 776 extern void scsi_flush_work(struct Scsi_Host *);
780 777
781 extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int); 778 extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
782 extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *, 779 extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *,
783 struct device *, 780 struct device *,
784 struct device *); 781 struct device *);
785 extern void scsi_scan_host(struct Scsi_Host *); 782 extern void scsi_scan_host(struct Scsi_Host *);
786 extern void scsi_rescan_device(struct device *); 783 extern void scsi_rescan_device(struct device *);
787 extern void scsi_remove_host(struct Scsi_Host *); 784 extern void scsi_remove_host(struct Scsi_Host *);
788 extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *); 785 extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
789 extern void scsi_host_put(struct Scsi_Host *t); 786 extern void scsi_host_put(struct Scsi_Host *t);
790 extern struct Scsi_Host *scsi_host_lookup(unsigned short); 787 extern struct Scsi_Host *scsi_host_lookup(unsigned short);
791 extern const char *scsi_host_state_name(enum scsi_host_state); 788 extern const char *scsi_host_state_name(enum scsi_host_state);
792 extern void scsi_cmd_get_serial(struct Scsi_Host *, struct scsi_cmnd *); 789 extern void scsi_cmd_get_serial(struct Scsi_Host *, struct scsi_cmnd *);
793 790
794 static inline int __must_check scsi_add_host(struct Scsi_Host *host, 791 static inline int __must_check scsi_add_host(struct Scsi_Host *host,
795 struct device *dev) 792 struct device *dev)
796 { 793 {
797 return scsi_add_host_with_dma(host, dev, dev); 794 return scsi_add_host_with_dma(host, dev, dev);
798 } 795 }
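The declarations above make up the usual host life cycle: allocate, add, scan, and eventually remove and drop the reference. A hedged sketch of a probe/remove pair, where pdev, my_template and my_hba are placeholders and error unwinding is trimmed:

        /* probe */
        shost = scsi_host_alloc(&my_template, sizeof(struct my_hba));
        if (!shost)
                return -ENOMEM;
        error = scsi_add_host(shost, &pdev->dev);       /* make the host visible */
        if (error) {
                scsi_host_put(shost);
                return error;
        }
        scsi_scan_host(shost);                          /* probe for devices */

        /* remove */
        scsi_remove_host(shost);
        scsi_host_put(shost);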
799 796
800 static inline struct device *scsi_get_device(struct Scsi_Host *shost) 797 static inline struct device *scsi_get_device(struct Scsi_Host *shost)
801 { 798 {
802 return shost->shost_gendev.parent; 799 return shost->shost_gendev.parent;
803 } 800 }
804 801
805 /** 802 /**
806 * scsi_host_scan_allowed - Is scanning of this host allowed 803 * scsi_host_scan_allowed - Is scanning of this host allowed
807 * @shost: Pointer to Scsi_Host. 804 * @shost: Pointer to Scsi_Host.
808 **/ 805 **/
809 static inline int scsi_host_scan_allowed(struct Scsi_Host *shost) 806 static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
810 { 807 {
811 return shost->shost_state == SHOST_RUNNING || 808 return shost->shost_state == SHOST_RUNNING ||
812 shost->shost_state == SHOST_RECOVERY; 809 shost->shost_state == SHOST_RECOVERY;
813 } 810 }
814 811
815 extern void scsi_unblock_requests(struct Scsi_Host *); 812 extern void scsi_unblock_requests(struct Scsi_Host *);
816 extern void scsi_block_requests(struct Scsi_Host *); 813 extern void scsi_block_requests(struct Scsi_Host *);
817 814
818 struct class_container; 815 struct class_container;
819 816
820 extern struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, 817 extern struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
821 void (*) (struct request_queue *)); 818 void (*) (struct request_queue *));
822 /* 819 /*
823 * These two functions are used to allocate and free a pseudo device 820 * These two functions are used to allocate and free a pseudo device
824 * which will connect to the host adapter itself rather than any 821 * which will connect to the host adapter itself rather than any
825 * physical device. You must deallocate it when you are done with 822 * physical device. You must deallocate it when you are done with
826 * it. This pseudo-device isn't a real physical device and won't be 823 * it. This pseudo-device isn't a real physical device and won't be
827 * available to any high-level drivers. 824 * available to any high-level drivers.
828 */ 825 */
829 extern void scsi_free_host_dev(struct scsi_device *); 826 extern void scsi_free_host_dev(struct scsi_device *);
830 extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *); 827 extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *);
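In other words, a transport or driver that wants to address the adapter itself can borrow a pseudo scsi_device for the duration and must hand it back. A minimal sketch:

        struct scsi_device *sdev = scsi_get_host_dev(shost);

        if (sdev) {
                /* ... issue requests against sdev here ... */
                scsi_free_host_dev(sdev);
        }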
831 828
832 /* 829 /*
833 * DIF defines the exchange of protection information between 830 * DIF defines the exchange of protection information between
834 * initiator and SBC block device. 831 * initiator and SBC block device.
835 * 832 *
836 * DIX defines the exchange of protection information between OS and 833 * DIX defines the exchange of protection information between OS and
837 * initiator. 834 * initiator.
838 */ 835 */
839 enum scsi_host_prot_capabilities { 836 enum scsi_host_prot_capabilities {
840 SHOST_DIF_TYPE1_PROTECTION = 1 << 0, /* T10 DIF Type 1 */ 837 SHOST_DIF_TYPE1_PROTECTION = 1 << 0, /* T10 DIF Type 1 */
841 SHOST_DIF_TYPE2_PROTECTION = 1 << 1, /* T10 DIF Type 2 */ 838 SHOST_DIF_TYPE2_PROTECTION = 1 << 1, /* T10 DIF Type 2 */
842 SHOST_DIF_TYPE3_PROTECTION = 1 << 2, /* T10 DIF Type 3 */ 839 SHOST_DIF_TYPE3_PROTECTION = 1 << 2, /* T10 DIF Type 3 */
843 840
844 SHOST_DIX_TYPE0_PROTECTION = 1 << 3, /* DIX between OS and HBA only */ 841 SHOST_DIX_TYPE0_PROTECTION = 1 << 3, /* DIX between OS and HBA only */
845 SHOST_DIX_TYPE1_PROTECTION = 1 << 4, /* DIX with DIF Type 1 */ 842 SHOST_DIX_TYPE1_PROTECTION = 1 << 4, /* DIX with DIF Type 1 */
846 SHOST_DIX_TYPE2_PROTECTION = 1 << 5, /* DIX with DIF Type 2 */ 843 SHOST_DIX_TYPE2_PROTECTION = 1 << 5, /* DIX with DIF Type 2 */
847 SHOST_DIX_TYPE3_PROTECTION = 1 << 6, /* DIX with DIF Type 3 */ 844 SHOST_DIX_TYPE3_PROTECTION = 1 << 6, /* DIX with DIF Type 3 */
848 }; 845 };
849 846
850 /* 847 /*
851 * SCSI hosts which support the Data Integrity Extensions must 848 * SCSI hosts which support the Data Integrity Extensions must
852 * indicate their capabilities by setting the prot_capabilities using 849 * indicate their capabilities by setting the prot_capabilities using
853 * this call. 850 * this call.
854 */ 851 */
855 static inline void scsi_host_set_prot(struct Scsi_Host *shost, unsigned int mask) 852 static inline void scsi_host_set_prot(struct Scsi_Host *shost, unsigned int mask)
856 { 853 {
857 shost->prot_capabilities = mask; 854 shost->prot_capabilities = mask;
858 } 855 }
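For instance, a hypothetical DIX-capable HBA that handles DIF Type 1 and Type 3 on the wire might advertise something along these lines (the exact mask is an assumption chosen for the example):

        scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
                                  SHOST_DIF_TYPE3_PROTECTION |
                                  SHOST_DIX_TYPE0_PROTECTION |
                                  SHOST_DIX_TYPE1_PROTECTION);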
859 856
860 static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost) 857 static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost)
861 { 858 {
862 return shost->prot_capabilities; 859 return shost->prot_capabilities;
863 } 860 }
864 861
865 static inline int scsi_host_prot_dma(struct Scsi_Host *shost) 862 static inline int scsi_host_prot_dma(struct Scsi_Host *shost)
866 { 863 {
867 return shost->prot_capabilities >= SHOST_DIX_TYPE0_PROTECTION; 864 return shost->prot_capabilities >= SHOST_DIX_TYPE0_PROTECTION;
868 } 865 }
869 866
870 static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type) 867 static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
871 { 868 {
872 static unsigned char cap[] = { 0, 869 static unsigned char cap[] = { 0,
873 SHOST_DIF_TYPE1_PROTECTION, 870 SHOST_DIF_TYPE1_PROTECTION,
874 SHOST_DIF_TYPE2_PROTECTION, 871 SHOST_DIF_TYPE2_PROTECTION,
875 SHOST_DIF_TYPE3_PROTECTION }; 872 SHOST_DIF_TYPE3_PROTECTION };
876 873
877 if (target_type >= ARRAY_SIZE(cap)) 874 if (target_type >= ARRAY_SIZE(cap))
878 return 0; 875 return 0;
879 876
880 return shost->prot_capabilities & cap[target_type] ? target_type : 0; 877 return shost->prot_capabilities & cap[target_type] ? target_type : 0;
881 } 878 }
882 879
883 static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type) 880 static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type)
884 { 881 {
885 #if defined(CONFIG_BLK_DEV_INTEGRITY) 882 #if defined(CONFIG_BLK_DEV_INTEGRITY)
886 static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION, 883 static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION,
887 SHOST_DIX_TYPE1_PROTECTION, 884 SHOST_DIX_TYPE1_PROTECTION,
888 SHOST_DIX_TYPE2_PROTECTION, 885 SHOST_DIX_TYPE2_PROTECTION,
889 SHOST_DIX_TYPE3_PROTECTION }; 886 SHOST_DIX_TYPE3_PROTECTION };
890 887
891 if (target_type >= ARRAY_SIZE(cap)) 888 if (target_type >= ARRAY_SIZE(cap))
892 return 0; 889 return 0;
893 890
894 return shost->prot_capabilities & cap[target_type]; 891 return shost->prot_capabilities & cap[target_type];
895 #endif 892 #endif
896 return 0; 893 return 0;
897 } 894 }
898 895
899 /* 896 /*
900 * All DIX-capable initiators must support the T10-mandated CRC 897 * All DIX-capable initiators must support the T10-mandated CRC
901 * checksum. Controllers can optionally implement the IP checksum 898 * checksum. Controllers can optionally implement the IP checksum
902 * scheme, which has a much lower impact on system performance. Note 899 * scheme, which has a much lower impact on system performance. Note
903 * that the main rationale for the checksum is to match integrity 900 * that the main rationale for the checksum is to match integrity
904 * metadata with data. Detecting bit errors is a job for ECC memory 901 * metadata with data. Detecting bit errors is a job for ECC memory
905 * and buses. 902 * and buses.
906 */ 903 */
907 904
908 enum scsi_host_guard_type { 905 enum scsi_host_guard_type {
909 SHOST_DIX_GUARD_CRC = 1 << 0, 906 SHOST_DIX_GUARD_CRC = 1 << 0,
910 SHOST_DIX_GUARD_IP = 1 << 1, 907 SHOST_DIX_GUARD_IP = 1 << 1,
911 }; 908 };
912 909
913 static inline void scsi_host_set_guard(struct Scsi_Host *shost, unsigned char type) 910 static inline void scsi_host_set_guard(struct Scsi_Host *shost, unsigned char type)
914 { 911 {
915 shost->prot_guard_type = type; 912 shost->prot_guard_type = type;
916 } 913 }
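A controller that can offload the cheaper IP checksum in addition to the mandatory T10 CRC would, hypothetically, declare both guard types:

        scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC | SHOST_DIX_GUARD_IP);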
917 914
918 static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost) 915 static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost)
919 { 916 {
920 return shost->prot_guard_type; 917 return shost->prot_guard_type;
921 } 918 }
922 919
923 /* legacy interfaces */ 920 /* legacy interfaces */
924 extern struct Scsi_Host *scsi_register(struct scsi_host_template *, int); 921 extern struct Scsi_Host *scsi_register(struct scsi_host_template *, int);
925 extern void scsi_unregister(struct Scsi_Host *); 922 extern void scsi_unregister(struct Scsi_Host *);
926 extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state); 923 extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);
927 924