drivers/scsi/scsi_lib.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1999 Eric Youngdale
 * Copyright (C) 2014 Christoph Hellwig
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h> /* __scsi_init_queue() */
#include <scsi/scsi_dh.h>

#include <trace/events/scsi.h>

#include "scsi_debugfs.h"
#include "scsi_priv.h"
#include "scsi_logging.h"

/*
 * Size of integrity metadata is usually small, 1 inline sg should
 * cover normal cases.
 */
#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define  SCSI_INLINE_PROT_SG_CNT  0
#define  SCSI_INLINE_SG_CNT  0
#else
#define  SCSI_INLINE_PROT_SG_CNT  1
#define  SCSI_INLINE_SG_CNT  2
#endif

static struct kmem_cache *scsi_sdb_cache;
static struct kmem_cache *scsi_sense_cache;
static struct kmem_cache *scsi_sense_isadma_cache;
static DEFINE_MUTEX(scsi_sense_cache_mutex);

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd);

static inline struct kmem_cache *
scsi_select_sense_cache(bool unchecked_isa_dma)
{
	return unchecked_isa_dma ? scsi_sense_isadma_cache : scsi_sense_cache;
}

static void scsi_free_sense_buffer(bool unchecked_isa_dma,
				   unsigned char *sense_buffer)
{
	kmem_cache_free(scsi_select_sense_cache(unchecked_isa_dma),
			sense_buffer);
}

static unsigned char *scsi_alloc_sense_buffer(bool unchecked_isa_dma,
	gfp_t gfp_mask, int numa_node)
{
	return kmem_cache_alloc_node(scsi_select_sense_cache(unchecked_isa_dma),
				     gfp_mask, numa_node);
}

int scsi_init_sense_cache(struct Scsi_Host *shost)
{
	struct kmem_cache *cache;
	int ret = 0;

	mutex_lock(&scsi_sense_cache_mutex);
	cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
	if (cache)
		goto exit;

	if (shost->unchecked_isa_dma) {
		scsi_sense_isadma_cache =
			kmem_cache_create("scsi_sense_cache(DMA)",
				SCSI_SENSE_BUFFERSIZE, 0,
				SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
		if (!scsi_sense_isadma_cache)
			ret = -ENOMEM;
	} else {
		scsi_sense_cache =
			kmem_cache_create_usercopy("scsi_sense_cache",
				SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN,
				0, SCSI_SENSE_BUFFERSIZE, NULL);
		if (!scsi_sense_cache)
			ret = -ENOMEM;
	}
 exit:
	mutex_unlock(&scsi_sense_cache_mutex);
	return ret;
}
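
/*
 * Illustrative sketch, not part of scsi_lib.c: how the sense-buffer helpers
 * above pair up. A hypothetical caller must allocate and free with the same
 * unchecked_isa_dma flag so both operations hit the same kmem_cache.
 * example_sense_roundtrip() is an invented name.
 */
static inline void example_sense_roundtrip(struct Scsi_Host *shost)
{
	unsigned char *sb = scsi_alloc_sense_buffer(shost->unchecked_isa_dma,
						    GFP_KERNEL, NUMA_NO_NODE);

	if (sb)
		scsi_free_sense_buffer(shost->unchecked_isa_dma, sb);
}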

/*
 * When to reinvoke queueing after a resource shortage. It's 3 msecs to
 * not change behaviour from the previous unplug mechanism, experimentation
 * may prove this needs changing.
 */
#define SCSI_QUEUE_DELAY	3

static void
scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		atomic_set(&host->host_blocked, host->max_host_blocked);
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
	case SCSI_MLQUEUE_EH_RETRY:
		atomic_set(&device->device_blocked,
			   device->max_device_blocked);
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		atomic_set(&starget->target_blocked,
			   starget->max_target_blocked);
		break;
	}
}
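
/*
 * Illustrative sketch, not part of scsi_lib.c: a low-level driver that
 * momentarily cannot accept work returns one of the SCSI_MLQUEUE_* reasons
 * from its ->queuecommand(); the midlayer then funnels the value into
 * scsi_set_blocked() above. example_lld_queuecommand() and the out_of_tags
 * check are invented for illustration.
 */
static int example_lld_queuecommand(struct Scsi_Host *host,
				    struct scsi_cmnd *cmd)
{
	bool out_of_tags = false;	/* hypothetical LLD-private condition */

	if (out_of_tags)
		return SCSI_MLQUEUE_HOST_BUSY;	/* stalls the whole host */

	cmd->result = DID_OK << 16;	/* pretend immediate success */
	cmd->scsi_done(cmd);
	return 0;
}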

static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
{
	if (cmd->request->rq_flags & RQF_DONTPREP) {
		cmd->request->rq_flags &= ~RQF_DONTPREP;
		scsi_mq_uninit_cmd(cmd);
	} else {
		WARN_ON_ONCE(true);
	}
	blk_mq_requeue_request(cmd->request, true);
}

/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason: The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
{
	struct scsi_device *device = cmd->device;

	SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
		"Inserting command %p into mlqueue\n", cmd));

	scsi_set_blocked(cmd, reason);

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device, cmd);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue. Schedule requeue work under
	 * lock such that the kblockd_schedule_work() call happens
	 * before blk_cleanup_queue() finishes.
	 */
	cmd->result = 0;

	blk_mq_requeue_request(cmd->request, true);
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	__scsi_queue_insert(cmd, reason, true);
}

/**
 * __scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @sshdr:	optional decoded sense header
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @flags:	flags for ->cmd_flags
 * @rq_flags:	flags for ->rq_flags
 * @resid:	optional residual length
 *
 * Returns the scsi_cmnd result field if a command was executed, or a negative
 * Linux error code if we didn't get that far.
 */
int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, struct scsi_sense_hdr *sshdr,
		 int timeout, int retries, u64 flags, req_flags_t rq_flags,
		 int *resid)
{
	struct request *req;
	struct scsi_request *rq;
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue,
			data_direction == DMA_TO_DEVICE ?
			REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, BLK_MQ_REQ_PREEMPT);
	if (IS_ERR(req))
		return ret;
	rq = scsi_req(req);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
					buffer, bufflen, GFP_NOIO))
		goto out;

	rq->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(rq->cmd, cmd, rq->cmd_len);
	rq->retries = retries;
	req->timeout = timeout;
	req->cmd_flags |= flags;
	req->rq_flags |= rq_flags | RQF_QUIET;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(rq->resid_len > 0 && rq->resid_len <= bufflen))
		memset(buffer + (bufflen - rq->resid_len), 0, rq->resid_len);

	if (resid)
		*resid = rq->resid_len;
	if (sense && rq->sense_len)
		memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
	if (sshdr)
		scsi_normalize_sense(rq->sense, rq->sense_len, sshdr);
	ret = rq->result;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(__scsi_execute);
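
/*
 * Illustrative sketch, not part of scsi_lib.c: issuing TEST UNIT READY
 * through __scsi_execute(). The scsi_execute()/scsi_execute_req() wrappers
 * in <scsi/scsi_device.h> expand to calls of this shape.
 * example_test_unit_ready() is an invented name.
 */
static int example_test_unit_ready(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { TEST_UNIT_READY };
	struct scsi_sense_hdr sshdr;

	/* no data transfer, 10 second timeout, up to 3 retries */
	return __scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
			      10 * HZ, 3, 0, 0, NULL);
}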

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.   Typically this will
 *              be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	scsi_set_resid(cmd, 0);
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = scsi_command_size(cmd->cmnd);
}

/*
 * Wake up the error handler if necessary. Avoid as follows that the error
 * handler is not woken up if host in-flight requests number ==
 * shost->host_failed: use call_rcu() in scsi_eh_scmd_add() in combination
 * with an RCU read lock in this function to ensure that this function in
 * its entirety either finishes before scsi_eh_scmd_add() increases the
 * host_failed counter or that it notices the shost state change made by
 * scsi_eh_scmd_add().
 */
static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	unsigned long flags;

	rcu_read_lock();
	__clear_bit(SCMD_STATE_INFLIGHT, &cmd->state);
	if (unlikely(scsi_host_in_recovery(shost))) {
		spin_lock_irqsave(shost->host_lock, flags);
		if (shost->host_failed || shost->host_eh_scheduled)
			scsi_eh_wakeup(shost);
		spin_unlock_irqrestore(shost->host_lock, flags);
	}
	rcu_read_unlock();
}

void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);

	scsi_dec_host_busy(shost, cmd);

	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);

	atomic_dec(&sdev->device_busy);
}

static void scsi_kick_queue(struct request_queue *q)
{
	blk_mq_run_hw_queues(q, false);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	scsi_kick_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		scsi_kick_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline bool scsi_device_is_busy(struct scsi_device *sdev)
{
	if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
		return true;
	if (atomic_read(&sdev->device_blocked) > 0)
		return true;
	return false;
}

static inline bool scsi_target_is_busy(struct scsi_target *starget)
{
	if (starget->can_queue > 0) {
		if (atomic_read(&starget->target_busy) >= starget->can_queue)
			return true;
		if (atomic_read(&starget->target_blocked) > 0)
			return true;
	}
	return false;
}

static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
	if (atomic_read(&shost->host_blocked) > 0)
		return true;
	if (shost->host_self_blocked)
		return true;
	return false;
}
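
/*
 * Illustrative sketch, not part of scsi_lib.c: the three predicates above
 * express the per-device, per-target and per-host admission levels. A
 * combined "can this sdev take another command right now?" check, written
 * only for illustration, would read:
 */
static inline bool example_sdev_can_queue(struct scsi_device *sdev)
{
	return !scsi_device_is_busy(sdev) &&
	       !scsi_target_is_busy(scsi_target(sdev)) &&
	       !scsi_host_is_busy(sdev->host);
}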

static void scsi_starved_list_run(struct Scsi_Host *shost)
{
	LIST_HEAD(starved_list);
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		struct request_queue *slq;

		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		/*
		 * Once we drop the host lock, a racing scsi_remove_device()
		 * call may remove the sdev from the starved list and destroy
		 * it and the queue.  Mitigate by taking a reference to the
		 * queue and never touching the sdev again after we drop the
		 * host lock.  Note: if __scsi_remove_device() invokes
		 * blk_cleanup_queue() before the queue is run from this
		 * function then blk_run_queue() will return immediately since
		 * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
		 */
		slq = sdev->request_queue;
		if (!blk_get_queue(slq))
			continue;
		spin_unlock_irqrestore(shost->host_lock, flags);

		scsi_kick_queue(slq);
		blk_put_queue(slq);

		spin_lock_irqsave(shost->host_lock, flags);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:   scsi_run_queue()
 *
 * Purpose:    Select a proper request queue to serve next
 *
 * Arguments:  q       - last request's queue
 *
 * Returns:    Nothing
 *
 * Notes:      The previous command was completely finished, start
 *             a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;

	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);
	if (!list_empty(&sdev->host->starved_list))
		scsi_starved_list_run(sdev->host);

	blk_mq_run_hw_queues(q, false);
}

void scsi_requeue_run_queue(struct work_struct *work)
{
	struct scsi_device *sdev;
	struct request_queue *q;

	sdev = container_of(work, struct scsi_device, requeue_work);
	q = sdev->request_queue;
	scsi_run_queue(q);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{
	if (!blk_rq_is_passthrough(cmd->request)) {
		struct scsi_driver *drv = scsi_cmd_to_driver(cmd);

		if (drv->uninit_command)
			drv->uninit_command(cmd);
	}
}

static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
{
	if (cmd->sdb.table.nents)
		sg_free_table_chained(&cmd->sdb.table,
				SCSI_INLINE_SG_CNT);
	if (scsi_prot_sg_count(cmd))
		sg_free_table_chained(&cmd->prot_sdb->table,
				SCSI_INLINE_PROT_SG_CNT);
}

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
	scsi_mq_free_sgtables(cmd);
	scsi_uninit_cmd(cmd);
}

/* Returns false when no more bytes to process, true if there are more */
static bool scsi_end_request(struct request *req, blk_status_t error,
		unsigned int bytes)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	if (blk_update_request(req, error, bytes))
		return true;

	if (blk_queue_add_random(q))
		add_disk_randomness(req->rq_disk);

	if (!blk_rq_is_scsi(req)) {
		WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
		cmd->flags &= ~SCMD_INITIALIZED;
	}

	/*
	 * Calling rcu_barrier() is not necessary here because the
	 * SCSI error handler guarantees that the function called by
	 * call_rcu() has been called before scsi_end_request() is
	 * called.
	 */
	destroy_rcu_head(&cmd->rcu);

	/*
	 * In the MQ case the command gets freed by __blk_mq_end_request,
	 * so we have to do all cleanup that depends on it earlier.
	 *
	 * We also can't kick the queues from irq context, so we
	 * will have to defer it to a workqueue.
	 */
	scsi_mq_uninit_cmd(cmd);

	/*
	 * queue is still alive, so grab the ref for preventing it
	 * from being cleaned up during running queue.
	 */
	percpu_ref_get(&q->q_usage_counter);

	__blk_mq_end_request(req, error);

	if (scsi_target(sdev)->single_lun ||
	    !list_empty(&sdev->host->starved_list))
		kblockd_schedule_work(&sdev->requeue_work);
	else
		blk_mq_run_hw_queues(q, true);

	percpu_ref_put(&q->q_usage_counter);
	return false;
}
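
/*
 * Illustrative note, not part of scsi_lib.c: callers below treat a "true"
 * return from scsi_end_request() as "bytes are still outstanding". The
 * fast path in scsi_io_completion() therefore reads, condensed:
 *
 *	if (likely(!scsi_end_request(req, blk_stat, good_bytes)))
 *		return;		// everything done, request freed
 *	// otherwise fall through to the retry/requeue logic
 */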

/**
 * scsi_result_to_blk_status - translate a SCSI result code into blk_status_t
 * @cmd:	SCSI command
 * @result:	scsi error code
 *
 * Translate a SCSI result code into a blk_status_t value. May reset the host
 * byte of @cmd->result.
 */
static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
{
	switch (host_byte(result)) {
	case DID_OK:
		/*
		 * Also check the other bytes than the status byte in result
		 * to handle the case when a SCSI LLD sets result to
		 * DRIVER_SENSE << 24 without setting SAM_STAT_CHECK_CONDITION.
		 */
		if (scsi_status_is_good(result) && (result & ~0xff) == 0)
			return BLK_STS_OK;
		return BLK_STS_IOERR;
	case DID_TRANSPORT_FAILFAST:
		return BLK_STS_TRANSPORT;
	case DID_TARGET_FAILURE:
		set_host_byte(cmd, DID_OK);
		return BLK_STS_TARGET;
	case DID_NEXUS_FAILURE:
		set_host_byte(cmd, DID_OK);
		return BLK_STS_NEXUS;
	case DID_ALLOC_FAILURE:
		set_host_byte(cmd, DID_OK);
		return BLK_STS_NOSPC;
	case DID_MEDIUM_ERROR:
		set_host_byte(cmd, DID_OK);
		return BLK_STS_MEDIUM;
	default:
		return BLK_STS_IOERR;
	}
}
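
/*
 * Illustrative sketch, not part of scsi_lib.c: feeding a host byte through
 * the translation above. A hypothetical LLD outcome of DID_TARGET_FAILURE
 * maps to BLK_STS_TARGET, and the host byte in cmd->result is rewritten to
 * DID_OK so later checks see a clean host status.
 * example_map_result() is an invented name.
 */
static blk_status_t example_map_result(struct scsi_cmnd *cmd)
{
	cmd->result = DID_TARGET_FAILURE << 16;	/* hypothetical outcome */

	return scsi_result_to_blk_status(cmd, cmd->result); /* BLK_STS_TARGET */
}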

/* Helper for scsi_io_completion() when "reprep" action required. */
static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
				      struct request_queue *q)
{
	/* A new command will be prepared and issued. */
	scsi_mq_requeue_cmd(cmd);
}

/* Helper for scsi_io_completion() when special action required. */
static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
{
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int level = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
	      ACTION_DELAYED_RETRY} action;
	unsigned long wait_for = (cmd->allowed + 1) * req->timeout;
	struct scsi_sense_hdr sshdr;
	bool sense_valid;
	bool sense_current = true;      /* false implies "deferred sense" */
	blk_status_t blk_stat;

	sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
	if (sense_valid)
		sense_current = !scsi_sense_is_deferred(&sshdr);

	blk_stat = scsi_result_to_blk_status(cmd, result);

	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && sense_current) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				action = ACTION_FAIL;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * command and see what happens.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) /* DIX */ {
				action = ACTION_FAIL;
				blk_stat = BLK_STS_PROTECTION;
			/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
			} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
				action = ACTION_FAIL;
				blk_stat = BLK_STS_TARGET;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) /* DIF */
				blk_stat = BLK_STS_PROTECTION;
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
				case 0x14: /* space allocation in progress */
				case 0x1a: /* start stop unit in progress */
				case 0x1b: /* sanitize in progress */
				case 0x1d: /* configuration in progress */
				case 0x24: /* depopulation in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				default:
					action = ACTION_FAIL;
					break;
				}
			} else
				action = ACTION_FAIL;
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		default:
			action = ACTION_FAIL;
			break;
		}
	} else
		action = ACTION_FAIL;

	if (action != ACTION_FAIL &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies))
		action = ACTION_FAIL;

	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		if (!(req->rq_flags & RQF_QUIET)) {
			static DEFINE_RATELIMIT_STATE(_rs,
					DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

			if (unlikely(scsi_logging_level))
				level =
				     SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
						    SCSI_LOG_MLCOMPLETE_BITS);

			/*
			 * if logging is enabled the failure will be printed
			 * in scsi_log_completion(), so avoid duplicate messages
			 */
			if (!level && __ratelimit(&_rs)) {
				scsi_print_result(cmd, NULL, FAILED);
				if (driver_byte(result) == DRIVER_SENSE)
					scsi_print_sense(cmd);
				scsi_print_command(cmd);
			}
		}
		if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req)))
			return;
		/*FALLTHRU*/
	case ACTION_REPREP:
		scsi_io_completion_reprep(cmd, q);
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, false);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, false);
		break;
	}
}

/*
 * Helper for scsi_io_completion() when cmd->result is non-zero. Returns a
 * new result that may suppress further error checking. Also modifies
 * *blk_statp in some cases.
 */
static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
					blk_status_t *blk_statp)
{
	bool sense_valid;
	bool sense_current = true;	/* false implies "deferred sense" */
	struct request *req = cmd->request;
	struct scsi_sense_hdr sshdr;

	sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
	if (sense_valid)
		sense_current = !scsi_sense_is_deferred(&sshdr);

	if (blk_rq_is_passthrough(req)) {
		if (sense_valid) {
			/*
			 * SG_IO wants current and deferred errors
			 */
			scsi_req(req)->sense_len =
				min(8 + cmd->sense_buffer[7],
				    SCSI_SENSE_BUFFERSIZE);
		}
		if (sense_current)
			*blk_statp = scsi_result_to_blk_status(cmd, result);
	} else if (blk_rq_bytes(req) == 0 && sense_current) {
		/*
		 * Flush commands do not transfer any data, and thus cannot use
		 * good_bytes != blk_rq_bytes(req) as the signal for an error.
		 * This sets *blk_statp explicitly for the problem case.
		 */
		*blk_statp = scsi_result_to_blk_status(cmd, result);
	}
	/*
	 * Recovered errors need reporting, but they're always treated as
	 * success, so fiddle the result code here.  For passthrough requests
	 * we already took a copy of the original into sreq->result which
	 * is what gets returned to the user
	 */
	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
		bool do_print = true;

		/*
		 * if ATA PASS-THROUGH INFORMATION AVAILABLE [0x0, 0x1d]
		 * skip print since caller wants ATA registers. Only occurs
		 * on SCSI ATA PASS_THROUGH commands when CK_COND=1
		 */
		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
			do_print = false;
		else if (req->rq_flags & RQF_QUIET)
			do_print = false;
		if (do_print)
			scsi_print_sense(cmd);
		result = 0;
		/* for passthrough, *blk_statp may be set */
		*blk_statp = BLK_STS_OK;
	}
	/*
	 * Another corner case: the SCSI status byte is non-zero but 'good'.
	 * Example: PRE-FETCH command returns SAM_STAT_CONDITION_MET when
	 * it is able to fit nominated LBs in its cache (and SAM_STAT_GOOD
	 * if it can't fit). Treat SAM_STAT_CONDITION_MET and the related
	 * intermediate statuses (both obsolete in SAM-4) as good.
	 */
	if (status_byte(result) && scsi_status_is_good(result)) {
		result = 0;
		*blk_statp = BLK_STS_OK;
	}
	return result;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       We will finish off the specified number of sectors.  If we
 *		are done, the command block will be released and the queue
 *		function will be goosed.  If we are not done then we have to
 *		figure out what to do next:
 *
 *		a) We can call scsi_requeue_command().  The request
 *		   will be unprepared and put back on the queue.  Then
 *		   a new command will be created for it.  This should
 *		   be used if we made forward progress, or if we want
 *		   to switch from READ(10) to READ(6) for example.
 *
 *		b) We can call __scsi_queue_insert().  The request will
 *		   be put back on the queue and retried using the same
 *		   command as before, possibly after a delay.
 *
 *		c) We can call scsi_end_request() with blk_stat other than
 *		   BLK_STS_OK, to fail the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	blk_status_t blk_stat = BLK_STS_OK;

	if (unlikely(result))	/* a nz result may or may not be an error */
		result = scsi_io_completion_nz_result(cmd, result, &blk_stat);

	if (unlikely(blk_rq_is_passthrough(req))) {
		/*
		 * scsi_result_to_blk_status may have reset the host_byte
		 */
		scsi_req(req)->result = cmd->result;
	}

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
		"%u sectors total, %d bytes done.\n",
		blk_rq_sectors(req), good_bytes));

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle. Failed, zero length commands always need to drop down
	 * to retry code. Fast path should return in this block.
	 */
	if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) {
		if (likely(!scsi_end_request(req, blk_stat, good_bytes)))
			return; /* no bytes remaining */
	}

	/* Kill remainder if no retries. */
	if (unlikely(blk_stat && scsi_noretry_cmd(cmd))) {
		if (scsi_end_request(req, blk_stat, blk_rq_bytes(req)))
			WARN_ONCE(true,
			    "Bytes remaining after failed, no-retry command");
		return;
	}

	/*
	 * If there had been no error, but we have leftover bytes in the
	 * requeues just queue the command up again.
	 */
	if (likely(result == 0))
		scsi_io_completion_reprep(cmd, q);
	else
		scsi_io_completion_action(cmd, result);
}

static blk_status_t scsi_init_sgtable(struct request *req,
		struct scsi_data_buffer *sdb)
{
	int count;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(sg_alloc_table_chained(&sdb->table,
			blk_rq_nr_phys_segments(req), sdb->table.sgl,
			SCSI_INLINE_SG_CNT)))
		return BLK_STS_RESOURCE;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	BUG_ON(count > sdb->table.nents);
	sdb->table.nents = count;
	sdb->length = blk_rq_payload_bytes(req);
	return BLK_STS_OK;
}

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     BLK_STS_OK on success
 *		BLK_STS_RESOURCE if the failure is retryable
 *		BLK_STS_IOERR if the failure is fatal
 */
blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
{
	struct request *rq = cmd->request;
	blk_status_t ret;

	if (WARN_ON_ONCE(!blk_rq_nr_phys_segments(rq)))
		return BLK_STS_IOERR;

	ret = scsi_init_sgtable(rq, &cmd->sdb);
	if (ret)
		return ret;

	if (blk_integrity_rq(rq)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs, count;

		if (WARN_ON_ONCE(!prot_sdb)) {
			/*
			 * This can happen if someone (e.g. multipath)
			 * queues a command to a device on an adapter
			 * that does not support DIX.
			 */
			ret = BLK_STS_IOERR;
			goto out_free_sgtables;
		}

		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);

		if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
				prot_sdb->table.sgl,
				SCSI_INLINE_PROT_SG_CNT)) {
			ret = BLK_STS_RESOURCE;
			goto out_free_sgtables;
		}

		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
						prot_sdb->table.sgl);
		BUG_ON(count > ivecs);
		BUG_ON(count > queue_max_integrity_segments(rq->q));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLK_STS_OK;
out_free_sgtables:
	scsi_mq_free_sgtables(cmd);
	return ret;
}
EXPORT_SYMBOL(scsi_init_io);
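
/*
 * Illustrative sketch, not part of scsi_lib.c: an upper-level driver's
 * ->init_command() (sd, sr, st, ...) typically maps the request first and
 * propagates the tri-state result of scsi_init_io().
 * example_uld_init_command() is an invented name.
 */
static blk_status_t example_uld_init_command(struct scsi_cmnd *cmd)
{
	blk_status_t ret = scsi_init_io(cmd);

	if (ret != BLK_STS_OK)
		return ret;	/* BLK_STS_RESOURCE retries, BLK_STS_IOERR fails */

	/* ... fill in cmd->cmnd for the specific command here ... */
	return BLK_STS_OK;
}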

/**
 * scsi_initialize_rq - initialize struct scsi_cmnd partially
 * @rq: Request associated with the SCSI command to be initialized.
 *
 * This function initializes the members of struct scsi_cmnd that must be
 * initialized before request processing starts and that won't be
 * reinitialized if a SCSI command is requeued.
 *
 * Called from inside blk_get_request() for pass-through requests and from
 * inside scsi_init_command() for filesystem requests.
 */
static void scsi_initialize_rq(struct request *rq)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	scsi_req_init(&cmd->req);
	init_rcu_head(&cmd->rcu);
	cmd->jiffies_at_alloc = jiffies;
	cmd->retries = 0;
}

/*
 * Only called when the request isn't completed by SCSI, and not freed by
 * SCSI
 */
static void scsi_cleanup_rq(struct request *rq)
{
	if (rq->rq_flags & RQF_DONTPREP) {
		scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq));
		rq->rq_flags &= ~RQF_DONTPREP;
	}
}

/* Called before a request is prepared. See also scsi_mq_prep_fn(). */
void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
{
	void *buf = cmd->sense_buffer;
	void *prot = cmd->prot_sdb;
	struct request *rq = blk_mq_rq_from_pdu(cmd);
	unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS;
	unsigned long jiffies_at_alloc;
	int retries, to_clear;
	bool in_flight;

	if (!blk_rq_is_scsi(rq) && !(flags & SCMD_INITIALIZED)) {
		flags |= SCMD_INITIALIZED;
		scsi_initialize_rq(rq);
	}

	jiffies_at_alloc = cmd->jiffies_at_alloc;
	retries = cmd->retries;
	in_flight = test_bit(SCMD_STATE_INFLIGHT, &cmd->state);
	/*
	 * Zero out the cmd, except for the embedded scsi_request. Only clear
	 * the driver-private command data if the LLD does not supply a
	 * function to initialize that data.
	 */
	to_clear = sizeof(*cmd) - sizeof(cmd->req);
	if (!dev->host->hostt->init_cmd_priv)
		to_clear += dev->host->hostt->cmd_size;
	memset((char *)cmd + sizeof(cmd->req), 0, to_clear);

	cmd->device = dev;
	cmd->sense_buffer = buf;
	cmd->prot_sdb = prot;
	cmd->flags = flags;
	INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
	cmd->jiffies_at_alloc = jiffies_at_alloc;
	cmd->retries = retries;
	if (in_flight)
		__set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
}

static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);

	/*
	 * Passthrough requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
	if (req->bio) {
		blk_status_t ret = scsi_init_io(cmd);
		if (unlikely(ret != BLK_STS_OK))
			return ret;
	} else {
		BUG_ON(blk_rq_bytes(req));

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
	}

	cmd->cmd_len = scsi_req(req)->cmd_len;
	cmd->cmnd = scsi_req(req)->cmd;
	cmd->transfersize = blk_rq_bytes(req);
	cmd->allowed = scsi_req(req)->retries;
	return BLK_STS_OK;
}

/*
 * Setup a normal block command.  These are simple requests from filesystems
 * that still need to be translated to SCSI CDBs from the ULD.
 */
static blk_status_t scsi_setup_fs_cmnd(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);

	if (unlikely(sdev->handler && sdev->handler->prep_fn)) {
		blk_status_t ret = sdev->handler->prep_fn(sdev, req);
		if (ret != BLK_STS_OK)
			return ret;
	}

	cmd->cmnd = scsi_req(req)->cmd = scsi_req(req)->__cmd;
	memset(cmd->cmnd, 0, BLK_MAX_CDB);
	return scsi_cmd_to_driver(cmd)->init_command(cmd);
}

static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);

	if (!blk_rq_bytes(req))
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	if (blk_rq_is_scsi(req))
		return scsi_setup_scsi_cmnd(sdev, req);
	else
		return scsi_setup_fs_cmnd(sdev, req);
}

static blk_status_t
scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	switch (sdev->sdev_state) {
	case SDEV_OFFLINE:
	case SDEV_TRANSPORT_OFFLINE:
		/*
		 * If the device is offline we refuse to process any
		 * commands.  The device must be brought online
		 * before trying any recovery commands.
		 */
		if (!sdev->offline_already) {
			sdev->offline_already = true;
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
		}
		return BLK_STS_IOERR;
	case SDEV_DEL:
		/*
		 * If the device is fully deleted, we refuse to
		 * process any commands as well.
		 */
		sdev_printk(KERN_ERR, sdev,
			    "rejecting I/O to dead device\n");
		return BLK_STS_IOERR;
	case SDEV_BLOCK:
	case SDEV_CREATED_BLOCK:
		return BLK_STS_RESOURCE;
	case SDEV_QUIESCE:
		/*
		 * If the device is blocked we defer normal commands.
		 */
		if (req && !(req->rq_flags & RQF_PREEMPT))
			return BLK_STS_RESOURCE;
		return BLK_STS_OK;
	default:
		/*
		 * For any other not fully online state we only allow
		 * special commands.  In particular any user initiated
		 * command is not allowed.
		 */
		if (req && !(req->rq_flags & RQF_PREEMPT))
			return BLK_STS_IOERR;
		return BLK_STS_OK;
	}
}
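
/*
 * Illustrative note, not part of scsi_lib.c: the mapping implemented by
 * scsi_prep_state_check() above, in condensed form. BLK_STS_RESOURCE means
 * "hold the request and retry later", BLK_STS_IOERR fails it now:
 *
 *	SDEV_OFFLINE / SDEV_TRANSPORT_OFFLINE / SDEV_DEL -> BLK_STS_IOERR
 *	SDEV_BLOCK / SDEV_CREATED_BLOCK                  -> BLK_STS_RESOURCE
 *	SDEV_QUIESCE     -> BLK_STS_RESOURCE unless the request carries
 *			    RQF_PREEMPT
 *	other states     -> BLK_STS_IOERR unless the request carries
 *			    RQF_PREEMPT
 */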

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	unsigned int busy;

	busy = atomic_inc_return(&sdev->device_busy) - 1;
	if (atomic_read(&sdev->device_blocked)) {
		if (busy)
			goto out_dec;

		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (atomic_dec_return(&sdev->device_blocked) > 0)
			goto out_dec;
		SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
				   "unblocking device at zero depth\n"));
	}

	if (busy >= sdev->queue_depth)
		goto out_dec;

	return 1;
out_dec:
	atomic_dec(&sdev->device_busy);
	return 0;
}

/*
 * scsi_target_queue_ready: checks if we can send commands to target
 * @sdev: scsi device on starget to check.
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					   struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);
	unsigned int busy;

	if (starget->single_lun) {
		spin_lock_irq(shost->host_lock);
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev) {
			spin_unlock_irq(shost->host_lock);
			return 0;
		}
		starget->starget_sdev_user = sdev;
		spin_unlock_irq(shost->host_lock);
	}

	if (starget->can_queue <= 0)
		return 1;

	busy = atomic_inc_return(&starget->target_busy) - 1;
	if (atomic_read(&starget->target_blocked) > 0) {
		if (busy)
			goto starved;

		/*
		 * unblock after target_blocked iterates to zero
		 */
		if (atomic_dec_return(&starget->target_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
				 "unblocking target at zero depth\n"));
	}

	if (busy >= starget->can_queue)
		goto starved;

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	list_move_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);
	return 0;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev,
				   struct scsi_cmnd *cmd)
{
	if (scsi_host_in_recovery(shost))
		return 0;

	if (atomic_read(&shost->host_blocked) > 0) {
		if (scsi_host_busy(shost) > 0)
			goto starved;

		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (atomic_dec_return(&shost->host_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3,
			shost_printk(KERN_INFO, shost,
				     "unblocking host at zero depth\n"));
	}

	if (shost->host_self_blocked)
		goto starved;

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry)) {
		spin_lock_irq(shost->host_lock);
		if (!list_empty(&sdev->starved_entry))
			list_del_init(&sdev->starved_entry);
		spin_unlock_irq(shost->host_lock);
	}

	__set_bit(SCMD_STATE_INFLIGHT, &cmd->state);

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	if (list_empty(&sdev->starved_entry))
		list_add_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	scsi_dec_host_busy(shost, cmd);
	return 0;
}

/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is not guaranteed and
 * may be changed after request stacking drivers call the function,
 * regardless of taking lock or not.
 *
 * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi
 * needs to return 'not busy'. Otherwise, request stacking drivers
 * may hold requests forever.
 */
static bool scsi_mq_lld_busy(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;

	if (blk_queue_dying(q))
		return false;

	shost = sdev->host;

	/*
	 * Ignore host/starget busy state.
	 * Since block layer does not have a concept of fairness across
	 * multiple queues, congestion of host/starget needs to be handled
	 * in SCSI layer.
	 */
	if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
		return true;

	return false;
}

static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		scmd_printk(KERN_ERR, cmd,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
	case SUCCESS:
		scsi_finish_command(cmd);
		break;
	case NEEDS_RETRY:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
		break;
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		scsi_eh_scmd_add(cmd);
		break;
	}
}

/**
 * scsi_dispatch_cmd - Dispatch a command to the low-level driver.
 * @cmd: command block we are dispatching.
 *
 * Return: nonzero if the request was rejected and the device's queue needs
 * to be plugged.
 */
static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	int rtn = 0;

	atomic_inc(&cmd->device->iorequest_cnt);

	/* check if the device is still usable */
	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
		/* in SDEV_DEL we error all commands. DID_NO_CONNECT
		 * returns an immediate error upwards, and signals
		 * that the device is no longer present */
		cmd->result = DID_NO_CONNECT << 16;
		goto done;
	}

	/* Check to see if the scsi lld made this device blocked. */
	if (unlikely(scsi_device_blocked(cmd->device))) {
		/*
		 * in blocked state, the command is just put back on
		 * the device queue.  The suspend state has already
		 * blocked the queue so future requests should not
		 * occur until the device transitions out of the
		 * suspend state.
		 */
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : device blocked\n"));
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	/* Store the LUN value in cmnd, if needed. */
	if (cmd->device->lun_in_cdb)
		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
			       (cmd->device->lun << 5 & 0xe0);

	scsi_log_send(cmd);

	/*
	 * Before we queue this command, check if the command
	 * length exceeds what the host adapter can handle.
	 */
	if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			       "queuecommand : command too long. "
			       "cdb_size=%d host->max_cmd_len=%d\n",
			       cmd->cmd_len, cmd->device->host->max_cmd_len));
		cmd->result = (DID_ABORT << 16);
		goto done;
	}

	if (unlikely(host->shost_state == SHOST_DEL)) {
		cmd->result = (DID_NO_CONNECT << 16);
		goto done;
	}

	trace_scsi_dispatch_cmd_start(cmd);
	rtn = host->hostt->queuecommand(host, cmd);
	if (rtn) {
		trace_scsi_dispatch_cmd_error(cmd, rtn);
		if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
		    rtn != SCSI_MLQUEUE_TARGET_BUSY)
			rtn = SCSI_MLQUEUE_HOST_BUSY;

		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : request rejected\n"));
	}

	return rtn;
 done:
	cmd->scsi_done(cmd);
	return 0;
}
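
/*
 * Illustrative note, not part of scsi_lib.c: the SCSI_MLQUEUE_* value
 * returned by scsi_dispatch_cmd() is consumed by scsi_queue_rq() (the tail
 * of which lies beyond this excerpt), which feeds it to scsi_set_blocked()
 * and answers blk-mq with BLK_STS_RESOURCE so the request is retried later.
 */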
be4c186c8
|
1498 |
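/*
 * Illustrative sketch (editor's example, not part of the original file):
 * scsi_dispatch_cmd() above hands the command to the LLD's ->queuecommand()
 * hook and normalizes unexpected return values to SCSI_MLQUEUE_HOST_BUSY.
 * A driver's hook therefore only needs to return 0 on acceptance or one of
 * the SCSI_MLQUEUE_*_BUSY codes to ask for a later requeue, roughly:
 *
 *	static int example_queuecommand(struct Scsi_Host *shost,
 *					struct scsi_cmnd *cmd)
 *	{
 *		struct example_hba *hba = shost_priv(shost);
 *
 *		if (!example_hw_slot_available(hba))
 *			return SCSI_MLQUEUE_HOST_BUSY;	// retried later
 *		example_hw_submit(hba, cmd);	// hw completion will invoke
 *		return 0;			// cmd->scsi_done(cmd)
 *	}
 *
 * example_hba, example_hw_slot_available() and example_hw_submit() are
 * made-up names used only to show the contract.
 */
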
/* Size in bytes of the sg-list stored in the scsi-mq command-private data. */
static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost)
{
	return min_t(unsigned int, shost->sg_tablesize, SCSI_INLINE_SG_CNT) *
		sizeof(struct scatterlist);
}

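/*
 * Worked example (editor's note): on x86-64, sizeof(struct scatterlist) is
 * typically 32 bytes with CONFIG_DEBUG_SG disabled, so with the default
 * SCSI_INLINE_SG_CNT of 2 a host advertising sg_tablesize >= 2 reserves
 * min(sg_tablesize, 2) * 32 = 64 bytes of inline sg space per request.
 * Larger scatterlists are chained in from a separate sg-pool allocation.
 * The exact numbers depend on the architecture and config options.
 */
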
static blk_status_t scsi_mq_prep_fn(struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	struct scsi_device *sdev = req->q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	struct scatterlist *sg;

	scsi_init_command(sdev, cmd);

	cmd->request = req;
	cmd->tag = req->tag;
	cmd->prot_op = SCSI_PROT_NORMAL;

	sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
	cmd->sdb.table.sgl = sg;

	if (scsi_host_get_prot(shost)) {
		memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));

		cmd->prot_sdb->table.sgl =
			(struct scatterlist *)(cmd->prot_sdb + 1);
	}

	blk_mq_start_request(req);

	return scsi_setup_cmnd(sdev, req);
}

static void scsi_mq_done(struct scsi_cmnd *cmd)
{
	if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state)))
		return;
	trace_scsi_dispatch_cmd_done(cmd);

	/*
	 * If the block layer didn't complete the request due to a timeout
	 * injection, scsi must clear its internal completed state so that the
	 * timeout handler will see it needs to escalate its own error
	 * recovery.
	 */
	if (unlikely(!blk_mq_complete_request(cmd->request)))
		clear_bit(SCMD_STATE_COMPLETE, &cmd->state);
}

static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct scsi_device *sdev = q->queuedata;

	atomic_dec(&sdev->device_busy);
}

static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct scsi_device *sdev = q->queuedata;

	if (scsi_dev_queue_ready(q, sdev))
		return true;

	if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev))
		blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
	return false;
}

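/*
 * Editor's note on the call sequence (illustrative, simplified): the block
 * layer acquires the per-device budget before dispatching, and the budget is
 * released either when the command completes or on a failed dispatch:
 *
 *	if (!scsi_mq_get_budget(hctx))		// sdev queue full
 *		return;				// hw queue re-run later
 *	ret = scsi_queue_rq(hctx, bd);		// calls scsi_mq_put_budget()
 *						// itself on its failure paths
 *
 * This bounds the number of commands outstanding per device without taking
 * a host-wide lock in the hot path.
 */
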
static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	blk_status_t ret;
	int reason;

	/*
	 * If the device is not in running state we will reject some or all
	 * commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		ret = scsi_prep_state_check(sdev, req);
		if (ret != BLK_STS_OK)
			goto out_put_budget;
	}

	ret = BLK_STS_RESOURCE;
	if (!scsi_target_queue_ready(shost, sdev))
		goto out_put_budget;
	if (!scsi_host_queue_ready(q, shost, sdev, cmd))
		goto out_dec_target_busy;

	if (!(req->rq_flags & RQF_DONTPREP)) {
		ret = scsi_mq_prep_fn(req);
		if (ret != BLK_STS_OK)
			goto out_dec_host_busy;
		req->rq_flags |= RQF_DONTPREP;
	} else {
		clear_bit(SCMD_STATE_COMPLETE, &cmd->state);
		blk_mq_start_request(req);
	}

	cmd->flags &= SCMD_PRESERVED_FLAGS;
	if (sdev->simple_tags)
		cmd->flags |= SCMD_TAGGED;
	if (bd->last)
		cmd->flags |= SCMD_LAST;

	scsi_init_cmd_errh(cmd);
	cmd->scsi_done = scsi_mq_done;

	reason = scsi_dispatch_cmd(cmd);
	if (reason) {
		scsi_set_blocked(cmd, reason);
		ret = BLK_STS_RESOURCE;
		goto out_dec_host_busy;
	}

	return BLK_STS_OK;

out_dec_host_busy:
	scsi_dec_host_busy(shost, cmd);
out_dec_target_busy:
	if (scsi_target(sdev)->can_queue > 0)
		atomic_dec(&scsi_target(sdev)->target_busy);
out_put_budget:
	scsi_mq_put_budget(hctx);
	switch (ret) {
	case BLK_STS_OK:
		break;
	case BLK_STS_RESOURCE:
		if (atomic_read(&sdev->device_busy) ||
		    scsi_device_blocked(sdev))
			ret = BLK_STS_DEV_RESOURCE;
		break;
	default:
		if (unlikely(!scsi_device_online(sdev)))
			scsi_req(req)->result = DID_NO_CONNECT << 16;
		else
			scsi_req(req)->result = DID_ERROR << 16;
		/*
		 * Make sure to release all allocated resources when
		 * we hit an error, as we will never see this command
		 * again.
		 */
		if (req->rq_flags & RQF_DONTPREP)
			scsi_mq_uninit_cmd(cmd);
		break;
	}
	return ret;
}

static enum blk_eh_timer_return scsi_timeout(struct request *req,
	bool reserved)
{
	if (reserved)
		return BLK_EH_RESET_TIMER;
	return scsi_times_out(req);
}

static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
				unsigned int hctx_idx, unsigned int numa_node)
{
	struct Scsi_Host *shost = set->driver_data;
	const bool unchecked_isa_dma = shost->unchecked_isa_dma;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
	struct scatterlist *sg;
	int ret = 0;

	if (unchecked_isa_dma)
		cmd->flags |= SCMD_UNCHECKED_ISA_DMA;
	cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma,
						    GFP_KERNEL, numa_node);
	if (!cmd->sense_buffer)
		return -ENOMEM;
	cmd->req.sense = cmd->sense_buffer;

	if (scsi_host_get_prot(shost)) {
		sg = (void *)cmd + sizeof(struct scsi_cmnd) +
			shost->hostt->cmd_size;
		cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost);
	}

	if (shost->hostt->init_cmd_priv) {
		ret = shost->hostt->init_cmd_priv(shost, cmd);
		if (ret < 0)
			scsi_free_sense_buffer(unchecked_isa_dma,
					       cmd->sense_buffer);
	}

	return ret;
}

static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
				 unsigned int hctx_idx)
{
	struct Scsi_Host *shost = set->driver_data;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	if (shost->hostt->exit_cmd_priv)
		shost->hostt->exit_cmd_priv(shost, cmd);
	scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA,
			       cmd->sense_buffer);
}

static int scsi_map_queues(struct blk_mq_tag_set *set)
{
	struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);

	if (shost->hostt->map_queues)
		return shost->hostt->map_queues(shost);
	return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
}

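/*
 * Illustrative sketch (editor's example): a multiqueue-capable PCI HBA
 * driver would typically spread its hardware queues over the CPUs from its
 * ->map_queues() hook; "example" names below are hypothetical:
 *
 *	static int example_map_queues(struct Scsi_Host *shost)
 *	{
 *		struct example_hba *hba = shost_priv(shost);
 *
 *		return blk_mq_pci_map_queues(
 *			&shost->tag_set.map[HCTX_TYPE_DEFAULT],
 *			hba->pdev, 0);
 *	}
 *
 * Drivers without the hook fall back to blk_mq_map_queues() above.
 */
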
void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
{
	struct device *dev = shost->dma_dev;

	/*
	 * this limit is imposed by hardware restrictions
	 */
	blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
					SG_MAX_SEGMENTS));

	if (scsi_host_prot_dma(shost)) {
		shost->sg_prot_tablesize =
			min_not_zero(shost->sg_prot_tablesize,
				     (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
		BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
		blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
	}

	if (dev->dma_mask) {
		shost->max_sectors = min_t(unsigned int, shost->max_sectors,
				dma_max_mapping_size(dev) >> SECTOR_SHIFT);
	}
	blk_queue_max_hw_sectors(q, shost->max_sectors);
	if (shost->unchecked_isa_dma)
		blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
	blk_queue_segment_boundary(q, shost->dma_boundary);
	dma_set_seg_boundary(dev, shost->dma_boundary);

	blk_queue_max_segment_size(q, shost->max_segment_size);
	blk_queue_virt_boundary(q, shost->virt_boundary_mask);
	dma_set_max_seg_size(dev, queue_max_segment_size(q));

	/*
	 * Set a reasonable default alignment: The larger of 32-byte (dword),
	 * which is a common minimum for HBAs, and the minimum DMA alignment,
	 * which is set by the platform.
	 *
	 * Devices that require a bigger alignment can increase it later.
	 */
	blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1);
}
EXPORT_SYMBOL_GPL(__scsi_init_queue);

static const struct blk_mq_ops scsi_mq_ops_no_commit = {
	.get_budget	= scsi_mq_get_budget,
	.put_budget	= scsi_mq_put_budget,
	.queue_rq	= scsi_queue_rq,
	.complete	= scsi_softirq_done,
	.timeout	= scsi_timeout,
#ifdef CONFIG_BLK_DEBUG_FS
	.show_rq	= scsi_show_rq,
#endif
	.init_request	= scsi_mq_init_request,
	.exit_request	= scsi_mq_exit_request,
	.initialize_rq_fn = scsi_initialize_rq,
	.cleanup_rq	= scsi_cleanup_rq,
	.busy		= scsi_mq_lld_busy,
	.map_queues	= scsi_map_queues,
};

static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;

	shost->hostt->commit_rqs(shost, hctx->queue_num);
}

static const struct blk_mq_ops scsi_mq_ops = {
	.get_budget	= scsi_mq_get_budget,
	.put_budget	= scsi_mq_put_budget,
	.queue_rq	= scsi_queue_rq,
	.commit_rqs	= scsi_commit_rqs,
	.complete	= scsi_softirq_done,
	.timeout	= scsi_timeout,
#ifdef CONFIG_BLK_DEBUG_FS
	.show_rq	= scsi_show_rq,
#endif
	.init_request	= scsi_mq_init_request,
	.exit_request	= scsi_mq_exit_request,
	.initialize_rq_fn = scsi_initialize_rq,
	.cleanup_rq	= scsi_cleanup_rq,
	.busy		= scsi_mq_lld_busy,
	.map_queues	= scsi_map_queues,
};

struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
{
	sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set);
	if (IS_ERR(sdev->request_queue))
		return NULL;

	sdev->request_queue->queuedata = sdev;
	__scsi_init_queue(sdev->host, sdev->request_queue);
	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, sdev->request_queue);
	return sdev->request_queue;
}

int scsi_mq_setup_tags(struct Scsi_Host *shost)
{
	unsigned int cmd_size, sgl_size;

	sgl_size = max_t(unsigned int, sizeof(struct scatterlist),
			 scsi_mq_inline_sgl_size(shost));
	cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
	if (scsi_host_get_prot(shost))
		cmd_size += sizeof(struct scsi_data_buffer) +
			sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT;

	memset(&shost->tag_set, 0, sizeof(shost->tag_set));
	if (shost->hostt->commit_rqs)
		shost->tag_set.ops = &scsi_mq_ops;
	else
		shost->tag_set.ops = &scsi_mq_ops_no_commit;
	shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1;
	shost->tag_set.queue_depth = shost->can_queue;
	shost->tag_set.cmd_size = cmd_size;
	shost->tag_set.numa_node = NUMA_NO_NODE;
	shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	shost->tag_set.flags |=
		BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
	shost->tag_set.driver_data = shost;

	return blk_mq_alloc_tag_set(&shost->tag_set);
}

void scsi_mq_destroy_tags(struct Scsi_Host *shost)
{
	blk_mq_free_tag_set(&shost->tag_set);
}

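/*
 * Editor's note: the per-request payload sized above is laid out back to
 * back in the blk-mq PDU, which is what scsi_mq_prep_fn() and
 * scsi_mq_init_request() rely on when they compute the sg pointers:
 *
 *	struct scsi_cmnd | LLD private data (hostt->cmd_size) |
 *	inline data sgl (sgl_size) | struct scsi_data_buffer +
 *	inline protection sgl (only if scsi_host_get_prot(shost))
 *
 * For example, a host with cmd_size = 64 and no protection information
 * adds 64 + sgl_size bytes to every request allocation.
 */
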
/**
 * scsi_device_from_queue - return sdev associated with a request_queue
 * @q: The request queue to return the sdev from
 *
 * Return the sdev associated with a request queue or NULL if the
 * request_queue does not reference a SCSI device.
 */
struct scsi_device *scsi_device_from_queue(struct request_queue *q)
{
	struct scsi_device *sdev = NULL;

	if (q->mq_ops == &scsi_mq_ops_no_commit ||
	    q->mq_ops == &scsi_mq_ops)
		sdev = q->queuedata;
	if (!sdev || !get_device(&sdev->sdev_gendev))
		sdev = NULL;

	return sdev;
}
EXPORT_SYMBOL_GPL(scsi_device_from_queue);

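/*
 * Usage sketch (editor's example): callers such as device-mapper multipath
 * use this to reach the sdev behind a request queue; the reference taken
 * via get_device() must be dropped again:
 *
 *	struct scsi_device *sdev = scsi_device_from_queue(q);
 *
 *	if (sdev) {
 *		...inspect sdev...
 *		put_device(&sdev->sdev_gendev);
 *	}
 */
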
/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);

/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *		commands to be queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 *
 *		This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);

int __init scsi_init_queue(void)
{
	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
					   sizeof(struct scsi_data_buffer),
					   0, 0, NULL);
	if (!scsi_sdb_cache) {
		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
		return -ENOMEM;
	}

	return 0;
}

void scsi_exit_queue(void)
{
	kmem_cache_destroy(scsi_sense_cache);
	kmem_cache_destroy(scsi_sense_isadma_cache);
	kmem_cache_destroy(scsi_sdb_cache);
}

/**
 * scsi_mode_select - issue a mode select
 * @sdev: SCSI device to be queried
 * @pf: Page format bit (1 == standard, 0 == vendor specific)
 * @sp: Save page bit (0 == don't save, 1 == save)
 * @modepage: mode page being requested
 * @buffer: request buffer (may not be smaller than eight bytes)
 * @len: length of request buffer.
 * @timeout: command timeout
 * @retries: number of retries before failing
 * @data: returns a structure abstracting the mode header data
 * @sshdr: place to put sense data (or NULL if no sense to be collected).
 *	must be SCSI_SENSE_BUFFERSIZE big.
 *
 * Returns zero if successful; negative error number or scsi
 * status on error
 *
 */
int
scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
		 unsigned char *buffer, int len, int timeout, int retries,
		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[10];
	unsigned char *real_buffer;
	int ret;

	memset(cmd, 0, sizeof(cmd));
	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);

	if (sdev->use_10_for_ms) {
		if (len > 65535)
			return -EINVAL;
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 8, buffer, len);
		len += 8;
		real_buffer[0] = 0;
		real_buffer[1] = 0;
		real_buffer[2] = data->medium_type;
		real_buffer[3] = data->device_specific;
		real_buffer[4] = data->longlba ? 0x01 : 0;
		real_buffer[5] = 0;
		real_buffer[6] = data->block_descriptor_length >> 8;
		real_buffer[7] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT_10;
		cmd[7] = len >> 8;
		cmd[8] = len;
	} else {
		if (len > 255 || data->block_descriptor_length > 255 ||
		    data->longlba)
			return -EINVAL;

		real_buffer = kmalloc(4 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 4, buffer, len);
		len += 4;
		real_buffer[0] = 0;
		real_buffer[1] = data->medium_type;
		real_buffer[2] = data->device_specific;
		real_buffer[3] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT;
		cmd[4] = len;
	}

	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
			       sshdr, timeout, retries, NULL);
	kfree(real_buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_mode_select);

/**
 * scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary.
 * @sdev: SCSI device to be queried
 * @dbd: set if mode sense will allow block descriptors to be returned
 * @modepage: mode page being requested
 * @buffer: request buffer (may not be smaller than eight bytes)
 * @len: length of request buffer.
 * @timeout: command timeout
 * @retries: number of retries before failing
 * @data: returns a structure abstracting the mode header data
 * @sshdr: place to put sense data (or NULL if no sense to be collected).
 *	must be SCSI_SENSE_BUFFERSIZE big.
 *
 * Returns zero if successful, or the non-zero SCSI result on failure.
 * On success the decoded mode header is stored in @data, including the
 * header offset (either 4 or 8 depending on whether a six or ten byte
 * command was issued) in data->header_length.
 */
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		  unsigned char *buffer, int len, int timeout, int retries,
		  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;
	int result, retry_count = retries;
	struct scsi_sense_hdr my_sshdr;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);

	dbd = sdev->set_dbd_for_ms ? 8 : dbd;
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

 retry:
	use_10_for_ms = sdev->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  sshdr, timeout, retries, NULL);

	/* This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.  MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the code page isn't supported */

	if (use_10_for_ms && !scsi_status_is_good(result) &&
	    driver_byte(result) == DRIVER_SENSE) {
		if (scsi_sense_valid(sshdr)) {
			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
				/*
				 * Invalid command operation code
				 */
				sdev->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if (scsi_status_is_good(result)) {
		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
			     (modepage == 6 || modepage == 8))) {
			/* Initio breakage? */
			header_length = 0;
			data->length = 13;
			data->medium_type = 0;
			data->device_specific = 0;
			data->longlba = 0;
			data->block_descriptor_length = 0;
		} else if (use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
		data->header_length = header_length;
	} else if ((status_byte(result) == CHECK_CONDITION) &&
		   scsi_sense_valid(sshdr) &&
		   sshdr->sense_key == UNIT_ATTENTION && retry_count) {
		retry_count--;
		goto retry;
	}

	return result;
}
EXPORT_SYMBOL(scsi_mode_sense);

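/*
 * Usage sketch (editor's example, modelled loosely on what sd.c does for
 * the caching mode page): read page 0x08, flip the WCE bit in the returned
 * page data and write it back.  "timeout", "res", "buf", "page" and "len"
 * are placeholder names; real callers must honour the header and block
 * descriptor lengths exactly as shown:
 *
 *	struct scsi_mode_data data;
 *	struct scsi_sense_hdr sshdr;
 *	unsigned char buf[192], *page;
 *	int res, len;
 *
 *	res = scsi_mode_sense(sdev, 0x08, 8, buf, sizeof(buf),
 *			      timeout, 3, &data, &sshdr);
 *	if (scsi_status_is_good(res)) {
 *		page = buf + data.header_length +
 *		       data.block_descriptor_length;
 *		len = data.length - data.header_length -
 *		      data.block_descriptor_length;
 *		page[0] &= ~0x80;	// clear PS bit before MODE SELECT
 *		page[2] |= 0x04;	// set WCE in the caching page
 *		scsi_mode_select(sdev, 1, 1, 8, page, len,
 *				 timeout, 3, &data, &sshdr);
 *	}
 */
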
/**
 * scsi_test_unit_ready - test if unit is ready
 * @sdev: scsi device to change the state of.
 * @timeout: command timeout
 * @retries: number of retries before failing
 * @sshdr: output pointer for decoded sense information.
 *
 * Returns zero if successful or an error if TUR failed.  For
 * removable media, UNIT_ATTENTION sets ->changed flag.
 **/
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
		     struct scsi_sense_hdr *sshdr)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	int result;

	/* try to eat the UNIT_ATTENTION if there are enough retries */
	do {
		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
					  timeout, 1, NULL);
		if (sdev->removable && scsi_sense_valid(sshdr) &&
		    sshdr->sense_key == UNIT_ATTENTION)
			sdev->changed = 1;
	} while (scsi_sense_valid(sshdr) &&
		 sshdr->sense_key == UNIT_ATTENTION && --retries);

	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);

/**
 * scsi_device_set_state - Take the given device through the device state model.
 * @sdev: scsi device to change the state of.
 * @state: state to change to.
 *
 * Returns zero if successful or an error if the requested
 * transition is illegal.
 */
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SDEV_CREATED:
		switch (oldstate) {
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_RUNNING:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_QUIESCE:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_OFFLINE:
	case SDEV_TRANSPORT_OFFLINE:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_BLOCK:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_CREATED_BLOCK:
		case SDEV_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CREATED_BLOCK:
		switch (oldstate) {
		case SDEV_CREATED:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CANCEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_DEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
		case SDEV_CANCEL:
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	}
	sdev->offline_already = false;
	sdev->sdev_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				sdev_printk(KERN_ERR, sdev,
					    "Illegal state transition %s->%s",
					    scsi_device_state_name(oldstate),
					    scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);

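/*
 * Usage sketch (editor's example): a caller waiting for a drive to spin up
 * might poll with scsi_test_unit_ready(), which already eats UNIT ATTENTION
 * conditions internally:
 *
 *	struct scsi_sense_hdr sshdr;
 *	int res = scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr);
 *
 *	if (res == 0)
 *		;	// device is ready
 *	else if (scsi_sense_valid(&sshdr) && sshdr.sense_key == NOT_READY)
 *		;	// still spinning up, try again later
 */
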
/**
 * scsi_evt_emit - emit a single SCSI device uevent
 * @sdev: associated SCSI device
 * @evt: event to emit
 *
 * Send a single uevent (scsi_event) to the associated scsi_device.
 */
static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
{
	int idx = 0;
	char *envp[3];

	switch (evt->evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
		break;
	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
		scsi_rescan_device(&sdev->sdev_gendev);
		envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
		envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED";
		break;
	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED";
		break;
	case SDEV_EVT_LUN_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED";
		break;
	case SDEV_EVT_POWER_ON_RESET_OCCURRED:
		envp[idx++] = "SDEV_UA=POWER_ON_RESET_OCCURRED";
		break;
	default:
		/* do nothing */
		break;
	}

	envp[idx++] = NULL;

	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
}

/**
 * scsi_evt_thread - send a uevent for each scsi event
 * @work: work struct for scsi_device
 *
 * Dispatch queued events to their associated scsi_device kobjects
 * as uevents.
 */
void scsi_evt_thread(struct work_struct *work)
{
	struct scsi_device *sdev;
	enum scsi_device_event evt_type;
	LIST_HEAD(event_list);

	sdev = container_of(work, struct scsi_device, event_work);

	for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++)
		if (test_and_clear_bit(evt_type, sdev->pending_events))
			sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL);

	while (1) {
		struct scsi_event *evt;
		struct list_head *this, *tmp;
		unsigned long flags;

		spin_lock_irqsave(&sdev->list_lock, flags);
		list_splice_init(&sdev->event_list, &event_list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);

		if (list_empty(&event_list))
			break;

		list_for_each_safe(this, tmp, &event_list) {
			evt = list_entry(this, struct scsi_event, node);
			list_del(&evt->node);
			scsi_evt_emit(sdev, evt);
			kfree(evt);
		}
	}
}

/**
 * sdev_evt_send - send asserted event to uevent thread
 * @sdev: scsi_device event occurred on
 * @evt: event to send
 *
 * Assert scsi device event asynchronously.
 */
void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
{
	unsigned long flags;

#if 0
	/* FIXME: currently this check eliminates all media change events
	 * for polled devices.  Need to update to discriminate between AN
	 * and polled events */
	if (!test_bit(evt->evt_type, sdev->supported_events)) {
		kfree(evt);
		return;
	}
#endif

	spin_lock_irqsave(&sdev->list_lock, flags);
	list_add_tail(&evt->node, &sdev->event_list);
	schedule_work(&sdev->event_work);
	spin_unlock_irqrestore(&sdev->list_lock, flags);
}
EXPORT_SYMBOL_GPL(sdev_evt_send);

/**
 * sdev_evt_alloc - allocate a new scsi event
 * @evt_type: type of event to allocate
 * @gfpflags: GFP flags for allocation
 *
 * Allocates and returns a new scsi_event.
 */
struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
				  gfp_t gfpflags)
{
	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
	if (!evt)
		return NULL;

	evt->evt_type = evt_type;
	INIT_LIST_HEAD(&evt->node);

	/* evt_type-specific initialization, if any */
	switch (evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
	case SDEV_EVT_LUN_CHANGE_REPORTED:
	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
	case SDEV_EVT_POWER_ON_RESET_OCCURRED:
	default:
		/* do nothing */
		break;
	}

	return evt;
}
EXPORT_SYMBOL_GPL(sdev_evt_alloc);

/**
 * sdev_evt_send_simple - send asserted event to uevent thread
 * @sdev: scsi_device event occurred on
 * @evt_type: type of event to send
 * @gfpflags: GFP flags for allocation
 *
 * Assert scsi device event asynchronously, given an event type.
 */
void sdev_evt_send_simple(struct scsi_device *sdev,
			  enum scsi_device_event evt_type, gfp_t gfpflags)
{
	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
	if (!evt) {
		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
			    evt_type);
		return;
	}

	sdev_evt_send(sdev, evt);
}
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);

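/*
 * Usage sketch (editor's example): a driver that notices new media can
 * assert the corresponding event from almost any context, since delivery
 * is deferred to the event work queue:
 *
 *	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
 *
 * Userspace then sees a KOBJ_CHANGE uevent with SDEV_MEDIA_CHANGE=1 in the
 * environment, as assembled by scsi_evt_emit() above.
 */
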
/**
 * scsi_device_quiesce - Block user issued commands.
 * @sdev: scsi device to quiesce.
 *
 * This works by trying to transition to the SDEV_QUIESCE state
 * (which must be a legal transition).  When the device is in this
 * state, only special requests will be accepted, all others will
 * be deferred.  Since special requests may also be requeued requests,
 * a successful return doesn't guarantee the device will be
 * totally quiescent.
 *
 * Must be called with user context, may sleep.
 *
 * Returns zero if successful or an error if not.
 */
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	int err;

	/*
	 * It is allowed to call scsi_device_quiesce() multiple times from
	 * the same context but concurrent scsi_device_quiesce() calls are
	 * not allowed.
	 */
	WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current);

	if (sdev->quiesced_by == current)
		return 0;

	blk_set_pm_only(q);

	blk_mq_freeze_queue(q);
	/*
	 * Ensure that the effect of blk_set_pm_only() will be visible
	 * for percpu_ref_tryget() callers that occur after the queue
	 * unfreeze even if the queue was already frozen before this function
	 * was called. See also https://lwn.net/Articles/573497/.
	 */
	synchronize_rcu();
	blk_mq_unfreeze_queue(q);

	mutex_lock(&sdev->state_mutex);
	err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err == 0)
		sdev->quiesced_by = current;
	else
		blk_clear_pm_only(q);
	mutex_unlock(&sdev->state_mutex);

	return err;
}
EXPORT_SYMBOL(scsi_device_quiesce);

/**
 * scsi_device_resume - Restart user issued commands to a quiesced device.
 * @sdev: scsi device to resume.
 *
 * Moves the device from quiesced back to running and restarts the
 * queues.
 *
 * Must be called with user context, may sleep.
 */
void scsi_device_resume(struct scsi_device *sdev)
{
	/* check if the device state was mutated prior to resume, and if
	 * so assume the state is being managed elsewhere (for example
	 * device deleted during suspend)
	 */
	mutex_lock(&sdev->state_mutex);
	if (sdev->quiesced_by) {
		sdev->quiesced_by = NULL;
		blk_clear_pm_only(sdev->request_queue);
	}
	if (sdev->sdev_state == SDEV_QUIESCE)
		scsi_device_set_state(sdev, SDEV_RUNNING);
	mutex_unlock(&sdev->state_mutex);
}
EXPORT_SYMBOL(scsi_device_resume);

static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);

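/*
 * Usage sketch (editor's example): power management is the classic caller;
 * a quiesce/resume pair brackets the period in which only PM requests may
 * reach the device:
 *
 *	if (scsi_device_quiesce(sdev) == 0) {
 *		...issue power-management requests, e.g. START STOP UNIT...
 *		scsi_device_resume(sdev);
 *	}
 *
 * Both calls must come from the same context, per the quiesced_by rule
 * enforced in scsi_device_quiesce().
 */
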
/**
 * scsi_internal_device_block_nowait - try to transition to the SDEV_BLOCK state
 * @sdev: device to block
 *
 * Pause SCSI command processing on the specified device. Does not sleep.
 *
 * Returns zero if successful or a negative error code upon failure.
 *
 * Notes:
 * This routine transitions the device to the SDEV_BLOCK state (which must be
 * a legal transition). When the device is in this state, command processing
 * is paused until the device leaves the SDEV_BLOCK state. See also
 * scsi_internal_device_unblock_nowait().
 */
int scsi_internal_device_block_nowait(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	int err = 0;

	err = scsi_device_set_state(sdev, SDEV_BLOCK);
	if (err) {
		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);

		if (err)
			return err;
	}

	/*
	 * The device has transitioned to SDEV_BLOCK.  Stop the
	 * block layer from calling the midlayer with this device's
	 * request queue.
	 */
	blk_mq_quiesce_queue_nowait(q);
	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);

/**
 * scsi_internal_device_block - try to transition to the SDEV_BLOCK state
 * @sdev: device to block
 *
 * Pause SCSI command processing on the specified device and wait until all
 * ongoing scsi_request_fn() / scsi_queue_rq() calls have finished. May sleep.
 *
 * Returns zero if successful or a negative error code upon failure.
 *
 * Note:
 * This routine transitions the device to the SDEV_BLOCK state (which must be
 * a legal transition). When the device is in this state, command processing
 * is paused until the device leaves the SDEV_BLOCK state. See also
 * scsi_internal_device_unblock().
 */
static int scsi_internal_device_block(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	int err;

	mutex_lock(&sdev->state_mutex);
	err = scsi_internal_device_block_nowait(sdev);
	if (err == 0)
		blk_mq_quiesce_queue(q);
	mutex_unlock(&sdev->state_mutex);

	return err;
}

void scsi_start_queue(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;

	blk_mq_unquiesce_queue(q);
}

/**
 * scsi_internal_device_unblock_nowait - resume a device after a block request
 * @sdev: device to resume
 * @new_state: state to set the device to after unblocking
 *
 * Restart the device queue for a previously suspended SCSI device. Does not
 * sleep.
 *
 * Returns zero if successful or a negative error code upon failure.
 *
 * Notes:
 * This routine transitions the device to the SDEV_RUNNING state or to one of
 * the offline states (which must be a legal transition) allowing the midlayer
 * to goose the queue for this device.
 */
int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
					enum scsi_device_state new_state)
{
	switch (new_state) {
	case SDEV_RUNNING:
	case SDEV_TRANSPORT_OFFLINE:
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Try to transition the scsi device to SDEV_RUNNING or one of the
	 * offlined states and goose the device queue if successful.
	 */
	switch (sdev->sdev_state) {
	case SDEV_BLOCK:
	case SDEV_TRANSPORT_OFFLINE:
		sdev->sdev_state = new_state;
		break;
	case SDEV_CREATED_BLOCK:
		if (new_state == SDEV_TRANSPORT_OFFLINE ||
		    new_state == SDEV_OFFLINE)
			sdev->sdev_state = new_state;
		else
			sdev->sdev_state = SDEV_CREATED;
		break;
	case SDEV_CANCEL:
	case SDEV_OFFLINE:
		break;
	default:
		return -EINVAL;
	}
	scsi_start_queue(sdev);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock_nowait);

/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev: device to resume
 * @new_state: state to set the device to after unblocking
 *
 * Restart the device queue for a previously suspended SCSI device. May sleep.
 *
 * Returns zero if successful or a negative error code upon failure.
 *
 * Notes:
 * This routine transitions the device to the SDEV_RUNNING state or to one of
 * the offline states (which must be a legal transition) allowing the midlayer
 * to goose the queue for this device.
 */
static int scsi_internal_device_unblock(struct scsi_device *sdev,
					enum scsi_device_state new_state)
{
	int ret;

	mutex_lock(&sdev->state_mutex);
	ret = scsi_internal_device_unblock_nowait(sdev, new_state);
	mutex_unlock(&sdev->state_mutex);

	return ret;
}

static void
device_block(struct scsi_device *sdev, void *data)
{
	int ret;

	ret = scsi_internal_device_block(sdev);

	WARN_ONCE(ret, "scsi_internal_device_block(%s) failed: ret = %d\n",
		  dev_name(&sdev->sdev_gendev), ret);
}

static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	return 0;
}

void
scsi_target_block(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	else
		device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);

static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
}

static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), data,
					device_unblock);
	return 0;
}

void
scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), &new_state,
					device_unblock);
	else
		device_for_each_child(dev, &new_state, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);

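/*
 * Usage sketch (editor's example): Fibre Channel transport code blocks a
 * remote port while it is temporarily gone and unblocks it when it returns,
 * choosing the new device state based on whether the port came back:
 *
 *	scsi_target_block(&rport->dev);
 *	...wait for the port to reappear or for dev_loss_tmo to expire...
 *	scsi_target_unblock(&rport->dev, port_returned ?
 *			    SDEV_RUNNING : SDEV_TRANSPORT_OFFLINE);
 *
 * rport and port_returned are placeholders for the transport's own state.
 */
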
int
scsi_host_block(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;
	int ret = 0;

	shost_for_each_device(sdev, shost) {
		ret = scsi_internal_device_block(sdev);
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_host_block);

int
scsi_host_unblock(struct Scsi_Host *shost, int new_state)
{
	struct scsi_device *sdev;
	int ret = 0;

	shost_for_each_device(sdev, shost) {
		ret = scsi_internal_device_unblock(sdev, new_state);
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_host_unblock);

/**
 * scsi_kmap_atomic_sg - find and atomically map an sg-element
 * @sgl: scatter-gather list
 * @sg_count: number of segments in sg
 * @offset: offset in bytes into sg, on return offset into the mapped area
 * @len: bytes to map, on return number of bytes mapped
 *
 * Returns virtual address of the start of the mapped page
 */
void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
			  size_t *offset, size_t *len)
{
	int i;
	size_t sg_len = 0, len_complete = 0;
	struct scatterlist *sg;
	struct page *page;

	WARN_ON(!irqs_disabled());

	for_each_sg(sgl, sg, sg_count, i) {
		len_complete = sg_len; /* Complete sg-entries */
		sg_len += sg->length;
		if (sg_len > *offset)
			break;
	}

	if (unlikely(i == sg_count)) {
		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
			"elements %d\n",
		       __func__, sg_len, *offset, sg_count);
		WARN_ON(1);
		return NULL;
	}

	/* Offset starting from the beginning of first page in this sg-entry */
	*offset = *offset - len_complete + sg->offset;

	/* Assumption: contiguous pages can be accessed as "page + i" */
	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
	*offset &= ~PAGE_MASK;

	/* Bytes in this sg-entry from *offset to the end of the page */
	sg_len = PAGE_SIZE - *offset;
	if (*len > sg_len)
		*len = sg_len;

	return kmap_atomic(page);
}
EXPORT_SYMBOL(scsi_kmap_atomic_sg);

/**
 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
 * @virt: virtual address to be unmapped
 */
void scsi_kunmap_atomic_sg(void *virt)
{
	kunmap_atomic(virt);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);

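/*
 * Usage sketch (editor's example): copy the first bytes out of a command's
 * scatterlist from atomic context (interrupts disabled, per the WARN_ON
 * above).  On return *offset has been adjusted to the offset within the
 * mapped page and *len to how much of the page is usable; local_buf is a
 * placeholder destination:
 *
 *	size_t offset = 0, len = 8;	// want the first 8 bytes
 *	void *vaddr = scsi_kmap_atomic_sg(scsi_sglist(cmd),
 *					  scsi_sg_count(cmd), &offset, &len);
 *
 *	if (vaddr) {
 *		memcpy(local_buf, vaddr + offset, len);
 *		scsi_kunmap_atomic_sg(vaddr);
 *	}
 */
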
void sdev_disable_disk_events(struct scsi_device *sdev)
{
	atomic_inc(&sdev->disk_events_disable_depth);
}
EXPORT_SYMBOL(sdev_disable_disk_events);

void sdev_enable_disk_events(struct scsi_device *sdev)
{
	if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0))
		return;
	atomic_dec(&sdev->disk_events_disable_depth);
}
EXPORT_SYMBOL(sdev_enable_disk_events);

/**
 * scsi_vpd_lun_id - return a unique device identification
 * @sdev: SCSI device
 * @id: buffer for the identification
 * @id_len: length of the buffer
 *
 * Copies a unique device identification into @id based
 * on the information in the VPD page 0x83 of the device.
 * The string will be formatted as a SCSI name string.
 *
 * Returns the length of the identification or error on failure.
 * If the identifier is longer than the supplied buffer the actual
 * identifier length is returned and the buffer is not zero-padded.
 */
int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
{
	u8 cur_id_type = 0xff;
	u8 cur_id_size = 0;
	const unsigned char *d, *cur_id_str;
	const struct scsi_vpd *vpd_pg83;
	int id_size = -EINVAL;

	rcu_read_lock();
	vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
	if (!vpd_pg83) {
		rcu_read_unlock();
		return -ENXIO;
	}

	/*
	 * Look for the correct descriptor.
	 * Order of preference for lun descriptor:
	 * - SCSI name string
	 * - NAA IEEE Registered Extended
	 * - EUI-64 based 16-byte
	 * - EUI-64 based 12-byte
	 * - NAA IEEE Registered
	 * - NAA IEEE Extended
	 * - T10 Vendor ID
	 * as longer descriptors reduce the likelihood
	 * of identification clashes.
	 */

	/* The id string must be at least 20 bytes + terminating NULL byte */
	if (id_len < 21) {
		rcu_read_unlock();
		return -EINVAL;
	}

	memset(id, 0, id_len);
	d = vpd_pg83->data + 4;
	while (d < vpd_pg83->data + vpd_pg83->len) {
		/* Skip designators not referring to the LUN */
		if ((d[1] & 0x30) != 0x00)
			goto next_desig;

		switch (d[1] & 0xf) {
		case 0x1:
			/* T10 Vendor ID */
			if (cur_id_size > d[3])
				break;
			/* Prefer anything */
			if (cur_id_type > 0x01 && cur_id_type != 0xff)
				break;
			cur_id_size = d[3];
			if (cur_id_size + 4 > id_len)
				cur_id_size = id_len - 4;
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			id_size = snprintf(id, id_len, "t10.%*pE",
					   cur_id_size, cur_id_str);
			break;
		case 0x2:
			/* EUI-64 */
			if (cur_id_size > d[3])
				break;
			/* Prefer NAA IEEE Registered Extended */
			if (cur_id_type == 0x3 &&
			    cur_id_size == d[3])
				break;
			cur_id_size = d[3];
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			switch (cur_id_size) {
			case 8:
				id_size = snprintf(id, id_len,
						   "eui.%8phN",
						   cur_id_str);
				break;
			case 12:
				id_size = snprintf(id, id_len,
						   "eui.%12phN",
						   cur_id_str);
				break;
			case 16:
				id_size = snprintf(id, id_len,
						   "eui.%16phN",
						   cur_id_str);
				break;
			default:
				cur_id_size = 0;
				break;
			}
			break;
		case 0x3:
			/* NAA */
			if (cur_id_size > d[3])
				break;
			cur_id_size = d[3];
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			switch (cur_id_size) {
			case 8:
				id_size = snprintf(id, id_len,
						   "naa.%8phN",
						   cur_id_str);
				break;
			case 16:
				id_size = snprintf(id, id_len,
						   "naa.%16phN",
						   cur_id_str);
				break;
			default:
				cur_id_size = 0;
				break;
			}
			break;
		case 0x8:
			/* SCSI name string */
			if (cur_id_size + 4 > d[3])
				break;
			/* Prefer others for truncated descriptor */
			if (cur_id_size && d[3] > id_len)
				break;
			cur_id_size = id_size = d[3];
			cur_id_str = d + 4;
			cur_id_type = d[1] & 0xf;
			if (cur_id_size >= id_len)
				cur_id_size = id_len - 1;
			memcpy(id, cur_id_str, cur_id_size);
			/* Decrease priority for truncated descriptor */
			if (cur_id_size != id_size)
				cur_id_size = 6;
			break;
		default:
			break;
		}
next_desig:
		d += d[3] + 4;
	}
	rcu_read_unlock();

	return id_size;
}
EXPORT_SYMBOL(scsi_vpd_lun_id);

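/*
 * Usage sketch (editor's example): build a persistent name for a LUN, as
 * the device handlers do.  21 bytes is the documented minimum buffer; real
 * callers usually size it more generously:
 *
 *	char wwid[64];
 *	int len = scsi_vpd_lun_id(sdev, wwid, sizeof(wwid));
 *
 *	if (len > 0)
 *		;	// wwid now holds e.g. "naa.5000c500a1b2c3d4"
 *	else
 *		;	// no usable designator (-EINVAL/-ENXIO)
 */
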
/**
 * scsi_vpd_tpg_id - return a target port group identifier
 * @sdev: SCSI device
 * @rel_id: pointer to return the relative target port ID, if found
 *
 * Returns the Target Port Group identifier from the information
 * from VPD page 0x83 of the device.
 *
 * Returns the identifier or error on failure.
 */
int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
{
	const unsigned char *d;
	const struct scsi_vpd *vpd_pg83;
	int group_id = -EAGAIN, rel_port = -1;

	rcu_read_lock();
	vpd_pg83 = rcu_dereference(sdev->vpd_pg83);
	if (!vpd_pg83) {
		rcu_read_unlock();
		return -ENXIO;
	}

	d = vpd_pg83->data + 4;
	while (d < vpd_pg83->data + vpd_pg83->len) {
		switch (d[1] & 0xf) {
		case 0x4:
			/* Relative target port */
			rel_port = get_unaligned_be16(&d[6]);
			break;
		case 0x5:
			/* Target port group */
			group_id = get_unaligned_be16(&d[6]);
			break;
		default:
			break;
		}
		d += d[3] + 4;
	}
	rcu_read_unlock();

	if (group_id >= 0 && rel_id && rel_port != -1)
		*rel_id = rel_port;

	return group_id;
}
EXPORT_SYMBOL(scsi_vpd_tpg_id);