Commit 210588c0344e7daf1351aa3c68b0cd9141ab80dc
Committed by
Greg Kroah-Hartman
1 parent
7e920411dd
Exists in
smarct4x-processor-sdk-linux-03.00.00.04
and in
2 other branches
scsi: Add intermediate STARGET_REMOVE state to scsi_target_state
commit f05795d3d771f30a7bdc3a138bf714b06d42aa95 upstream. Add intermediate STARGET_REMOVE state to scsi_target_state to avoid running into the BUG_ON() in scsi_target_reap(). The STARGET_REMOVE state is only valid in the path from scsi_remove_target() to scsi_target_destroy() indicating this target is going to be removed. This re-fixes the problem introduced in commits bc3f02a795d3 ("[SCSI] scsi_remove_target: fix softlockup regression on hot remove") and 40998193560d ("scsi: restart list search after unlock in scsi_remove_target") in a more comprehensive way. [mkp: Included James' fix for scsi_target_destroy()] Signed-off-by: Johannes Thumshirn <jthumshirn@suse.de> Fixes: 40998193560dab6c3ce8d25f4fa58a23e252ef38 Reported-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com> Tested-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com> Reviewed-by: Ewan D. Milne <emilne@redhat.com> Reviewed-by: Hannes Reinecke <hare@suse.com> Reviewed-by: James Bottomley <jejb@linux.vnet.ibm.com> Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Showing 3 changed files with 4 additions and 0 deletions Inline Diff
drivers/scsi/scsi_scan.c
1 | /* | 1 | /* |
2 | * scsi_scan.c | 2 | * scsi_scan.c |
3 | * | 3 | * |
4 | * Copyright (C) 2000 Eric Youngdale, | 4 | * Copyright (C) 2000 Eric Youngdale, |
5 | * Copyright (C) 2002 Patrick Mansfield | 5 | * Copyright (C) 2002 Patrick Mansfield |
6 | * | 6 | * |
7 | * The general scanning/probing algorithm is as follows, exceptions are | 7 | * The general scanning/probing algorithm is as follows, exceptions are |
8 | * made to it depending on device specific flags, compilation options, and | 8 | * made to it depending on device specific flags, compilation options, and |
9 | * global variable (boot or module load time) settings. | 9 | * global variable (boot or module load time) settings. |
10 | * | 10 | * |
11 | * A specific LUN is scanned via an INQUIRY command; if the LUN has a | 11 | * A specific LUN is scanned via an INQUIRY command; if the LUN has a |
12 | * device attached, a scsi_device is allocated and setup for it. | 12 | * device attached, a scsi_device is allocated and setup for it. |
13 | * | 13 | * |
14 | * For every id of every channel on the given host: | 14 | * For every id of every channel on the given host: |
15 | * | 15 | * |
16 | * Scan LUN 0; if the target responds to LUN 0 (even if there is no | 16 | * Scan LUN 0; if the target responds to LUN 0 (even if there is no |
17 | * device or storage attached to LUN 0): | 17 | * device or storage attached to LUN 0): |
18 | * | 18 | * |
19 | * If LUN 0 has a device attached, allocate and setup a | 19 | * If LUN 0 has a device attached, allocate and setup a |
20 | * scsi_device for it. | 20 | * scsi_device for it. |
21 | * | 21 | * |
22 | * If target is SCSI-3 or up, issue a REPORT LUN, and scan | 22 | * If target is SCSI-3 or up, issue a REPORT LUN, and scan |
23 | * all of the LUNs returned by the REPORT LUN; else, | 23 | * all of the LUNs returned by the REPORT LUN; else, |
24 | * sequentially scan LUNs up until some maximum is reached, | 24 | * sequentially scan LUNs up until some maximum is reached, |
25 | * or a LUN is seen that cannot have a device attached to it. | 25 | * or a LUN is seen that cannot have a device attached to it. |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/moduleparam.h> | 29 | #include <linux/moduleparam.h> |
30 | #include <linux/init.h> | 30 | #include <linux/init.h> |
31 | #include <linux/blkdev.h> | 31 | #include <linux/blkdev.h> |
32 | #include <linux/delay.h> | 32 | #include <linux/delay.h> |
33 | #include <linux/kthread.h> | 33 | #include <linux/kthread.h> |
34 | #include <linux/spinlock.h> | 34 | #include <linux/spinlock.h> |
35 | #include <linux/async.h> | 35 | #include <linux/async.h> |
36 | #include <linux/slab.h> | 36 | #include <linux/slab.h> |
37 | #include <asm/unaligned.h> | 37 | #include <asm/unaligned.h> |
38 | 38 | ||
39 | #include <scsi/scsi.h> | 39 | #include <scsi/scsi.h> |
40 | #include <scsi/scsi_cmnd.h> | 40 | #include <scsi/scsi_cmnd.h> |
41 | #include <scsi/scsi_device.h> | 41 | #include <scsi/scsi_device.h> |
42 | #include <scsi/scsi_driver.h> | 42 | #include <scsi/scsi_driver.h> |
43 | #include <scsi/scsi_devinfo.h> | 43 | #include <scsi/scsi_devinfo.h> |
44 | #include <scsi/scsi_host.h> | 44 | #include <scsi/scsi_host.h> |
45 | #include <scsi/scsi_transport.h> | 45 | #include <scsi/scsi_transport.h> |
46 | #include <scsi/scsi_eh.h> | 46 | #include <scsi/scsi_eh.h> |
47 | 47 | ||
48 | #include "scsi_priv.h" | 48 | #include "scsi_priv.h" |
49 | #include "scsi_logging.h" | 49 | #include "scsi_logging.h" |
50 | 50 | ||
51 | #define ALLOC_FAILURE_MSG KERN_ERR "%s: Allocation failure during" \ | 51 | #define ALLOC_FAILURE_MSG KERN_ERR "%s: Allocation failure during" \ |
52 | " SCSI scanning, some SCSI devices might not be configured\n" | 52 | " SCSI scanning, some SCSI devices might not be configured\n" |
53 | 53 | ||
54 | /* | 54 | /* |
55 | * Default timeout | 55 | * Default timeout |
56 | */ | 56 | */ |
57 | #define SCSI_TIMEOUT (2*HZ) | 57 | #define SCSI_TIMEOUT (2*HZ) |
58 | #define SCSI_REPORT_LUNS_TIMEOUT (30*HZ) | 58 | #define SCSI_REPORT_LUNS_TIMEOUT (30*HZ) |
59 | 59 | ||
60 | /* | 60 | /* |
61 | * Prefix values for the SCSI id's (stored in sysfs name field) | 61 | * Prefix values for the SCSI id's (stored in sysfs name field) |
62 | */ | 62 | */ |
63 | #define SCSI_UID_SER_NUM 'S' | 63 | #define SCSI_UID_SER_NUM 'S' |
64 | #define SCSI_UID_UNKNOWN 'Z' | 64 | #define SCSI_UID_UNKNOWN 'Z' |
65 | 65 | ||
66 | /* | 66 | /* |
67 | * Return values of some of the scanning functions. | 67 | * Return values of some of the scanning functions. |
68 | * | 68 | * |
69 | * SCSI_SCAN_NO_RESPONSE: no valid response received from the target, this | 69 | * SCSI_SCAN_NO_RESPONSE: no valid response received from the target, this |
70 | * includes allocation or general failures preventing IO from being sent. | 70 | * includes allocation or general failures preventing IO from being sent. |
71 | * | 71 | * |
72 | * SCSI_SCAN_TARGET_PRESENT: target responded, but no device is available | 72 | * SCSI_SCAN_TARGET_PRESENT: target responded, but no device is available |
73 | * on the given LUN. | 73 | * on the given LUN. |
74 | * | 74 | * |
75 | * SCSI_SCAN_LUN_PRESENT: target responded, and a device is available on a | 75 | * SCSI_SCAN_LUN_PRESENT: target responded, and a device is available on a |
76 | * given LUN. | 76 | * given LUN. |
77 | */ | 77 | */ |
78 | #define SCSI_SCAN_NO_RESPONSE 0 | 78 | #define SCSI_SCAN_NO_RESPONSE 0 |
79 | #define SCSI_SCAN_TARGET_PRESENT 1 | 79 | #define SCSI_SCAN_TARGET_PRESENT 1 |
80 | #define SCSI_SCAN_LUN_PRESENT 2 | 80 | #define SCSI_SCAN_LUN_PRESENT 2 |
81 | 81 | ||
82 | static const char *scsi_null_device_strs = "nullnullnullnull"; | 82 | static const char *scsi_null_device_strs = "nullnullnullnull"; |
83 | 83 | ||
84 | #define MAX_SCSI_LUNS 512 | 84 | #define MAX_SCSI_LUNS 512 |
85 | 85 | ||
86 | static u64 max_scsi_luns = MAX_SCSI_LUNS; | 86 | static u64 max_scsi_luns = MAX_SCSI_LUNS; |
87 | 87 | ||
88 | module_param_named(max_luns, max_scsi_luns, ullong, S_IRUGO|S_IWUSR); | 88 | module_param_named(max_luns, max_scsi_luns, ullong, S_IRUGO|S_IWUSR); |
89 | MODULE_PARM_DESC(max_luns, | 89 | MODULE_PARM_DESC(max_luns, |
90 | "last scsi LUN (should be between 1 and 2^64-1)"); | 90 | "last scsi LUN (should be between 1 and 2^64-1)"); |
91 | 91 | ||
92 | #ifdef CONFIG_SCSI_SCAN_ASYNC | 92 | #ifdef CONFIG_SCSI_SCAN_ASYNC |
93 | #define SCSI_SCAN_TYPE_DEFAULT "async" | 93 | #define SCSI_SCAN_TYPE_DEFAULT "async" |
94 | #else | 94 | #else |
95 | #define SCSI_SCAN_TYPE_DEFAULT "sync" | 95 | #define SCSI_SCAN_TYPE_DEFAULT "sync" |
96 | #endif | 96 | #endif |
97 | 97 | ||
98 | char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT; | 98 | char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT; |
99 | 99 | ||
100 | module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO); | 100 | module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO); |
101 | MODULE_PARM_DESC(scan, "sync, async or none"); | 101 | MODULE_PARM_DESC(scan, "sync, async or none"); |
102 | 102 | ||
103 | static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18; | 103 | static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18; |
104 | 104 | ||
105 | module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR); | 105 | module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR); |
106 | MODULE_PARM_DESC(inq_timeout, | 106 | MODULE_PARM_DESC(inq_timeout, |
107 | "Timeout (in seconds) waiting for devices to answer INQUIRY." | 107 | "Timeout (in seconds) waiting for devices to answer INQUIRY." |
108 | " Default is 20. Some devices may need more; most need less."); | 108 | " Default is 20. Some devices may need more; most need less."); |
109 | 109 | ||
110 | /* This lock protects only this list */ | 110 | /* This lock protects only this list */ |
111 | static DEFINE_SPINLOCK(async_scan_lock); | 111 | static DEFINE_SPINLOCK(async_scan_lock); |
112 | static LIST_HEAD(scanning_hosts); | 112 | static LIST_HEAD(scanning_hosts); |
113 | 113 | ||
114 | struct async_scan_data { | 114 | struct async_scan_data { |
115 | struct list_head list; | 115 | struct list_head list; |
116 | struct Scsi_Host *shost; | 116 | struct Scsi_Host *shost; |
117 | struct completion prev_finished; | 117 | struct completion prev_finished; |
118 | }; | 118 | }; |
119 | 119 | ||
120 | /** | 120 | /** |
121 | * scsi_complete_async_scans - Wait for asynchronous scans to complete | 121 | * scsi_complete_async_scans - Wait for asynchronous scans to complete |
122 | * | 122 | * |
123 | * When this function returns, any host which started scanning before | 123 | * When this function returns, any host which started scanning before |
124 | * this function was called will have finished its scan. Hosts which | 124 | * this function was called will have finished its scan. Hosts which |
125 | * started scanning after this function was called may or may not have | 125 | * started scanning after this function was called may or may not have |
126 | * finished. | 126 | * finished. |
127 | */ | 127 | */ |
128 | int scsi_complete_async_scans(void) | 128 | int scsi_complete_async_scans(void) |
129 | { | 129 | { |
130 | struct async_scan_data *data; | 130 | struct async_scan_data *data; |
131 | 131 | ||
132 | do { | 132 | do { |
133 | if (list_empty(&scanning_hosts)) | 133 | if (list_empty(&scanning_hosts)) |
134 | return 0; | 134 | return 0; |
135 | /* If we can't get memory immediately, that's OK. Just | 135 | /* If we can't get memory immediately, that's OK. Just |
136 | * sleep a little. Even if we never get memory, the async | 136 | * sleep a little. Even if we never get memory, the async |
137 | * scans will finish eventually. | 137 | * scans will finish eventually. |
138 | */ | 138 | */ |
139 | data = kmalloc(sizeof(*data), GFP_KERNEL); | 139 | data = kmalloc(sizeof(*data), GFP_KERNEL); |
140 | if (!data) | 140 | if (!data) |
141 | msleep(1); | 141 | msleep(1); |
142 | } while (!data); | 142 | } while (!data); |
143 | 143 | ||
144 | data->shost = NULL; | 144 | data->shost = NULL; |
145 | init_completion(&data->prev_finished); | 145 | init_completion(&data->prev_finished); |
146 | 146 | ||
147 | spin_lock(&async_scan_lock); | 147 | spin_lock(&async_scan_lock); |
148 | /* Check that there's still somebody else on the list */ | 148 | /* Check that there's still somebody else on the list */ |
149 | if (list_empty(&scanning_hosts)) | 149 | if (list_empty(&scanning_hosts)) |
150 | goto done; | 150 | goto done; |
151 | list_add_tail(&data->list, &scanning_hosts); | 151 | list_add_tail(&data->list, &scanning_hosts); |
152 | spin_unlock(&async_scan_lock); | 152 | spin_unlock(&async_scan_lock); |
153 | 153 | ||
154 | printk(KERN_INFO "scsi: waiting for bus probes to complete ...\n"); | 154 | printk(KERN_INFO "scsi: waiting for bus probes to complete ...\n"); |
155 | wait_for_completion(&data->prev_finished); | 155 | wait_for_completion(&data->prev_finished); |
156 | 156 | ||
157 | spin_lock(&async_scan_lock); | 157 | spin_lock(&async_scan_lock); |
158 | list_del(&data->list); | 158 | list_del(&data->list); |
159 | if (!list_empty(&scanning_hosts)) { | 159 | if (!list_empty(&scanning_hosts)) { |
160 | struct async_scan_data *next = list_entry(scanning_hosts.next, | 160 | struct async_scan_data *next = list_entry(scanning_hosts.next, |
161 | struct async_scan_data, list); | 161 | struct async_scan_data, list); |
162 | complete(&next->prev_finished); | 162 | complete(&next->prev_finished); |
163 | } | 163 | } |
164 | done: | 164 | done: |
165 | spin_unlock(&async_scan_lock); | 165 | spin_unlock(&async_scan_lock); |
166 | 166 | ||
167 | kfree(data); | 167 | kfree(data); |
168 | return 0; | 168 | return 0; |
169 | } | 169 | } |
170 | 170 | ||
171 | /** | 171 | /** |
172 | * scsi_unlock_floptical - unlock device via a special MODE SENSE command | 172 | * scsi_unlock_floptical - unlock device via a special MODE SENSE command |
173 | * @sdev: scsi device to send command to | 173 | * @sdev: scsi device to send command to |
174 | * @result: area to store the result of the MODE SENSE | 174 | * @result: area to store the result of the MODE SENSE |
175 | * | 175 | * |
176 | * Description: | 176 | * Description: |
177 | * Send a vendor specific MODE SENSE (not a MODE SELECT) command. | 177 | * Send a vendor specific MODE SENSE (not a MODE SELECT) command. |
178 | * Called for BLIST_KEY devices. | 178 | * Called for BLIST_KEY devices. |
179 | **/ | 179 | **/ |
180 | static void scsi_unlock_floptical(struct scsi_device *sdev, | 180 | static void scsi_unlock_floptical(struct scsi_device *sdev, |
181 | unsigned char *result) | 181 | unsigned char *result) |
182 | { | 182 | { |
183 | unsigned char scsi_cmd[MAX_COMMAND_SIZE]; | 183 | unsigned char scsi_cmd[MAX_COMMAND_SIZE]; |
184 | 184 | ||
185 | sdev_printk(KERN_NOTICE, sdev, "unlocking floptical drive\n"); | 185 | sdev_printk(KERN_NOTICE, sdev, "unlocking floptical drive\n"); |
186 | scsi_cmd[0] = MODE_SENSE; | 186 | scsi_cmd[0] = MODE_SENSE; |
187 | scsi_cmd[1] = 0; | 187 | scsi_cmd[1] = 0; |
188 | scsi_cmd[2] = 0x2e; | 188 | scsi_cmd[2] = 0x2e; |
189 | scsi_cmd[3] = 0; | 189 | scsi_cmd[3] = 0; |
190 | scsi_cmd[4] = 0x2a; /* size */ | 190 | scsi_cmd[4] = 0x2a; /* size */ |
191 | scsi_cmd[5] = 0; | 191 | scsi_cmd[5] = 0; |
192 | scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, result, 0x2a, NULL, | 192 | scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, result, 0x2a, NULL, |
193 | SCSI_TIMEOUT, 3, NULL); | 193 | SCSI_TIMEOUT, 3, NULL); |
194 | } | 194 | } |
195 | 195 | ||
196 | /** | 196 | /** |
197 | * scsi_alloc_sdev - allocate and setup a scsi_Device | 197 | * scsi_alloc_sdev - allocate and setup a scsi_Device |
198 | * @starget: which target to allocate a &scsi_device for | 198 | * @starget: which target to allocate a &scsi_device for |
199 | * @lun: which lun | 199 | * @lun: which lun |
200 | * @hostdata: usually NULL and set by ->slave_alloc instead | 200 | * @hostdata: usually NULL and set by ->slave_alloc instead |
201 | * | 201 | * |
202 | * Description: | 202 | * Description: |
203 | * Allocate, initialize for io, and return a pointer to a scsi_Device. | 203 | * Allocate, initialize for io, and return a pointer to a scsi_Device. |
204 | * Stores the @shost, @channel, @id, and @lun in the scsi_Device, and | 204 | * Stores the @shost, @channel, @id, and @lun in the scsi_Device, and |
205 | * adds scsi_Device to the appropriate list. | 205 | * adds scsi_Device to the appropriate list. |
206 | * | 206 | * |
207 | * Return value: | 207 | * Return value: |
208 | * scsi_Device pointer, or NULL on failure. | 208 | * scsi_Device pointer, or NULL on failure. |
209 | **/ | 209 | **/ |
210 | static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, | 210 | static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, |
211 | u64 lun, void *hostdata) | 211 | u64 lun, void *hostdata) |
212 | { | 212 | { |
213 | struct scsi_device *sdev; | 213 | struct scsi_device *sdev; |
214 | int display_failure_msg = 1, ret; | 214 | int display_failure_msg = 1, ret; |
215 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); | 215 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); |
216 | extern void scsi_evt_thread(struct work_struct *work); | 216 | extern void scsi_evt_thread(struct work_struct *work); |
217 | extern void scsi_requeue_run_queue(struct work_struct *work); | 217 | extern void scsi_requeue_run_queue(struct work_struct *work); |
218 | 218 | ||
219 | sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size, | 219 | sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size, |
220 | GFP_ATOMIC); | 220 | GFP_ATOMIC); |
221 | if (!sdev) | 221 | if (!sdev) |
222 | goto out; | 222 | goto out; |
223 | 223 | ||
224 | sdev->vendor = scsi_null_device_strs; | 224 | sdev->vendor = scsi_null_device_strs; |
225 | sdev->model = scsi_null_device_strs; | 225 | sdev->model = scsi_null_device_strs; |
226 | sdev->rev = scsi_null_device_strs; | 226 | sdev->rev = scsi_null_device_strs; |
227 | sdev->host = shost; | 227 | sdev->host = shost; |
228 | sdev->queue_ramp_up_period = SCSI_DEFAULT_RAMP_UP_PERIOD; | 228 | sdev->queue_ramp_up_period = SCSI_DEFAULT_RAMP_UP_PERIOD; |
229 | sdev->id = starget->id; | 229 | sdev->id = starget->id; |
230 | sdev->lun = lun; | 230 | sdev->lun = lun; |
231 | sdev->channel = starget->channel; | 231 | sdev->channel = starget->channel; |
232 | sdev->sdev_state = SDEV_CREATED; | 232 | sdev->sdev_state = SDEV_CREATED; |
233 | INIT_LIST_HEAD(&sdev->siblings); | 233 | INIT_LIST_HEAD(&sdev->siblings); |
234 | INIT_LIST_HEAD(&sdev->same_target_siblings); | 234 | INIT_LIST_HEAD(&sdev->same_target_siblings); |
235 | INIT_LIST_HEAD(&sdev->cmd_list); | 235 | INIT_LIST_HEAD(&sdev->cmd_list); |
236 | INIT_LIST_HEAD(&sdev->starved_entry); | 236 | INIT_LIST_HEAD(&sdev->starved_entry); |
237 | INIT_LIST_HEAD(&sdev->event_list); | 237 | INIT_LIST_HEAD(&sdev->event_list); |
238 | spin_lock_init(&sdev->list_lock); | 238 | spin_lock_init(&sdev->list_lock); |
239 | INIT_WORK(&sdev->event_work, scsi_evt_thread); | 239 | INIT_WORK(&sdev->event_work, scsi_evt_thread); |
240 | INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue); | 240 | INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue); |
241 | 241 | ||
242 | sdev->sdev_gendev.parent = get_device(&starget->dev); | 242 | sdev->sdev_gendev.parent = get_device(&starget->dev); |
243 | sdev->sdev_target = starget; | 243 | sdev->sdev_target = starget; |
244 | 244 | ||
245 | /* usually NULL and set by ->slave_alloc instead */ | 245 | /* usually NULL and set by ->slave_alloc instead */ |
246 | sdev->hostdata = hostdata; | 246 | sdev->hostdata = hostdata; |
247 | 247 | ||
248 | /* if the device needs this changing, it may do so in the | 248 | /* if the device needs this changing, it may do so in the |
249 | * slave_configure function */ | 249 | * slave_configure function */ |
250 | sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED; | 250 | sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED; |
251 | 251 | ||
252 | /* | 252 | /* |
253 | * Some low level driver could use device->type | 253 | * Some low level driver could use device->type |
254 | */ | 254 | */ |
255 | sdev->type = -1; | 255 | sdev->type = -1; |
256 | 256 | ||
257 | /* | 257 | /* |
258 | * Assume that the device will have handshaking problems, | 258 | * Assume that the device will have handshaking problems, |
259 | * and then fix this field later if it turns out it | 259 | * and then fix this field later if it turns out it |
260 | * doesn't | 260 | * doesn't |
261 | */ | 261 | */ |
262 | sdev->borken = 1; | 262 | sdev->borken = 1; |
263 | 263 | ||
264 | if (shost_use_blk_mq(shost)) | 264 | if (shost_use_blk_mq(shost)) |
265 | sdev->request_queue = scsi_mq_alloc_queue(sdev); | 265 | sdev->request_queue = scsi_mq_alloc_queue(sdev); |
266 | else | 266 | else |
267 | sdev->request_queue = scsi_alloc_queue(sdev); | 267 | sdev->request_queue = scsi_alloc_queue(sdev); |
268 | if (!sdev->request_queue) { | 268 | if (!sdev->request_queue) { |
269 | /* release fn is set up in scsi_sysfs_device_initialise, so | 269 | /* release fn is set up in scsi_sysfs_device_initialise, so |
270 | * have to free and put manually here */ | 270 | * have to free and put manually here */ |
271 | put_device(&starget->dev); | 271 | put_device(&starget->dev); |
272 | kfree(sdev); | 272 | kfree(sdev); |
273 | goto out; | 273 | goto out; |
274 | } | 274 | } |
275 | WARN_ON_ONCE(!blk_get_queue(sdev->request_queue)); | 275 | WARN_ON_ONCE(!blk_get_queue(sdev->request_queue)); |
276 | sdev->request_queue->queuedata = sdev; | 276 | sdev->request_queue->queuedata = sdev; |
277 | 277 | ||
278 | if (!shost_use_blk_mq(sdev->host)) { | 278 | if (!shost_use_blk_mq(sdev->host)) { |
279 | blk_queue_init_tags(sdev->request_queue, | 279 | blk_queue_init_tags(sdev->request_queue, |
280 | sdev->host->cmd_per_lun, shost->bqt, | 280 | sdev->host->cmd_per_lun, shost->bqt, |
281 | shost->hostt->tag_alloc_policy); | 281 | shost->hostt->tag_alloc_policy); |
282 | } | 282 | } |
283 | scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun ? | 283 | scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun ? |
284 | sdev->host->cmd_per_lun : 1); | 284 | sdev->host->cmd_per_lun : 1); |
285 | 285 | ||
286 | scsi_sysfs_device_initialize(sdev); | 286 | scsi_sysfs_device_initialize(sdev); |
287 | 287 | ||
288 | if (shost->hostt->slave_alloc) { | 288 | if (shost->hostt->slave_alloc) { |
289 | ret = shost->hostt->slave_alloc(sdev); | 289 | ret = shost->hostt->slave_alloc(sdev); |
290 | if (ret) { | 290 | if (ret) { |
291 | /* | 291 | /* |
292 | * if LLDD reports slave not present, don't clutter | 292 | * if LLDD reports slave not present, don't clutter |
293 | * console with alloc failure messages | 293 | * console with alloc failure messages |
294 | */ | 294 | */ |
295 | if (ret == -ENXIO) | 295 | if (ret == -ENXIO) |
296 | display_failure_msg = 0; | 296 | display_failure_msg = 0; |
297 | goto out_device_destroy; | 297 | goto out_device_destroy; |
298 | } | 298 | } |
299 | } | 299 | } |
300 | 300 | ||
301 | return sdev; | 301 | return sdev; |
302 | 302 | ||
303 | out_device_destroy: | 303 | out_device_destroy: |
304 | __scsi_remove_device(sdev); | 304 | __scsi_remove_device(sdev); |
305 | out: | 305 | out: |
306 | if (display_failure_msg) | 306 | if (display_failure_msg) |
307 | printk(ALLOC_FAILURE_MSG, __func__); | 307 | printk(ALLOC_FAILURE_MSG, __func__); |
308 | return NULL; | 308 | return NULL; |
309 | } | 309 | } |
310 | 310 | ||
311 | static void scsi_target_destroy(struct scsi_target *starget) | 311 | static void scsi_target_destroy(struct scsi_target *starget) |
312 | { | 312 | { |
313 | struct device *dev = &starget->dev; | 313 | struct device *dev = &starget->dev; |
314 | struct Scsi_Host *shost = dev_to_shost(dev->parent); | 314 | struct Scsi_Host *shost = dev_to_shost(dev->parent); |
315 | unsigned long flags; | 315 | unsigned long flags; |
316 | 316 | ||
317 | BUG_ON(starget->state == STARGET_DEL); | ||
317 | starget->state = STARGET_DEL; | 318 | starget->state = STARGET_DEL; |
318 | transport_destroy_device(dev); | 319 | transport_destroy_device(dev); |
319 | spin_lock_irqsave(shost->host_lock, flags); | 320 | spin_lock_irqsave(shost->host_lock, flags); |
320 | if (shost->hostt->target_destroy) | 321 | if (shost->hostt->target_destroy) |
321 | shost->hostt->target_destroy(starget); | 322 | shost->hostt->target_destroy(starget); |
322 | list_del_init(&starget->siblings); | 323 | list_del_init(&starget->siblings); |
323 | spin_unlock_irqrestore(shost->host_lock, flags); | 324 | spin_unlock_irqrestore(shost->host_lock, flags); |
324 | put_device(dev); | 325 | put_device(dev); |
325 | } | 326 | } |
326 | 327 | ||
327 | static void scsi_target_dev_release(struct device *dev) | 328 | static void scsi_target_dev_release(struct device *dev) |
328 | { | 329 | { |
329 | struct device *parent = dev->parent; | 330 | struct device *parent = dev->parent; |
330 | struct scsi_target *starget = to_scsi_target(dev); | 331 | struct scsi_target *starget = to_scsi_target(dev); |
331 | 332 | ||
332 | kfree(starget); | 333 | kfree(starget); |
333 | put_device(parent); | 334 | put_device(parent); |
334 | } | 335 | } |
335 | 336 | ||
336 | static struct device_type scsi_target_type = { | 337 | static struct device_type scsi_target_type = { |
337 | .name = "scsi_target", | 338 | .name = "scsi_target", |
338 | .release = scsi_target_dev_release, | 339 | .release = scsi_target_dev_release, |
339 | }; | 340 | }; |
340 | 341 | ||
341 | int scsi_is_target_device(const struct device *dev) | 342 | int scsi_is_target_device(const struct device *dev) |
342 | { | 343 | { |
343 | return dev->type == &scsi_target_type; | 344 | return dev->type == &scsi_target_type; |
344 | } | 345 | } |
345 | EXPORT_SYMBOL(scsi_is_target_device); | 346 | EXPORT_SYMBOL(scsi_is_target_device); |
346 | 347 | ||
347 | static struct scsi_target *__scsi_find_target(struct device *parent, | 348 | static struct scsi_target *__scsi_find_target(struct device *parent, |
348 | int channel, uint id) | 349 | int channel, uint id) |
349 | { | 350 | { |
350 | struct scsi_target *starget, *found_starget = NULL; | 351 | struct scsi_target *starget, *found_starget = NULL; |
351 | struct Scsi_Host *shost = dev_to_shost(parent); | 352 | struct Scsi_Host *shost = dev_to_shost(parent); |
352 | /* | 353 | /* |
353 | * Search for an existing target for this sdev. | 354 | * Search for an existing target for this sdev. |
354 | */ | 355 | */ |
355 | list_for_each_entry(starget, &shost->__targets, siblings) { | 356 | list_for_each_entry(starget, &shost->__targets, siblings) { |
356 | if (starget->id == id && | 357 | if (starget->id == id && |
357 | starget->channel == channel) { | 358 | starget->channel == channel) { |
358 | found_starget = starget; | 359 | found_starget = starget; |
359 | break; | 360 | break; |
360 | } | 361 | } |
361 | } | 362 | } |
362 | if (found_starget) | 363 | if (found_starget) |
363 | get_device(&found_starget->dev); | 364 | get_device(&found_starget->dev); |
364 | 365 | ||
365 | return found_starget; | 366 | return found_starget; |
366 | } | 367 | } |
367 | 368 | ||
368 | /** | 369 | /** |
369 | * scsi_target_reap_ref_release - remove target from visibility | 370 | * scsi_target_reap_ref_release - remove target from visibility |
370 | * @kref: the reap_ref in the target being released | 371 | * @kref: the reap_ref in the target being released |
371 | * | 372 | * |
372 | * Called on last put of reap_ref, which is the indication that no device | 373 | * Called on last put of reap_ref, which is the indication that no device |
373 | * under this target is visible anymore, so render the target invisible in | 374 | * under this target is visible anymore, so render the target invisible in |
374 | * sysfs. Note: we have to be in user context here because the target reaps | 375 | * sysfs. Note: we have to be in user context here because the target reaps |
375 | * should be done in places where the scsi device visibility is being removed. | 376 | * should be done in places where the scsi device visibility is being removed. |
376 | */ | 377 | */ |
377 | static void scsi_target_reap_ref_release(struct kref *kref) | 378 | static void scsi_target_reap_ref_release(struct kref *kref) |
378 | { | 379 | { |
379 | struct scsi_target *starget | 380 | struct scsi_target *starget |
380 | = container_of(kref, struct scsi_target, reap_ref); | 381 | = container_of(kref, struct scsi_target, reap_ref); |
381 | 382 | ||
382 | /* | 383 | /* |
383 | * if we get here and the target is still in the CREATED state that | 384 | * if we get here and the target is still in the CREATED state that |
384 | * means it was allocated but never made visible (because a scan | 385 | * means it was allocated but never made visible (because a scan |
385 | * turned up no LUNs), so don't call device_del() on it. | 386 | * turned up no LUNs), so don't call device_del() on it. |
386 | */ | 387 | */ |
387 | if (starget->state != STARGET_CREATED) { | 388 | if (starget->state != STARGET_CREATED) { |
388 | transport_remove_device(&starget->dev); | 389 | transport_remove_device(&starget->dev); |
389 | device_del(&starget->dev); | 390 | device_del(&starget->dev); |
390 | } | 391 | } |
391 | scsi_target_destroy(starget); | 392 | scsi_target_destroy(starget); |
392 | } | 393 | } |
393 | 394 | ||
394 | static void scsi_target_reap_ref_put(struct scsi_target *starget) | 395 | static void scsi_target_reap_ref_put(struct scsi_target *starget) |
395 | { | 396 | { |
396 | kref_put(&starget->reap_ref, scsi_target_reap_ref_release); | 397 | kref_put(&starget->reap_ref, scsi_target_reap_ref_release); |
397 | } | 398 | } |
398 | 399 | ||
399 | /** | 400 | /** |
400 | * scsi_alloc_target - allocate a new or find an existing target | 401 | * scsi_alloc_target - allocate a new or find an existing target |
401 | * @parent: parent of the target (need not be a scsi host) | 402 | * @parent: parent of the target (need not be a scsi host) |
402 | * @channel: target channel number (zero if no channels) | 403 | * @channel: target channel number (zero if no channels) |
403 | * @id: target id number | 404 | * @id: target id number |
404 | * | 405 | * |
405 | * Return an existing target if one exists, provided it hasn't already | 406 | * Return an existing target if one exists, provided it hasn't already |
406 | * gone into STARGET_DEL state, otherwise allocate a new target. | 407 | * gone into STARGET_DEL state, otherwise allocate a new target. |
407 | * | 408 | * |
408 | * The target is returned with an incremented reference, so the caller | 409 | * The target is returned with an incremented reference, so the caller |
409 | * is responsible for both reaping and doing a last put | 410 | * is responsible for both reaping and doing a last put |
410 | */ | 411 | */ |
static struct scsi_target *scsi_alloc_target(struct device *parent,
					     int channel, uint id)
{
	struct Scsi_Host *shost = dev_to_shost(parent);
	struct device *dev = NULL;
	unsigned long flags;
	/* transport class gets private data appended to the target */
	const int size = sizeof(struct scsi_target)
		+ shost->transportt->target_size;
	struct scsi_target *starget;
	struct scsi_target *found_target;
	int error, ref_got;

	/*
	 * Allocate (GFP_KERNEL, may sleep) before taking the host lock;
	 * if an existing target is found below, this one is simply dropped.
	 */
	starget = kzalloc(size, GFP_KERNEL);
	if (!starget) {
		printk(KERN_ERR "%s: allocation failure\n", __func__);
		return NULL;
	}
	dev = &starget->dev;
	device_initialize(dev);
	kref_init(&starget->reap_ref);
	dev->parent = get_device(parent);
	dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
	dev->bus = &scsi_bus_type;
	dev->type = &scsi_target_type;
	starget->id = id;
	starget->channel = channel;
	starget->can_queue = 0;
	INIT_LIST_HEAD(&starget->siblings);
	INIT_LIST_HEAD(&starget->devices);
	starget->state = STARGET_CREATED;
	starget->scsi_level = SCSI_2;
	starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
 retry:
	spin_lock_irqsave(shost->host_lock, flags);

	/* lookup and list insertion must be atomic w.r.t. the host lock */
	found_target = __scsi_find_target(parent, channel, id);
	if (found_target)
		goto found;

	/* no existing target: publish the new one on the host's list */
	list_add_tail(&starget->siblings, &shost->__targets);
	spin_unlock_irqrestore(shost->host_lock, flags);
	/* allocate and add */
	transport_setup_device(dev);
	if (shost->hostt->target_alloc) {
		error = shost->hostt->target_alloc(starget);

		if(error) {
			dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error);
			/* don't want scsi_target_reap to do the final
			 * put because it will be under the host lock */
			scsi_target_destroy(starget);
			return NULL;
		}
	}
	/* extra device reference returned to the caller (see header comment) */
	get_device(dev);

	return starget;

 found:
	/*
	 * release routine already fired if kref is zero, so if we can still
	 * take the reference, the target must be alive.  If we can't, it must
	 * be dying and we need to wait for a new target
	 */
	ref_got = kref_get_unless_zero(&found_target->reap_ref);

	spin_unlock_irqrestore(shost->host_lock, flags);
	if (ref_got) {
		/* drop the target we speculatively allocated above */
		put_device(dev);
		return found_target;
	}
	/*
	 * Unfortunately, we found a dying target; need to wait until it's
	 * dead before we can get a new one.  There is an anomaly here.  We
	 * *should* call scsi_target_reap() to balance the kref_get() of the
	 * reap_ref above.  However, since the target is being released, it's
	 * already invisible and the reap_ref is irrelevant.  If we call
	 * scsi_target_reap() we might spuriously do another device_del() on
	 * an already invisible target.
	 */
	put_device(&found_target->dev);
	/*
	 * length of time is irrelevant here, we just want to yield the CPU
	 * for a tick to avoid busy waiting for the target to die.
	 */
	msleep(1);
	goto retry;
}
499 | 500 | ||
500 | /** | 501 | /** |
501 | * scsi_target_reap - check to see if target is in use and destroy if not | 502 | * scsi_target_reap - check to see if target is in use and destroy if not |
502 | * @starget: target to be checked | 503 | * @starget: target to be checked |
503 | * | 504 | * |
504 | * This is used after removing a LUN or doing a last put of the target. | 505 | * This is used after removing a LUN or doing a last put of the target. |
505 | * It checks atomically that nothing is using the target and removes | 506 | * It checks atomically that nothing is using the target and removes |
506 | * it if so. | 507 | * it if so. |
507 | */ | 508 | */ |
508 | void scsi_target_reap(struct scsi_target *starget) | 509 | void scsi_target_reap(struct scsi_target *starget) |
509 | { | 510 | { |
510 | /* | 511 | /* |
511 | * serious problem if this triggers: STARGET_DEL is only set in the if | 512 | * serious problem if this triggers: STARGET_DEL is only set in the if |
512 | * the reap_ref drops to zero, so we're trying to do another final put | 513 | * the reap_ref drops to zero, so we're trying to do another final put |
513 | * on an already released kref | 514 | * on an already released kref |
514 | */ | 515 | */ |
515 | BUG_ON(starget->state == STARGET_DEL); | 516 | BUG_ON(starget->state == STARGET_DEL); |
516 | scsi_target_reap_ref_put(starget); | 517 | scsi_target_reap_ref_put(starget); |
517 | } | 518 | } |
518 | 519 | ||
519 | /** | 520 | /** |
520 | * sanitize_inquiry_string - remove non-graphical chars from an INQUIRY result string | 521 | * sanitize_inquiry_string - remove non-graphical chars from an INQUIRY result string |
521 | * @s: INQUIRY result string to sanitize | 522 | * @s: INQUIRY result string to sanitize |
522 | * @len: length of the string | 523 | * @len: length of the string |
523 | * | 524 | * |
524 | * Description: | 525 | * Description: |
525 | * The SCSI spec says that INQUIRY vendor, product, and revision | 526 | * The SCSI spec says that INQUIRY vendor, product, and revision |
526 | * strings must consist entirely of graphic ASCII characters, | 527 | * strings must consist entirely of graphic ASCII characters, |
527 | * padded on the right with spaces. Since not all devices obey | 528 | * padded on the right with spaces. Since not all devices obey |
528 | * this rule, we will replace non-graphic or non-ASCII characters | 529 | * this rule, we will replace non-graphic or non-ASCII characters |
529 | * with spaces. Exception: a NUL character is interpreted as a | 530 | * with spaces. Exception: a NUL character is interpreted as a |
530 | * string terminator, so all the following characters are set to | 531 | * string terminator, so all the following characters are set to |
531 | * spaces. | 532 | * spaces. |
532 | **/ | 533 | **/ |
static void sanitize_inquiry_string(unsigned char *s, int len)
{
	int i;
	int hit_nul = 0;	/* NUL terminates: it and all bytes after become spaces */

	for (i = 0; i < len; i++) {
		if (s[i] == 0)
			hit_nul = 1;
		/* blank anything that is not graphic ASCII (0x20..0x7e) */
		if (hit_nul || s[i] < 0x20 || s[i] > 0x7e)
			s[i] = ' ';
	}
}
544 | 545 | ||
545 | /** | 546 | /** |
546 | * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY | 547 | * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY |
547 | * @sdev: scsi_device to probe | 548 | * @sdev: scsi_device to probe |
548 | * @inq_result: area to store the INQUIRY result | 549 | * @inq_result: area to store the INQUIRY result |
549 | * @result_len: len of inq_result | 550 | * @result_len: len of inq_result |
550 | * @bflags: store any bflags found here | 551 | * @bflags: store any bflags found here |
551 | * | 552 | * |
552 | * Description: | 553 | * Description: |
553 | * Probe the lun associated with @req using a standard SCSI INQUIRY; | 554 | * Probe the lun associated with @req using a standard SCSI INQUIRY; |
554 | * | 555 | * |
555 | * If the INQUIRY is successful, zero is returned and the | 556 | * If the INQUIRY is successful, zero is returned and the |
556 | * INQUIRY data is in @inq_result; the scsi_level and INQUIRY length | 557 | * INQUIRY data is in @inq_result; the scsi_level and INQUIRY length |
557 | * are copied to the scsi_device any flags value is stored in *@bflags. | 558 | * are copied to the scsi_device any flags value is stored in *@bflags. |
558 | **/ | 559 | **/ |
static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
			  int result_len, int *bflags)
{
	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
	int first_inquiry_len, try_inquiry_len, next_inquiry_len;
	int response_len = 0;
	int pass, count, result;
	struct scsi_sense_hdr sshdr;

	*bflags = 0;

	/* Perform up to 3 passes.  The first pass uses a conservative
	 * transfer length of 36 unless sdev->inquiry_len specifies a
	 * different value. */
	first_inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
	try_inquiry_len = first_inquiry_len;
	pass = 1;

 next_pass:
	SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
				"scsi scan: INQUIRY pass %d length %d\n",
				pass, try_inquiry_len));

	/* Each pass gets up to three chances to ignore Unit Attention */
	for (count = 0; count < 3; ++count) {
		int resid;

		/* build a 6-byte INQUIRY CDB; byte 4 is the allocation length */
		memset(scsi_cmd, 0, 6);
		scsi_cmd[0] = INQUIRY;
		scsi_cmd[4] = (unsigned char) try_inquiry_len;

		memset(inq_result, 0, try_inquiry_len);

		result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
					  inq_result, try_inquiry_len, &sshdr,
					  HZ / 2 + HZ * scsi_inq_timeout, 3,
					  &resid);

		SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
				"scsi scan: INQUIRY %s with code 0x%x\n",
				result ? "failed" : "successful", result));

		if (result) {
			/*
			 * not-ready to ready transition [asc/ascq=0x28/0x0]
			 * or power-on, reset [asc/ascq=0x29/0x0], continue.
			 * INQUIRY should not yield UNIT_ATTENTION
			 * but many buggy devices do so anyway.
			 */
			if ((driver_byte(result) & DRIVER_SENSE) &&
			    scsi_sense_valid(&sshdr)) {
				if ((sshdr.sense_key == UNIT_ATTENTION) &&
				    ((sshdr.asc == 0x28) ||
				     (sshdr.asc == 0x29)) &&
				    (sshdr.ascq == 0))
					continue;
			}
		} else {
			/*
			 * if nothing was transferred, we try
			 * again. It's a workaround for some USB
			 * devices.
			 */
			if (resid == try_inquiry_len)
				continue;
		}
		break;
	}

	if (result == 0) {
		/* blank non-graphic chars in the vendor (8..15), product
		 * (16..31) and revision (32..35) strings */
		sanitize_inquiry_string(&inq_result[8], 8);
		sanitize_inquiry_string(&inq_result[16], 16);
		sanitize_inquiry_string(&inq_result[32], 4);

		/* byte 4 is the Additional Length field; +5 header bytes */
		response_len = inq_result[4] + 5;
		if (response_len > 255)
			response_len = first_inquiry_len;	/* sanity */

		/*
		 * Get any flags for this device.
		 *
		 * XXX add a bflags to scsi_device, and replace the
		 * corresponding bit fields in scsi_device, so bflags
		 * need not be passed as an argument.
		 */
		*bflags = scsi_get_device_flags(sdev, &inq_result[8],
				&inq_result[16]);

		/* When the first pass succeeds we gain information about
		 * what larger transfer lengths might work. */
		if (pass == 1) {
			if (BLIST_INQUIRY_36 & *bflags)
				next_inquiry_len = 36;
			else if (BLIST_INQUIRY_58 & *bflags)
				next_inquiry_len = 58;
			else if (sdev->inquiry_len)
				next_inquiry_len = sdev->inquiry_len;
			else
				next_inquiry_len = response_len;

			/* If more data is available perform the second pass */
			if (next_inquiry_len > try_inquiry_len) {
				try_inquiry_len = next_inquiry_len;
				pass = 2;
				goto next_pass;
			}
		}

	} else if (pass == 2) {
		sdev_printk(KERN_INFO, sdev,
			    "scsi scan: %d byte inquiry failed.  "
			    "Consider BLIST_INQUIRY_36 for this device\n",
			    try_inquiry_len);

		/* If this pass failed, the third pass goes back and transfers
		 * the same amount as we successfully got in the first pass. */
		try_inquiry_len = first_inquiry_len;
		pass = 3;
		goto next_pass;
	}

	/* If the last transfer attempt got an error, assume the
	 * peripheral doesn't exist or is dead. */
	if (result)
		return -EIO;

	/* Don't report any more data than the device says is valid */
	sdev->inquiry_len = min(try_inquiry_len, response_len);

	/*
	 * XXX Abort if the response length is less than 36? If less than
	 * 32, the lookup of the device flags (above) could be invalid,
	 * and it would be possible to take an incorrect action - we do
	 * not want to hang because of a short INQUIRY. On the flip side,
	 * if the device is spun down or becoming ready (and so it gives a
	 * short INQUIRY), an abort here prevents any further use of the
	 * device, including spin up.
	 *
	 * On the whole, the best approach seems to be to assume the first
	 * 36 bytes are valid no matter what the device says.  That's
	 * better than copying < 36 bytes to the inquiry-result buffer
	 * and displaying garbage for the Vendor, Product, or Revision
	 * strings.
	 */
	if (sdev->inquiry_len < 36) {
		/* warn only once per host about short INQUIRY responses */
		if (!sdev->host->short_inquiry) {
			shost_printk(KERN_INFO, sdev->host,
				    "scsi scan: INQUIRY result too short (%d),"
				    " using 36\n", sdev->inquiry_len);
			sdev->host->short_inquiry = 1;
		}
		sdev->inquiry_len = 36;
	}

	/*
	 * Related to the above issue:
	 *
	 * XXX Devices (disk or all?) should be sent a TEST UNIT READY,
	 * and if not ready, sent a START_STOP to start (maybe spin up) and
	 * then send the INQUIRY again, since the INQUIRY can change after
	 * a device is initialized.
	 *
	 * Ideally, start a device if explicitly asked to do so.  This
	 * assumes that a device is spun up on power on, spun down on
	 * request, and then spun up on request.
	 */

	/*
	 * The scanning code needs to know the scsi_level, even if no
	 * device is attached at LUN 0 (SCSI_SCAN_TARGET_PRESENT) so
	 * non-zero LUNs can be scanned.
	 */
	sdev->scsi_level = inq_result[2] & 0x07;
	if (sdev->scsi_level >= 2 ||
	    (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
		sdev->scsi_level++;
	sdev->sdev_target->scsi_level = sdev->scsi_level;

	/*
	 * If SCSI-2 or lower, and if the transport requires it,
	 * store the LUN value in CDB[1].
	 */
	sdev->lun_in_cdb = 0;
	if (sdev->scsi_level <= SCSI_2 &&
	    sdev->scsi_level != SCSI_UNKNOWN &&
	    !sdev->host->no_scsi2_lun_in_cdb)
		sdev->lun_in_cdb = 1;

	return 0;
}
749 | 750 | ||
750 | /** | 751 | /** |
751 | * scsi_add_lun - allocate and fully initialize a scsi_device | 752 | * scsi_add_lun - allocate and fully initialize a scsi_device |
752 | * @sdev: holds information to be stored in the new scsi_device | 753 | * @sdev: holds information to be stored in the new scsi_device |
753 | * @inq_result: holds the result of a previous INQUIRY to the LUN | 754 | * @inq_result: holds the result of a previous INQUIRY to the LUN |
754 | * @bflags: black/white list flag | 755 | * @bflags: black/white list flag |
755 | * @async: 1 if this device is being scanned asynchronously | 756 | * @async: 1 if this device is being scanned asynchronously |
756 | * | 757 | * |
757 | * Description: | 758 | * Description: |
758 | * Initialize the scsi_device @sdev. Optionally set fields based | 759 | * Initialize the scsi_device @sdev. Optionally set fields based |
759 | * on values in *@bflags. | 760 | * on values in *@bflags. |
760 | * | 761 | * |
761 | * Return: | 762 | * Return: |
762 | * SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device | 763 | * SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device |
763 | * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized | 764 | * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized |
764 | **/ | 765 | **/ |
765 | static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, | 766 | static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, |
766 | int *bflags, int async) | 767 | int *bflags, int async) |
767 | { | 768 | { |
768 | int ret; | 769 | int ret; |
769 | 770 | ||
770 | /* | 771 | /* |
771 | * XXX do not save the inquiry, since it can change underneath us, | 772 | * XXX do not save the inquiry, since it can change underneath us, |
772 | * save just vendor/model/rev. | 773 | * save just vendor/model/rev. |
773 | * | 774 | * |
774 | * Rather than save it and have an ioctl that retrieves the saved | 775 | * Rather than save it and have an ioctl that retrieves the saved |
775 | * value, have an ioctl that executes the same INQUIRY code used | 776 | * value, have an ioctl that executes the same INQUIRY code used |
776 | * in scsi_probe_lun, let user level programs doing INQUIRY | 777 | * in scsi_probe_lun, let user level programs doing INQUIRY |
777 | * scanning run at their own risk, or supply a user level program | 778 | * scanning run at their own risk, or supply a user level program |
778 | * that can correctly scan. | 779 | * that can correctly scan. |
779 | */ | 780 | */ |
780 | 781 | ||
781 | /* | 782 | /* |
782 | * Copy at least 36 bytes of INQUIRY data, so that we don't | 783 | * Copy at least 36 bytes of INQUIRY data, so that we don't |
783 | * dereference unallocated memory when accessing the Vendor, | 784 | * dereference unallocated memory when accessing the Vendor, |
784 | * Product, and Revision strings. Badly behaved devices may set | 785 | * Product, and Revision strings. Badly behaved devices may set |
785 | * the INQUIRY Additional Length byte to a small value, indicating | 786 | * the INQUIRY Additional Length byte to a small value, indicating |
786 | * these strings are invalid, but often they contain plausible data | 787 | * these strings are invalid, but often they contain plausible data |
787 | * nonetheless. It doesn't matter if the device sent < 36 bytes | 788 | * nonetheless. It doesn't matter if the device sent < 36 bytes |
788 | * total, since scsi_probe_lun() initializes inq_result with 0s. | 789 | * total, since scsi_probe_lun() initializes inq_result with 0s. |
789 | */ | 790 | */ |
790 | sdev->inquiry = kmemdup(inq_result, | 791 | sdev->inquiry = kmemdup(inq_result, |
791 | max_t(size_t, sdev->inquiry_len, 36), | 792 | max_t(size_t, sdev->inquiry_len, 36), |
792 | GFP_ATOMIC); | 793 | GFP_ATOMIC); |
793 | if (sdev->inquiry == NULL) | 794 | if (sdev->inquiry == NULL) |
794 | return SCSI_SCAN_NO_RESPONSE; | 795 | return SCSI_SCAN_NO_RESPONSE; |
795 | 796 | ||
796 | sdev->vendor = (char *) (sdev->inquiry + 8); | 797 | sdev->vendor = (char *) (sdev->inquiry + 8); |
797 | sdev->model = (char *) (sdev->inquiry + 16); | 798 | sdev->model = (char *) (sdev->inquiry + 16); |
798 | sdev->rev = (char *) (sdev->inquiry + 32); | 799 | sdev->rev = (char *) (sdev->inquiry + 32); |
799 | 800 | ||
800 | if (strncmp(sdev->vendor, "ATA ", 8) == 0) { | 801 | if (strncmp(sdev->vendor, "ATA ", 8) == 0) { |
801 | /* | 802 | /* |
802 | * sata emulation layer device. This is a hack to work around | 803 | * sata emulation layer device. This is a hack to work around |
803 | * the SATL power management specifications which state that | 804 | * the SATL power management specifications which state that |
804 | * when the SATL detects the device has gone into standby | 805 | * when the SATL detects the device has gone into standby |
805 | * mode, it shall respond with NOT READY. | 806 | * mode, it shall respond with NOT READY. |
806 | */ | 807 | */ |
807 | sdev->allow_restart = 1; | 808 | sdev->allow_restart = 1; |
808 | } | 809 | } |
809 | 810 | ||
810 | if (*bflags & BLIST_ISROM) { | 811 | if (*bflags & BLIST_ISROM) { |
811 | sdev->type = TYPE_ROM; | 812 | sdev->type = TYPE_ROM; |
812 | sdev->removable = 1; | 813 | sdev->removable = 1; |
813 | } else { | 814 | } else { |
814 | sdev->type = (inq_result[0] & 0x1f); | 815 | sdev->type = (inq_result[0] & 0x1f); |
815 | sdev->removable = (inq_result[1] & 0x80) >> 7; | 816 | sdev->removable = (inq_result[1] & 0x80) >> 7; |
816 | 817 | ||
817 | /* | 818 | /* |
818 | * some devices may respond with wrong type for | 819 | * some devices may respond with wrong type for |
819 | * well-known logical units. Force well-known type | 820 | * well-known logical units. Force well-known type |
820 | * to enumerate them correctly. | 821 | * to enumerate them correctly. |
821 | */ | 822 | */ |
822 | if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN) { | 823 | if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN) { |
823 | sdev_printk(KERN_WARNING, sdev, | 824 | sdev_printk(KERN_WARNING, sdev, |
824 | "%s: correcting incorrect peripheral device type 0x%x for W-LUN 0x%16xhN\n", | 825 | "%s: correcting incorrect peripheral device type 0x%x for W-LUN 0x%16xhN\n", |
825 | __func__, sdev->type, (unsigned int)sdev->lun); | 826 | __func__, sdev->type, (unsigned int)sdev->lun); |
826 | sdev->type = TYPE_WLUN; | 827 | sdev->type = TYPE_WLUN; |
827 | } | 828 | } |
828 | 829 | ||
829 | } | 830 | } |
830 | 831 | ||
831 | if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) { | 832 | if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) { |
832 | /* RBC and MMC devices can return SCSI-3 compliance and yet | 833 | /* RBC and MMC devices can return SCSI-3 compliance and yet |
833 | * still not support REPORT LUNS, so make them act as | 834 | * still not support REPORT LUNS, so make them act as |
834 | * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is | 835 | * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is |
835 | * specifically set */ | 836 | * specifically set */ |
836 | if ((*bflags & BLIST_REPORTLUN2) == 0) | 837 | if ((*bflags & BLIST_REPORTLUN2) == 0) |
837 | *bflags |= BLIST_NOREPORTLUN; | 838 | *bflags |= BLIST_NOREPORTLUN; |
838 | } | 839 | } |
839 | 840 | ||
840 | /* | 841 | /* |
841 | * For a peripheral qualifier (PQ) value of 1 (001b), the SCSI | 842 | * For a peripheral qualifier (PQ) value of 1 (001b), the SCSI |
842 | * spec says: The device server is capable of supporting the | 843 | * spec says: The device server is capable of supporting the |
843 | * specified peripheral device type on this logical unit. However, | 844 | * specified peripheral device type on this logical unit. However, |
844 | * the physical device is not currently connected to this logical | 845 | * the physical device is not currently connected to this logical |
845 | * unit. | 846 | * unit. |
846 | * | 847 | * |
847 | * The above is vague, as it implies that we could treat 001 and | 848 | * The above is vague, as it implies that we could treat 001 and |
848 | * 011 the same. Stay compatible with previous code, and create a | 849 | * 011 the same. Stay compatible with previous code, and create a |
849 | * scsi_device for a PQ of 1 | 850 | * scsi_device for a PQ of 1 |
850 | * | 851 | * |
851 | * Don't set the device offline here; rather let the upper | 852 | * Don't set the device offline here; rather let the upper |
852 | * level drivers eval the PQ to decide whether they should | 853 | * level drivers eval the PQ to decide whether they should |
853 | * attach. So remove ((inq_result[0] >> 5) & 7) == 1 check. | 854 | * attach. So remove ((inq_result[0] >> 5) & 7) == 1 check. |
854 | */ | 855 | */ |
855 | 856 | ||
856 | sdev->inq_periph_qual = (inq_result[0] >> 5) & 7; | 857 | sdev->inq_periph_qual = (inq_result[0] >> 5) & 7; |
857 | sdev->lockable = sdev->removable; | 858 | sdev->lockable = sdev->removable; |
858 | sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2); | 859 | sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2); |
859 | 860 | ||
860 | if (sdev->scsi_level >= SCSI_3 || | 861 | if (sdev->scsi_level >= SCSI_3 || |
861 | (sdev->inquiry_len > 56 && inq_result[56] & 0x04)) | 862 | (sdev->inquiry_len > 56 && inq_result[56] & 0x04)) |
862 | sdev->ppr = 1; | 863 | sdev->ppr = 1; |
863 | if (inq_result[7] & 0x60) | 864 | if (inq_result[7] & 0x60) |
864 | sdev->wdtr = 1; | 865 | sdev->wdtr = 1; |
865 | if (inq_result[7] & 0x10) | 866 | if (inq_result[7] & 0x10) |
866 | sdev->sdtr = 1; | 867 | sdev->sdtr = 1; |
867 | 868 | ||
868 | sdev_printk(KERN_NOTICE, sdev, "%s %.8s %.16s %.4s PQ: %d " | 869 | sdev_printk(KERN_NOTICE, sdev, "%s %.8s %.16s %.4s PQ: %d " |
869 | "ANSI: %d%s\n", scsi_device_type(sdev->type), | 870 | "ANSI: %d%s\n", scsi_device_type(sdev->type), |
870 | sdev->vendor, sdev->model, sdev->rev, | 871 | sdev->vendor, sdev->model, sdev->rev, |
871 | sdev->inq_periph_qual, inq_result[2] & 0x07, | 872 | sdev->inq_periph_qual, inq_result[2] & 0x07, |
872 | (inq_result[3] & 0x0f) == 1 ? " CCS" : ""); | 873 | (inq_result[3] & 0x0f) == 1 ? " CCS" : ""); |
873 | 874 | ||
874 | if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) && | 875 | if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) && |
875 | !(*bflags & BLIST_NOTQ)) { | 876 | !(*bflags & BLIST_NOTQ)) { |
876 | sdev->tagged_supported = 1; | 877 | sdev->tagged_supported = 1; |
877 | sdev->simple_tags = 1; | 878 | sdev->simple_tags = 1; |
878 | } | 879 | } |
879 | 880 | ||
880 | /* | 881 | /* |
881 | * Some devices (Texel CD ROM drives) have handshaking problems | 882 | * Some devices (Texel CD ROM drives) have handshaking problems |
882 | * when used with the Seagate controllers. borken is initialized | 883 | * when used with the Seagate controllers. borken is initialized |
883 | * to 1, and then set it to 0 here. | 884 | * to 1, and then set it to 0 here. |
884 | */ | 885 | */ |
885 | if ((*bflags & BLIST_BORKEN) == 0) | 886 | if ((*bflags & BLIST_BORKEN) == 0) |
886 | sdev->borken = 0; | 887 | sdev->borken = 0; |
887 | 888 | ||
888 | if (*bflags & BLIST_NO_ULD_ATTACH) | 889 | if (*bflags & BLIST_NO_ULD_ATTACH) |
889 | sdev->no_uld_attach = 1; | 890 | sdev->no_uld_attach = 1; |
890 | 891 | ||
891 | /* | 892 | /* |
892 | * Apparently some really broken devices (contrary to the SCSI | 893 | * Apparently some really broken devices (contrary to the SCSI |
893 | * standards) need to be selected without asserting ATN | 894 | * standards) need to be selected without asserting ATN |
894 | */ | 895 | */ |
895 | if (*bflags & BLIST_SELECT_NO_ATN) | 896 | if (*bflags & BLIST_SELECT_NO_ATN) |
896 | sdev->select_no_atn = 1; | 897 | sdev->select_no_atn = 1; |
897 | 898 | ||
898 | /* | 899 | /* |
899 | * Maximum 512 sector transfer length | 900 | * Maximum 512 sector transfer length |
900 | * broken RA4x00 Compaq Disk Array | 901 | * broken RA4x00 Compaq Disk Array |
901 | */ | 902 | */ |
902 | if (*bflags & BLIST_MAX_512) | 903 | if (*bflags & BLIST_MAX_512) |
903 | blk_queue_max_hw_sectors(sdev->request_queue, 512); | 904 | blk_queue_max_hw_sectors(sdev->request_queue, 512); |
904 | /* | 905 | /* |
905 | * Max 1024 sector transfer length for targets that report incorrect | 906 | * Max 1024 sector transfer length for targets that report incorrect |
906 | * max/optimal lengths and relied on the old block layer safe default | 907 | * max/optimal lengths and relied on the old block layer safe default |
907 | */ | 908 | */ |
908 | else if (*bflags & BLIST_MAX_1024) | 909 | else if (*bflags & BLIST_MAX_1024) |
909 | blk_queue_max_hw_sectors(sdev->request_queue, 1024); | 910 | blk_queue_max_hw_sectors(sdev->request_queue, 1024); |
910 | 911 | ||
911 | /* | 912 | /* |
912 | * Some devices may not want to have a start command automatically | 913 | * Some devices may not want to have a start command automatically |
913 | * issued when a device is added. | 914 | * issued when a device is added. |
914 | */ | 915 | */ |
915 | if (*bflags & BLIST_NOSTARTONADD) | 916 | if (*bflags & BLIST_NOSTARTONADD) |
916 | sdev->no_start_on_add = 1; | 917 | sdev->no_start_on_add = 1; |
917 | 918 | ||
918 | if (*bflags & BLIST_SINGLELUN) | 919 | if (*bflags & BLIST_SINGLELUN) |
919 | scsi_target(sdev)->single_lun = 1; | 920 | scsi_target(sdev)->single_lun = 1; |
920 | 921 | ||
921 | sdev->use_10_for_rw = 1; | 922 | sdev->use_10_for_rw = 1; |
922 | 923 | ||
923 | if (*bflags & BLIST_MS_SKIP_PAGE_08) | 924 | if (*bflags & BLIST_MS_SKIP_PAGE_08) |
924 | sdev->skip_ms_page_8 = 1; | 925 | sdev->skip_ms_page_8 = 1; |
925 | 926 | ||
926 | if (*bflags & BLIST_MS_SKIP_PAGE_3F) | 927 | if (*bflags & BLIST_MS_SKIP_PAGE_3F) |
927 | sdev->skip_ms_page_3f = 1; | 928 | sdev->skip_ms_page_3f = 1; |
928 | 929 | ||
929 | if (*bflags & BLIST_USE_10_BYTE_MS) | 930 | if (*bflags & BLIST_USE_10_BYTE_MS) |
930 | sdev->use_10_for_ms = 1; | 931 | sdev->use_10_for_ms = 1; |
931 | 932 | ||
932 | /* some devices don't like REPORT SUPPORTED OPERATION CODES | 933 | /* some devices don't like REPORT SUPPORTED OPERATION CODES |
933 | * and will simply timeout causing sd_mod init to take a very | 934 | * and will simply timeout causing sd_mod init to take a very |
934 | * very long time */ | 935 | * very long time */ |
935 | if (*bflags & BLIST_NO_RSOC) | 936 | if (*bflags & BLIST_NO_RSOC) |
936 | sdev->no_report_opcodes = 1; | 937 | sdev->no_report_opcodes = 1; |
937 | 938 | ||
938 | /* set the device running here so that slave configure | 939 | /* set the device running here so that slave configure |
939 | * may do I/O */ | 940 | * may do I/O */ |
940 | ret = scsi_device_set_state(sdev, SDEV_RUNNING); | 941 | ret = scsi_device_set_state(sdev, SDEV_RUNNING); |
941 | if (ret) { | 942 | if (ret) { |
942 | ret = scsi_device_set_state(sdev, SDEV_BLOCK); | 943 | ret = scsi_device_set_state(sdev, SDEV_BLOCK); |
943 | 944 | ||
944 | if (ret) { | 945 | if (ret) { |
945 | sdev_printk(KERN_ERR, sdev, | 946 | sdev_printk(KERN_ERR, sdev, |
946 | "in wrong state %s to complete scan\n", | 947 | "in wrong state %s to complete scan\n", |
947 | scsi_device_state_name(sdev->sdev_state)); | 948 | scsi_device_state_name(sdev->sdev_state)); |
948 | return SCSI_SCAN_NO_RESPONSE; | 949 | return SCSI_SCAN_NO_RESPONSE; |
949 | } | 950 | } |
950 | } | 951 | } |
951 | 952 | ||
952 | if (*bflags & BLIST_MS_192_BYTES_FOR_3F) | 953 | if (*bflags & BLIST_MS_192_BYTES_FOR_3F) |
953 | sdev->use_192_bytes_for_3f = 1; | 954 | sdev->use_192_bytes_for_3f = 1; |
954 | 955 | ||
955 | if (*bflags & BLIST_NOT_LOCKABLE) | 956 | if (*bflags & BLIST_NOT_LOCKABLE) |
956 | sdev->lockable = 0; | 957 | sdev->lockable = 0; |
957 | 958 | ||
958 | if (*bflags & BLIST_RETRY_HWERROR) | 959 | if (*bflags & BLIST_RETRY_HWERROR) |
959 | sdev->retry_hwerror = 1; | 960 | sdev->retry_hwerror = 1; |
960 | 961 | ||
961 | if (*bflags & BLIST_NO_DIF) | 962 | if (*bflags & BLIST_NO_DIF) |
962 | sdev->no_dif = 1; | 963 | sdev->no_dif = 1; |
963 | 964 | ||
964 | sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT; | 965 | sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT; |
965 | 966 | ||
966 | if (*bflags & BLIST_TRY_VPD_PAGES) | 967 | if (*bflags & BLIST_TRY_VPD_PAGES) |
967 | sdev->try_vpd_pages = 1; | 968 | sdev->try_vpd_pages = 1; |
968 | else if (*bflags & BLIST_SKIP_VPD_PAGES) | 969 | else if (*bflags & BLIST_SKIP_VPD_PAGES) |
969 | sdev->skip_vpd_pages = 1; | 970 | sdev->skip_vpd_pages = 1; |
970 | 971 | ||
971 | transport_configure_device(&sdev->sdev_gendev); | 972 | transport_configure_device(&sdev->sdev_gendev); |
972 | 973 | ||
973 | if (sdev->host->hostt->slave_configure) { | 974 | if (sdev->host->hostt->slave_configure) { |
974 | ret = sdev->host->hostt->slave_configure(sdev); | 975 | ret = sdev->host->hostt->slave_configure(sdev); |
975 | if (ret) { | 976 | if (ret) { |
976 | /* | 977 | /* |
977 | * if LLDD reports slave not present, don't clutter | 978 | * if LLDD reports slave not present, don't clutter |
978 | * console with alloc failure messages | 979 | * console with alloc failure messages |
979 | */ | 980 | */ |
980 | if (ret != -ENXIO) { | 981 | if (ret != -ENXIO) { |
981 | sdev_printk(KERN_ERR, sdev, | 982 | sdev_printk(KERN_ERR, sdev, |
982 | "failed to configure device\n"); | 983 | "failed to configure device\n"); |
983 | } | 984 | } |
984 | return SCSI_SCAN_NO_RESPONSE; | 985 | return SCSI_SCAN_NO_RESPONSE; |
985 | } | 986 | } |
986 | } | 987 | } |
987 | 988 | ||
988 | if (sdev->scsi_level >= SCSI_3) | 989 | if (sdev->scsi_level >= SCSI_3) |
989 | scsi_attach_vpd(sdev); | 990 | scsi_attach_vpd(sdev); |
990 | 991 | ||
991 | sdev->max_queue_depth = sdev->queue_depth; | 992 | sdev->max_queue_depth = sdev->queue_depth; |
992 | 993 | ||
993 | /* | 994 | /* |
994 | * Ok, the device is now all set up, we can | 995 | * Ok, the device is now all set up, we can |
995 | * register it and tell the rest of the kernel | 996 | * register it and tell the rest of the kernel |
996 | * about it. | 997 | * about it. |
997 | */ | 998 | */ |
998 | if (!async && scsi_sysfs_add_sdev(sdev) != 0) | 999 | if (!async && scsi_sysfs_add_sdev(sdev) != 0) |
999 | return SCSI_SCAN_NO_RESPONSE; | 1000 | return SCSI_SCAN_NO_RESPONSE; |
1000 | 1001 | ||
1001 | return SCSI_SCAN_LUN_PRESENT; | 1002 | return SCSI_SCAN_LUN_PRESENT; |
1002 | } | 1003 | } |
1003 | 1004 | ||
1004 | #ifdef CONFIG_SCSI_LOGGING | 1005 | #ifdef CONFIG_SCSI_LOGGING |
/**
 * scsi_inq_str - copy a substring of INQUIRY data, stripping trailing blanks
 * @buf: destination buffer, must hold at least end-first+1 bytes
 * @inq: raw INQUIRY response
 * @first: index of the first byte of @inq to copy
 * @end: index one past the last byte of @inq to copy
 *
 * Bytes that are not printable (<= ' ') are replaced by spaces.  Copying
 * stops at @end or at the end of the INQUIRY data proper (additional
 * length byte inq[4] plus the 5-byte header), whichever comes first.
 * The result is NUL-terminated immediately after the last printable
 * character, so trailing whitespace is stripped.
 */
static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
				   unsigned first, unsigned end)
{
	unsigned last_print = 0;	/* one past the last printable char */
	unsigned src = first;
	unsigned dst = 0;

	while (src < end && src < inq[4] + 5) {
		unsigned char c = inq[src];

		if (c > ' ') {
			buf[dst] = c;
			last_print = dst + 1;
		} else {
			buf[dst] = ' ';
		}
		src++;
		dst++;
	}
	buf[last_print] = 0;
	return buf;
}
1028 | #endif | 1029 | #endif |
1029 | 1030 | ||
/**
 * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it
 * @starget: pointer to target device structure
 * @lun: LUN of target device
 * @bflagsp: store bflags here if not NULL
 * @sdevp: probe the LUN corresponding to this scsi_device
 * @rescan: if nonzero skip some code only needed on first scan
 * @hostdata: passed to scsi_alloc_sdev()
 *
 * Description:
 *     Call scsi_probe_lun, if a LUN with an attached device is found,
 *     allocate and set it up by calling scsi_add_lun.
 *
 *     When @sdevp is non-NULL and the result is SCSI_SCAN_LUN_PRESENT,
 *     a reference to the scsi_device is returned through *@sdevp and the
 *     caller is responsible for dropping it with scsi_device_put().
 *
 * Return:
 *     SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
 *     SCSI_SCAN_TARGET_PRESENT: target responded, but no device is
 *         attached at the LUN
 *     SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
 **/
static int scsi_probe_and_add_lun(struct scsi_target *starget,
				  u64 lun, int *bflagsp,
				  struct scsi_device **sdevp, int rescan,
				  void *hostdata)
{
	struct scsi_device *sdev;
	unsigned char *result;
	int bflags, res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);

	/*
	 * The rescan flag is used as an optimization, the first scan of a
	 * host adapter calls into here with rescan == 0.
	 */
	sdev = scsi_device_lookup_by_target(starget, lun);
	if (sdev) {
		if (rescan || !scsi_device_created(sdev)) {
			/* Device already known and fully scanned: reuse it. */
			SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
				"scsi scan: device exists on %s\n",
				dev_name(&sdev->sdev_gendev)));
			if (sdevp)
				/* hand the lookup's reference to the caller */
				*sdevp = sdev;
			else
				scsi_device_put(sdev);

			if (bflagsp)
				*bflagsp = scsi_get_device_flags(sdev,
								 sdev->vendor,
								 sdev->model);
			return SCSI_SCAN_LUN_PRESENT;
		}
		/*
		 * sdev was allocated earlier but never probed (first scan
		 * still pending): drop the lookup reference and probe it
		 * below as if it were new.
		 */
		scsi_device_put(sdev);
	} else
		sdev = scsi_alloc_sdev(starget, lun, hostdata);
	if (!sdev)
		goto out;

	/* Buffer for the INQUIRY response; __GFP_DMA for ISA-DMA-only HBAs. */
	result = kmalloc(result_len, GFP_ATOMIC |
			((shost->unchecked_isa_dma) ? __GFP_DMA : 0));
	if (!result)
		goto out_free_sdev;

	if (scsi_probe_lun(sdev, result, result_len, &bflags))
		goto out_free_result;

	if (bflagsp)
		*bflagsp = bflags;
	/*
	 * result contains valid SCSI INQUIRY data.
	 */
	if (((result[0] >> 5) == 3) && !(bflags & BLIST_ATTACH_PQ3)) {
		/*
		 * For a Peripheral qualifier 3 (011b), the SCSI
		 * spec says: The device server is not capable of
		 * supporting a physical device on this logical
		 * unit.
		 *
		 * For disks, this implies that there is no
		 * logical disk configured at sdev->lun, but there
		 * is a target id responding.
		 */
		SCSI_LOG_SCAN_BUS(2, sdev_printk(KERN_INFO, sdev, "scsi scan:"
				   " peripheral qualifier of 3, device not"
				   " added\n"))
		if (lun == 0) {
			SCSI_LOG_SCAN_BUS(1, {
				unsigned char vend[9];
				unsigned char mod[17];

				sdev_printk(KERN_INFO, sdev,
					"scsi scan: consider passing scsi_mod."
					"dev_flags=%s:%s:0x240 or 0x1000240\n",
					scsi_inq_str(vend, result, 8, 16),
					scsi_inq_str(mod, result, 16, 32));
			});

		}

		res = SCSI_SCAN_TARGET_PRESENT;
		goto out_free_result;
	}

	/*
	 * Some targets may set slight variations of PQ and PDT to signal
	 * that no LUN is present, so don't add sdev in these cases.
	 * Two specific examples are:
	 * 1) NetApp targets: return PQ=1, PDT=0x1f
	 * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
	 *    in the UFI 1.0 spec (we cannot rely on reserved bits).
	 *
	 * References:
	 * 1) SCSI SPC-3, pp. 145-146
	 * PQ=1: "A peripheral device having the specified peripheral
	 * device type is not connected to this logical unit. However, the
	 * device server is capable of supporting the specified peripheral
	 * device type on this logical unit."
	 * PDT=0x1f: "Unknown or no device type"
	 * 2) USB UFI 1.0, p. 20
	 * PDT=00h Direct-access device (floppy)
	 * PDT=1Fh none (no FDD connected to the requested logical unit)
	 */
	if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) &&
	    (result[0] & 0x1f) == 0x1f &&
	    !scsi_is_wlun(lun)) {
		SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
					"scsi scan: peripheral device type"
					" of 31, no device added\n"));
		res = SCSI_SCAN_TARGET_PRESENT;
		goto out_free_result;
	}

	res = scsi_add_lun(sdev, result, &bflags, shost->async_scan);
	if (res == SCSI_SCAN_LUN_PRESENT) {
		if (bflags & BLIST_KEY) {
			sdev->lockable = 0;
			/* blacklisted "floptical" needs an unlock command */
			scsi_unlock_floptical(sdev, result);
		}
	}

 out_free_result:
	kfree(result);
 out_free_sdev:
	if (res == SCSI_SCAN_LUN_PRESENT) {
		if (sdevp) {
			/* take a reference for the caller; undo the add on failure */
			if (scsi_device_get(sdev) == 0) {
				*sdevp = sdev;
			} else {
				__scsi_remove_device(sdev);
				res = SCSI_SCAN_NO_RESPONSE;
			}
		}
	} else
		/* probe failed or nothing attached: tear the sdev back down */
		__scsi_remove_device(sdev);
 out:
	return res;
}
1185 | 1186 | ||
1186 | /** | 1187 | /** |
1187 | * scsi_sequential_lun_scan - sequentially scan a SCSI target | 1188 | * scsi_sequential_lun_scan - sequentially scan a SCSI target |
1188 | * @starget: pointer to target structure to scan | 1189 | * @starget: pointer to target structure to scan |
1189 | * @bflags: black/white list flag for LUN 0 | 1190 | * @bflags: black/white list flag for LUN 0 |
1190 | * @scsi_level: Which version of the standard does this device adhere to | 1191 | * @scsi_level: Which version of the standard does this device adhere to |
1191 | * @rescan: passed to scsi_probe_add_lun() | 1192 | * @rescan: passed to scsi_probe_add_lun() |
1192 | * | 1193 | * |
1193 | * Description: | 1194 | * Description: |
1194 | * Generally, scan from LUN 1 (LUN 0 is assumed to already have been | 1195 | * Generally, scan from LUN 1 (LUN 0 is assumed to already have been |
1195 | * scanned) to some maximum lun until a LUN is found with no device | 1196 | * scanned) to some maximum lun until a LUN is found with no device |
1196 | * attached. Use the bflags to figure out any oddities. | 1197 | * attached. Use the bflags to figure out any oddities. |
1197 | * | 1198 | * |
1198 | * Modifies sdevscan->lun. | 1199 | * Modifies sdevscan->lun. |
1199 | **/ | 1200 | **/ |
1200 | static void scsi_sequential_lun_scan(struct scsi_target *starget, | 1201 | static void scsi_sequential_lun_scan(struct scsi_target *starget, |
1201 | int bflags, int scsi_level, int rescan) | 1202 | int bflags, int scsi_level, int rescan) |
1202 | { | 1203 | { |
1203 | uint max_dev_lun; | 1204 | uint max_dev_lun; |
1204 | u64 sparse_lun, lun; | 1205 | u64 sparse_lun, lun; |
1205 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); | 1206 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); |
1206 | 1207 | ||
1207 | SCSI_LOG_SCAN_BUS(3, starget_printk(KERN_INFO, starget, | 1208 | SCSI_LOG_SCAN_BUS(3, starget_printk(KERN_INFO, starget, |
1208 | "scsi scan: Sequential scan\n")); | 1209 | "scsi scan: Sequential scan\n")); |
1209 | 1210 | ||
1210 | max_dev_lun = min(max_scsi_luns, shost->max_lun); | 1211 | max_dev_lun = min(max_scsi_luns, shost->max_lun); |
1211 | /* | 1212 | /* |
1212 | * If this device is known to support sparse multiple units, | 1213 | * If this device is known to support sparse multiple units, |
1213 | * override the other settings, and scan all of them. Normally, | 1214 | * override the other settings, and scan all of them. Normally, |
1214 | * SCSI-3 devices should be scanned via the REPORT LUNS. | 1215 | * SCSI-3 devices should be scanned via the REPORT LUNS. |
1215 | */ | 1216 | */ |
1216 | if (bflags & BLIST_SPARSELUN) { | 1217 | if (bflags & BLIST_SPARSELUN) { |
1217 | max_dev_lun = shost->max_lun; | 1218 | max_dev_lun = shost->max_lun; |
1218 | sparse_lun = 1; | 1219 | sparse_lun = 1; |
1219 | } else | 1220 | } else |
1220 | sparse_lun = 0; | 1221 | sparse_lun = 0; |
1221 | 1222 | ||
1222 | /* | 1223 | /* |
1223 | * If less than SCSI_1_CCS, and no special lun scanning, stop | 1224 | * If less than SCSI_1_CCS, and no special lun scanning, stop |
1224 | * scanning; this matches 2.4 behaviour, but could just be a bug | 1225 | * scanning; this matches 2.4 behaviour, but could just be a bug |
1225 | * (to continue scanning a SCSI_1_CCS device). | 1226 | * (to continue scanning a SCSI_1_CCS device). |
1226 | * | 1227 | * |
1227 | * This test is broken. We might not have any device on lun0 for | 1228 | * This test is broken. We might not have any device on lun0 for |
1228 | * a sparselun device, and if that's the case then how would we | 1229 | * a sparselun device, and if that's the case then how would we |
1229 | * know the real scsi_level, eh? It might make sense to just not | 1230 | * know the real scsi_level, eh? It might make sense to just not |
1230 | * scan any SCSI_1 device for non-0 luns, but that check would best | 1231 | * scan any SCSI_1 device for non-0 luns, but that check would best |
1231 | * go into scsi_alloc_sdev() and just have it return null when asked | 1232 | * go into scsi_alloc_sdev() and just have it return null when asked |
1232 | * to alloc an sdev for lun > 0 on an already found SCSI_1 device. | 1233 | * to alloc an sdev for lun > 0 on an already found SCSI_1 device. |
1233 | * | 1234 | * |
1234 | if ((sdevscan->scsi_level < SCSI_1_CCS) && | 1235 | if ((sdevscan->scsi_level < SCSI_1_CCS) && |
1235 | ((bflags & (BLIST_FORCELUN | BLIST_SPARSELUN | BLIST_MAX5LUN)) | 1236 | ((bflags & (BLIST_FORCELUN | BLIST_SPARSELUN | BLIST_MAX5LUN)) |
1236 | == 0)) | 1237 | == 0)) |
1237 | return; | 1238 | return; |
1238 | */ | 1239 | */ |
1239 | /* | 1240 | /* |
1240 | * If this device is known to support multiple units, override | 1241 | * If this device is known to support multiple units, override |
1241 | * the other settings, and scan all of them. | 1242 | * the other settings, and scan all of them. |
1242 | */ | 1243 | */ |
1243 | if (bflags & BLIST_FORCELUN) | 1244 | if (bflags & BLIST_FORCELUN) |
1244 | max_dev_lun = shost->max_lun; | 1245 | max_dev_lun = shost->max_lun; |
1245 | /* | 1246 | /* |
1246 | * REGAL CDC-4X: avoid hang after LUN 4 | 1247 | * REGAL CDC-4X: avoid hang after LUN 4 |
1247 | */ | 1248 | */ |
1248 | if (bflags & BLIST_MAX5LUN) | 1249 | if (bflags & BLIST_MAX5LUN) |
1249 | max_dev_lun = min(5U, max_dev_lun); | 1250 | max_dev_lun = min(5U, max_dev_lun); |
1250 | /* | 1251 | /* |
1251 | * Do not scan SCSI-2 or lower device past LUN 7, unless | 1252 | * Do not scan SCSI-2 or lower device past LUN 7, unless |
1252 | * BLIST_LARGELUN. | 1253 | * BLIST_LARGELUN. |
1253 | */ | 1254 | */ |
1254 | if (scsi_level < SCSI_3 && !(bflags & BLIST_LARGELUN)) | 1255 | if (scsi_level < SCSI_3 && !(bflags & BLIST_LARGELUN)) |
1255 | max_dev_lun = min(8U, max_dev_lun); | 1256 | max_dev_lun = min(8U, max_dev_lun); |
1256 | 1257 | ||
1257 | /* | 1258 | /* |
1258 | * Stop scanning at 255 unless BLIST_SCSI3LUN | 1259 | * Stop scanning at 255 unless BLIST_SCSI3LUN |
1259 | */ | 1260 | */ |
1260 | if (!(bflags & BLIST_SCSI3LUN)) | 1261 | if (!(bflags & BLIST_SCSI3LUN)) |
1261 | max_dev_lun = min(256U, max_dev_lun); | 1262 | max_dev_lun = min(256U, max_dev_lun); |
1262 | 1263 | ||
1263 | /* | 1264 | /* |
1264 | * We have already scanned LUN 0, so start at LUN 1. Keep scanning | 1265 | * We have already scanned LUN 0, so start at LUN 1. Keep scanning |
1265 | * until we reach the max, or no LUN is found and we are not | 1266 | * until we reach the max, or no LUN is found and we are not |
1266 | * sparse_lun. | 1267 | * sparse_lun. |
1267 | */ | 1268 | */ |
1268 | for (lun = 1; lun < max_dev_lun; ++lun) | 1269 | for (lun = 1; lun < max_dev_lun; ++lun) |
1269 | if ((scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan, | 1270 | if ((scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan, |
1270 | NULL) != SCSI_SCAN_LUN_PRESENT) && | 1271 | NULL) != SCSI_SCAN_LUN_PRESENT) && |
1271 | !sparse_lun) | 1272 | !sparse_lun) |
1272 | return; | 1273 | return; |
1273 | } | 1274 | } |
1274 | 1275 | ||
1275 | /** | 1276 | /** |
1276 | * scsi_report_lun_scan - Scan using SCSI REPORT LUN results | 1277 | * scsi_report_lun_scan - Scan using SCSI REPORT LUN results |
1277 | * @starget: which target | 1278 | * @starget: which target |
1278 | * @bflags: Zero or a mix of BLIST_NOLUN, BLIST_REPORTLUN2, or BLIST_NOREPORTLUN | 1279 | * @bflags: Zero or a mix of BLIST_NOLUN, BLIST_REPORTLUN2, or BLIST_NOREPORTLUN |
1279 | * @rescan: nonzero if we can skip code only needed on first scan | 1280 | * @rescan: nonzero if we can skip code only needed on first scan |
1280 | * | 1281 | * |
1281 | * Description: | 1282 | * Description: |
1282 | * Fast scanning for modern (SCSI-3) devices by sending a REPORT LUN command. | 1283 | * Fast scanning for modern (SCSI-3) devices by sending a REPORT LUN command. |
1283 | * Scan the resulting list of LUNs by calling scsi_probe_and_add_lun. | 1284 | * Scan the resulting list of LUNs by calling scsi_probe_and_add_lun. |
1284 | * | 1285 | * |
1285 | * If BLINK_REPORTLUN2 is set, scan a target that supports more than 8 | 1286 | * If BLINK_REPORTLUN2 is set, scan a target that supports more than 8 |
1286 | * LUNs even if it's older than SCSI-3. | 1287 | * LUNs even if it's older than SCSI-3. |
1287 | * If BLIST_NOREPORTLUN is set, return 1 always. | 1288 | * If BLIST_NOREPORTLUN is set, return 1 always. |
1288 | * If BLIST_NOLUN is set, return 0 always. | 1289 | * If BLIST_NOLUN is set, return 0 always. |
1289 | * If starget->no_report_luns is set, return 1 always. | 1290 | * If starget->no_report_luns is set, return 1 always. |
1290 | * | 1291 | * |
1291 | * Return: | 1292 | * Return: |
1292 | * 0: scan completed (or no memory, so further scanning is futile) | 1293 | * 0: scan completed (or no memory, so further scanning is futile) |
1293 | * 1: could not scan with REPORT LUN | 1294 | * 1: could not scan with REPORT LUN |
1294 | **/ | 1295 | **/ |
1295 | static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, | 1296 | static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, |
1296 | int rescan) | 1297 | int rescan) |
1297 | { | 1298 | { |
1298 | char devname[64]; | 1299 | char devname[64]; |
1299 | unsigned char scsi_cmd[MAX_COMMAND_SIZE]; | 1300 | unsigned char scsi_cmd[MAX_COMMAND_SIZE]; |
1300 | unsigned int length; | 1301 | unsigned int length; |
1301 | u64 lun; | 1302 | u64 lun; |
1302 | unsigned int num_luns; | 1303 | unsigned int num_luns; |
1303 | unsigned int retries; | 1304 | unsigned int retries; |
1304 | int result; | 1305 | int result; |
1305 | struct scsi_lun *lunp, *lun_data; | 1306 | struct scsi_lun *lunp, *lun_data; |
1306 | struct scsi_sense_hdr sshdr; | 1307 | struct scsi_sense_hdr sshdr; |
1307 | struct scsi_device *sdev; | 1308 | struct scsi_device *sdev; |
1308 | struct Scsi_Host *shost = dev_to_shost(&starget->dev); | 1309 | struct Scsi_Host *shost = dev_to_shost(&starget->dev); |
1309 | int ret = 0; | 1310 | int ret = 0; |
1310 | 1311 | ||
1311 | /* | 1312 | /* |
1312 | * Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set. | 1313 | * Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set. |
1313 | * Also allow SCSI-2 if BLIST_REPORTLUN2 is set and host adapter does | 1314 | * Also allow SCSI-2 if BLIST_REPORTLUN2 is set and host adapter does |
1314 | * support more than 8 LUNs. | 1315 | * support more than 8 LUNs. |
1315 | * Don't attempt if the target doesn't support REPORT LUNS. | 1316 | * Don't attempt if the target doesn't support REPORT LUNS. |
1316 | */ | 1317 | */ |
1317 | if (bflags & BLIST_NOREPORTLUN) | 1318 | if (bflags & BLIST_NOREPORTLUN) |
1318 | return 1; | 1319 | return 1; |
1319 | if (starget->scsi_level < SCSI_2 && | 1320 | if (starget->scsi_level < SCSI_2 && |
1320 | starget->scsi_level != SCSI_UNKNOWN) | 1321 | starget->scsi_level != SCSI_UNKNOWN) |
1321 | return 1; | 1322 | return 1; |
1322 | if (starget->scsi_level < SCSI_3 && | 1323 | if (starget->scsi_level < SCSI_3 && |
1323 | (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8)) | 1324 | (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8)) |
1324 | return 1; | 1325 | return 1; |
1325 | if (bflags & BLIST_NOLUN) | 1326 | if (bflags & BLIST_NOLUN) |
1326 | return 0; | 1327 | return 0; |
1327 | if (starget->no_report_luns) | 1328 | if (starget->no_report_luns) |
1328 | return 1; | 1329 | return 1; |
1329 | 1330 | ||
1330 | if (!(sdev = scsi_device_lookup_by_target(starget, 0))) { | 1331 | if (!(sdev = scsi_device_lookup_by_target(starget, 0))) { |
1331 | sdev = scsi_alloc_sdev(starget, 0, NULL); | 1332 | sdev = scsi_alloc_sdev(starget, 0, NULL); |
1332 | if (!sdev) | 1333 | if (!sdev) |
1333 | return 0; | 1334 | return 0; |
1334 | if (scsi_device_get(sdev)) { | 1335 | if (scsi_device_get(sdev)) { |
1335 | __scsi_remove_device(sdev); | 1336 | __scsi_remove_device(sdev); |
1336 | return 0; | 1337 | return 0; |
1337 | } | 1338 | } |
1338 | } | 1339 | } |
1339 | 1340 | ||
1340 | sprintf(devname, "host %d channel %d id %d", | 1341 | sprintf(devname, "host %d channel %d id %d", |
1341 | shost->host_no, sdev->channel, sdev->id); | 1342 | shost->host_no, sdev->channel, sdev->id); |
1342 | 1343 | ||
1343 | /* | 1344 | /* |
1344 | * Allocate enough to hold the header (the same size as one scsi_lun) | 1345 | * Allocate enough to hold the header (the same size as one scsi_lun) |
1345 | * plus the number of luns we are requesting. 511 was the default | 1346 | * plus the number of luns we are requesting. 511 was the default |
1346 | * value of the now removed max_report_luns parameter. | 1347 | * value of the now removed max_report_luns parameter. |
1347 | */ | 1348 | */ |
1348 | length = (511 + 1) * sizeof(struct scsi_lun); | 1349 | length = (511 + 1) * sizeof(struct scsi_lun); |
1349 | retry: | 1350 | retry: |
1350 | lun_data = kmalloc(length, GFP_KERNEL | | 1351 | lun_data = kmalloc(length, GFP_KERNEL | |
1351 | (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0)); | 1352 | (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0)); |
1352 | if (!lun_data) { | 1353 | if (!lun_data) { |
1353 | printk(ALLOC_FAILURE_MSG, __func__); | 1354 | printk(ALLOC_FAILURE_MSG, __func__); |
1354 | goto out; | 1355 | goto out; |
1355 | } | 1356 | } |
1356 | 1357 | ||
1357 | scsi_cmd[0] = REPORT_LUNS; | 1358 | scsi_cmd[0] = REPORT_LUNS; |
1358 | 1359 | ||
1359 | /* | 1360 | /* |
1360 | * bytes 1 - 5: reserved, set to zero. | 1361 | * bytes 1 - 5: reserved, set to zero. |
1361 | */ | 1362 | */ |
1362 | memset(&scsi_cmd[1], 0, 5); | 1363 | memset(&scsi_cmd[1], 0, 5); |
1363 | 1364 | ||
1364 | /* | 1365 | /* |
1365 | * bytes 6 - 9: length of the command. | 1366 | * bytes 6 - 9: length of the command. |
1366 | */ | 1367 | */ |
1367 | put_unaligned_be32(length, &scsi_cmd[6]); | 1368 | put_unaligned_be32(length, &scsi_cmd[6]); |
1368 | 1369 | ||
1369 | scsi_cmd[10] = 0; /* reserved */ | 1370 | scsi_cmd[10] = 0; /* reserved */ |
1370 | scsi_cmd[11] = 0; /* control */ | 1371 | scsi_cmd[11] = 0; /* control */ |
1371 | 1372 | ||
1372 | /* | 1373 | /* |
1373 | * We can get a UNIT ATTENTION, for example a power on/reset, so | 1374 | * We can get a UNIT ATTENTION, for example a power on/reset, so |
1374 | * retry a few times (like sd.c does for TEST UNIT READY). | 1375 | * retry a few times (like sd.c does for TEST UNIT READY). |
1375 | * Experience shows some combinations of adapter/devices get at | 1376 | * Experience shows some combinations of adapter/devices get at |
1376 | * least two power on/resets. | 1377 | * least two power on/resets. |
1377 | * | 1378 | * |
1378 | * Illegal requests (for devices that do not support REPORT LUNS) | 1379 | * Illegal requests (for devices that do not support REPORT LUNS) |
1379 | * should come through as a check condition, and will not generate | 1380 | * should come through as a check condition, and will not generate |
1380 | * a retry. | 1381 | * a retry. |
1381 | */ | 1382 | */ |
1382 | for (retries = 0; retries < 3; retries++) { | 1383 | for (retries = 0; retries < 3; retries++) { |
1383 | SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev, | 1384 | SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev, |
1384 | "scsi scan: Sending REPORT LUNS to (try %d)\n", | 1385 | "scsi scan: Sending REPORT LUNS to (try %d)\n", |
1385 | retries)); | 1386 | retries)); |
1386 | 1387 | ||
1387 | result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, | 1388 | result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, |
1388 | lun_data, length, &sshdr, | 1389 | lun_data, length, &sshdr, |
1389 | SCSI_REPORT_LUNS_TIMEOUT, 3, NULL); | 1390 | SCSI_REPORT_LUNS_TIMEOUT, 3, NULL); |
1390 | 1391 | ||
1391 | SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev, | 1392 | SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev, |
1392 | "scsi scan: REPORT LUNS" | 1393 | "scsi scan: REPORT LUNS" |
1393 | " %s (try %d) result 0x%x\n", | 1394 | " %s (try %d) result 0x%x\n", |
1394 | result ? "failed" : "successful", | 1395 | result ? "failed" : "successful", |
1395 | retries, result)); | 1396 | retries, result)); |
1396 | if (result == 0) | 1397 | if (result == 0) |
1397 | break; | 1398 | break; |
1398 | else if (scsi_sense_valid(&sshdr)) { | 1399 | else if (scsi_sense_valid(&sshdr)) { |
1399 | if (sshdr.sense_key != UNIT_ATTENTION) | 1400 | if (sshdr.sense_key != UNIT_ATTENTION) |
1400 | break; | 1401 | break; |
1401 | } | 1402 | } |
1402 | } | 1403 | } |
1403 | 1404 | ||
1404 | if (result) { | 1405 | if (result) { |
1405 | /* | 1406 | /* |
1406 | * The device probably does not support a REPORT LUN command | 1407 | * The device probably does not support a REPORT LUN command |
1407 | */ | 1408 | */ |
1408 | ret = 1; | 1409 | ret = 1; |
1409 | goto out_err; | 1410 | goto out_err; |
1410 | } | 1411 | } |
1411 | 1412 | ||
1412 | /* | 1413 | /* |
1413 | * Get the length from the first four bytes of lun_data. | 1414 | * Get the length from the first four bytes of lun_data. |
1414 | */ | 1415 | */ |
1415 | if (get_unaligned_be32(lun_data->scsi_lun) + | 1416 | if (get_unaligned_be32(lun_data->scsi_lun) + |
1416 | sizeof(struct scsi_lun) > length) { | 1417 | sizeof(struct scsi_lun) > length) { |
1417 | length = get_unaligned_be32(lun_data->scsi_lun) + | 1418 | length = get_unaligned_be32(lun_data->scsi_lun) + |
1418 | sizeof(struct scsi_lun); | 1419 | sizeof(struct scsi_lun); |
1419 | kfree(lun_data); | 1420 | kfree(lun_data); |
1420 | goto retry; | 1421 | goto retry; |
1421 | } | 1422 | } |
1422 | length = get_unaligned_be32(lun_data->scsi_lun); | 1423 | length = get_unaligned_be32(lun_data->scsi_lun); |
1423 | 1424 | ||
1424 | num_luns = (length / sizeof(struct scsi_lun)); | 1425 | num_luns = (length / sizeof(struct scsi_lun)); |
1425 | 1426 | ||
1426 | SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev, | 1427 | SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev, |
1427 | "scsi scan: REPORT LUN scan\n")); | 1428 | "scsi scan: REPORT LUN scan\n")); |
1428 | 1429 | ||
1429 | /* | 1430 | /* |
1430 | * Scan the luns in lun_data. The entry at offset 0 is really | 1431 | * Scan the luns in lun_data. The entry at offset 0 is really |
1431 | * the header, so start at 1 and go up to and including num_luns. | 1432 | * the header, so start at 1 and go up to and including num_luns. |
1432 | */ | 1433 | */ |
1433 | for (lunp = &lun_data[1]; lunp <= &lun_data[num_luns]; lunp++) { | 1434 | for (lunp = &lun_data[1]; lunp <= &lun_data[num_luns]; lunp++) { |
1434 | lun = scsilun_to_int(lunp); | 1435 | lun = scsilun_to_int(lunp); |
1435 | 1436 | ||
1436 | if (lun > sdev->host->max_lun) { | 1437 | if (lun > sdev->host->max_lun) { |
1437 | sdev_printk(KERN_WARNING, sdev, | 1438 | sdev_printk(KERN_WARNING, sdev, |
1438 | "lun%llu has a LUN larger than" | 1439 | "lun%llu has a LUN larger than" |
1439 | " allowed by the host adapter\n", lun); | 1440 | " allowed by the host adapter\n", lun); |
1440 | } else { | 1441 | } else { |
1441 | int res; | 1442 | int res; |
1442 | 1443 | ||
1443 | res = scsi_probe_and_add_lun(starget, | 1444 | res = scsi_probe_and_add_lun(starget, |
1444 | lun, NULL, NULL, rescan, NULL); | 1445 | lun, NULL, NULL, rescan, NULL); |
1445 | if (res == SCSI_SCAN_NO_RESPONSE) { | 1446 | if (res == SCSI_SCAN_NO_RESPONSE) { |
1446 | /* | 1447 | /* |
1447 | * Got some results, but now none, abort. | 1448 | * Got some results, but now none, abort. |
1448 | */ | 1449 | */ |
1449 | sdev_printk(KERN_ERR, sdev, | 1450 | sdev_printk(KERN_ERR, sdev, |
1450 | "Unexpected response" | 1451 | "Unexpected response" |
1451 | " from lun %llu while scanning, scan" | 1452 | " from lun %llu while scanning, scan" |
1452 | " aborted\n", (unsigned long long)lun); | 1453 | " aborted\n", (unsigned long long)lun); |
1453 | break; | 1454 | break; |
1454 | } | 1455 | } |
1455 | } | 1456 | } |
1456 | } | 1457 | } |
1457 | 1458 | ||
1458 | out_err: | 1459 | out_err: |
1459 | kfree(lun_data); | 1460 | kfree(lun_data); |
1460 | out: | 1461 | out: |
1461 | scsi_device_put(sdev); | 1462 | scsi_device_put(sdev); |
1462 | if (scsi_device_created(sdev)) | 1463 | if (scsi_device_created(sdev)) |
1463 | /* | 1464 | /* |
1464 | * the sdev we used didn't appear in the report luns scan | 1465 | * the sdev we used didn't appear in the report luns scan |
1465 | */ | 1466 | */ |
1466 | __scsi_remove_device(sdev); | 1467 | __scsi_remove_device(sdev); |
1467 | return ret; | 1468 | return ret; |
1468 | } | 1469 | } |
1469 | 1470 | ||
/**
 * __scsi_add_device - probe for and add a single LUN, returning the sdev
 * @shost:	host to add the device to
 * @channel:	channel on @shost
 * @id:		target id
 * @lun:	LUN to probe
 * @hostdata:	driver-private data passed through to scsi_probe_and_add_lun()
 *
 * Returns the new scsi_device on success (caller is responsible for the
 * reference, see scsi_add_device() which drops it), ERR_PTR(-ENODEV) if
 * scanning is disabled or the LUN was not found, ERR_PTR(-ENOMEM) if the
 * target could not be allocated.
 */
struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
				      uint id, u64 lun, void *hostdata)
{
	struct scsi_device *sdev = ERR_PTR(-ENODEV);
	struct device *parent = &shost->shost_gendev;
	struct scsi_target *starget;

	/* scsi_mod.scan=none disables all scanning */
	if (strncmp(scsi_scan_type, "none", 4) == 0)
		return ERR_PTR(-ENODEV);

	starget = scsi_alloc_target(parent, channel, id);
	if (!starget)
		return ERR_PTR(-ENOMEM);
	scsi_autopm_get_target(starget);

	mutex_lock(&shost->scan_mutex);
	/* synchronous callers must not race a still-running async scan */
	if (!shost->async_scan)
		scsi_complete_async_scans();

	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
		scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata);
		scsi_autopm_put_host(shost);
	}
	mutex_unlock(&shost->scan_mutex);
	scsi_autopm_put_target(starget);
	/*
	 * paired with scsi_alloc_target(). Target will be destroyed unless
	 * scsi_probe_and_add_lun made an underlying device visible
	 */
	scsi_target_reap(starget);
	put_device(&starget->dev);

	return sdev;
}
EXPORT_SYMBOL(__scsi_add_device);
1505 | 1506 | ||
1506 | int scsi_add_device(struct Scsi_Host *host, uint channel, | 1507 | int scsi_add_device(struct Scsi_Host *host, uint channel, |
1507 | uint target, u64 lun) | 1508 | uint target, u64 lun) |
1508 | { | 1509 | { |
1509 | struct scsi_device *sdev = | 1510 | struct scsi_device *sdev = |
1510 | __scsi_add_device(host, channel, target, lun, NULL); | 1511 | __scsi_add_device(host, channel, target, lun, NULL); |
1511 | if (IS_ERR(sdev)) | 1512 | if (IS_ERR(sdev)) |
1512 | return PTR_ERR(sdev); | 1513 | return PTR_ERR(sdev); |
1513 | 1514 | ||
1514 | scsi_device_put(sdev); | 1515 | scsi_device_put(sdev); |
1515 | return 0; | 1516 | return 0; |
1516 | } | 1517 | } |
1517 | EXPORT_SYMBOL(scsi_add_device); | 1518 | EXPORT_SYMBOL(scsi_add_device); |
1518 | 1519 | ||
1519 | void scsi_rescan_device(struct device *dev) | 1520 | void scsi_rescan_device(struct device *dev) |
1520 | { | 1521 | { |
1521 | device_lock(dev); | 1522 | device_lock(dev); |
1522 | if (dev->driver && try_module_get(dev->driver->owner)) { | 1523 | if (dev->driver && try_module_get(dev->driver->owner)) { |
1523 | struct scsi_driver *drv = to_scsi_driver(dev->driver); | 1524 | struct scsi_driver *drv = to_scsi_driver(dev->driver); |
1524 | 1525 | ||
1525 | if (drv->rescan) | 1526 | if (drv->rescan) |
1526 | drv->rescan(dev); | 1527 | drv->rescan(dev); |
1527 | module_put(dev->driver->owner); | 1528 | module_put(dev->driver->owner); |
1528 | } | 1529 | } |
1529 | device_unlock(dev); | 1530 | device_unlock(dev); |
1530 | } | 1531 | } |
1531 | EXPORT_SYMBOL(scsi_rescan_device); | 1532 | EXPORT_SYMBOL(scsi_rescan_device); |
1532 | 1533 | ||
/*
 * __scsi_scan_target - scan one target id (caller holds scan_mutex)
 *
 * Allocates (or finds) the target, then either probes the single @lun or,
 * for SCAN_WILD_CARD, probes LUN 0 and fans out via REPORT LUNS or a
 * sequential LUN scan.  The target is reaped on exit if no child device
 * became visible.
 */
static void __scsi_scan_target(struct device *parent, unsigned int channel,
		unsigned int id, u64 lun, int rescan)
{
	struct Scsi_Host *shost = dev_to_shost(parent);
	int bflags = 0;
	int res;
	struct scsi_target *starget;

	if (shost->this_id == id)
		/*
		 * Don't scan the host adapter
		 */
		return;

	starget = scsi_alloc_target(parent, channel, id);
	if (!starget)
		return;
	scsi_autopm_get_target(starget);

	if (lun != SCAN_WILD_CARD) {
		/*
		 * Scan for a specific host/chan/id/lun.
		 */
		scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan, NULL);
		goto out_reap;
	}

	/*
	 * Scan LUN 0, if there is some response, scan further. Ideally, we
	 * would not configure LUN 0 until all LUNs are scanned.
	 */
	res = scsi_probe_and_add_lun(starget, 0, &bflags, NULL, rescan, NULL);
	if (res == SCSI_SCAN_LUN_PRESENT || res == SCSI_SCAN_TARGET_PRESENT) {
		if (scsi_report_lun_scan(starget, bflags, rescan) != 0)
			/*
			 * The REPORT LUN did not scan the target,
			 * do a sequential scan.
			 */
			scsi_sequential_lun_scan(starget, bflags,
						 starget->scsi_level, rescan);
	}

 out_reap:
	scsi_autopm_put_target(starget);
	/*
	 * paired with scsi_alloc_target(): determine if the target has
	 * any children at all and if not, nuke it
	 */
	scsi_target_reap(starget);

	put_device(&starget->dev);
}
1585 | 1586 | ||
/**
 * scsi_scan_target - scan a target id, possibly including all LUNs on the target.
 * @parent:	host to scan
 * @channel:	channel to scan
 * @id:		target id to scan
 * @lun:	Specific LUN to scan or SCAN_WILD_CARD
 * @rescan:	passed to LUN scanning routines
 *
 * Description:
 *     Scan the target id on @parent, @channel, and @id.  Scan at least LUN 0,
 *     and possibly all LUNs on the target id.
 *
 *     First try a REPORT LUN scan, if that does not scan the target, do a
 *     sequential scan of LUNs on the target id.
 *
 *     No-op when scanning is disabled (scsi_mod.scan=none) or when the host
 *     state currently forbids scanning.
 **/
void scsi_scan_target(struct device *parent, unsigned int channel,
		      unsigned int id, u64 lun, int rescan)
{
	struct Scsi_Host *shost = dev_to_shost(parent);

	if (strncmp(scsi_scan_type, "none", 4) == 0)
		return;

	mutex_lock(&shost->scan_mutex);
	/* serialize against any in-flight asynchronous scan */
	if (!shost->async_scan)
		scsi_complete_async_scans();

	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
		__scsi_scan_target(parent, channel, id, lun, rescan);
		scsi_autopm_put_host(shost);
	}
	mutex_unlock(&shost->scan_mutex);
}
EXPORT_SYMBOL(scsi_scan_target);
1620 | 1621 | ||
1621 | static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel, | 1622 | static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel, |
1622 | unsigned int id, u64 lun, int rescan) | 1623 | unsigned int id, u64 lun, int rescan) |
1623 | { | 1624 | { |
1624 | uint order_id; | 1625 | uint order_id; |
1625 | 1626 | ||
1626 | if (id == SCAN_WILD_CARD) | 1627 | if (id == SCAN_WILD_CARD) |
1627 | for (id = 0; id < shost->max_id; ++id) { | 1628 | for (id = 0; id < shost->max_id; ++id) { |
1628 | /* | 1629 | /* |
1629 | * XXX adapter drivers when possible (FCP, iSCSI) | 1630 | * XXX adapter drivers when possible (FCP, iSCSI) |
1630 | * could modify max_id to match the current max, | 1631 | * could modify max_id to match the current max, |
1631 | * not the absolute max. | 1632 | * not the absolute max. |
1632 | * | 1633 | * |
1633 | * XXX add a shost id iterator, so for example, | 1634 | * XXX add a shost id iterator, so for example, |
1634 | * the FC ID can be the same as a target id | 1635 | * the FC ID can be the same as a target id |
1635 | * without a huge overhead of sparse id's. | 1636 | * without a huge overhead of sparse id's. |
1636 | */ | 1637 | */ |
1637 | if (shost->reverse_ordering) | 1638 | if (shost->reverse_ordering) |
1638 | /* | 1639 | /* |
1639 | * Scan from high to low id. | 1640 | * Scan from high to low id. |
1640 | */ | 1641 | */ |
1641 | order_id = shost->max_id - id - 1; | 1642 | order_id = shost->max_id - id - 1; |
1642 | else | 1643 | else |
1643 | order_id = id; | 1644 | order_id = id; |
1644 | __scsi_scan_target(&shost->shost_gendev, channel, | 1645 | __scsi_scan_target(&shost->shost_gendev, channel, |
1645 | order_id, lun, rescan); | 1646 | order_id, lun, rescan); |
1646 | } | 1647 | } |
1647 | else | 1648 | else |
1648 | __scsi_scan_target(&shost->shost_gendev, channel, | 1649 | __scsi_scan_target(&shost->shost_gendev, channel, |
1649 | id, lun, rescan); | 1650 | id, lun, rescan); |
1650 | } | 1651 | } |
1651 | 1652 | ||
/*
 * scsi_scan_host_selected - scan the given channel/id/lun triple on @shost
 *
 * Any of @channel, @id, @lun may be SCAN_WILD_CARD to scan all values in
 * range.  Returns -EINVAL if a non-wildcard argument exceeds the host's
 * limits, 0 otherwise (including when scanning is currently not allowed).
 */
int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
			    unsigned int id, u64 lun, int rescan)
{
	SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost,
		"%s: <%u:%u:%llu>\n",
		__func__, channel, id, lun));

	/* validate non-wildcard arguments against the host's limits */
	if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
	    ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
	    ((lun != SCAN_WILD_CARD) && (lun >= shost->max_lun)))
		return -EINVAL;

	mutex_lock(&shost->scan_mutex);
	/* serialize against any in-flight asynchronous scan */
	if (!shost->async_scan)
		scsi_complete_async_scans();

	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
		if (channel == SCAN_WILD_CARD)
			for (channel = 0; channel <= shost->max_channel;
			     channel++)
				scsi_scan_channel(shost, channel, id, lun,
						  rescan);
		else
			scsi_scan_channel(shost, channel, id, lun, rescan);
		scsi_autopm_put_host(shost);
	}
	mutex_unlock(&shost->scan_mutex);

	return 0;
}
1682 | 1683 | ||
/*
 * Make every not-yet-visible device on @shost visible in sysfs; devices
 * that cannot be added (or whose host no longer allows scanning) are
 * removed again.  Used when an asynchronous scan completes.
 */
static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;
	shost_for_each_device(sdev, shost) {
		/* target removed before the device could be added */
		if (sdev->sdev_state == SDEV_DEL)
			continue;
		/* If device is already visible, skip adding it to sysfs */
		if (sdev->is_visible)
			continue;
		if (!scsi_host_scan_allowed(shost) ||
		    scsi_sysfs_add_sdev(sdev) != 0)
			__scsi_remove_device(sdev);
	}
}
1698 | 1699 | ||
/**
 * scsi_prep_async_scan - prepare for an async scan
 * @shost: the host which will be scanned
 * Returns: a cookie to be passed to scsi_finish_async_scan()
 *
 * Tells the midlayer this host is going to do an asynchronous scan.
 * It reserves the host's position in the scanning list and ensures
 * that other asynchronous scans started after this one won't affect the
 * ordering of the discovered devices.
 *
 * Returns NULL (meaning "scan synchronously / don't track") when async
 * scanning is disabled, when the host is already async-scanning, or on
 * allocation failure.
 */
static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
{
	struct async_scan_data *data;
	unsigned long flags;

	/* scsi_mod.scan=sync disables asynchronous scanning entirely */
	if (strncmp(scsi_scan_type, "sync", 4) == 0)
		return NULL;

	if (shost->async_scan) {
		shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__);
		return NULL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto err;
	/* hold a host reference for the lifetime of the scan cookie */
	data->shost = scsi_host_get(shost);
	if (!data->shost)
		goto err;
	init_completion(&data->prev_finished);

	mutex_lock(&shost->scan_mutex);
	spin_lock_irqsave(shost->host_lock, flags);
	shost->async_scan = 1;
	spin_unlock_irqrestore(shost->host_lock, flags);
	mutex_unlock(&shost->scan_mutex);

	/*
	 * Queue ourselves on scanning_hosts; if we are first in line,
	 * there is no previous scan to wait for, so complete immediately.
	 */
	spin_lock(&async_scan_lock);
	if (list_empty(&scanning_hosts))
		complete(&data->prev_finished);
	list_add_tail(&data->list, &scanning_hosts);
	spin_unlock(&async_scan_lock);

	return data;

 err:
	kfree(data);
	return NULL;
}
1748 | 1749 | ||
/**
 * scsi_finish_async_scan - asynchronous scan has finished
 * @data: cookie returned from earlier call to scsi_prep_async_scan()
 *
 * All the devices currently attached to this host have been found.
 * This function announces all the devices it has found to the rest
 * of the system.  It waits for any earlier-started async scans to finish
 * first (preserving discovery order), then wakes the next host in line
 * and releases the host reference taken by scsi_prep_async_scan().
 */
static void scsi_finish_async_scan(struct async_scan_data *data)
{
	struct Scsi_Host *shost;
	unsigned long flags;

	/* NULL cookie means scsi_prep_async_scan() declined; nothing to do */
	if (!data)
		return;

	shost = data->shost;

	mutex_lock(&shost->scan_mutex);

	if (!shost->async_scan) {
		shost_printk(KERN_INFO, shost, "%s called twice\n", __func__);
		dump_stack();
		mutex_unlock(&shost->scan_mutex);
		return;
	}

	/* preserve ordering: let previously-started scans announce first */
	wait_for_completion(&data->prev_finished);

	scsi_sysfs_add_devices(shost);

	spin_lock_irqsave(shost->host_lock, flags);
	shost->async_scan = 0;
	spin_unlock_irqrestore(shost->host_lock, flags);

	mutex_unlock(&shost->scan_mutex);

	/* dequeue ourselves and wake whichever host is now at the head */
	spin_lock(&async_scan_lock);
	list_del(&data->list);
	if (!list_empty(&scanning_hosts)) {
		struct async_scan_data *next = list_entry(scanning_hosts.next,
			struct async_scan_data, list);
		complete(&next->prev_finished);
	}
	spin_unlock(&async_scan_lock);

	scsi_autopm_put_host(shost);
	scsi_host_put(shost);
	kfree(data);
}
1799 | 1800 | ||
1800 | static void do_scsi_scan_host(struct Scsi_Host *shost) | 1801 | static void do_scsi_scan_host(struct Scsi_Host *shost) |
1801 | { | 1802 | { |
1802 | if (shost->hostt->scan_finished) { | 1803 | if (shost->hostt->scan_finished) { |
1803 | unsigned long start = jiffies; | 1804 | unsigned long start = jiffies; |
1804 | if (shost->hostt->scan_start) | 1805 | if (shost->hostt->scan_start) |
1805 | shost->hostt->scan_start(shost); | 1806 | shost->hostt->scan_start(shost); |
1806 | 1807 | ||
1807 | while (!shost->hostt->scan_finished(shost, jiffies - start)) | 1808 | while (!shost->hostt->scan_finished(shost, jiffies - start)) |
1808 | msleep(10); | 1809 | msleep(10); |
1809 | } else { | 1810 | } else { |
1810 | scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD, | 1811 | scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD, |
1811 | SCAN_WILD_CARD, 0); | 1812 | SCAN_WILD_CARD, 0); |
1812 | } | 1813 | } |
1813 | } | 1814 | } |
1814 | 1815 | ||
1815 | static void do_scan_async(void *_data, async_cookie_t c) | 1816 | static void do_scan_async(void *_data, async_cookie_t c) |
1816 | { | 1817 | { |
1817 | struct async_scan_data *data = _data; | 1818 | struct async_scan_data *data = _data; |
1818 | struct Scsi_Host *shost = data->shost; | 1819 | struct Scsi_Host *shost = data->shost; |
1819 | 1820 | ||
1820 | do_scsi_scan_host(shost); | 1821 | do_scsi_scan_host(shost); |
1821 | scsi_finish_async_scan(data); | 1822 | scsi_finish_async_scan(data); |
1822 | } | 1823 | } |
1823 | 1824 | ||
1824 | /** | 1825 | /** |
1825 | * scsi_scan_host - scan the given adapter | 1826 | * scsi_scan_host - scan the given adapter |
1826 | * @shost: adapter to scan | 1827 | * @shost: adapter to scan |
1827 | **/ | 1828 | **/ |
1828 | void scsi_scan_host(struct Scsi_Host *shost) | 1829 | void scsi_scan_host(struct Scsi_Host *shost) |
1829 | { | 1830 | { |
1830 | struct async_scan_data *data; | 1831 | struct async_scan_data *data; |
1831 | 1832 | ||
1832 | if (strncmp(scsi_scan_type, "none", 4) == 0) | 1833 | if (strncmp(scsi_scan_type, "none", 4) == 0) |
1833 | return; | 1834 | return; |
1834 | if (scsi_autopm_get_host(shost) < 0) | 1835 | if (scsi_autopm_get_host(shost) < 0) |
1835 | return; | 1836 | return; |
1836 | 1837 | ||
1837 | data = scsi_prep_async_scan(shost); | 1838 | data = scsi_prep_async_scan(shost); |
1838 | if (!data) { | 1839 | if (!data) { |
1839 | do_scsi_scan_host(shost); | 1840 | do_scsi_scan_host(shost); |
1840 | scsi_autopm_put_host(shost); | 1841 | scsi_autopm_put_host(shost); |
1841 | return; | 1842 | return; |
1842 | } | 1843 | } |
1843 | 1844 | ||
1844 | /* register with the async subsystem so wait_for_device_probe() | 1845 | /* register with the async subsystem so wait_for_device_probe() |
1845 | * will flush this work | 1846 | * will flush this work |
1846 | */ | 1847 | */ |
1847 | async_schedule(do_scan_async, data); | 1848 | async_schedule(do_scan_async, data); |
1848 | 1849 | ||
1849 | /* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */ | 1850 | /* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */ |
1850 | } | 1851 | } |
1851 | EXPORT_SYMBOL(scsi_scan_host); | 1852 | EXPORT_SYMBOL(scsi_scan_host); |
1852 | 1853 | ||
1853 | void scsi_forget_host(struct Scsi_Host *shost) | 1854 | void scsi_forget_host(struct Scsi_Host *shost) |
1854 | { | 1855 | { |
1855 | struct scsi_device *sdev; | 1856 | struct scsi_device *sdev; |
1856 | unsigned long flags; | 1857 | unsigned long flags; |
1857 | 1858 | ||
1858 | restart: | 1859 | restart: |
1859 | spin_lock_irqsave(shost->host_lock, flags); | 1860 | spin_lock_irqsave(shost->host_lock, flags); |
1860 | list_for_each_entry(sdev, &shost->__devices, siblings) { | 1861 | list_for_each_entry(sdev, &shost->__devices, siblings) { |
1861 | if (sdev->sdev_state == SDEV_DEL) | 1862 | if (sdev->sdev_state == SDEV_DEL) |
1862 | continue; | 1863 | continue; |
1863 | spin_unlock_irqrestore(shost->host_lock, flags); | 1864 | spin_unlock_irqrestore(shost->host_lock, flags); |
1864 | __scsi_remove_device(sdev); | 1865 | __scsi_remove_device(sdev); |
1865 | goto restart; | 1866 | goto restart; |
1866 | } | 1867 | } |
1867 | spin_unlock_irqrestore(shost->host_lock, flags); | 1868 | spin_unlock_irqrestore(shost->host_lock, flags); |
1868 | } | 1869 | } |
1869 | 1870 | ||
1870 | /** | 1871 | /** |
1871 | * scsi_get_host_dev - Create a scsi_device that points to the host adapter itself | 1872 | * scsi_get_host_dev - Create a scsi_device that points to the host adapter itself |
1872 | * @shost: Host that needs a scsi_device | 1873 | * @shost: Host that needs a scsi_device |
1873 | * | 1874 | * |
1874 | * Lock status: None assumed. | 1875 | * Lock status: None assumed. |
1875 | * | 1876 | * |
1876 | * Returns: The scsi_device or NULL | 1877 | * Returns: The scsi_device or NULL |
1877 | * | 1878 | * |
1878 | * Notes: | 1879 | * Notes: |
1879 | * Attach a single scsi_device to the Scsi_Host - this should | 1880 | * Attach a single scsi_device to the Scsi_Host - this should |
1880 | * be made to look like a "pseudo-device" that points to the | 1881 | * be made to look like a "pseudo-device" that points to the |
1881 | * HA itself. | 1882 | * HA itself. |
1882 | * | 1883 | * |
1883 | * Note - this device is not accessible from any high-level | 1884 | * Note - this device is not accessible from any high-level |
1884 | * drivers (including generics), which is probably not | 1885 | * drivers (including generics), which is probably not |
1885 | * optimal. We can add hooks later to attach. | 1886 | * optimal. We can add hooks later to attach. |
1886 | */ | 1887 | */ |
1887 | struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost) | 1888 | struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost) |
1888 | { | 1889 | { |
1889 | struct scsi_device *sdev = NULL; | 1890 | struct scsi_device *sdev = NULL; |
1890 | struct scsi_target *starget; | 1891 | struct scsi_target *starget; |
1891 | 1892 | ||
1892 | mutex_lock(&shost->scan_mutex); | 1893 | mutex_lock(&shost->scan_mutex); |
1893 | if (!scsi_host_scan_allowed(shost)) | 1894 | if (!scsi_host_scan_allowed(shost)) |
1894 | goto out; | 1895 | goto out; |
1895 | starget = scsi_alloc_target(&shost->shost_gendev, 0, shost->this_id); | 1896 | starget = scsi_alloc_target(&shost->shost_gendev, 0, shost->this_id); |
1896 | if (!starget) | 1897 | if (!starget) |
1897 | goto out; | 1898 | goto out; |
1898 | 1899 | ||
1899 | sdev = scsi_alloc_sdev(starget, 0, NULL); | 1900 | sdev = scsi_alloc_sdev(starget, 0, NULL); |
1900 | if (sdev) | 1901 | if (sdev) |
1901 | sdev->borken = 0; | 1902 | sdev->borken = 0; |
1902 | else | 1903 | else |
1903 | scsi_target_reap(starget); | 1904 | scsi_target_reap(starget); |
1904 | put_device(&starget->dev); | 1905 | put_device(&starget->dev); |
1905 | out: | 1906 | out: |
1906 | mutex_unlock(&shost->scan_mutex); | 1907 | mutex_unlock(&shost->scan_mutex); |
1907 | return sdev; | 1908 | return sdev; |
1908 | } | 1909 | } |
1909 | EXPORT_SYMBOL(scsi_get_host_dev); | 1910 | EXPORT_SYMBOL(scsi_get_host_dev); |
1910 | 1911 | ||
1911 | /** | 1912 | /** |
1912 | * scsi_free_host_dev - Free a scsi_device that points to the host adapter itself | 1913 | * scsi_free_host_dev - Free a scsi_device that points to the host adapter itself |
1913 | * @sdev: Host device to be freed | 1914 | * @sdev: Host device to be freed |
1914 | * | 1915 | * |
1915 | * Lock status: None assumed. | 1916 | * Lock status: None assumed. |
1916 | * | 1917 | * |
1917 | * Returns: Nothing | 1918 | * Returns: Nothing |
1918 | */ | 1919 | */ |
1919 | void scsi_free_host_dev(struct scsi_device *sdev) | 1920 | void scsi_free_host_dev(struct scsi_device *sdev) |
1920 | { | 1921 | { |
1921 | BUG_ON(sdev->id != sdev->host->this_id); | 1922 | BUG_ON(sdev->id != sdev->host->this_id); |
1922 | 1923 | ||
1923 | __scsi_remove_device(sdev); | 1924 | __scsi_remove_device(sdev); |
1924 | } | 1925 | } |
1925 | EXPORT_SYMBOL(scsi_free_host_dev); | 1926 | EXPORT_SYMBOL(scsi_free_host_dev); |
1926 | 1927 | ||
1927 | 1928 |
drivers/scsi/scsi_sysfs.c
1 | /* | 1 | /* |
2 | * scsi_sysfs.c | 2 | * scsi_sysfs.c |
3 | * | 3 | * |
4 | * SCSI sysfs interface routines. | 4 | * SCSI sysfs interface routines. |
5 | * | 5 | * |
6 | * Created to pull SCSI mid layer sysfs routines into one file. | 6 | * Created to pull SCSI mid layer sysfs routines into one file. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
10 | #include <linux/slab.h> | 10 | #include <linux/slab.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/blkdev.h> | 12 | #include <linux/blkdev.h> |
13 | #include <linux/device.h> | 13 | #include <linux/device.h> |
14 | #include <linux/pm_runtime.h> | 14 | #include <linux/pm_runtime.h> |
15 | 15 | ||
16 | #include <scsi/scsi.h> | 16 | #include <scsi/scsi.h> |
17 | #include <scsi/scsi_device.h> | 17 | #include <scsi/scsi_device.h> |
18 | #include <scsi/scsi_host.h> | 18 | #include <scsi/scsi_host.h> |
19 | #include <scsi/scsi_tcq.h> | 19 | #include <scsi/scsi_tcq.h> |
20 | #include <scsi/scsi_transport.h> | 20 | #include <scsi/scsi_transport.h> |
21 | #include <scsi/scsi_driver.h> | 21 | #include <scsi/scsi_driver.h> |
22 | 22 | ||
23 | #include "scsi_priv.h" | 23 | #include "scsi_priv.h" |
24 | #include "scsi_logging.h" | 24 | #include "scsi_logging.h" |
25 | 25 | ||
26 | static struct device_type scsi_dev_type; | 26 | static struct device_type scsi_dev_type; |
27 | 27 | ||
28 | static const struct { | 28 | static const struct { |
29 | enum scsi_device_state value; | 29 | enum scsi_device_state value; |
30 | char *name; | 30 | char *name; |
31 | } sdev_states[] = { | 31 | } sdev_states[] = { |
32 | { SDEV_CREATED, "created" }, | 32 | { SDEV_CREATED, "created" }, |
33 | { SDEV_RUNNING, "running" }, | 33 | { SDEV_RUNNING, "running" }, |
34 | { SDEV_CANCEL, "cancel" }, | 34 | { SDEV_CANCEL, "cancel" }, |
35 | { SDEV_DEL, "deleted" }, | 35 | { SDEV_DEL, "deleted" }, |
36 | { SDEV_QUIESCE, "quiesce" }, | 36 | { SDEV_QUIESCE, "quiesce" }, |
37 | { SDEV_OFFLINE, "offline" }, | 37 | { SDEV_OFFLINE, "offline" }, |
38 | { SDEV_TRANSPORT_OFFLINE, "transport-offline" }, | 38 | { SDEV_TRANSPORT_OFFLINE, "transport-offline" }, |
39 | { SDEV_BLOCK, "blocked" }, | 39 | { SDEV_BLOCK, "blocked" }, |
40 | { SDEV_CREATED_BLOCK, "created-blocked" }, | 40 | { SDEV_CREATED_BLOCK, "created-blocked" }, |
41 | }; | 41 | }; |
42 | 42 | ||
43 | const char *scsi_device_state_name(enum scsi_device_state state) | 43 | const char *scsi_device_state_name(enum scsi_device_state state) |
44 | { | 44 | { |
45 | int i; | 45 | int i; |
46 | char *name = NULL; | 46 | char *name = NULL; |
47 | 47 | ||
48 | for (i = 0; i < ARRAY_SIZE(sdev_states); i++) { | 48 | for (i = 0; i < ARRAY_SIZE(sdev_states); i++) { |
49 | if (sdev_states[i].value == state) { | 49 | if (sdev_states[i].value == state) { |
50 | name = sdev_states[i].name; | 50 | name = sdev_states[i].name; |
51 | break; | 51 | break; |
52 | } | 52 | } |
53 | } | 53 | } |
54 | return name; | 54 | return name; |
55 | } | 55 | } |
56 | 56 | ||
57 | static const struct { | 57 | static const struct { |
58 | enum scsi_host_state value; | 58 | enum scsi_host_state value; |
59 | char *name; | 59 | char *name; |
60 | } shost_states[] = { | 60 | } shost_states[] = { |
61 | { SHOST_CREATED, "created" }, | 61 | { SHOST_CREATED, "created" }, |
62 | { SHOST_RUNNING, "running" }, | 62 | { SHOST_RUNNING, "running" }, |
63 | { SHOST_CANCEL, "cancel" }, | 63 | { SHOST_CANCEL, "cancel" }, |
64 | { SHOST_DEL, "deleted" }, | 64 | { SHOST_DEL, "deleted" }, |
65 | { SHOST_RECOVERY, "recovery" }, | 65 | { SHOST_RECOVERY, "recovery" }, |
66 | { SHOST_CANCEL_RECOVERY, "cancel/recovery" }, | 66 | { SHOST_CANCEL_RECOVERY, "cancel/recovery" }, |
67 | { SHOST_DEL_RECOVERY, "deleted/recovery", }, | 67 | { SHOST_DEL_RECOVERY, "deleted/recovery", }, |
68 | }; | 68 | }; |
69 | const char *scsi_host_state_name(enum scsi_host_state state) | 69 | const char *scsi_host_state_name(enum scsi_host_state state) |
70 | { | 70 | { |
71 | int i; | 71 | int i; |
72 | char *name = NULL; | 72 | char *name = NULL; |
73 | 73 | ||
74 | for (i = 0; i < ARRAY_SIZE(shost_states); i++) { | 74 | for (i = 0; i < ARRAY_SIZE(shost_states); i++) { |
75 | if (shost_states[i].value == state) { | 75 | if (shost_states[i].value == state) { |
76 | name = shost_states[i].name; | 76 | name = shost_states[i].name; |
77 | break; | 77 | break; |
78 | } | 78 | } |
79 | } | 79 | } |
80 | return name; | 80 | return name; |
81 | } | 81 | } |
82 | 82 | ||
83 | static int check_set(unsigned long long *val, char *src) | 83 | static int check_set(unsigned long long *val, char *src) |
84 | { | 84 | { |
85 | char *last; | 85 | char *last; |
86 | 86 | ||
87 | if (strncmp(src, "-", 20) == 0) { | 87 | if (strncmp(src, "-", 20) == 0) { |
88 | *val = SCAN_WILD_CARD; | 88 | *val = SCAN_WILD_CARD; |
89 | } else { | 89 | } else { |
90 | /* | 90 | /* |
91 | * Doesn't check for int overflow | 91 | * Doesn't check for int overflow |
92 | */ | 92 | */ |
93 | *val = simple_strtoull(src, &last, 0); | 93 | *val = simple_strtoull(src, &last, 0); |
94 | if (*last != '\0') | 94 | if (*last != '\0') |
95 | return 1; | 95 | return 1; |
96 | } | 96 | } |
97 | return 0; | 97 | return 0; |
98 | } | 98 | } |
99 | 99 | ||
100 | static int scsi_scan(struct Scsi_Host *shost, const char *str) | 100 | static int scsi_scan(struct Scsi_Host *shost, const char *str) |
101 | { | 101 | { |
102 | char s1[15], s2[15], s3[17], junk; | 102 | char s1[15], s2[15], s3[17], junk; |
103 | unsigned long long channel, id, lun; | 103 | unsigned long long channel, id, lun; |
104 | int res; | 104 | int res; |
105 | 105 | ||
106 | res = sscanf(str, "%10s %10s %16s %c", s1, s2, s3, &junk); | 106 | res = sscanf(str, "%10s %10s %16s %c", s1, s2, s3, &junk); |
107 | if (res != 3) | 107 | if (res != 3) |
108 | return -EINVAL; | 108 | return -EINVAL; |
109 | if (check_set(&channel, s1)) | 109 | if (check_set(&channel, s1)) |
110 | return -EINVAL; | 110 | return -EINVAL; |
111 | if (check_set(&id, s2)) | 111 | if (check_set(&id, s2)) |
112 | return -EINVAL; | 112 | return -EINVAL; |
113 | if (check_set(&lun, s3)) | 113 | if (check_set(&lun, s3)) |
114 | return -EINVAL; | 114 | return -EINVAL; |
115 | if (shost->transportt->user_scan) | 115 | if (shost->transportt->user_scan) |
116 | res = shost->transportt->user_scan(shost, channel, id, lun); | 116 | res = shost->transportt->user_scan(shost, channel, id, lun); |
117 | else | 117 | else |
118 | res = scsi_scan_host_selected(shost, channel, id, lun, 1); | 118 | res = scsi_scan_host_selected(shost, channel, id, lun, 1); |
119 | return res; | 119 | return res; |
120 | } | 120 | } |
121 | 121 | ||
122 | /* | 122 | /* |
123 | * shost_show_function: macro to create an attr function that can be used to | 123 | * shost_show_function: macro to create an attr function that can be used to |
124 | * show a non-bit field. | 124 | * show a non-bit field. |
125 | */ | 125 | */ |
126 | #define shost_show_function(name, field, format_string) \ | 126 | #define shost_show_function(name, field, format_string) \ |
127 | static ssize_t \ | 127 | static ssize_t \ |
128 | show_##name (struct device *dev, struct device_attribute *attr, \ | 128 | show_##name (struct device *dev, struct device_attribute *attr, \ |
129 | char *buf) \ | 129 | char *buf) \ |
130 | { \ | 130 | { \ |
131 | struct Scsi_Host *shost = class_to_shost(dev); \ | 131 | struct Scsi_Host *shost = class_to_shost(dev); \ |
132 | return snprintf (buf, 20, format_string, shost->field); \ | 132 | return snprintf (buf, 20, format_string, shost->field); \ |
133 | } | 133 | } |
134 | 134 | ||
135 | /* | 135 | /* |
136 | * shost_rd_attr: macro to create a function and attribute variable for a | 136 | * shost_rd_attr: macro to create a function and attribute variable for a |
137 | * read only field. | 137 | * read only field. |
138 | */ | 138 | */ |
139 | #define shost_rd_attr2(name, field, format_string) \ | 139 | #define shost_rd_attr2(name, field, format_string) \ |
140 | shost_show_function(name, field, format_string) \ | 140 | shost_show_function(name, field, format_string) \ |
141 | static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL); | 141 | static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL); |
142 | 142 | ||
143 | #define shost_rd_attr(field, format_string) \ | 143 | #define shost_rd_attr(field, format_string) \ |
144 | shost_rd_attr2(field, field, format_string) | 144 | shost_rd_attr2(field, field, format_string) |
145 | 145 | ||
146 | /* | 146 | /* |
147 | * Create the actual show/store functions and data structures. | 147 | * Create the actual show/store functions and data structures. |
148 | */ | 148 | */ |
149 | 149 | ||
150 | static ssize_t | 150 | static ssize_t |
151 | store_scan(struct device *dev, struct device_attribute *attr, | 151 | store_scan(struct device *dev, struct device_attribute *attr, |
152 | const char *buf, size_t count) | 152 | const char *buf, size_t count) |
153 | { | 153 | { |
154 | struct Scsi_Host *shost = class_to_shost(dev); | 154 | struct Scsi_Host *shost = class_to_shost(dev); |
155 | int res; | 155 | int res; |
156 | 156 | ||
157 | res = scsi_scan(shost, buf); | 157 | res = scsi_scan(shost, buf); |
158 | if (res == 0) | 158 | if (res == 0) |
159 | res = count; | 159 | res = count; |
160 | return res; | 160 | return res; |
161 | }; | 161 | }; |
162 | static DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan); | 162 | static DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan); |
163 | 163 | ||
164 | static ssize_t | 164 | static ssize_t |
165 | store_shost_state(struct device *dev, struct device_attribute *attr, | 165 | store_shost_state(struct device *dev, struct device_attribute *attr, |
166 | const char *buf, size_t count) | 166 | const char *buf, size_t count) |
167 | { | 167 | { |
168 | int i; | 168 | int i; |
169 | struct Scsi_Host *shost = class_to_shost(dev); | 169 | struct Scsi_Host *shost = class_to_shost(dev); |
170 | enum scsi_host_state state = 0; | 170 | enum scsi_host_state state = 0; |
171 | 171 | ||
172 | for (i = 0; i < ARRAY_SIZE(shost_states); i++) { | 172 | for (i = 0; i < ARRAY_SIZE(shost_states); i++) { |
173 | const int len = strlen(shost_states[i].name); | 173 | const int len = strlen(shost_states[i].name); |
174 | if (strncmp(shost_states[i].name, buf, len) == 0 && | 174 | if (strncmp(shost_states[i].name, buf, len) == 0 && |
175 | buf[len] == '\n') { | 175 | buf[len] == '\n') { |
176 | state = shost_states[i].value; | 176 | state = shost_states[i].value; |
177 | break; | 177 | break; |
178 | } | 178 | } |
179 | } | 179 | } |
180 | if (!state) | 180 | if (!state) |
181 | return -EINVAL; | 181 | return -EINVAL; |
182 | 182 | ||
183 | if (scsi_host_set_state(shost, state)) | 183 | if (scsi_host_set_state(shost, state)) |
184 | return -EINVAL; | 184 | return -EINVAL; |
185 | return count; | 185 | return count; |
186 | } | 186 | } |
187 | 187 | ||
188 | static ssize_t | 188 | static ssize_t |
189 | show_shost_state(struct device *dev, struct device_attribute *attr, char *buf) | 189 | show_shost_state(struct device *dev, struct device_attribute *attr, char *buf) |
190 | { | 190 | { |
191 | struct Scsi_Host *shost = class_to_shost(dev); | 191 | struct Scsi_Host *shost = class_to_shost(dev); |
192 | const char *name = scsi_host_state_name(shost->shost_state); | 192 | const char *name = scsi_host_state_name(shost->shost_state); |
193 | 193 | ||
194 | if (!name) | 194 | if (!name) |
195 | return -EINVAL; | 195 | return -EINVAL; |
196 | 196 | ||
197 | return snprintf(buf, 20, "%s\n", name); | 197 | return snprintf(buf, 20, "%s\n", name); |
198 | } | 198 | } |
199 | 199 | ||
200 | /* DEVICE_ATTR(state) clashes with dev_attr_state for sdev */ | 200 | /* DEVICE_ATTR(state) clashes with dev_attr_state for sdev */ |
201 | struct device_attribute dev_attr_hstate = | 201 | struct device_attribute dev_attr_hstate = |
202 | __ATTR(state, S_IRUGO | S_IWUSR, show_shost_state, store_shost_state); | 202 | __ATTR(state, S_IRUGO | S_IWUSR, show_shost_state, store_shost_state); |
203 | 203 | ||
204 | static ssize_t | 204 | static ssize_t |
205 | show_shost_mode(unsigned int mode, char *buf) | 205 | show_shost_mode(unsigned int mode, char *buf) |
206 | { | 206 | { |
207 | ssize_t len = 0; | 207 | ssize_t len = 0; |
208 | 208 | ||
209 | if (mode & MODE_INITIATOR) | 209 | if (mode & MODE_INITIATOR) |
210 | len = sprintf(buf, "%s", "Initiator"); | 210 | len = sprintf(buf, "%s", "Initiator"); |
211 | 211 | ||
212 | if (mode & MODE_TARGET) | 212 | if (mode & MODE_TARGET) |
213 | len += sprintf(buf + len, "%s%s", len ? ", " : "", "Target"); | 213 | len += sprintf(buf + len, "%s%s", len ? ", " : "", "Target"); |
214 | 214 | ||
215 | len += sprintf(buf + len, "\n"); | 215 | len += sprintf(buf + len, "\n"); |
216 | 216 | ||
217 | return len; | 217 | return len; |
218 | } | 218 | } |
219 | 219 | ||
220 | static ssize_t | 220 | static ssize_t |
221 | show_shost_supported_mode(struct device *dev, struct device_attribute *attr, | 221 | show_shost_supported_mode(struct device *dev, struct device_attribute *attr, |
222 | char *buf) | 222 | char *buf) |
223 | { | 223 | { |
224 | struct Scsi_Host *shost = class_to_shost(dev); | 224 | struct Scsi_Host *shost = class_to_shost(dev); |
225 | unsigned int supported_mode = shost->hostt->supported_mode; | 225 | unsigned int supported_mode = shost->hostt->supported_mode; |
226 | 226 | ||
227 | if (supported_mode == MODE_UNKNOWN) | 227 | if (supported_mode == MODE_UNKNOWN) |
228 | /* by default this should be initiator */ | 228 | /* by default this should be initiator */ |
229 | supported_mode = MODE_INITIATOR; | 229 | supported_mode = MODE_INITIATOR; |
230 | 230 | ||
231 | return show_shost_mode(supported_mode, buf); | 231 | return show_shost_mode(supported_mode, buf); |
232 | } | 232 | } |
233 | 233 | ||
234 | static DEVICE_ATTR(supported_mode, S_IRUGO | S_IWUSR, show_shost_supported_mode, NULL); | 234 | static DEVICE_ATTR(supported_mode, S_IRUGO | S_IWUSR, show_shost_supported_mode, NULL); |
235 | 235 | ||
236 | static ssize_t | 236 | static ssize_t |
237 | show_shost_active_mode(struct device *dev, | 237 | show_shost_active_mode(struct device *dev, |
238 | struct device_attribute *attr, char *buf) | 238 | struct device_attribute *attr, char *buf) |
239 | { | 239 | { |
240 | struct Scsi_Host *shost = class_to_shost(dev); | 240 | struct Scsi_Host *shost = class_to_shost(dev); |
241 | 241 | ||
242 | if (shost->active_mode == MODE_UNKNOWN) | 242 | if (shost->active_mode == MODE_UNKNOWN) |
243 | return snprintf(buf, 20, "unknown\n"); | 243 | return snprintf(buf, 20, "unknown\n"); |
244 | else | 244 | else |
245 | return show_shost_mode(shost->active_mode, buf); | 245 | return show_shost_mode(shost->active_mode, buf); |
246 | } | 246 | } |
247 | 247 | ||
248 | static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL); | 248 | static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL); |
249 | 249 | ||
250 | static int check_reset_type(const char *str) | 250 | static int check_reset_type(const char *str) |
251 | { | 251 | { |
252 | if (sysfs_streq(str, "adapter")) | 252 | if (sysfs_streq(str, "adapter")) |
253 | return SCSI_ADAPTER_RESET; | 253 | return SCSI_ADAPTER_RESET; |
254 | else if (sysfs_streq(str, "firmware")) | 254 | else if (sysfs_streq(str, "firmware")) |
255 | return SCSI_FIRMWARE_RESET; | 255 | return SCSI_FIRMWARE_RESET; |
256 | else | 256 | else |
257 | return 0; | 257 | return 0; |
258 | } | 258 | } |
259 | 259 | ||
260 | static ssize_t | 260 | static ssize_t |
261 | store_host_reset(struct device *dev, struct device_attribute *attr, | 261 | store_host_reset(struct device *dev, struct device_attribute *attr, |
262 | const char *buf, size_t count) | 262 | const char *buf, size_t count) |
263 | { | 263 | { |
264 | struct Scsi_Host *shost = class_to_shost(dev); | 264 | struct Scsi_Host *shost = class_to_shost(dev); |
265 | struct scsi_host_template *sht = shost->hostt; | 265 | struct scsi_host_template *sht = shost->hostt; |
266 | int ret = -EINVAL; | 266 | int ret = -EINVAL; |
267 | int type; | 267 | int type; |
268 | 268 | ||
269 | type = check_reset_type(buf); | 269 | type = check_reset_type(buf); |
270 | if (!type) | 270 | if (!type) |
271 | goto exit_store_host_reset; | 271 | goto exit_store_host_reset; |
272 | 272 | ||
273 | if (sht->host_reset) | 273 | if (sht->host_reset) |
274 | ret = sht->host_reset(shost, type); | 274 | ret = sht->host_reset(shost, type); |
275 | 275 | ||
276 | exit_store_host_reset: | 276 | exit_store_host_reset: |
277 | if (ret == 0) | 277 | if (ret == 0) |
278 | ret = count; | 278 | ret = count; |
279 | return ret; | 279 | return ret; |
280 | } | 280 | } |
281 | 281 | ||
282 | static DEVICE_ATTR(host_reset, S_IWUSR, NULL, store_host_reset); | 282 | static DEVICE_ATTR(host_reset, S_IWUSR, NULL, store_host_reset); |
283 | 283 | ||
284 | static ssize_t | 284 | static ssize_t |
285 | show_shost_eh_deadline(struct device *dev, | 285 | show_shost_eh_deadline(struct device *dev, |
286 | struct device_attribute *attr, char *buf) | 286 | struct device_attribute *attr, char *buf) |
287 | { | 287 | { |
288 | struct Scsi_Host *shost = class_to_shost(dev); | 288 | struct Scsi_Host *shost = class_to_shost(dev); |
289 | 289 | ||
290 | if (shost->eh_deadline == -1) | 290 | if (shost->eh_deadline == -1) |
291 | return snprintf(buf, strlen("off") + 2, "off\n"); | 291 | return snprintf(buf, strlen("off") + 2, "off\n"); |
292 | return sprintf(buf, "%u\n", shost->eh_deadline / HZ); | 292 | return sprintf(buf, "%u\n", shost->eh_deadline / HZ); |
293 | } | 293 | } |
294 | 294 | ||
295 | static ssize_t | 295 | static ssize_t |
296 | store_shost_eh_deadline(struct device *dev, struct device_attribute *attr, | 296 | store_shost_eh_deadline(struct device *dev, struct device_attribute *attr, |
297 | const char *buf, size_t count) | 297 | const char *buf, size_t count) |
298 | { | 298 | { |
299 | struct Scsi_Host *shost = class_to_shost(dev); | 299 | struct Scsi_Host *shost = class_to_shost(dev); |
300 | int ret = -EINVAL; | 300 | int ret = -EINVAL; |
301 | unsigned long deadline, flags; | 301 | unsigned long deadline, flags; |
302 | 302 | ||
303 | if (shost->transportt && | 303 | if (shost->transportt && |
304 | (shost->transportt->eh_strategy_handler || | 304 | (shost->transportt->eh_strategy_handler || |
305 | !shost->hostt->eh_host_reset_handler)) | 305 | !shost->hostt->eh_host_reset_handler)) |
306 | return ret; | 306 | return ret; |
307 | 307 | ||
308 | if (!strncmp(buf, "off", strlen("off"))) | 308 | if (!strncmp(buf, "off", strlen("off"))) |
309 | deadline = -1; | 309 | deadline = -1; |
310 | else { | 310 | else { |
311 | ret = kstrtoul(buf, 10, &deadline); | 311 | ret = kstrtoul(buf, 10, &deadline); |
312 | if (ret) | 312 | if (ret) |
313 | return ret; | 313 | return ret; |
314 | if (deadline * HZ > UINT_MAX) | 314 | if (deadline * HZ > UINT_MAX) |
315 | return -EINVAL; | 315 | return -EINVAL; |
316 | } | 316 | } |
317 | 317 | ||
318 | spin_lock_irqsave(shost->host_lock, flags); | 318 | spin_lock_irqsave(shost->host_lock, flags); |
319 | if (scsi_host_in_recovery(shost)) | 319 | if (scsi_host_in_recovery(shost)) |
320 | ret = -EBUSY; | 320 | ret = -EBUSY; |
321 | else { | 321 | else { |
322 | if (deadline == -1) | 322 | if (deadline == -1) |
323 | shost->eh_deadline = -1; | 323 | shost->eh_deadline = -1; |
324 | else | 324 | else |
325 | shost->eh_deadline = deadline * HZ; | 325 | shost->eh_deadline = deadline * HZ; |
326 | 326 | ||
327 | ret = count; | 327 | ret = count; |
328 | } | 328 | } |
329 | spin_unlock_irqrestore(shost->host_lock, flags); | 329 | spin_unlock_irqrestore(shost->host_lock, flags); |
330 | 330 | ||
331 | return ret; | 331 | return ret; |
332 | } | 332 | } |
333 | 333 | ||
334 | static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store_shost_eh_deadline); | 334 | static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store_shost_eh_deadline); |
335 | 335 | ||
336 | shost_rd_attr(use_blk_mq, "%d\n"); | 336 | shost_rd_attr(use_blk_mq, "%d\n"); |
337 | shost_rd_attr(unique_id, "%u\n"); | 337 | shost_rd_attr(unique_id, "%u\n"); |
338 | shost_rd_attr(cmd_per_lun, "%hd\n"); | 338 | shost_rd_attr(cmd_per_lun, "%hd\n"); |
339 | shost_rd_attr(can_queue, "%hd\n"); | 339 | shost_rd_attr(can_queue, "%hd\n"); |
340 | shost_rd_attr(sg_tablesize, "%hu\n"); | 340 | shost_rd_attr(sg_tablesize, "%hu\n"); |
341 | shost_rd_attr(sg_prot_tablesize, "%hu\n"); | 341 | shost_rd_attr(sg_prot_tablesize, "%hu\n"); |
342 | shost_rd_attr(unchecked_isa_dma, "%d\n"); | 342 | shost_rd_attr(unchecked_isa_dma, "%d\n"); |
343 | shost_rd_attr(prot_capabilities, "%u\n"); | 343 | shost_rd_attr(prot_capabilities, "%u\n"); |
344 | shost_rd_attr(prot_guard_type, "%hd\n"); | 344 | shost_rd_attr(prot_guard_type, "%hd\n"); |
345 | shost_rd_attr2(proc_name, hostt->proc_name, "%s\n"); | 345 | shost_rd_attr2(proc_name, hostt->proc_name, "%s\n"); |
346 | 346 | ||
347 | static ssize_t | 347 | static ssize_t |
348 | show_host_busy(struct device *dev, struct device_attribute *attr, char *buf) | 348 | show_host_busy(struct device *dev, struct device_attribute *attr, char *buf) |
349 | { | 349 | { |
350 | struct Scsi_Host *shost = class_to_shost(dev); | 350 | struct Scsi_Host *shost = class_to_shost(dev); |
351 | return snprintf(buf, 20, "%d\n", atomic_read(&shost->host_busy)); | 351 | return snprintf(buf, 20, "%d\n", atomic_read(&shost->host_busy)); |
352 | } | 352 | } |
353 | static DEVICE_ATTR(host_busy, S_IRUGO, show_host_busy, NULL); | 353 | static DEVICE_ATTR(host_busy, S_IRUGO, show_host_busy, NULL); |
354 | 354 | ||
/* Default sysfs attributes exposed for every Scsi_Host instance. */
static struct attribute *scsi_sysfs_shost_attrs[] = {
	&dev_attr_use_blk_mq.attr,
	&dev_attr_unique_id.attr,
	&dev_attr_host_busy.attr,
	&dev_attr_cmd_per_lun.attr,
	&dev_attr_can_queue.attr,
	&dev_attr_sg_tablesize.attr,
	&dev_attr_sg_prot_tablesize.attr,
	&dev_attr_unchecked_isa_dma.attr,
	&dev_attr_proc_name.attr,
	&dev_attr_scan.attr,
	&dev_attr_hstate.attr,
	&dev_attr_supported_mode.attr,
	&dev_attr_active_mode.attr,
	&dev_attr_prot_capabilities.attr,
	&dev_attr_prot_guard_type.attr,
	&dev_attr_host_reset.attr,
	&dev_attr_eh_deadline.attr,
	NULL	/* terminator required by the attribute-group walker */
};
375 | 375 | ||
/* Attribute group wrapping the default host attributes above. */
struct attribute_group scsi_shost_attr_group = {
	.attrs = scsi_sysfs_shost_attrs,
};
379 | 379 | ||
/* NULL-terminated list of attribute groups registered for each host. */
const struct attribute_group *scsi_sysfs_shost_attr_groups[] = {
	&scsi_shost_attr_group,
	NULL
};
384 | 384 | ||
385 | static void scsi_device_cls_release(struct device *class_dev) | 385 | static void scsi_device_cls_release(struct device *class_dev) |
386 | { | 386 | { |
387 | struct scsi_device *sdev; | 387 | struct scsi_device *sdev; |
388 | 388 | ||
389 | sdev = class_to_sdev(class_dev); | 389 | sdev = class_to_sdev(class_dev); |
390 | put_device(&sdev->sdev_gendev); | 390 | put_device(&sdev->sdev_gendev); |
391 | } | 391 | } |
392 | 392 | ||
/*
 * scsi_device_dev_release_usercontext - final scsi_device teardown.
 *
 * Runs in process context (scheduled from scsi_device_dev_release())
 * because several steps below may sleep.  Unlinks the device from the
 * host's lists, flushes pending event work, frees any queued events,
 * drops the request queue and frees the scsi_device itself before
 * releasing the parent reference.  The order matters: events must be
 * drained before the structure is freed.
 */
static void scsi_device_dev_release_usercontext(struct work_struct *work)
{
	struct scsi_device *sdev;
	struct device *parent;
	struct list_head *this, *tmp;
	unsigned long flags;

	sdev = container_of(work, struct scsi_device, ew.work);

	scsi_dh_release_device(sdev);

	parent = sdev->sdev_gendev.parent;

	/* Unlink from the host's device/target/starved lists. */
	spin_lock_irqsave(sdev->host->host_lock, flags);
	list_del(&sdev->siblings);
	list_del(&sdev->same_target_siblings);
	list_del(&sdev->starved_entry);
	spin_unlock_irqrestore(sdev->host->host_lock, flags);

	/* Ensure no event work is running before freeing queued events. */
	cancel_work_sync(&sdev->event_work);

	list_for_each_safe(this, tmp, &sdev->event_list) {
		struct scsi_event *evt;

		evt = list_entry(this, struct scsi_event, node);
		list_del(&evt->node);
		kfree(evt);
	}

	blk_put_queue(sdev->request_queue);
	/* NULL queue means the device can't be used */
	sdev->request_queue = NULL;

	kfree(sdev->vpd_pg83);
	kfree(sdev->vpd_pg80);
	kfree(sdev->inquiry);
	kfree(sdev);

	/* NOTE(review): parent ref presumably taken at sdev allocation --
	 * confirm against scsi_alloc_sdev(). */
	if (parent)
		put_device(parent);
}
434 | 434 | ||
435 | static void scsi_device_dev_release(struct device *dev) | 435 | static void scsi_device_dev_release(struct device *dev) |
436 | { | 436 | { |
437 | struct scsi_device *sdp = to_scsi_device(dev); | 437 | struct scsi_device *sdp = to_scsi_device(dev); |
438 | execute_in_process_context(scsi_device_dev_release_usercontext, | 438 | execute_in_process_context(scsi_device_dev_release_usercontext, |
439 | &sdp->ew); | 439 | &sdp->ew); |
440 | } | 440 | } |
441 | 441 | ||
/* The "scsi_device" class; release drops the gendev reference. */
static struct class sdev_class = {
	.name		= "scsi_device",
	.dev_release	= scsi_device_cls_release,
};
446 | 446 | ||
447 | /* all probing is done in the individual ->probe routines */ | 447 | /* all probing is done in the individual ->probe routines */ |
448 | static int scsi_bus_match(struct device *dev, struct device_driver *gendrv) | 448 | static int scsi_bus_match(struct device *dev, struct device_driver *gendrv) |
449 | { | 449 | { |
450 | struct scsi_device *sdp; | 450 | struct scsi_device *sdp; |
451 | 451 | ||
452 | if (dev->type != &scsi_dev_type) | 452 | if (dev->type != &scsi_dev_type) |
453 | return 0; | 453 | return 0; |
454 | 454 | ||
455 | sdp = to_scsi_device(dev); | 455 | sdp = to_scsi_device(dev); |
456 | if (sdp->no_uld_attach) | 456 | if (sdp->no_uld_attach) |
457 | return 0; | 457 | return 0; |
458 | return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0; | 458 | return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0; |
459 | } | 459 | } |
460 | 460 | ||
461 | static int scsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env) | 461 | static int scsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env) |
462 | { | 462 | { |
463 | struct scsi_device *sdev; | 463 | struct scsi_device *sdev; |
464 | 464 | ||
465 | if (dev->type != &scsi_dev_type) | 465 | if (dev->type != &scsi_dev_type) |
466 | return 0; | 466 | return 0; |
467 | 467 | ||
468 | sdev = to_scsi_device(dev); | 468 | sdev = to_scsi_device(dev); |
469 | 469 | ||
470 | add_uevent_var(env, "MODALIAS=" SCSI_DEVICE_MODALIAS_FMT, sdev->type); | 470 | add_uevent_var(env, "MODALIAS=" SCSI_DEVICE_MODALIAS_FMT, sdev->type); |
471 | return 0; | 471 | return 0; |
472 | } | 472 | } |
473 | 473 | ||
/* The SCSI bus type; PM ops are only wired up when CONFIG_PM is set. */
struct bus_type scsi_bus_type = {
        .name		= "scsi",
        .match		= scsi_bus_match,
	.uevent		= scsi_bus_uevent,
#ifdef CONFIG_PM
	.pm		= &scsi_bus_pm_ops,
#endif
};
EXPORT_SYMBOL_GPL(scsi_bus_type);
483 | 483 | ||
484 | int scsi_sysfs_register(void) | 484 | int scsi_sysfs_register(void) |
485 | { | 485 | { |
486 | int error; | 486 | int error; |
487 | 487 | ||
488 | error = bus_register(&scsi_bus_type); | 488 | error = bus_register(&scsi_bus_type); |
489 | if (!error) { | 489 | if (!error) { |
490 | error = class_register(&sdev_class); | 490 | error = class_register(&sdev_class); |
491 | if (error) | 491 | if (error) |
492 | bus_unregister(&scsi_bus_type); | 492 | bus_unregister(&scsi_bus_type); |
493 | } | 493 | } |
494 | 494 | ||
495 | return error; | 495 | return error; |
496 | } | 496 | } |
497 | 497 | ||
/* Tear down what scsi_sysfs_register() set up, in reverse order. */
void scsi_sysfs_unregister(void)
{
	class_unregister(&sdev_class);
	bus_unregister(&scsi_bus_type);
}
503 | 503 | ||
/*
 * sdev_show_function: macro to create an attr function that can be used to
 * show a non-bit field.
 *
 * Output is formatted into at most 20 bytes of the sysfs buffer with
 * the caller-supplied format string.
 */
#define sdev_show_function(field, format_string)				\
static ssize_t								\
sdev_show_##field (struct device *dev, struct device_attribute *attr,	\
		   char *buf)						\
{									\
	struct scsi_device *sdev;					\
	sdev = to_scsi_device(dev);					\
	return snprintf (buf, 20, format_string, sdev->field);		\
}									\

/*
 * sdev_rd_attr: macro to create a function and attribute variable for a
 * read only field.
 */
#define sdev_rd_attr(field, format_string)				\
	sdev_show_function(field, format_string)			\
static DEVICE_ATTR(field, S_IRUGO, sdev_show_##field, NULL);
525 | 525 | ||
526 | 526 | ||
/*
 * sdev_rw_attr: create a function and attribute variable for a
 * read/write field.
 *
 * NOTE(review): the store side ignores the sscanf() result, so a
 * non-matching write leaves sdev->field untouched yet still reports
 * success (returns count) -- confirm whether stricter validation is
 * wanted before changing callers' expectations.
 */
#define sdev_rw_attr(field, format_string)				\
	sdev_show_function(field, format_string)			\
									\
static ssize_t								\
sdev_store_##field (struct device *dev, struct device_attribute *attr,	\
		    const char *buf, size_t count)			\
{									\
	struct scsi_device *sdev;					\
	sdev = to_scsi_device(dev);					\
	sscanf (buf, format_string, &sdev->field);			\
	return count;							\
}									\
static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);
544 | 544 | ||
/* Currently we don't export bit fields, but we might in future,
 * so leave this code in */
#if 0
/*
 * sdev_rw_attr_bit: create a function and attribute variable for a
 * read/write bit field.  (Comment header said "sdev_rd_attr" -- this
 * actually generates the read/write variant.)
 */
#define sdev_rw_attr_bit(field)						\
	sdev_show_function(field, "%d\n")				\
									\
static ssize_t								\
sdev_store_##field (struct device *dev, struct device_attribute *attr,	\
		    const char *buf, size_t count)			\
{									\
	int ret;							\
	struct scsi_device *sdev;					\
	ret = scsi_sdev_check_buf_bit(buf);				\
	if (ret >= 0)	{						\
		sdev = to_scsi_device(dev);				\
		sdev->field = ret;					\
		ret = count;						\
	}								\
	return ret;							\
}									\
static DEVICE_ATTR(field, S_IRUGO | S_IWUSR, sdev_show_##field, sdev_store_##field);

/*
 * scsi_sdev_check_buf_bit: return 0 if buf is "0", return 1 if buf is "1",
 * else return -EINVAL.
 */
static int scsi_sdev_check_buf_bit(const char *buf)
{
	if ((buf[1] == '\0') || ((buf[1] == '\n') && (buf[2] == '\0'))) {
		if (buf[0] == '1')
			return 1;
		else if (buf[0] == '0')
			return 0;
		else
			return -EINVAL;
	} else
		return -EINVAL;
}
#endif
/*
 * Create the actual show/store functions and data structures.
 */
/* Read-only identification attributes from the INQUIRY data. */
sdev_rd_attr (type, "%d\n");
sdev_rd_attr (scsi_level, "%d\n");
sdev_rd_attr (vendor, "%.8s\n");
sdev_rd_attr (model, "%.16s\n");
sdev_rd_attr (rev, "%.4s\n");
596 | 596 | ||
597 | static ssize_t | 597 | static ssize_t |
598 | sdev_show_device_busy(struct device *dev, struct device_attribute *attr, | 598 | sdev_show_device_busy(struct device *dev, struct device_attribute *attr, |
599 | char *buf) | 599 | char *buf) |
600 | { | 600 | { |
601 | struct scsi_device *sdev = to_scsi_device(dev); | 601 | struct scsi_device *sdev = to_scsi_device(dev); |
602 | return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_busy)); | 602 | return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_busy)); |
603 | } | 603 | } |
604 | static DEVICE_ATTR(device_busy, S_IRUGO, sdev_show_device_busy, NULL); | 604 | static DEVICE_ATTR(device_busy, S_IRUGO, sdev_show_device_busy, NULL); |
605 | 605 | ||
606 | static ssize_t | 606 | static ssize_t |
607 | sdev_show_device_blocked(struct device *dev, struct device_attribute *attr, | 607 | sdev_show_device_blocked(struct device *dev, struct device_attribute *attr, |
608 | char *buf) | 608 | char *buf) |
609 | { | 609 | { |
610 | struct scsi_device *sdev = to_scsi_device(dev); | 610 | struct scsi_device *sdev = to_scsi_device(dev); |
611 | return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_blocked)); | 611 | return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_blocked)); |
612 | } | 612 | } |
613 | static DEVICE_ATTR(device_blocked, S_IRUGO, sdev_show_device_blocked, NULL); | 613 | static DEVICE_ATTR(device_blocked, S_IRUGO, sdev_show_device_blocked, NULL); |
614 | 614 | ||
615 | /* | 615 | /* |
616 | * TODO: can we make these symlinks to the block layer ones? | 616 | * TODO: can we make these symlinks to the block layer ones? |
617 | */ | 617 | */ |
618 | static ssize_t | 618 | static ssize_t |
619 | sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf) | 619 | sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf) |
620 | { | 620 | { |
621 | struct scsi_device *sdev; | 621 | struct scsi_device *sdev; |
622 | sdev = to_scsi_device(dev); | 622 | sdev = to_scsi_device(dev); |
623 | return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ); | 623 | return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ); |
624 | } | 624 | } |
625 | 625 | ||
626 | static ssize_t | 626 | static ssize_t |
627 | sdev_store_timeout (struct device *dev, struct device_attribute *attr, | 627 | sdev_store_timeout (struct device *dev, struct device_attribute *attr, |
628 | const char *buf, size_t count) | 628 | const char *buf, size_t count) |
629 | { | 629 | { |
630 | struct scsi_device *sdev; | 630 | struct scsi_device *sdev; |
631 | int timeout; | 631 | int timeout; |
632 | sdev = to_scsi_device(dev); | 632 | sdev = to_scsi_device(dev); |
633 | sscanf (buf, "%d\n", &timeout); | 633 | sscanf (buf, "%d\n", &timeout); |
634 | blk_queue_rq_timeout(sdev->request_queue, timeout * HZ); | 634 | blk_queue_rq_timeout(sdev->request_queue, timeout * HZ); |
635 | return count; | 635 | return count; |
636 | } | 636 | } |
637 | static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout); | 637 | static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout); |
638 | 638 | ||
639 | static ssize_t | 639 | static ssize_t |
640 | sdev_show_eh_timeout(struct device *dev, struct device_attribute *attr, char *buf) | 640 | sdev_show_eh_timeout(struct device *dev, struct device_attribute *attr, char *buf) |
641 | { | 641 | { |
642 | struct scsi_device *sdev; | 642 | struct scsi_device *sdev; |
643 | sdev = to_scsi_device(dev); | 643 | sdev = to_scsi_device(dev); |
644 | return snprintf(buf, 20, "%u\n", sdev->eh_timeout / HZ); | 644 | return snprintf(buf, 20, "%u\n", sdev->eh_timeout / HZ); |
645 | } | 645 | } |
646 | 646 | ||
647 | static ssize_t | 647 | static ssize_t |
648 | sdev_store_eh_timeout(struct device *dev, struct device_attribute *attr, | 648 | sdev_store_eh_timeout(struct device *dev, struct device_attribute *attr, |
649 | const char *buf, size_t count) | 649 | const char *buf, size_t count) |
650 | { | 650 | { |
651 | struct scsi_device *sdev; | 651 | struct scsi_device *sdev; |
652 | unsigned int eh_timeout; | 652 | unsigned int eh_timeout; |
653 | int err; | 653 | int err; |
654 | 654 | ||
655 | if (!capable(CAP_SYS_ADMIN)) | 655 | if (!capable(CAP_SYS_ADMIN)) |
656 | return -EACCES; | 656 | return -EACCES; |
657 | 657 | ||
658 | sdev = to_scsi_device(dev); | 658 | sdev = to_scsi_device(dev); |
659 | err = kstrtouint(buf, 10, &eh_timeout); | 659 | err = kstrtouint(buf, 10, &eh_timeout); |
660 | if (err) | 660 | if (err) |
661 | return err; | 661 | return err; |
662 | sdev->eh_timeout = eh_timeout * HZ; | 662 | sdev->eh_timeout = eh_timeout * HZ; |
663 | 663 | ||
664 | return count; | 664 | return count; |
665 | } | 665 | } |
666 | static DEVICE_ATTR(eh_timeout, S_IRUGO | S_IWUSR, sdev_show_eh_timeout, sdev_store_eh_timeout); | 666 | static DEVICE_ATTR(eh_timeout, S_IRUGO | S_IWUSR, sdev_show_eh_timeout, sdev_store_eh_timeout); |
667 | 667 | ||
/* Writing anything to "rescan" triggers a rescan of this device. */
static ssize_t
store_rescan_field (struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	scsi_rescan_device(dev);
	return count;
}
static DEVICE_ATTR(rescan, S_IWUSR, NULL, store_rescan_field);
676 | 676 | ||
677 | static ssize_t | 677 | static ssize_t |
678 | sdev_store_delete(struct device *dev, struct device_attribute *attr, | 678 | sdev_store_delete(struct device *dev, struct device_attribute *attr, |
679 | const char *buf, size_t count) | 679 | const char *buf, size_t count) |
680 | { | 680 | { |
681 | if (device_remove_file_self(dev, attr)) | 681 | if (device_remove_file_self(dev, attr)) |
682 | scsi_remove_device(to_scsi_device(dev)); | 682 | scsi_remove_device(to_scsi_device(dev)); |
683 | return count; | 683 | return count; |
684 | }; | 684 | }; |
685 | static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete); | 685 | static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete); |
686 | 686 | ||
/*
 * store_state_field - sysfs "state" attribute: request a state change.
 *
 * The buffer must contain one of the names from sdev_states[] followed
 * by a newline.  NOTE(review): the "!state" check assumes no entry in
 * sdev_states[] has the value 0 -- confirm against the table's
 * definition.  Returns -EINVAL on an unknown name or an illegal
 * transition.
 */
static ssize_t
store_state_field(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
{
	int i;
	struct scsi_device *sdev = to_scsi_device(dev);
	enum scsi_device_state state = 0;

	for (i = 0; i < ARRAY_SIZE(sdev_states); i++) {
		const int len = strlen(sdev_states[i].name);
		/* Match the full name, terminated by a newline. */
		if (strncmp(sdev_states[i].name, buf, len) == 0 &&
		    buf[len] == '\n') {
			state = sdev_states[i].value;
			break;
		}
	}
	if (!state)
		return -EINVAL;

	if (scsi_device_set_state(sdev, state))
		return -EINVAL;
	return count;
}
710 | 710 | ||
711 | static ssize_t | 711 | static ssize_t |
712 | show_state_field(struct device *dev, struct device_attribute *attr, char *buf) | 712 | show_state_field(struct device *dev, struct device_attribute *attr, char *buf) |
713 | { | 713 | { |
714 | struct scsi_device *sdev = to_scsi_device(dev); | 714 | struct scsi_device *sdev = to_scsi_device(dev); |
715 | const char *name = scsi_device_state_name(sdev->sdev_state); | 715 | const char *name = scsi_device_state_name(sdev->sdev_state); |
716 | 716 | ||
717 | if (!name) | 717 | if (!name) |
718 | return -EINVAL; | 718 | return -EINVAL; |
719 | 719 | ||
720 | return snprintf(buf, 20, "%s\n", name); | 720 | return snprintf(buf, 20, "%s\n", name); |
721 | } | 721 | } |
722 | 722 | ||
723 | static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, show_state_field, store_state_field); | 723 | static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, show_state_field, store_state_field); |
724 | 724 | ||
725 | static ssize_t | 725 | static ssize_t |
726 | show_queue_type_field(struct device *dev, struct device_attribute *attr, | 726 | show_queue_type_field(struct device *dev, struct device_attribute *attr, |
727 | char *buf) | 727 | char *buf) |
728 | { | 728 | { |
729 | struct scsi_device *sdev = to_scsi_device(dev); | 729 | struct scsi_device *sdev = to_scsi_device(dev); |
730 | const char *name = "none"; | 730 | const char *name = "none"; |
731 | 731 | ||
732 | if (sdev->simple_tags) | 732 | if (sdev->simple_tags) |
733 | name = "simple"; | 733 | name = "simple"; |
734 | 734 | ||
735 | return snprintf(buf, 20, "%s\n", name); | 735 | return snprintf(buf, 20, "%s\n", name); |
736 | } | 736 | } |
737 | 737 | ||
738 | static ssize_t | 738 | static ssize_t |
739 | store_queue_type_field(struct device *dev, struct device_attribute *attr, | 739 | store_queue_type_field(struct device *dev, struct device_attribute *attr, |
740 | const char *buf, size_t count) | 740 | const char *buf, size_t count) |
741 | { | 741 | { |
742 | struct scsi_device *sdev = to_scsi_device(dev); | 742 | struct scsi_device *sdev = to_scsi_device(dev); |
743 | 743 | ||
744 | if (!sdev->tagged_supported) | 744 | if (!sdev->tagged_supported) |
745 | return -EINVAL; | 745 | return -EINVAL; |
746 | 746 | ||
747 | sdev_printk(KERN_INFO, sdev, | 747 | sdev_printk(KERN_INFO, sdev, |
748 | "ignoring write to deprecated queue_type attribute"); | 748 | "ignoring write to deprecated queue_type attribute"); |
749 | return count; | 749 | return count; |
750 | } | 750 | } |
751 | 751 | ||
752 | static DEVICE_ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field, | 752 | static DEVICE_ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field, |
753 | store_queue_type_field); | 753 | store_queue_type_field); |
754 | 754 | ||
/*
 * sdev_vpd_pg_attr: generate a read-only binary sysfs attribute that
 * exposes a cached VPD page (sdev->vpd_<page>/_len).  Returns -EINVAL
 * when the page was never fetched for this device.
 */
#define sdev_vpd_pg_attr(_page)						\
static ssize_t							\
show_vpd_##_page(struct file *filp, struct kobject *kobj,	\
		 struct bin_attribute *bin_attr,			\
		 char *buf, loff_t off, size_t count)			\
{									\
	struct device *dev = container_of(kobj, struct device, kobj);	\
	struct scsi_device *sdev = to_scsi_device(dev);			\
	if (!sdev->vpd_##_page)						\
		return -EINVAL;						\
	return memory_read_from_buffer(buf, count, &off,		\
				       sdev->vpd_##_page,		\
				       sdev->vpd_##_page##_len);	\
}									\
static struct bin_attribute dev_attr_vpd_##_page = {		\
	.attr =	{.name = __stringify(vpd_##_page), .mode = S_IRUGO },	\
	.size = 0,							\
	.read = show_vpd_##_page,					\
};

/* Device Identification (0x83) and Unit Serial Number (0x80) pages. */
sdev_vpd_pg_attr(pg83);
sdev_vpd_pg_attr(pg80);
777 | 777 | ||
/*
 * show_inquiry - binary sysfs read of the cached raw INQUIRY response.
 *
 * Returns -EINVAL if no INQUIRY data was cached for this device.
 */
static ssize_t show_inquiry(struct file *filep, struct kobject *kobj,
			    struct bin_attribute *bin_attr,
			    char *buf, loff_t off, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct scsi_device *sdev = to_scsi_device(dev);

	if (!sdev->inquiry)
		return -EINVAL;

	return memory_read_from_buffer(buf, count, &off, sdev->inquiry,
				       sdev->inquiry_len);
}

/* Read-only binary attribute exposing the raw INQUIRY data. */
static struct bin_attribute dev_attr_inquiry = {
	.attr = {
		.name = "inquiry",
		.mode = S_IRUGO,
	},
	.size = 0,
	.read = show_inquiry,
};
800 | 800 | ||
801 | static ssize_t | 801 | static ssize_t |
802 | show_iostat_counterbits(struct device *dev, struct device_attribute *attr, | 802 | show_iostat_counterbits(struct device *dev, struct device_attribute *attr, |
803 | char *buf) | 803 | char *buf) |
804 | { | 804 | { |
805 | return snprintf(buf, 20, "%d\n", (int)sizeof(atomic_t) * 8); | 805 | return snprintf(buf, 20, "%d\n", (int)sizeof(atomic_t) * 8); |
806 | } | 806 | } |
807 | 807 | ||
808 | static DEVICE_ATTR(iocounterbits, S_IRUGO, show_iostat_counterbits, NULL); | 808 | static DEVICE_ATTR(iocounterbits, S_IRUGO, show_iostat_counterbits, NULL); |
809 | 809 | ||
/*
 * show_sdev_iostat: generate a read-only attribute printing an atomic
 * per-device I/O counter in hexadecimal.
 */
#define show_sdev_iostat(field)						\
static ssize_t								\
show_iostat_##field(struct device *dev, struct device_attribute *attr,	\
		    char *buf)						\
{									\
	struct scsi_device *sdev = to_scsi_device(dev);			\
	unsigned long long count = atomic_read(&sdev->field);		\
	return snprintf(buf, 20, "0x%llx\n", count);			\
}									\
static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)

/* Requests issued, requests completed, and requests that errored. */
show_sdev_iostat(iorequest_cnt);
show_sdev_iostat(iodone_cnt);
show_sdev_iostat(ioerr_cnt);
824 | 824 | ||
825 | static ssize_t | 825 | static ssize_t |
826 | sdev_show_modalias(struct device *dev, struct device_attribute *attr, char *buf) | 826 | sdev_show_modalias(struct device *dev, struct device_attribute *attr, char *buf) |
827 | { | 827 | { |
828 | struct scsi_device *sdev; | 828 | struct scsi_device *sdev; |
829 | sdev = to_scsi_device(dev); | 829 | sdev = to_scsi_device(dev); |
830 | return snprintf (buf, 20, SCSI_DEVICE_MODALIAS_FMT "\n", sdev->type); | 830 | return snprintf (buf, 20, SCSI_DEVICE_MODALIAS_FMT "\n", sdev->type); |
831 | } | 831 | } |
832 | static DEVICE_ATTR(modalias, S_IRUGO, sdev_show_modalias, NULL); | 832 | static DEVICE_ATTR(modalias, S_IRUGO, sdev_show_modalias, NULL); |
833 | 833 | ||
/*
 * DECLARE_EVT_SHOW: generate a show method reporting whether the named
 * event type is set in sdev->supported_events.
 */
#define DECLARE_EVT_SHOW(name, Cap_name)				\
static ssize_t								\
sdev_show_evt_##name(struct device *dev, struct device_attribute *attr,	\
		     char *buf)						\
{									\
	struct scsi_device *sdev = to_scsi_device(dev);			\
	int val = test_bit(SDEV_EVT_##Cap_name, sdev->supported_events);\
	return snprintf(buf, 20, "%d\n", val);				\
}

/*
 * DECLARE_EVT_STORE: generate a store method that sets (1) or clears
 * (0) the named event bit; any other value yields -EINVAL.
 */
#define DECLARE_EVT_STORE(name, Cap_name)				\
static ssize_t								\
sdev_store_evt_##name(struct device *dev, struct device_attribute *attr,\
		      const char *buf, size_t count)			\
{									\
	struct scsi_device *sdev = to_scsi_device(dev);			\
	int val = simple_strtoul(buf, NULL, 0);				\
	if (val == 0)							\
		clear_bit(SDEV_EVT_##Cap_name, sdev->supported_events);	\
	else if (val == 1)						\
		set_bit(SDEV_EVT_##Cap_name, sdev->supported_events);	\
	else								\
		return -EINVAL;						\
	return count;							\
}

/*
 * DECLARE_EVT: emit both methods plus the DEVICE_ATTR.
 * NOTE(review): the mode is S_IRUGO only, so the generated store
 * method is not reachable through sysfs permissions -- confirm whether
 * S_IWUSR was intended.
 */
#define DECLARE_EVT(name, Cap_name)					\
	DECLARE_EVT_SHOW(name, Cap_name)				\
	DECLARE_EVT_STORE(name, Cap_name)				\
	static DEVICE_ATTR(evt_##name, S_IRUGO, sdev_show_evt_##name,	\
			   sdev_store_evt_##name);
#define REF_EVT(name) &dev_attr_evt_##name.attr

DECLARE_EVT(media_change, MEDIA_CHANGE)
DECLARE_EVT(inquiry_change_reported, INQUIRY_CHANGE_REPORTED)
DECLARE_EVT(capacity_change_reported, CAPACITY_CHANGE_REPORTED)
DECLARE_EVT(soft_threshold_reached, SOFT_THRESHOLD_REACHED_REPORTED)
DECLARE_EVT(mode_parameter_change_reported, MODE_PARAMETER_CHANGE_REPORTED)
DECLARE_EVT(lun_change_reported, LUN_CHANGE_REPORTED)
873 | 873 | ||
874 | static ssize_t | 874 | static ssize_t |
875 | sdev_store_queue_depth(struct device *dev, struct device_attribute *attr, | 875 | sdev_store_queue_depth(struct device *dev, struct device_attribute *attr, |
876 | const char *buf, size_t count) | 876 | const char *buf, size_t count) |
877 | { | 877 | { |
878 | int depth, retval; | 878 | int depth, retval; |
879 | struct scsi_device *sdev = to_scsi_device(dev); | 879 | struct scsi_device *sdev = to_scsi_device(dev); |
880 | struct scsi_host_template *sht = sdev->host->hostt; | 880 | struct scsi_host_template *sht = sdev->host->hostt; |
881 | 881 | ||
882 | if (!sht->change_queue_depth) | 882 | if (!sht->change_queue_depth) |
883 | return -EINVAL; | 883 | return -EINVAL; |
884 | 884 | ||
885 | depth = simple_strtoul(buf, NULL, 0); | 885 | depth = simple_strtoul(buf, NULL, 0); |
886 | 886 | ||
887 | if (depth < 1 || depth > sdev->host->can_queue) | 887 | if (depth < 1 || depth > sdev->host->can_queue) |
888 | return -EINVAL; | 888 | return -EINVAL; |
889 | 889 | ||
890 | retval = sht->change_queue_depth(sdev, depth); | 890 | retval = sht->change_queue_depth(sdev, depth); |
891 | if (retval < 0) | 891 | if (retval < 0) |
892 | return retval; | 892 | return retval; |
893 | 893 | ||
894 | sdev->max_queue_depth = sdev->queue_depth; | 894 | sdev->max_queue_depth = sdev->queue_depth; |
895 | 895 | ||
896 | return count; | 896 | return count; |
897 | } | 897 | } |
898 | sdev_show_function(queue_depth, "%d\n"); | 898 | sdev_show_function(queue_depth, "%d\n"); |
899 | 899 | ||
900 | static DEVICE_ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth, | 900 | static DEVICE_ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth, |
901 | sdev_store_queue_depth); | 901 | sdev_store_queue_depth); |
902 | 902 | ||
903 | static ssize_t | 903 | static ssize_t |
904 | sdev_show_queue_ramp_up_period(struct device *dev, | 904 | sdev_show_queue_ramp_up_period(struct device *dev, |
905 | struct device_attribute *attr, | 905 | struct device_attribute *attr, |
906 | char *buf) | 906 | char *buf) |
907 | { | 907 | { |
908 | struct scsi_device *sdev; | 908 | struct scsi_device *sdev; |
909 | sdev = to_scsi_device(dev); | 909 | sdev = to_scsi_device(dev); |
910 | return snprintf(buf, 20, "%u\n", | 910 | return snprintf(buf, 20, "%u\n", |
911 | jiffies_to_msecs(sdev->queue_ramp_up_period)); | 911 | jiffies_to_msecs(sdev->queue_ramp_up_period)); |
912 | } | 912 | } |
913 | 913 | ||
914 | static ssize_t | 914 | static ssize_t |
915 | sdev_store_queue_ramp_up_period(struct device *dev, | 915 | sdev_store_queue_ramp_up_period(struct device *dev, |
916 | struct device_attribute *attr, | 916 | struct device_attribute *attr, |
917 | const char *buf, size_t count) | 917 | const char *buf, size_t count) |
918 | { | 918 | { |
919 | struct scsi_device *sdev = to_scsi_device(dev); | 919 | struct scsi_device *sdev = to_scsi_device(dev); |
920 | unsigned int period; | 920 | unsigned int period; |
921 | 921 | ||
922 | if (kstrtouint(buf, 10, &period)) | 922 | if (kstrtouint(buf, 10, &period)) |
923 | return -EINVAL; | 923 | return -EINVAL; |
924 | 924 | ||
925 | sdev->queue_ramp_up_period = msecs_to_jiffies(period); | 925 | sdev->queue_ramp_up_period = msecs_to_jiffies(period); |
926 | return count; | 926 | return count; |
927 | } | 927 | } |
928 | 928 | ||
929 | static DEVICE_ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR, | 929 | static DEVICE_ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR, |
930 | sdev_show_queue_ramp_up_period, | 930 | sdev_show_queue_ramp_up_period, |
931 | sdev_store_queue_ramp_up_period); | 931 | sdev_store_queue_ramp_up_period); |
932 | 932 | ||
933 | static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj, | 933 | static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj, |
934 | struct attribute *attr, int i) | 934 | struct attribute *attr, int i) |
935 | { | 935 | { |
936 | struct device *dev = container_of(kobj, struct device, kobj); | 936 | struct device *dev = container_of(kobj, struct device, kobj); |
937 | struct scsi_device *sdev = to_scsi_device(dev); | 937 | struct scsi_device *sdev = to_scsi_device(dev); |
938 | 938 | ||
939 | 939 | ||
940 | if (attr == &dev_attr_queue_depth.attr && | 940 | if (attr == &dev_attr_queue_depth.attr && |
941 | !sdev->host->hostt->change_queue_depth) | 941 | !sdev->host->hostt->change_queue_depth) |
942 | return S_IRUGO; | 942 | return S_IRUGO; |
943 | 943 | ||
944 | if (attr == &dev_attr_queue_ramp_up_period.attr && | 944 | if (attr == &dev_attr_queue_ramp_up_period.attr && |
945 | !sdev->host->hostt->change_queue_depth) | 945 | !sdev->host->hostt->change_queue_depth) |
946 | return 0; | 946 | return 0; |
947 | 947 | ||
948 | return attr->mode; | 948 | return attr->mode; |
949 | } | 949 | } |
950 | 950 | ||
951 | /* Default template for device attributes. May NOT be modified */ | 951 | /* Default template for device attributes. May NOT be modified */ |
952 | static struct attribute *scsi_sdev_attrs[] = { | 952 | static struct attribute *scsi_sdev_attrs[] = { |
953 | &dev_attr_device_blocked.attr, | 953 | &dev_attr_device_blocked.attr, |
954 | &dev_attr_type.attr, | 954 | &dev_attr_type.attr, |
955 | &dev_attr_scsi_level.attr, | 955 | &dev_attr_scsi_level.attr, |
956 | &dev_attr_device_busy.attr, | 956 | &dev_attr_device_busy.attr, |
957 | &dev_attr_vendor.attr, | 957 | &dev_attr_vendor.attr, |
958 | &dev_attr_model.attr, | 958 | &dev_attr_model.attr, |
959 | &dev_attr_rev.attr, | 959 | &dev_attr_rev.attr, |
960 | &dev_attr_rescan.attr, | 960 | &dev_attr_rescan.attr, |
961 | &dev_attr_delete.attr, | 961 | &dev_attr_delete.attr, |
962 | &dev_attr_state.attr, | 962 | &dev_attr_state.attr, |
963 | &dev_attr_timeout.attr, | 963 | &dev_attr_timeout.attr, |
964 | &dev_attr_eh_timeout.attr, | 964 | &dev_attr_eh_timeout.attr, |
965 | &dev_attr_iocounterbits.attr, | 965 | &dev_attr_iocounterbits.attr, |
966 | &dev_attr_iorequest_cnt.attr, | 966 | &dev_attr_iorequest_cnt.attr, |
967 | &dev_attr_iodone_cnt.attr, | 967 | &dev_attr_iodone_cnt.attr, |
968 | &dev_attr_ioerr_cnt.attr, | 968 | &dev_attr_ioerr_cnt.attr, |
969 | &dev_attr_modalias.attr, | 969 | &dev_attr_modalias.attr, |
970 | &dev_attr_queue_depth.attr, | 970 | &dev_attr_queue_depth.attr, |
971 | &dev_attr_queue_type.attr, | 971 | &dev_attr_queue_type.attr, |
972 | &dev_attr_queue_ramp_up_period.attr, | 972 | &dev_attr_queue_ramp_up_period.attr, |
973 | REF_EVT(media_change), | 973 | REF_EVT(media_change), |
974 | REF_EVT(inquiry_change_reported), | 974 | REF_EVT(inquiry_change_reported), |
975 | REF_EVT(capacity_change_reported), | 975 | REF_EVT(capacity_change_reported), |
976 | REF_EVT(soft_threshold_reached), | 976 | REF_EVT(soft_threshold_reached), |
977 | REF_EVT(mode_parameter_change_reported), | 977 | REF_EVT(mode_parameter_change_reported), |
978 | REF_EVT(lun_change_reported), | 978 | REF_EVT(lun_change_reported), |
979 | NULL | 979 | NULL |
980 | }; | 980 | }; |
981 | 981 | ||
982 | static struct bin_attribute *scsi_sdev_bin_attrs[] = { | 982 | static struct bin_attribute *scsi_sdev_bin_attrs[] = { |
983 | &dev_attr_vpd_pg83, | 983 | &dev_attr_vpd_pg83, |
984 | &dev_attr_vpd_pg80, | 984 | &dev_attr_vpd_pg80, |
985 | &dev_attr_inquiry, | 985 | &dev_attr_inquiry, |
986 | NULL | 986 | NULL |
987 | }; | 987 | }; |
988 | static struct attribute_group scsi_sdev_attr_group = { | 988 | static struct attribute_group scsi_sdev_attr_group = { |
989 | .attrs = scsi_sdev_attrs, | 989 | .attrs = scsi_sdev_attrs, |
990 | .bin_attrs = scsi_sdev_bin_attrs, | 990 | .bin_attrs = scsi_sdev_bin_attrs, |
991 | .is_visible = scsi_sdev_attr_is_visible, | 991 | .is_visible = scsi_sdev_attr_is_visible, |
992 | }; | 992 | }; |
993 | 993 | ||
994 | static const struct attribute_group *scsi_sdev_attr_groups[] = { | 994 | static const struct attribute_group *scsi_sdev_attr_groups[] = { |
995 | &scsi_sdev_attr_group, | 995 | &scsi_sdev_attr_group, |
996 | NULL | 996 | NULL |
997 | }; | 997 | }; |
998 | 998 | ||
999 | static int scsi_target_add(struct scsi_target *starget) | 999 | static int scsi_target_add(struct scsi_target *starget) |
1000 | { | 1000 | { |
1001 | int error; | 1001 | int error; |
1002 | 1002 | ||
1003 | if (starget->state != STARGET_CREATED) | 1003 | if (starget->state != STARGET_CREATED) |
1004 | return 0; | 1004 | return 0; |
1005 | 1005 | ||
1006 | error = device_add(&starget->dev); | 1006 | error = device_add(&starget->dev); |
1007 | if (error) { | 1007 | if (error) { |
1008 | dev_err(&starget->dev, "target device_add failed, error %d\n", error); | 1008 | dev_err(&starget->dev, "target device_add failed, error %d\n", error); |
1009 | return error; | 1009 | return error; |
1010 | } | 1010 | } |
1011 | transport_add_device(&starget->dev); | 1011 | transport_add_device(&starget->dev); |
1012 | starget->state = STARGET_RUNNING; | 1012 | starget->state = STARGET_RUNNING; |
1013 | 1013 | ||
1014 | pm_runtime_set_active(&starget->dev); | 1014 | pm_runtime_set_active(&starget->dev); |
1015 | pm_runtime_enable(&starget->dev); | 1015 | pm_runtime_enable(&starget->dev); |
1016 | device_enable_async_suspend(&starget->dev); | 1016 | device_enable_async_suspend(&starget->dev); |
1017 | 1017 | ||
1018 | return 0; | 1018 | return 0; |
1019 | } | 1019 | } |
1020 | 1020 | ||
1021 | /** | 1021 | /** |
1022 | * scsi_sysfs_add_sdev - add scsi device to sysfs | 1022 | * scsi_sysfs_add_sdev - add scsi device to sysfs |
1023 | * @sdev: scsi_device to add | 1023 | * @sdev: scsi_device to add |
1024 | * | 1024 | * |
1025 | * Return value: | 1025 | * Return value: |
1026 | * 0 on Success / non-zero on Failure | 1026 | * 0 on Success / non-zero on Failure |
1027 | **/ | 1027 | **/ |
1028 | int scsi_sysfs_add_sdev(struct scsi_device *sdev) | 1028 | int scsi_sysfs_add_sdev(struct scsi_device *sdev) |
1029 | { | 1029 | { |
1030 | int error, i; | 1030 | int error, i; |
1031 | struct request_queue *rq = sdev->request_queue; | 1031 | struct request_queue *rq = sdev->request_queue; |
1032 | struct scsi_target *starget = sdev->sdev_target; | 1032 | struct scsi_target *starget = sdev->sdev_target; |
1033 | 1033 | ||
1034 | error = scsi_device_set_state(sdev, SDEV_RUNNING); | 1034 | error = scsi_device_set_state(sdev, SDEV_RUNNING); |
1035 | if (error) | 1035 | if (error) |
1036 | return error; | 1036 | return error; |
1037 | 1037 | ||
1038 | error = scsi_target_add(starget); | 1038 | error = scsi_target_add(starget); |
1039 | if (error) | 1039 | if (error) |
1040 | return error; | 1040 | return error; |
1041 | 1041 | ||
1042 | transport_configure_device(&starget->dev); | 1042 | transport_configure_device(&starget->dev); |
1043 | 1043 | ||
1044 | device_enable_async_suspend(&sdev->sdev_gendev); | 1044 | device_enable_async_suspend(&sdev->sdev_gendev); |
1045 | scsi_autopm_get_target(starget); | 1045 | scsi_autopm_get_target(starget); |
1046 | pm_runtime_set_active(&sdev->sdev_gendev); | 1046 | pm_runtime_set_active(&sdev->sdev_gendev); |
1047 | pm_runtime_forbid(&sdev->sdev_gendev); | 1047 | pm_runtime_forbid(&sdev->sdev_gendev); |
1048 | pm_runtime_enable(&sdev->sdev_gendev); | 1048 | pm_runtime_enable(&sdev->sdev_gendev); |
1049 | scsi_autopm_put_target(starget); | 1049 | scsi_autopm_put_target(starget); |
1050 | 1050 | ||
1051 | scsi_autopm_get_device(sdev); | 1051 | scsi_autopm_get_device(sdev); |
1052 | 1052 | ||
1053 | error = device_add(&sdev->sdev_gendev); | 1053 | error = device_add(&sdev->sdev_gendev); |
1054 | if (error) { | 1054 | if (error) { |
1055 | sdev_printk(KERN_INFO, sdev, | 1055 | sdev_printk(KERN_INFO, sdev, |
1056 | "failed to add device: %d\n", error); | 1056 | "failed to add device: %d\n", error); |
1057 | return error; | 1057 | return error; |
1058 | } | 1058 | } |
1059 | 1059 | ||
1060 | error = scsi_dh_add_device(sdev); | 1060 | error = scsi_dh_add_device(sdev); |
1061 | if (error) { | 1061 | if (error) { |
1062 | sdev_printk(KERN_INFO, sdev, | 1062 | sdev_printk(KERN_INFO, sdev, |
1063 | "failed to add device handler: %d\n", error); | 1063 | "failed to add device handler: %d\n", error); |
1064 | return error; | 1064 | return error; |
1065 | } | 1065 | } |
1066 | 1066 | ||
1067 | device_enable_async_suspend(&sdev->sdev_dev); | 1067 | device_enable_async_suspend(&sdev->sdev_dev); |
1068 | error = device_add(&sdev->sdev_dev); | 1068 | error = device_add(&sdev->sdev_dev); |
1069 | if (error) { | 1069 | if (error) { |
1070 | sdev_printk(KERN_INFO, sdev, | 1070 | sdev_printk(KERN_INFO, sdev, |
1071 | "failed to add class device: %d\n", error); | 1071 | "failed to add class device: %d\n", error); |
1072 | scsi_dh_remove_device(sdev); | 1072 | scsi_dh_remove_device(sdev); |
1073 | device_del(&sdev->sdev_gendev); | 1073 | device_del(&sdev->sdev_gendev); |
1074 | return error; | 1074 | return error; |
1075 | } | 1075 | } |
1076 | transport_add_device(&sdev->sdev_gendev); | 1076 | transport_add_device(&sdev->sdev_gendev); |
1077 | sdev->is_visible = 1; | 1077 | sdev->is_visible = 1; |
1078 | 1078 | ||
1079 | error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL); | 1079 | error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL); |
1080 | 1080 | ||
1081 | if (error) | 1081 | if (error) |
1082 | /* we're treating error on bsg register as non-fatal, | 1082 | /* we're treating error on bsg register as non-fatal, |
1083 | * so pretend nothing went wrong */ | 1083 | * so pretend nothing went wrong */ |
1084 | sdev_printk(KERN_INFO, sdev, | 1084 | sdev_printk(KERN_INFO, sdev, |
1085 | "Failed to register bsg queue, errno=%d\n", error); | 1085 | "Failed to register bsg queue, errno=%d\n", error); |
1086 | 1086 | ||
1087 | /* add additional host specific attributes */ | 1087 | /* add additional host specific attributes */ |
1088 | if (sdev->host->hostt->sdev_attrs) { | 1088 | if (sdev->host->hostt->sdev_attrs) { |
1089 | for (i = 0; sdev->host->hostt->sdev_attrs[i]; i++) { | 1089 | for (i = 0; sdev->host->hostt->sdev_attrs[i]; i++) { |
1090 | error = device_create_file(&sdev->sdev_gendev, | 1090 | error = device_create_file(&sdev->sdev_gendev, |
1091 | sdev->host->hostt->sdev_attrs[i]); | 1091 | sdev->host->hostt->sdev_attrs[i]); |
1092 | if (error) | 1092 | if (error) |
1093 | return error; | 1093 | return error; |
1094 | } | 1094 | } |
1095 | } | 1095 | } |
1096 | 1096 | ||
1097 | scsi_autopm_put_device(sdev); | 1097 | scsi_autopm_put_device(sdev); |
1098 | return error; | 1098 | return error; |
1099 | } | 1099 | } |
1100 | 1100 | ||
1101 | void __scsi_remove_device(struct scsi_device *sdev) | 1101 | void __scsi_remove_device(struct scsi_device *sdev) |
1102 | { | 1102 | { |
1103 | struct device *dev = &sdev->sdev_gendev; | 1103 | struct device *dev = &sdev->sdev_gendev; |
1104 | 1104 | ||
1105 | /* | 1105 | /* |
1106 | * This cleanup path is not reentrant and while it is impossible | 1106 | * This cleanup path is not reentrant and while it is impossible |
1107 | * to get a new reference with scsi_device_get() someone can still | 1107 | * to get a new reference with scsi_device_get() someone can still |
1108 | * hold a previously acquired one. | 1108 | * hold a previously acquired one. |
1109 | */ | 1109 | */ |
1110 | if (sdev->sdev_state == SDEV_DEL) | 1110 | if (sdev->sdev_state == SDEV_DEL) |
1111 | return; | 1111 | return; |
1112 | 1112 | ||
1113 | if (sdev->is_visible) { | 1113 | if (sdev->is_visible) { |
1114 | if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0) | 1114 | if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0) |
1115 | return; | 1115 | return; |
1116 | 1116 | ||
1117 | bsg_unregister_queue(sdev->request_queue); | 1117 | bsg_unregister_queue(sdev->request_queue); |
1118 | device_unregister(&sdev->sdev_dev); | 1118 | device_unregister(&sdev->sdev_dev); |
1119 | transport_remove_device(dev); | 1119 | transport_remove_device(dev); |
1120 | scsi_dh_remove_device(sdev); | 1120 | scsi_dh_remove_device(sdev); |
1121 | device_del(dev); | 1121 | device_del(dev); |
1122 | } else | 1122 | } else |
1123 | put_device(&sdev->sdev_dev); | 1123 | put_device(&sdev->sdev_dev); |
1124 | 1124 | ||
1125 | /* | 1125 | /* |
1126 | * Stop accepting new requests and wait until all queuecommand() and | 1126 | * Stop accepting new requests and wait until all queuecommand() and |
1127 | * scsi_run_queue() invocations have finished before tearing down the | 1127 | * scsi_run_queue() invocations have finished before tearing down the |
1128 | * device. | 1128 | * device. |
1129 | */ | 1129 | */ |
1130 | scsi_device_set_state(sdev, SDEV_DEL); | 1130 | scsi_device_set_state(sdev, SDEV_DEL); |
1131 | blk_cleanup_queue(sdev->request_queue); | 1131 | blk_cleanup_queue(sdev->request_queue); |
1132 | cancel_work_sync(&sdev->requeue_work); | 1132 | cancel_work_sync(&sdev->requeue_work); |
1133 | 1133 | ||
1134 | if (sdev->host->hostt->slave_destroy) | 1134 | if (sdev->host->hostt->slave_destroy) |
1135 | sdev->host->hostt->slave_destroy(sdev); | 1135 | sdev->host->hostt->slave_destroy(sdev); |
1136 | transport_destroy_device(dev); | 1136 | transport_destroy_device(dev); |
1137 | 1137 | ||
1138 | /* | 1138 | /* |
1139 | * Paired with the kref_get() in scsi_sysfs_initialize(). We have | 1139 | * Paired with the kref_get() in scsi_sysfs_initialize(). We have |
1140 | * remoed sysfs visibility from the device, so make the target | 1140 | * remoed sysfs visibility from the device, so make the target |
1141 | * invisible if this was the last device underneath it. | 1141 | * invisible if this was the last device underneath it. |
1142 | */ | 1142 | */ |
1143 | scsi_target_reap(scsi_target(sdev)); | 1143 | scsi_target_reap(scsi_target(sdev)); |
1144 | 1144 | ||
1145 | put_device(dev); | 1145 | put_device(dev); |
1146 | } | 1146 | } |
1147 | 1147 | ||
1148 | /** | 1148 | /** |
1149 | * scsi_remove_device - unregister a device from the scsi bus | 1149 | * scsi_remove_device - unregister a device from the scsi bus |
1150 | * @sdev: scsi_device to unregister | 1150 | * @sdev: scsi_device to unregister |
1151 | **/ | 1151 | **/ |
1152 | void scsi_remove_device(struct scsi_device *sdev) | 1152 | void scsi_remove_device(struct scsi_device *sdev) |
1153 | { | 1153 | { |
1154 | struct Scsi_Host *shost = sdev->host; | 1154 | struct Scsi_Host *shost = sdev->host; |
1155 | 1155 | ||
1156 | mutex_lock(&shost->scan_mutex); | 1156 | mutex_lock(&shost->scan_mutex); |
1157 | __scsi_remove_device(sdev); | 1157 | __scsi_remove_device(sdev); |
1158 | mutex_unlock(&shost->scan_mutex); | 1158 | mutex_unlock(&shost->scan_mutex); |
1159 | } | 1159 | } |
1160 | EXPORT_SYMBOL(scsi_remove_device); | 1160 | EXPORT_SYMBOL(scsi_remove_device); |
1161 | 1161 | ||
1162 | static void __scsi_remove_target(struct scsi_target *starget) | 1162 | static void __scsi_remove_target(struct scsi_target *starget) |
1163 | { | 1163 | { |
1164 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); | 1164 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); |
1165 | unsigned long flags; | 1165 | unsigned long flags; |
1166 | struct scsi_device *sdev; | 1166 | struct scsi_device *sdev; |
1167 | 1167 | ||
1168 | spin_lock_irqsave(shost->host_lock, flags); | 1168 | spin_lock_irqsave(shost->host_lock, flags); |
1169 | restart: | 1169 | restart: |
1170 | list_for_each_entry(sdev, &shost->__devices, siblings) { | 1170 | list_for_each_entry(sdev, &shost->__devices, siblings) { |
1171 | if (sdev->channel != starget->channel || | 1171 | if (sdev->channel != starget->channel || |
1172 | sdev->id != starget->id || | 1172 | sdev->id != starget->id || |
1173 | scsi_device_get(sdev)) | 1173 | scsi_device_get(sdev)) |
1174 | continue; | 1174 | continue; |
1175 | spin_unlock_irqrestore(shost->host_lock, flags); | 1175 | spin_unlock_irqrestore(shost->host_lock, flags); |
1176 | scsi_remove_device(sdev); | 1176 | scsi_remove_device(sdev); |
1177 | scsi_device_put(sdev); | 1177 | scsi_device_put(sdev); |
1178 | spin_lock_irqsave(shost->host_lock, flags); | 1178 | spin_lock_irqsave(shost->host_lock, flags); |
1179 | goto restart; | 1179 | goto restart; |
1180 | } | 1180 | } |
1181 | spin_unlock_irqrestore(shost->host_lock, flags); | 1181 | spin_unlock_irqrestore(shost->host_lock, flags); |
1182 | } | 1182 | } |
1183 | 1183 | ||
1184 | /** | 1184 | /** |
1185 | * scsi_remove_target - try to remove a target and all its devices | 1185 | * scsi_remove_target - try to remove a target and all its devices |
1186 | * @dev: generic starget or parent of generic stargets to be removed | 1186 | * @dev: generic starget or parent of generic stargets to be removed |
1187 | * | 1187 | * |
1188 | * Note: This is slightly racy. It is possible that if the user | 1188 | * Note: This is slightly racy. It is possible that if the user |
1189 | * requests the addition of another device then the target won't be | 1189 | * requests the addition of another device then the target won't be |
1190 | * removed. | 1190 | * removed. |
1191 | */ | 1191 | */ |
1192 | void scsi_remove_target(struct device *dev) | 1192 | void scsi_remove_target(struct device *dev) |
1193 | { | 1193 | { |
1194 | struct Scsi_Host *shost = dev_to_shost(dev->parent); | 1194 | struct Scsi_Host *shost = dev_to_shost(dev->parent); |
1195 | struct scsi_target *starget, *last_target = NULL; | 1195 | struct scsi_target *starget, *last_target = NULL; |
1196 | unsigned long flags; | 1196 | unsigned long flags; |
1197 | 1197 | ||
1198 | restart: | 1198 | restart: |
1199 | spin_lock_irqsave(shost->host_lock, flags); | 1199 | spin_lock_irqsave(shost->host_lock, flags); |
1200 | list_for_each_entry(starget, &shost->__targets, siblings) { | 1200 | list_for_each_entry(starget, &shost->__targets, siblings) { |
1201 | if (starget->state == STARGET_DEL || | 1201 | if (starget->state == STARGET_DEL || |
1202 | starget->state == STARGET_REMOVE || | ||
1202 | starget == last_target) | 1203 | starget == last_target) |
1203 | continue; | 1204 | continue; |
1204 | if (starget->dev.parent == dev || &starget->dev == dev) { | 1205 | if (starget->dev.parent == dev || &starget->dev == dev) { |
1205 | kref_get(&starget->reap_ref); | 1206 | kref_get(&starget->reap_ref); |
1206 | last_target = starget; | 1207 | last_target = starget; |
1208 | starget->state = STARGET_REMOVE; | ||
1207 | spin_unlock_irqrestore(shost->host_lock, flags); | 1209 | spin_unlock_irqrestore(shost->host_lock, flags); |
1208 | __scsi_remove_target(starget); | 1210 | __scsi_remove_target(starget); |
1209 | scsi_target_reap(starget); | 1211 | scsi_target_reap(starget); |
1210 | goto restart; | 1212 | goto restart; |
1211 | } | 1213 | } |
1212 | } | 1214 | } |
1213 | spin_unlock_irqrestore(shost->host_lock, flags); | 1215 | spin_unlock_irqrestore(shost->host_lock, flags); |
1214 | } | 1216 | } |
1215 | EXPORT_SYMBOL(scsi_remove_target); | 1217 | EXPORT_SYMBOL(scsi_remove_target); |
1216 | 1218 | ||
1217 | int scsi_register_driver(struct device_driver *drv) | 1219 | int scsi_register_driver(struct device_driver *drv) |
1218 | { | 1220 | { |
1219 | drv->bus = &scsi_bus_type; | 1221 | drv->bus = &scsi_bus_type; |
1220 | 1222 | ||
1221 | return driver_register(drv); | 1223 | return driver_register(drv); |
1222 | } | 1224 | } |
1223 | EXPORT_SYMBOL(scsi_register_driver); | 1225 | EXPORT_SYMBOL(scsi_register_driver); |
1224 | 1226 | ||
1225 | int scsi_register_interface(struct class_interface *intf) | 1227 | int scsi_register_interface(struct class_interface *intf) |
1226 | { | 1228 | { |
1227 | intf->class = &sdev_class; | 1229 | intf->class = &sdev_class; |
1228 | 1230 | ||
1229 | return class_interface_register(intf); | 1231 | return class_interface_register(intf); |
1230 | } | 1232 | } |
1231 | EXPORT_SYMBOL(scsi_register_interface); | 1233 | EXPORT_SYMBOL(scsi_register_interface); |
1232 | 1234 | ||
1233 | /** | 1235 | /** |
1234 | * scsi_sysfs_add_host - add scsi host to subsystem | 1236 | * scsi_sysfs_add_host - add scsi host to subsystem |
1235 | * @shost: scsi host struct to add to subsystem | 1237 | * @shost: scsi host struct to add to subsystem |
1236 | **/ | 1238 | **/ |
1237 | int scsi_sysfs_add_host(struct Scsi_Host *shost) | 1239 | int scsi_sysfs_add_host(struct Scsi_Host *shost) |
1238 | { | 1240 | { |
1239 | int error, i; | 1241 | int error, i; |
1240 | 1242 | ||
1241 | /* add host specific attributes */ | 1243 | /* add host specific attributes */ |
1242 | if (shost->hostt->shost_attrs) { | 1244 | if (shost->hostt->shost_attrs) { |
1243 | for (i = 0; shost->hostt->shost_attrs[i]; i++) { | 1245 | for (i = 0; shost->hostt->shost_attrs[i]; i++) { |
1244 | error = device_create_file(&shost->shost_dev, | 1246 | error = device_create_file(&shost->shost_dev, |
1245 | shost->hostt->shost_attrs[i]); | 1247 | shost->hostt->shost_attrs[i]); |
1246 | if (error) | 1248 | if (error) |
1247 | return error; | 1249 | return error; |
1248 | } | 1250 | } |
1249 | } | 1251 | } |
1250 | 1252 | ||
1251 | transport_register_device(&shost->shost_gendev); | 1253 | transport_register_device(&shost->shost_gendev); |
1252 | transport_configure_device(&shost->shost_gendev); | 1254 | transport_configure_device(&shost->shost_gendev); |
1253 | return 0; | 1255 | return 0; |
1254 | } | 1256 | } |
1255 | 1257 | ||
1256 | static struct device_type scsi_dev_type = { | 1258 | static struct device_type scsi_dev_type = { |
1257 | .name = "scsi_device", | 1259 | .name = "scsi_device", |
1258 | .release = scsi_device_dev_release, | 1260 | .release = scsi_device_dev_release, |
1259 | .groups = scsi_sdev_attr_groups, | 1261 | .groups = scsi_sdev_attr_groups, |
1260 | }; | 1262 | }; |
1261 | 1263 | ||
1262 | void scsi_sysfs_device_initialize(struct scsi_device *sdev) | 1264 | void scsi_sysfs_device_initialize(struct scsi_device *sdev) |
1263 | { | 1265 | { |
1264 | unsigned long flags; | 1266 | unsigned long flags; |
1265 | struct Scsi_Host *shost = sdev->host; | 1267 | struct Scsi_Host *shost = sdev->host; |
1266 | struct scsi_target *starget = sdev->sdev_target; | 1268 | struct scsi_target *starget = sdev->sdev_target; |
1267 | 1269 | ||
1268 | device_initialize(&sdev->sdev_gendev); | 1270 | device_initialize(&sdev->sdev_gendev); |
1269 | sdev->sdev_gendev.bus = &scsi_bus_type; | 1271 | sdev->sdev_gendev.bus = &scsi_bus_type; |
1270 | sdev->sdev_gendev.type = &scsi_dev_type; | 1272 | sdev->sdev_gendev.type = &scsi_dev_type; |
1271 | dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%llu", | 1273 | dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%llu", |
1272 | sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); | 1274 | sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); |
1273 | 1275 | ||
1274 | device_initialize(&sdev->sdev_dev); | 1276 | device_initialize(&sdev->sdev_dev); |
1275 | sdev->sdev_dev.parent = get_device(&sdev->sdev_gendev); | 1277 | sdev->sdev_dev.parent = get_device(&sdev->sdev_gendev); |
1276 | sdev->sdev_dev.class = &sdev_class; | 1278 | sdev->sdev_dev.class = &sdev_class; |
1277 | dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%llu", | 1279 | dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%llu", |
1278 | sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); | 1280 | sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); |
1279 | /* | 1281 | /* |
1280 | * Get a default scsi_level from the target (derived from sibling | 1282 | * Get a default scsi_level from the target (derived from sibling |
1281 | * devices). This is the best we can do for guessing how to set | 1283 | * devices). This is the best we can do for guessing how to set |
1282 | * sdev->lun_in_cdb for the initial INQUIRY command. For LUN 0 the | 1284 | * sdev->lun_in_cdb for the initial INQUIRY command. For LUN 0 the |
1283 | * setting doesn't matter, because all the bits are zero anyway. | 1285 | * setting doesn't matter, because all the bits are zero anyway. |
1284 | * But it does matter for higher LUNs. | 1286 | * But it does matter for higher LUNs. |
1285 | */ | 1287 | */ |
1286 | sdev->scsi_level = starget->scsi_level; | 1288 | sdev->scsi_level = starget->scsi_level; |
1287 | if (sdev->scsi_level <= SCSI_2 && | 1289 | if (sdev->scsi_level <= SCSI_2 && |
1288 | sdev->scsi_level != SCSI_UNKNOWN && | 1290 | sdev->scsi_level != SCSI_UNKNOWN && |
1289 | !shost->no_scsi2_lun_in_cdb) | 1291 | !shost->no_scsi2_lun_in_cdb) |
1290 | sdev->lun_in_cdb = 1; | 1292 | sdev->lun_in_cdb = 1; |
1291 | 1293 | ||
1292 | transport_setup_device(&sdev->sdev_gendev); | 1294 | transport_setup_device(&sdev->sdev_gendev); |
1293 | spin_lock_irqsave(shost->host_lock, flags); | 1295 | spin_lock_irqsave(shost->host_lock, flags); |
1294 | list_add_tail(&sdev->same_target_siblings, &starget->devices); | 1296 | list_add_tail(&sdev->same_target_siblings, &starget->devices); |
1295 | list_add_tail(&sdev->siblings, &shost->__devices); | 1297 | list_add_tail(&sdev->siblings, &shost->__devices); |
1296 | spin_unlock_irqrestore(shost->host_lock, flags); | 1298 | spin_unlock_irqrestore(shost->host_lock, flags); |
1297 | /* | 1299 | /* |
1298 | * device can now only be removed via __scsi_remove_device() so hold | 1300 | * device can now only be removed via __scsi_remove_device() so hold |
1299 | * the target. Target will be held in CREATED state until something | 1301 | * the target. Target will be held in CREATED state until something |
1300 | * beneath it becomes visible (in which case it moves to RUNNING) | 1302 | * beneath it becomes visible (in which case it moves to RUNNING) |
1301 | */ | 1303 | */ |
1302 | kref_get(&starget->reap_ref); | 1304 | kref_get(&starget->reap_ref); |
1303 | } | 1305 | } |
1304 | 1306 | ||
1305 | int scsi_is_sdev_device(const struct device *dev) | 1307 | int scsi_is_sdev_device(const struct device *dev) |
1306 | { | 1308 | { |
1307 | return dev->type == &scsi_dev_type; | 1309 | return dev->type == &scsi_dev_type; |
1308 | } | 1310 | } |
1309 | EXPORT_SYMBOL(scsi_is_sdev_device); | 1311 | EXPORT_SYMBOL(scsi_is_sdev_device); |
1310 | 1312 | ||
1311 | /* A blank transport template that is used in drivers that don't | 1313 | /* A blank transport template that is used in drivers that don't |
1312 | * yet implement Transport Attributes */ | 1314 | * yet implement Transport Attributes */ |
1313 | struct scsi_transport_template blank_transport_template = { { { {NULL, }, }, }, }; | 1315 | struct scsi_transport_template blank_transport_template = { { { {NULL, }, }, }, }; |
1314 | 1316 |
include/scsi/scsi_device.h
1 | #ifndef _SCSI_SCSI_DEVICE_H | 1 | #ifndef _SCSI_SCSI_DEVICE_H |
2 | #define _SCSI_SCSI_DEVICE_H | 2 | #define _SCSI_SCSI_DEVICE_H |
3 | 3 | ||
4 | #include <linux/list.h> | 4 | #include <linux/list.h> |
5 | #include <linux/spinlock.h> | 5 | #include <linux/spinlock.h> |
6 | #include <linux/workqueue.h> | 6 | #include <linux/workqueue.h> |
7 | #include <linux/blkdev.h> | 7 | #include <linux/blkdev.h> |
8 | #include <scsi/scsi.h> | 8 | #include <scsi/scsi.h> |
9 | #include <linux/atomic.h> | 9 | #include <linux/atomic.h> |
10 | 10 | ||
11 | struct device; | 11 | struct device; |
12 | struct request_queue; | 12 | struct request_queue; |
13 | struct scsi_cmnd; | 13 | struct scsi_cmnd; |
14 | struct scsi_lun; | 14 | struct scsi_lun; |
15 | struct scsi_sense_hdr; | 15 | struct scsi_sense_hdr; |
16 | 16 | ||
17 | struct scsi_mode_data { | 17 | struct scsi_mode_data { |
18 | __u32 length; | 18 | __u32 length; |
19 | __u16 block_descriptor_length; | 19 | __u16 block_descriptor_length; |
20 | __u8 medium_type; | 20 | __u8 medium_type; |
21 | __u8 device_specific; | 21 | __u8 device_specific; |
22 | __u8 header_length; | 22 | __u8 header_length; |
23 | __u8 longlba:1; | 23 | __u8 longlba:1; |
24 | }; | 24 | }; |
25 | 25 | ||
26 | /* | 26 | /* |
27 | * sdev state: If you alter this, you also need to alter scsi_sysfs.c | 27 | * sdev state: If you alter this, you also need to alter scsi_sysfs.c |
28 | * (for the ascii descriptions) and the state model enforcer: | 28 | * (for the ascii descriptions) and the state model enforcer: |
29 | * scsi_lib:scsi_device_set_state(). | 29 | * scsi_lib:scsi_device_set_state(). |
30 | */ | 30 | */ |
31 | enum scsi_device_state { | 31 | enum scsi_device_state { |
32 | SDEV_CREATED = 1, /* device created but not added to sysfs | 32 | SDEV_CREATED = 1, /* device created but not added to sysfs |
33 | * Only internal commands allowed (for inq) */ | 33 | * Only internal commands allowed (for inq) */ |
34 | SDEV_RUNNING, /* device properly configured | 34 | SDEV_RUNNING, /* device properly configured |
35 | * All commands allowed */ | 35 | * All commands allowed */ |
36 | SDEV_CANCEL, /* beginning to delete device | 36 | SDEV_CANCEL, /* beginning to delete device |
37 | * Only error handler commands allowed */ | 37 | * Only error handler commands allowed */ |
38 | SDEV_DEL, /* device deleted | 38 | SDEV_DEL, /* device deleted |
39 | * no commands allowed */ | 39 | * no commands allowed */ |
40 | SDEV_QUIESCE, /* Device quiescent. No block commands | 40 | SDEV_QUIESCE, /* Device quiescent. No block commands |
41 | * will be accepted, only specials (which | 41 | * will be accepted, only specials (which |
42 | * originate in the mid-layer) */ | 42 | * originate in the mid-layer) */ |
43 | SDEV_OFFLINE, /* Device offlined (by error handling or | 43 | SDEV_OFFLINE, /* Device offlined (by error handling or |
44 | * user request */ | 44 | * user request */ |
45 | SDEV_TRANSPORT_OFFLINE, /* Offlined by transport class error handler */ | 45 | SDEV_TRANSPORT_OFFLINE, /* Offlined by transport class error handler */ |
46 | SDEV_BLOCK, /* Device blocked by scsi lld. No | 46 | SDEV_BLOCK, /* Device blocked by scsi lld. No |
47 | * scsi commands from user or midlayer | 47 | * scsi commands from user or midlayer |
48 | * should be issued to the scsi | 48 | * should be issued to the scsi |
49 | * lld. */ | 49 | * lld. */ |
50 | SDEV_CREATED_BLOCK, /* same as above but for created devices */ | 50 | SDEV_CREATED_BLOCK, /* same as above but for created devices */ |
51 | }; | 51 | }; |
52 | 52 | ||
53 | enum scsi_device_event { | 53 | enum scsi_device_event { |
54 | SDEV_EVT_MEDIA_CHANGE = 1, /* media has changed */ | 54 | SDEV_EVT_MEDIA_CHANGE = 1, /* media has changed */ |
55 | SDEV_EVT_INQUIRY_CHANGE_REPORTED, /* 3F 03 UA reported */ | 55 | SDEV_EVT_INQUIRY_CHANGE_REPORTED, /* 3F 03 UA reported */ |
56 | SDEV_EVT_CAPACITY_CHANGE_REPORTED, /* 2A 09 UA reported */ | 56 | SDEV_EVT_CAPACITY_CHANGE_REPORTED, /* 2A 09 UA reported */ |
57 | SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED, /* 38 07 UA reported */ | 57 | SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED, /* 38 07 UA reported */ |
58 | SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED, /* 2A 01 UA reported */ | 58 | SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED, /* 2A 01 UA reported */ |
59 | SDEV_EVT_LUN_CHANGE_REPORTED, /* 3F 0E UA reported */ | 59 | SDEV_EVT_LUN_CHANGE_REPORTED, /* 3F 0E UA reported */ |
60 | SDEV_EVT_ALUA_STATE_CHANGE_REPORTED, /* 2A 06 UA reported */ | 60 | SDEV_EVT_ALUA_STATE_CHANGE_REPORTED, /* 2A 06 UA reported */ |
61 | 61 | ||
62 | SDEV_EVT_FIRST = SDEV_EVT_MEDIA_CHANGE, | 62 | SDEV_EVT_FIRST = SDEV_EVT_MEDIA_CHANGE, |
63 | SDEV_EVT_LAST = SDEV_EVT_ALUA_STATE_CHANGE_REPORTED, | 63 | SDEV_EVT_LAST = SDEV_EVT_ALUA_STATE_CHANGE_REPORTED, |
64 | 64 | ||
65 | SDEV_EVT_MAXBITS = SDEV_EVT_LAST + 1 | 65 | SDEV_EVT_MAXBITS = SDEV_EVT_LAST + 1 |
66 | }; | 66 | }; |
67 | 67 | ||
68 | struct scsi_event { | 68 | struct scsi_event { |
69 | enum scsi_device_event evt_type; | 69 | enum scsi_device_event evt_type; |
70 | struct list_head node; | 70 | struct list_head node; |
71 | 71 | ||
72 | /* put union of data structures, for non-simple event types, | 72 | /* put union of data structures, for non-simple event types, |
73 | * here | 73 | * here |
74 | */ | 74 | */ |
75 | }; | 75 | }; |
76 | 76 | ||
77 | struct scsi_device { | 77 | struct scsi_device { |
78 | struct Scsi_Host *host; | 78 | struct Scsi_Host *host; |
79 | struct request_queue *request_queue; | 79 | struct request_queue *request_queue; |
80 | 80 | ||
81 | /* the next two are protected by the host->host_lock */ | 81 | /* the next two are protected by the host->host_lock */ |
82 | struct list_head siblings; /* list of all devices on this host */ | 82 | struct list_head siblings; /* list of all devices on this host */ |
83 | struct list_head same_target_siblings; /* just the devices sharing same target id */ | 83 | struct list_head same_target_siblings; /* just the devices sharing same target id */ |
84 | 84 | ||
85 | atomic_t device_busy; /* commands actually active on LLDD */ | 85 | atomic_t device_busy; /* commands actually active on LLDD */ |
86 | atomic_t device_blocked; /* Device returned QUEUE_FULL. */ | 86 | atomic_t device_blocked; /* Device returned QUEUE_FULL. */ |
87 | 87 | ||
88 | spinlock_t list_lock; | 88 | spinlock_t list_lock; |
89 | struct list_head cmd_list; /* queue of in use SCSI Command structures */ | 89 | struct list_head cmd_list; /* queue of in use SCSI Command structures */ |
90 | struct list_head starved_entry; | 90 | struct list_head starved_entry; |
91 | struct scsi_cmnd *current_cmnd; /* currently active command */ | 91 | struct scsi_cmnd *current_cmnd; /* currently active command */ |
92 | unsigned short queue_depth; /* How deep of a queue we want */ | 92 | unsigned short queue_depth; /* How deep of a queue we want */ |
93 | unsigned short max_queue_depth; /* max queue depth */ | 93 | unsigned short max_queue_depth; /* max queue depth */ |
94 | unsigned short last_queue_full_depth; /* These two are used by */ | 94 | unsigned short last_queue_full_depth; /* These two are used by */ |
95 | unsigned short last_queue_full_count; /* scsi_track_queue_full() */ | 95 | unsigned short last_queue_full_count; /* scsi_track_queue_full() */ |
96 | unsigned long last_queue_full_time; /* last queue full time */ | 96 | unsigned long last_queue_full_time; /* last queue full time */ |
97 | unsigned long queue_ramp_up_period; /* ramp up period in jiffies */ | 97 | unsigned long queue_ramp_up_period; /* ramp up period in jiffies */ |
98 | #define SCSI_DEFAULT_RAMP_UP_PERIOD (120 * HZ) | 98 | #define SCSI_DEFAULT_RAMP_UP_PERIOD (120 * HZ) |
99 | 99 | ||
100 | unsigned long last_queue_ramp_up; /* last queue ramp up time */ | 100 | unsigned long last_queue_ramp_up; /* last queue ramp up time */ |
101 | 101 | ||
102 | unsigned int id, channel; | 102 | unsigned int id, channel; |
103 | u64 lun; | 103 | u64 lun; |
104 | unsigned int manufacturer; /* Manufacturer of device, for using | 104 | unsigned int manufacturer; /* Manufacturer of device, for using |
105 | * vendor-specific cmd's */ | 105 | * vendor-specific cmd's */ |
106 | unsigned sector_size; /* size in bytes */ | 106 | unsigned sector_size; /* size in bytes */ |
107 | 107 | ||
108 | void *hostdata; /* available to low-level driver */ | 108 | void *hostdata; /* available to low-level driver */ |
109 | char type; | 109 | char type; |
110 | char scsi_level; | 110 | char scsi_level; |
111 | char inq_periph_qual; /* PQ from INQUIRY data */ | 111 | char inq_periph_qual; /* PQ from INQUIRY data */ |
112 | unsigned char inquiry_len; /* valid bytes in 'inquiry' */ | 112 | unsigned char inquiry_len; /* valid bytes in 'inquiry' */ |
113 | unsigned char * inquiry; /* INQUIRY response data */ | 113 | unsigned char * inquiry; /* INQUIRY response data */ |
114 | const char * vendor; /* [back_compat] point into 'inquiry' ... */ | 114 | const char * vendor; /* [back_compat] point into 'inquiry' ... */ |
115 | const char * model; /* ... after scan; point to static string */ | 115 | const char * model; /* ... after scan; point to static string */ |
116 | const char * rev; /* ... "nullnullnullnull" before scan */ | 116 | const char * rev; /* ... "nullnullnullnull" before scan */ |
117 | 117 | ||
118 | #define SCSI_VPD_PG_LEN 255 | 118 | #define SCSI_VPD_PG_LEN 255 |
119 | int vpd_pg83_len; | 119 | int vpd_pg83_len; |
120 | unsigned char *vpd_pg83; | 120 | unsigned char *vpd_pg83; |
121 | int vpd_pg80_len; | 121 | int vpd_pg80_len; |
122 | unsigned char *vpd_pg80; | 122 | unsigned char *vpd_pg80; |
123 | unsigned char current_tag; /* current tag */ | 123 | unsigned char current_tag; /* current tag */ |
124 | struct scsi_target *sdev_target; /* used only for single_lun */ | 124 | struct scsi_target *sdev_target; /* used only for single_lun */ |
125 | 125 | ||
126 | unsigned int sdev_bflags; /* black/white flags as also found in | 126 | unsigned int sdev_bflags; /* black/white flags as also found in |
127 | * scsi_devinfo.[hc]. For now used only to | 127 | * scsi_devinfo.[hc]. For now used only to |
128 | * pass settings from slave_alloc to scsi | 128 | * pass settings from slave_alloc to scsi |
129 | * core. */ | 129 | * core. */ |
130 | unsigned int eh_timeout; /* Error handling timeout */ | 130 | unsigned int eh_timeout; /* Error handling timeout */ |
131 | unsigned removable:1; | 131 | unsigned removable:1; |
132 | unsigned changed:1; /* Data invalid due to media change */ | 132 | unsigned changed:1; /* Data invalid due to media change */ |
133 | unsigned busy:1; /* Used to prevent races */ | 133 | unsigned busy:1; /* Used to prevent races */ |
134 | unsigned lockable:1; /* Able to prevent media removal */ | 134 | unsigned lockable:1; /* Able to prevent media removal */ |
135 | unsigned locked:1; /* Media removal disabled */ | 135 | unsigned locked:1; /* Media removal disabled */ |
136 | unsigned borken:1; /* Tell the Seagate driver to be | 136 | unsigned borken:1; /* Tell the Seagate driver to be |
137 | * painfully slow on this device */ | 137 | * painfully slow on this device */ |
138 | unsigned disconnect:1; /* can disconnect */ | 138 | unsigned disconnect:1; /* can disconnect */ |
139 | unsigned soft_reset:1; /* Uses soft reset option */ | 139 | unsigned soft_reset:1; /* Uses soft reset option */ |
140 | unsigned sdtr:1; /* Device supports SDTR messages */ | 140 | unsigned sdtr:1; /* Device supports SDTR messages */ |
141 | unsigned wdtr:1; /* Device supports WDTR messages */ | 141 | unsigned wdtr:1; /* Device supports WDTR messages */ |
142 | unsigned ppr:1; /* Device supports PPR messages */ | 142 | unsigned ppr:1; /* Device supports PPR messages */ |
143 | unsigned tagged_supported:1; /* Supports SCSI-II tagged queuing */ | 143 | unsigned tagged_supported:1; /* Supports SCSI-II tagged queuing */ |
144 | unsigned simple_tags:1; /* simple queue tag messages are enabled */ | 144 | unsigned simple_tags:1; /* simple queue tag messages are enabled */ |
145 | unsigned was_reset:1; /* There was a bus reset on the bus for | 145 | unsigned was_reset:1; /* There was a bus reset on the bus for |
146 | * this device */ | 146 | * this device */ |
147 | unsigned expecting_cc_ua:1; /* Expecting a CHECK_CONDITION/UNIT_ATTN | 147 | unsigned expecting_cc_ua:1; /* Expecting a CHECK_CONDITION/UNIT_ATTN |
148 | * because we did a bus reset. */ | 148 | * because we did a bus reset. */ |
149 | unsigned use_10_for_rw:1; /* first try 10-byte read / write */ | 149 | unsigned use_10_for_rw:1; /* first try 10-byte read / write */ |
150 | unsigned use_10_for_ms:1; /* first try 10-byte mode sense/select */ | 150 | unsigned use_10_for_ms:1; /* first try 10-byte mode sense/select */ |
151 | unsigned no_report_opcodes:1; /* no REPORT SUPPORTED OPERATION CODES */ | 151 | unsigned no_report_opcodes:1; /* no REPORT SUPPORTED OPERATION CODES */ |
152 | unsigned no_write_same:1; /* no WRITE SAME command */ | 152 | unsigned no_write_same:1; /* no WRITE SAME command */ |
153 | unsigned use_16_for_rw:1; /* Use read/write(16) over read/write(10) */ | 153 | unsigned use_16_for_rw:1; /* Use read/write(16) over read/write(10) */ |
154 | unsigned skip_ms_page_8:1; /* do not use MODE SENSE page 0x08 */ | 154 | unsigned skip_ms_page_8:1; /* do not use MODE SENSE page 0x08 */ |
155 | unsigned skip_ms_page_3f:1; /* do not use MODE SENSE page 0x3f */ | 155 | unsigned skip_ms_page_3f:1; /* do not use MODE SENSE page 0x3f */ |
156 | unsigned skip_vpd_pages:1; /* do not read VPD pages */ | 156 | unsigned skip_vpd_pages:1; /* do not read VPD pages */ |
157 | unsigned try_vpd_pages:1; /* attempt to read VPD pages */ | 157 | unsigned try_vpd_pages:1; /* attempt to read VPD pages */ |
158 | unsigned use_192_bytes_for_3f:1; /* ask for 192 bytes from page 0x3f */ | 158 | unsigned use_192_bytes_for_3f:1; /* ask for 192 bytes from page 0x3f */ |
159 | unsigned no_start_on_add:1; /* do not issue start on add */ | 159 | unsigned no_start_on_add:1; /* do not issue start on add */ |
160 | unsigned allow_restart:1; /* issue START_UNIT in error handler */ | 160 | unsigned allow_restart:1; /* issue START_UNIT in error handler */ |
161 | unsigned manage_start_stop:1; /* Let HLD (sd) manage start/stop */ | 161 | unsigned manage_start_stop:1; /* Let HLD (sd) manage start/stop */ |
162 | unsigned start_stop_pwr_cond:1; /* Set power cond. in START_STOP_UNIT */ | 162 | unsigned start_stop_pwr_cond:1; /* Set power cond. in START_STOP_UNIT */ |
163 | unsigned no_uld_attach:1; /* disable connecting to upper level drivers */ | 163 | unsigned no_uld_attach:1; /* disable connecting to upper level drivers */ |
164 | unsigned select_no_atn:1; | 164 | unsigned select_no_atn:1; |
165 | unsigned fix_capacity:1; /* READ_CAPACITY is too high by 1 */ | 165 | unsigned fix_capacity:1; /* READ_CAPACITY is too high by 1 */ |
166 | unsigned guess_capacity:1; /* READ_CAPACITY might be too high by 1 */ | 166 | unsigned guess_capacity:1; /* READ_CAPACITY might be too high by 1 */ |
167 | unsigned retry_hwerror:1; /* Retry HARDWARE_ERROR */ | 167 | unsigned retry_hwerror:1; /* Retry HARDWARE_ERROR */ |
168 | unsigned last_sector_bug:1; /* do not use multisector accesses on | 168 | unsigned last_sector_bug:1; /* do not use multisector accesses on |
169 | SD_LAST_BUGGY_SECTORS */ | 169 | SD_LAST_BUGGY_SECTORS */ |
170 | unsigned no_read_disc_info:1; /* Avoid READ_DISC_INFO cmds */ | 170 | unsigned no_read_disc_info:1; /* Avoid READ_DISC_INFO cmds */ |
171 | unsigned no_read_capacity_16:1; /* Avoid READ_CAPACITY_16 cmds */ | 171 | unsigned no_read_capacity_16:1; /* Avoid READ_CAPACITY_16 cmds */ |
172 | unsigned try_rc_10_first:1; /* Try READ_CAPACACITY_10 first */ | 172 | unsigned try_rc_10_first:1; /* Try READ_CAPACACITY_10 first */ |
173 | unsigned is_visible:1; /* is the device visible in sysfs */ | 173 | unsigned is_visible:1; /* is the device visible in sysfs */ |
174 | unsigned wce_default_on:1; /* Cache is ON by default */ | 174 | unsigned wce_default_on:1; /* Cache is ON by default */ |
175 | unsigned no_dif:1; /* T10 PI (DIF) should be disabled */ | 175 | unsigned no_dif:1; /* T10 PI (DIF) should be disabled */ |
176 | unsigned broken_fua:1; /* Don't set FUA bit */ | 176 | unsigned broken_fua:1; /* Don't set FUA bit */ |
177 | unsigned lun_in_cdb:1; /* Store LUN bits in CDB[1] */ | 177 | unsigned lun_in_cdb:1; /* Store LUN bits in CDB[1] */ |
178 | 178 | ||
179 | atomic_t disk_events_disable_depth; /* disable depth for disk events */ | 179 | atomic_t disk_events_disable_depth; /* disable depth for disk events */ |
180 | 180 | ||
181 | DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */ | 181 | DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */ |
182 | DECLARE_BITMAP(pending_events, SDEV_EVT_MAXBITS); /* pending events */ | 182 | DECLARE_BITMAP(pending_events, SDEV_EVT_MAXBITS); /* pending events */ |
183 | struct list_head event_list; /* asserted events */ | 183 | struct list_head event_list; /* asserted events */ |
184 | struct work_struct event_work; | 184 | struct work_struct event_work; |
185 | 185 | ||
186 | unsigned int max_device_blocked; /* what device_blocked counts down from */ | 186 | unsigned int max_device_blocked; /* what device_blocked counts down from */ |
187 | #define SCSI_DEFAULT_DEVICE_BLOCKED 3 | 187 | #define SCSI_DEFAULT_DEVICE_BLOCKED 3 |
188 | 188 | ||
189 | atomic_t iorequest_cnt; | 189 | atomic_t iorequest_cnt; |
190 | atomic_t iodone_cnt; | 190 | atomic_t iodone_cnt; |
191 | atomic_t ioerr_cnt; | 191 | atomic_t ioerr_cnt; |
192 | 192 | ||
193 | struct device sdev_gendev, | 193 | struct device sdev_gendev, |
194 | sdev_dev; | 194 | sdev_dev; |
195 | 195 | ||
196 | struct execute_work ew; /* used to get process context on put */ | 196 | struct execute_work ew; /* used to get process context on put */ |
197 | struct work_struct requeue_work; | 197 | struct work_struct requeue_work; |
198 | 198 | ||
199 | struct scsi_device_handler *handler; | 199 | struct scsi_device_handler *handler; |
200 | void *handler_data; | 200 | void *handler_data; |
201 | 201 | ||
202 | enum scsi_device_state sdev_state; | 202 | enum scsi_device_state sdev_state; |
203 | unsigned long sdev_data[0]; | 203 | unsigned long sdev_data[0]; |
204 | } __attribute__((aligned(sizeof(unsigned long)))); | 204 | } __attribute__((aligned(sizeof(unsigned long)))); |
205 | 205 | ||
206 | #define to_scsi_device(d) \ | 206 | #define to_scsi_device(d) \ |
207 | container_of(d, struct scsi_device, sdev_gendev) | 207 | container_of(d, struct scsi_device, sdev_gendev) |
208 | #define class_to_sdev(d) \ | 208 | #define class_to_sdev(d) \ |
209 | container_of(d, struct scsi_device, sdev_dev) | 209 | container_of(d, struct scsi_device, sdev_dev) |
210 | #define transport_class_to_sdev(class_dev) \ | 210 | #define transport_class_to_sdev(class_dev) \ |
211 | to_scsi_device(class_dev->parent) | 211 | to_scsi_device(class_dev->parent) |
212 | 212 | ||
213 | #define sdev_dbg(sdev, fmt, a...) \ | 213 | #define sdev_dbg(sdev, fmt, a...) \ |
214 | dev_dbg(&(sdev)->sdev_gendev, fmt, ##a) | 214 | dev_dbg(&(sdev)->sdev_gendev, fmt, ##a) |
215 | 215 | ||
216 | /* | 216 | /* |
217 | * like scmd_printk, but the device name is passed in | 217 | * like scmd_printk, but the device name is passed in |
218 | * as a string pointer | 218 | * as a string pointer |
219 | */ | 219 | */ |
220 | __printf(4, 5) void | 220 | __printf(4, 5) void |
221 | sdev_prefix_printk(const char *, const struct scsi_device *, const char *, | 221 | sdev_prefix_printk(const char *, const struct scsi_device *, const char *, |
222 | const char *, ...); | 222 | const char *, ...); |
223 | 223 | ||
224 | #define sdev_printk(l, sdev, fmt, a...) \ | 224 | #define sdev_printk(l, sdev, fmt, a...) \ |
225 | sdev_prefix_printk(l, sdev, NULL, fmt, ##a) | 225 | sdev_prefix_printk(l, sdev, NULL, fmt, ##a) |
226 | 226 | ||
227 | __printf(3, 4) void | 227 | __printf(3, 4) void |
228 | scmd_printk(const char *, const struct scsi_cmnd *, const char *, ...); | 228 | scmd_printk(const char *, const struct scsi_cmnd *, const char *, ...); |
229 | 229 | ||
230 | #define scmd_dbg(scmd, fmt, a...) \ | 230 | #define scmd_dbg(scmd, fmt, a...) \ |
231 | do { \ | 231 | do { \ |
232 | if ((scmd)->request->rq_disk) \ | 232 | if ((scmd)->request->rq_disk) \ |
233 | sdev_dbg((scmd)->device, "[%s] " fmt, \ | 233 | sdev_dbg((scmd)->device, "[%s] " fmt, \ |
234 | (scmd)->request->rq_disk->disk_name, ##a);\ | 234 | (scmd)->request->rq_disk->disk_name, ##a);\ |
235 | else \ | 235 | else \ |
236 | sdev_dbg((scmd)->device, fmt, ##a); \ | 236 | sdev_dbg((scmd)->device, fmt, ##a); \ |
237 | } while (0) | 237 | } while (0) |
238 | 238 | ||
239 | enum scsi_target_state { | 239 | enum scsi_target_state { |
240 | STARGET_CREATED = 1, | 240 | STARGET_CREATED = 1, |
241 | STARGET_RUNNING, | 241 | STARGET_RUNNING, |
242 | STARGET_REMOVE, | ||
242 | STARGET_DEL, | 243 | STARGET_DEL, |
243 | }; | 244 | }; |
244 | 245 | ||
245 | /* | 246 | /* |
246 | * scsi_target: representation of a scsi target, for now, this is only | 247 | * scsi_target: representation of a scsi target, for now, this is only |
247 | * used for single_lun devices. If no one has active IO to the target, | 248 | * used for single_lun devices. If no one has active IO to the target, |
248 | * starget_sdev_user is NULL, else it points to the active sdev. | 249 | * starget_sdev_user is NULL, else it points to the active sdev. |
249 | */ | 250 | */ |
250 | struct scsi_target { | 251 | struct scsi_target { |
251 | struct scsi_device *starget_sdev_user; | 252 | struct scsi_device *starget_sdev_user; |
252 | struct list_head siblings; | 253 | struct list_head siblings; |
253 | struct list_head devices; | 254 | struct list_head devices; |
254 | struct device dev; | 255 | struct device dev; |
255 | struct kref reap_ref; /* last put renders target invisible */ | 256 | struct kref reap_ref; /* last put renders target invisible */ |
256 | unsigned int channel; | 257 | unsigned int channel; |
257 | unsigned int id; /* target id ... replace | 258 | unsigned int id; /* target id ... replace |
258 | * scsi_device.id eventually */ | 259 | * scsi_device.id eventually */ |
259 | unsigned int create:1; /* signal that it needs to be added */ | 260 | unsigned int create:1; /* signal that it needs to be added */ |
260 | unsigned int single_lun:1; /* Indicates we should only | 261 | unsigned int single_lun:1; /* Indicates we should only |
261 | * allow I/O to one of the luns | 262 | * allow I/O to one of the luns |
262 | * for the device at a time. */ | 263 | * for the device at a time. */ |
263 | unsigned int pdt_1f_for_no_lun:1; /* PDT = 0x1f | 264 | unsigned int pdt_1f_for_no_lun:1; /* PDT = 0x1f |
264 | * means no lun present. */ | 265 | * means no lun present. */ |
265 | unsigned int no_report_luns:1; /* Don't use | 266 | unsigned int no_report_luns:1; /* Don't use |
266 | * REPORT LUNS for scanning. */ | 267 | * REPORT LUNS for scanning. */ |
267 | unsigned int expecting_lun_change:1; /* A device has reported | 268 | unsigned int expecting_lun_change:1; /* A device has reported |
268 | * a 3F/0E UA, other devices on | 269 | * a 3F/0E UA, other devices on |
269 | * the same target will also. */ | 270 | * the same target will also. */ |
270 | /* commands actually active on LLD. */ | 271 | /* commands actually active on LLD. */ |
271 | atomic_t target_busy; | 272 | atomic_t target_busy; |
272 | atomic_t target_blocked; | 273 | atomic_t target_blocked; |
273 | 274 | ||
274 | /* | 275 | /* |
275 | * LLDs should set this in the slave_alloc host template callout. | 276 | * LLDs should set this in the slave_alloc host template callout. |
276 | * If set to zero then there is not limit. | 277 | * If set to zero then there is not limit. |
277 | */ | 278 | */ |
278 | unsigned int can_queue; | 279 | unsigned int can_queue; |
279 | unsigned int max_target_blocked; | 280 | unsigned int max_target_blocked; |
280 | #define SCSI_DEFAULT_TARGET_BLOCKED 3 | 281 | #define SCSI_DEFAULT_TARGET_BLOCKED 3 |
281 | 282 | ||
282 | char scsi_level; | 283 | char scsi_level; |
283 | enum scsi_target_state state; | 284 | enum scsi_target_state state; |
284 | void *hostdata; /* available to low-level driver */ | 285 | void *hostdata; /* available to low-level driver */ |
285 | unsigned long starget_data[0]; /* for the transport */ | 286 | unsigned long starget_data[0]; /* for the transport */ |
286 | /* starget_data must be the last element!!!! */ | 287 | /* starget_data must be the last element!!!! */ |
287 | } __attribute__((aligned(sizeof(unsigned long)))); | 288 | } __attribute__((aligned(sizeof(unsigned long)))); |
288 | 289 | ||
289 | #define to_scsi_target(d) container_of(d, struct scsi_target, dev) | 290 | #define to_scsi_target(d) container_of(d, struct scsi_target, dev) |
290 | static inline struct scsi_target *scsi_target(struct scsi_device *sdev) | 291 | static inline struct scsi_target *scsi_target(struct scsi_device *sdev) |
291 | { | 292 | { |
292 | return to_scsi_target(sdev->sdev_gendev.parent); | 293 | return to_scsi_target(sdev->sdev_gendev.parent); |
293 | } | 294 | } |
294 | #define transport_class_to_starget(class_dev) \ | 295 | #define transport_class_to_starget(class_dev) \ |
295 | to_scsi_target(class_dev->parent) | 296 | to_scsi_target(class_dev->parent) |
296 | 297 | ||
297 | #define starget_printk(prefix, starget, fmt, a...) \ | 298 | #define starget_printk(prefix, starget, fmt, a...) \ |
298 | dev_printk(prefix, &(starget)->dev, fmt, ##a) | 299 | dev_printk(prefix, &(starget)->dev, fmt, ##a) |
299 | 300 | ||
300 | extern struct scsi_device *__scsi_add_device(struct Scsi_Host *, | 301 | extern struct scsi_device *__scsi_add_device(struct Scsi_Host *, |
301 | uint, uint, u64, void *hostdata); | 302 | uint, uint, u64, void *hostdata); |
302 | extern int scsi_add_device(struct Scsi_Host *host, uint channel, | 303 | extern int scsi_add_device(struct Scsi_Host *host, uint channel, |
303 | uint target, u64 lun); | 304 | uint target, u64 lun); |
304 | extern int scsi_register_device_handler(struct scsi_device_handler *scsi_dh); | 305 | extern int scsi_register_device_handler(struct scsi_device_handler *scsi_dh); |
305 | extern void scsi_remove_device(struct scsi_device *); | 306 | extern void scsi_remove_device(struct scsi_device *); |
306 | extern int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh); | 307 | extern int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh); |
307 | void scsi_attach_vpd(struct scsi_device *sdev); | 308 | void scsi_attach_vpd(struct scsi_device *sdev); |
308 | 309 | ||
309 | extern int scsi_device_get(struct scsi_device *); | 310 | extern int scsi_device_get(struct scsi_device *); |
310 | extern void scsi_device_put(struct scsi_device *); | 311 | extern void scsi_device_put(struct scsi_device *); |
311 | extern struct scsi_device *scsi_device_lookup(struct Scsi_Host *, | 312 | extern struct scsi_device *scsi_device_lookup(struct Scsi_Host *, |
312 | uint, uint, u64); | 313 | uint, uint, u64); |
313 | extern struct scsi_device *__scsi_device_lookup(struct Scsi_Host *, | 314 | extern struct scsi_device *__scsi_device_lookup(struct Scsi_Host *, |
314 | uint, uint, u64); | 315 | uint, uint, u64); |
315 | extern struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *, | 316 | extern struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *, |
316 | u64); | 317 | u64); |
317 | extern struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *, | 318 | extern struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *, |
318 | u64); | 319 | u64); |
319 | extern void starget_for_each_device(struct scsi_target *, void *, | 320 | extern void starget_for_each_device(struct scsi_target *, void *, |
320 | void (*fn)(struct scsi_device *, void *)); | 321 | void (*fn)(struct scsi_device *, void *)); |
321 | extern void __starget_for_each_device(struct scsi_target *, void *, | 322 | extern void __starget_for_each_device(struct scsi_target *, void *, |
322 | void (*fn)(struct scsi_device *, | 323 | void (*fn)(struct scsi_device *, |
323 | void *)); | 324 | void *)); |
324 | 325 | ||
325 | /* only exposed to implement shost_for_each_device */ | 326 | /* only exposed to implement shost_for_each_device */ |
326 | extern struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *, | 327 | extern struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *, |
327 | struct scsi_device *); | 328 | struct scsi_device *); |
328 | 329 | ||
329 | /** | 330 | /** |
330 | * shost_for_each_device - iterate over all devices of a host | 331 | * shost_for_each_device - iterate over all devices of a host |
331 | * @sdev: the &struct scsi_device to use as a cursor | 332 | * @sdev: the &struct scsi_device to use as a cursor |
332 | * @shost: the &struct scsi_host to iterate over | 333 | * @shost: the &struct scsi_host to iterate over |
333 | * | 334 | * |
334 | * Iterator that returns each device attached to @shost. This loop | 335 | * Iterator that returns each device attached to @shost. This loop |
335 | * takes a reference on each device and releases it at the end. If | 336 | * takes a reference on each device and releases it at the end. If |
336 | * you break out of the loop, you must call scsi_device_put(sdev). | 337 | * you break out of the loop, you must call scsi_device_put(sdev). |
337 | */ | 338 | */ |
338 | #define shost_for_each_device(sdev, shost) \ | 339 | #define shost_for_each_device(sdev, shost) \ |
339 | for ((sdev) = __scsi_iterate_devices((shost), NULL); \ | 340 | for ((sdev) = __scsi_iterate_devices((shost), NULL); \ |
340 | (sdev); \ | 341 | (sdev); \ |
341 | (sdev) = __scsi_iterate_devices((shost), (sdev))) | 342 | (sdev) = __scsi_iterate_devices((shost), (sdev))) |
342 | 343 | ||
343 | /** | 344 | /** |
344 | * __shost_for_each_device - iterate over all devices of a host (UNLOCKED) | 345 | * __shost_for_each_device - iterate over all devices of a host (UNLOCKED) |
345 | * @sdev: the &struct scsi_device to use as a cursor | 346 | * @sdev: the &struct scsi_device to use as a cursor |
346 | * @shost: the &struct scsi_host to iterate over | 347 | * @shost: the &struct scsi_host to iterate over |
347 | * | 348 | * |
348 | * Iterator that returns each device attached to @shost. It does _not_ | 349 | * Iterator that returns each device attached to @shost. It does _not_ |
349 | * take a reference on the scsi_device, so the whole loop must be | 350 | * take a reference on the scsi_device, so the whole loop must be |
350 | * protected by shost->host_lock. | 351 | * protected by shost->host_lock. |
351 | * | 352 | * |
352 | * Note: The only reason to use this is because you need to access the | 353 | * Note: The only reason to use this is because you need to access the |
353 | * device list in interrupt context. Otherwise you really want to use | 354 | * device list in interrupt context. Otherwise you really want to use |
354 | * shost_for_each_device instead. | 355 | * shost_for_each_device instead. |
355 | */ | 356 | */ |
356 | #define __shost_for_each_device(sdev, shost) \ | 357 | #define __shost_for_each_device(sdev, shost) \ |
357 | list_for_each_entry((sdev), &((shost)->__devices), siblings) | 358 | list_for_each_entry((sdev), &((shost)->__devices), siblings) |
358 | 359 | ||
359 | extern int scsi_change_queue_depth(struct scsi_device *, int); | 360 | extern int scsi_change_queue_depth(struct scsi_device *, int); |
360 | extern int scsi_track_queue_full(struct scsi_device *, int); | 361 | extern int scsi_track_queue_full(struct scsi_device *, int); |
361 | 362 | ||
362 | extern int scsi_set_medium_removal(struct scsi_device *, char); | 363 | extern int scsi_set_medium_removal(struct scsi_device *, char); |
363 | 364 | ||
364 | extern int scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, | 365 | extern int scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, |
365 | unsigned char *buffer, int len, int timeout, | 366 | unsigned char *buffer, int len, int timeout, |
366 | int retries, struct scsi_mode_data *data, | 367 | int retries, struct scsi_mode_data *data, |
367 | struct scsi_sense_hdr *); | 368 | struct scsi_sense_hdr *); |
368 | extern int scsi_mode_select(struct scsi_device *sdev, int pf, int sp, | 369 | extern int scsi_mode_select(struct scsi_device *sdev, int pf, int sp, |
369 | int modepage, unsigned char *buffer, int len, | 370 | int modepage, unsigned char *buffer, int len, |
370 | int timeout, int retries, | 371 | int timeout, int retries, |
371 | struct scsi_mode_data *data, | 372 | struct scsi_mode_data *data, |
372 | struct scsi_sense_hdr *); | 373 | struct scsi_sense_hdr *); |
373 | extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout, | 374 | extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout, |
374 | int retries, struct scsi_sense_hdr *sshdr); | 375 | int retries, struct scsi_sense_hdr *sshdr); |
375 | extern int scsi_get_vpd_page(struct scsi_device *, u8 page, unsigned char *buf, | 376 | extern int scsi_get_vpd_page(struct scsi_device *, u8 page, unsigned char *buf, |
376 | int buf_len); | 377 | int buf_len); |
377 | extern int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer, | 378 | extern int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer, |
378 | unsigned int len, unsigned char opcode); | 379 | unsigned int len, unsigned char opcode); |
379 | extern int scsi_device_set_state(struct scsi_device *sdev, | 380 | extern int scsi_device_set_state(struct scsi_device *sdev, |
380 | enum scsi_device_state state); | 381 | enum scsi_device_state state); |
381 | extern struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type, | 382 | extern struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type, |
382 | gfp_t gfpflags); | 383 | gfp_t gfpflags); |
383 | extern void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt); | 384 | extern void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt); |
384 | extern void sdev_evt_send_simple(struct scsi_device *sdev, | 385 | extern void sdev_evt_send_simple(struct scsi_device *sdev, |
385 | enum scsi_device_event evt_type, gfp_t gfpflags); | 386 | enum scsi_device_event evt_type, gfp_t gfpflags); |
386 | extern int scsi_device_quiesce(struct scsi_device *sdev); | 387 | extern int scsi_device_quiesce(struct scsi_device *sdev); |
387 | extern void scsi_device_resume(struct scsi_device *sdev); | 388 | extern void scsi_device_resume(struct scsi_device *sdev); |
388 | extern void scsi_target_quiesce(struct scsi_target *); | 389 | extern void scsi_target_quiesce(struct scsi_target *); |
389 | extern void scsi_target_resume(struct scsi_target *); | 390 | extern void scsi_target_resume(struct scsi_target *); |
390 | extern void scsi_scan_target(struct device *parent, unsigned int channel, | 391 | extern void scsi_scan_target(struct device *parent, unsigned int channel, |
391 | unsigned int id, u64 lun, int rescan); | 392 | unsigned int id, u64 lun, int rescan); |
392 | extern void scsi_target_reap(struct scsi_target *); | 393 | extern void scsi_target_reap(struct scsi_target *); |
393 | extern void scsi_target_block(struct device *); | 394 | extern void scsi_target_block(struct device *); |
394 | extern void scsi_target_unblock(struct device *, enum scsi_device_state); | 395 | extern void scsi_target_unblock(struct device *, enum scsi_device_state); |
395 | extern void scsi_remove_target(struct device *); | 396 | extern void scsi_remove_target(struct device *); |
396 | extern const char *scsi_device_state_name(enum scsi_device_state); | 397 | extern const char *scsi_device_state_name(enum scsi_device_state); |
397 | extern int scsi_is_sdev_device(const struct device *); | 398 | extern int scsi_is_sdev_device(const struct device *); |
398 | extern int scsi_is_target_device(const struct device *); | 399 | extern int scsi_is_target_device(const struct device *); |
399 | extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, | 400 | extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, |
400 | int data_direction, void *buffer, unsigned bufflen, | 401 | int data_direction, void *buffer, unsigned bufflen, |
401 | unsigned char *sense, int timeout, int retries, | 402 | unsigned char *sense, int timeout, int retries, |
402 | u64 flags, int *resid); | 403 | u64 flags, int *resid); |
403 | extern int scsi_execute_req_flags(struct scsi_device *sdev, | 404 | extern int scsi_execute_req_flags(struct scsi_device *sdev, |
404 | const unsigned char *cmd, int data_direction, void *buffer, | 405 | const unsigned char *cmd, int data_direction, void *buffer, |
405 | unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout, | 406 | unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout, |
406 | int retries, int *resid, u64 flags); | 407 | int retries, int *resid, u64 flags); |
/*
 * scsi_execute_req - issue a SCSI command and collect sense data
 *
 * Convenience wrapper around scsi_execute_req_flags() that passes
 * flags == 0; all other arguments are forwarded unchanged.
 */
static inline int scsi_execute_req(struct scsi_device *sdev,
	const unsigned char *cmd, int data_direction, void *buffer,
	unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
	int retries, int *resid)
{
	return scsi_execute_req_flags(sdev, cmd, data_direction, buffer,
		bufflen, sshdr, timeout, retries, resid, 0);
}
extern void sdev_disable_disk_events(struct scsi_device *sdev);
extern void sdev_enable_disk_events(struct scsi_device *sdev);

#ifdef CONFIG_PM
extern int scsi_autopm_get_device(struct scsi_device *);
extern void scsi_autopm_put_device(struct scsi_device *);
#else
/* Without runtime PM the get/put pair degenerates to successful no-ops. */
static inline int scsi_autopm_get_device(struct scsi_device *d) { return 0; }
static inline void scsi_autopm_put_device(struct scsi_device *d) {}
#endif /* CONFIG_PM */
425 | 426 | ||
426 | static inline int __must_check scsi_device_reprobe(struct scsi_device *sdev) | 427 | static inline int __must_check scsi_device_reprobe(struct scsi_device *sdev) |
427 | { | 428 | { |
428 | return device_reprobe(&sdev->sdev_gendev); | 429 | return device_reprobe(&sdev->sdev_gendev); |
429 | } | 430 | } |
430 | 431 | ||
431 | static inline unsigned int sdev_channel(struct scsi_device *sdev) | 432 | static inline unsigned int sdev_channel(struct scsi_device *sdev) |
432 | { | 433 | { |
433 | return sdev->channel; | 434 | return sdev->channel; |
434 | } | 435 | } |
435 | 436 | ||
436 | static inline unsigned int sdev_id(struct scsi_device *sdev) | 437 | static inline unsigned int sdev_id(struct scsi_device *sdev) |
437 | { | 438 | { |
438 | return sdev->id; | 439 | return sdev->id; |
439 | } | 440 | } |
440 | 441 | ||
/* Shorthand accessors that resolve a scsi_cmnd to its device's id/channel. */
#define scmd_id(scmd) sdev_id((scmd)->device)
#define scmd_channel(scmd) sdev_channel((scmd)->device)
443 | 444 | ||
444 | /* | 445 | /* |
445 | * checks for positions of the SCSI state machine | 446 | * checks for positions of the SCSI state machine |
446 | */ | 447 | */ |
447 | static inline int scsi_device_online(struct scsi_device *sdev) | 448 | static inline int scsi_device_online(struct scsi_device *sdev) |
448 | { | 449 | { |
449 | return (sdev->sdev_state != SDEV_OFFLINE && | 450 | return (sdev->sdev_state != SDEV_OFFLINE && |
450 | sdev->sdev_state != SDEV_TRANSPORT_OFFLINE && | 451 | sdev->sdev_state != SDEV_TRANSPORT_OFFLINE && |
451 | sdev->sdev_state != SDEV_DEL); | 452 | sdev->sdev_state != SDEV_DEL); |
452 | } | 453 | } |
453 | static inline int scsi_device_blocked(struct scsi_device *sdev) | 454 | static inline int scsi_device_blocked(struct scsi_device *sdev) |
454 | { | 455 | { |
455 | return sdev->sdev_state == SDEV_BLOCK || | 456 | return sdev->sdev_state == SDEV_BLOCK || |
456 | sdev->sdev_state == SDEV_CREATED_BLOCK; | 457 | sdev->sdev_state == SDEV_CREATED_BLOCK; |
457 | } | 458 | } |
458 | static inline int scsi_device_created(struct scsi_device *sdev) | 459 | static inline int scsi_device_created(struct scsi_device *sdev) |
459 | { | 460 | { |
460 | return sdev->sdev_state == SDEV_CREATED || | 461 | return sdev->sdev_state == SDEV_CREATED || |
461 | sdev->sdev_state == SDEV_CREATED_BLOCK; | 462 | sdev->sdev_state == SDEV_CREATED_BLOCK; |
462 | } | 463 | } |
463 | 464 | ||
464 | /* accessor functions for the SCSI parameters */ | 465 | /* accessor functions for the SCSI parameters */ |
465 | static inline int scsi_device_sync(struct scsi_device *sdev) | 466 | static inline int scsi_device_sync(struct scsi_device *sdev) |
466 | { | 467 | { |
467 | return sdev->sdtr; | 468 | return sdev->sdtr; |
468 | } | 469 | } |
469 | static inline int scsi_device_wide(struct scsi_device *sdev) | 470 | static inline int scsi_device_wide(struct scsi_device *sdev) |
470 | { | 471 | { |
471 | return sdev->wdtr; | 472 | return sdev->wdtr; |
472 | } | 473 | } |
473 | static inline int scsi_device_dt(struct scsi_device *sdev) | 474 | static inline int scsi_device_dt(struct scsi_device *sdev) |
474 | { | 475 | { |
475 | return sdev->ppr; | 476 | return sdev->ppr; |
476 | } | 477 | } |
477 | static inline int scsi_device_dt_only(struct scsi_device *sdev) | 478 | static inline int scsi_device_dt_only(struct scsi_device *sdev) |
478 | { | 479 | { |
479 | if (sdev->inquiry_len < 57) | 480 | if (sdev->inquiry_len < 57) |
480 | return 0; | 481 | return 0; |
481 | return (sdev->inquiry[56] & 0x0c) == 0x04; | 482 | return (sdev->inquiry[56] & 0x0c) == 0x04; |
482 | } | 483 | } |
483 | static inline int scsi_device_ius(struct scsi_device *sdev) | 484 | static inline int scsi_device_ius(struct scsi_device *sdev) |
484 | { | 485 | { |
485 | if (sdev->inquiry_len < 57) | 486 | if (sdev->inquiry_len < 57) |
486 | return 0; | 487 | return 0; |
487 | return sdev->inquiry[56] & 0x01; | 488 | return sdev->inquiry[56] & 0x01; |
488 | } | 489 | } |
489 | static inline int scsi_device_qas(struct scsi_device *sdev) | 490 | static inline int scsi_device_qas(struct scsi_device *sdev) |
490 | { | 491 | { |
491 | if (sdev->inquiry_len < 57) | 492 | if (sdev->inquiry_len < 57) |
492 | return 0; | 493 | return 0; |
493 | return sdev->inquiry[56] & 0x02; | 494 | return sdev->inquiry[56] & 0x02; |
494 | } | 495 | } |
495 | static inline int scsi_device_enclosure(struct scsi_device *sdev) | 496 | static inline int scsi_device_enclosure(struct scsi_device *sdev) |
496 | { | 497 | { |
497 | return sdev->inquiry ? (sdev->inquiry[6] & (1<<6)) : 1; | 498 | return sdev->inquiry ? (sdev->inquiry[6] & (1<<6)) : 1; |
498 | } | 499 | } |
499 | 500 | ||
500 | static inline int scsi_device_protection(struct scsi_device *sdev) | 501 | static inline int scsi_device_protection(struct scsi_device *sdev) |
501 | { | 502 | { |
502 | if (sdev->no_dif) | 503 | if (sdev->no_dif) |
503 | return 0; | 504 | return 0; |
504 | 505 | ||
505 | return sdev->scsi_level > SCSI_2 && sdev->inquiry[5] & (1<<0); | 506 | return sdev->scsi_level > SCSI_2 && sdev->inquiry[5] & (1<<0); |
506 | } | 507 | } |
507 | 508 | ||
508 | static inline int scsi_device_tpgs(struct scsi_device *sdev) | 509 | static inline int scsi_device_tpgs(struct scsi_device *sdev) |
509 | { | 510 | { |
510 | return sdev->inquiry ? (sdev->inquiry[5] >> 4) & 0x3 : 0; | 511 | return sdev->inquiry ? (sdev->inquiry[5] >> 4) & 0x3 : 0; |
511 | } | 512 | } |
512 | 513 | ||
/* Module alias/modalias format for matching SCSI device types. */
#define MODULE_ALIAS_SCSI_DEVICE(type) \
	MODULE_ALIAS("scsi:t-" __stringify(type) "*")
#define SCSI_DEVICE_MODALIAS_FMT "scsi:t-0x%02x"
516 | 517 | ||
517 | #endif /* _SCSI_SCSI_DEVICE_H */ | 518 | #endif /* _SCSI_SCSI_DEVICE_H */ |
518 | 519 |