Commit bb1d1073a10fdc8547e3eb821ee2488260094b39

Authored by brking@us.ibm.com
Committed by James Bottomley
1 parent 15084a4a63

[SCSI] Prevent scsi_execute_async from guessing cdb length

When the scsi_execute_async interface was added it ended up reducing
the flexibility of userspace to send arbitrary scsi commands through
sg using SG_IO. The SG_IO interface allows userspace to specify the
CDB length. This is now ignored in scsi_execute_async and it is
guessed using the COMMAND_SIZE macro, which is not always correct,
particularly for vendor specific commands. This patch adds a cmd_len
parameter to the scsi_execute_async interface to allow the caller
to specify the length of the CDB.

Signed-off-by: Brian King <brking@us.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>

Showing 5 changed files with 7 additions and 6 deletions (inline diff)

drivers/scsi/scsi_error.c
1 /* 1 /*
2 * scsi_error.c Copyright (C) 1997 Eric Youngdale 2 * scsi_error.c Copyright (C) 1997 Eric Youngdale
3 * 3 *
4 * SCSI error/timeout handling 4 * SCSI error/timeout handling
5 * Initial versions: Eric Youngdale. Based upon conversations with 5 * Initial versions: Eric Youngdale. Based upon conversations with
6 * Leonard Zubkoff and David Miller at Linux Expo, 6 * Leonard Zubkoff and David Miller at Linux Expo,
7 * ideas originating from all over the place. 7 * ideas originating from all over the place.
8 * 8 *
9 * Restructured scsi_unjam_host and associated functions. 9 * Restructured scsi_unjam_host and associated functions.
10 * September 04, 2002 Mike Anderson (andmike@us.ibm.com) 10 * September 04, 2002 Mike Anderson (andmike@us.ibm.com)
11 * 11 *
12 * Forward port of Russell King's (rmk@arm.linux.org.uk) changes and 12 * Forward port of Russell King's (rmk@arm.linux.org.uk) changes and
13 * minor cleanups. 13 * minor cleanups.
14 * September 30, 2002 Mike Anderson (andmike@us.ibm.com) 14 * September 30, 2002 Mike Anderson (andmike@us.ibm.com)
15 */ 15 */
16 16
17 #include <linux/module.h> 17 #include <linux/module.h>
18 #include <linux/sched.h> 18 #include <linux/sched.h>
19 #include <linux/timer.h> 19 #include <linux/timer.h>
20 #include <linux/string.h> 20 #include <linux/string.h>
21 #include <linux/slab.h> 21 #include <linux/slab.h>
22 #include <linux/kernel.h> 22 #include <linux/kernel.h>
23 #include <linux/kthread.h> 23 #include <linux/kthread.h>
24 #include <linux/interrupt.h> 24 #include <linux/interrupt.h>
25 #include <linux/blkdev.h> 25 #include <linux/blkdev.h>
26 #include <linux/delay.h> 26 #include <linux/delay.h>
27 27
28 #include <scsi/scsi.h> 28 #include <scsi/scsi.h>
29 #include <scsi/scsi_dbg.h> 29 #include <scsi/scsi_dbg.h>
30 #include <scsi/scsi_device.h> 30 #include <scsi/scsi_device.h>
31 #include <scsi/scsi_eh.h> 31 #include <scsi/scsi_eh.h>
32 #include <scsi/scsi_host.h> 32 #include <scsi/scsi_host.h>
33 #include <scsi/scsi_ioctl.h> 33 #include <scsi/scsi_ioctl.h>
34 #include <scsi/scsi_request.h> 34 #include <scsi/scsi_request.h>
35 35
36 #include "scsi_priv.h" 36 #include "scsi_priv.h"
37 #include "scsi_logging.h" 37 #include "scsi_logging.h"
38 38
39 #define SENSE_TIMEOUT (10*HZ) 39 #define SENSE_TIMEOUT (10*HZ)
40 #define START_UNIT_TIMEOUT (30*HZ) 40 #define START_UNIT_TIMEOUT (30*HZ)
41 41
42 /* 42 /*
43 * These should *probably* be handled by the host itself. 43 * These should *probably* be handled by the host itself.
44 * Since it is allowed to sleep, it probably should. 44 * Since it is allowed to sleep, it probably should.
45 */ 45 */
46 #define BUS_RESET_SETTLE_TIME (10) 46 #define BUS_RESET_SETTLE_TIME (10)
47 #define HOST_RESET_SETTLE_TIME (10) 47 #define HOST_RESET_SETTLE_TIME (10)
48 48
49 /* called with shost->host_lock held */ 49 /* called with shost->host_lock held */
50 void scsi_eh_wakeup(struct Scsi_Host *shost) 50 void scsi_eh_wakeup(struct Scsi_Host *shost)
51 { 51 {
52 if (shost->host_busy == shost->host_failed) { 52 if (shost->host_busy == shost->host_failed) {
53 wake_up_process(shost->ehandler); 53 wake_up_process(shost->ehandler);
54 SCSI_LOG_ERROR_RECOVERY(5, 54 SCSI_LOG_ERROR_RECOVERY(5,
55 printk("Waking error handler thread\n")); 55 printk("Waking error handler thread\n"));
56 } 56 }
57 } 57 }
58 58
59 /** 59 /**
60 * scsi_eh_scmd_add - add scsi cmd to error handling. 60 * scsi_eh_scmd_add - add scsi cmd to error handling.
61 * @scmd: scmd to run eh on. 61 * @scmd: scmd to run eh on.
62 * @eh_flag: optional SCSI_EH flag. 62 * @eh_flag: optional SCSI_EH flag.
63 * 63 *
64 * Return value: 64 * Return value:
65 * 0 on failure. 65 * 0 on failure.
66 **/ 66 **/
67 int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag) 67 int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
68 { 68 {
69 struct Scsi_Host *shost = scmd->device->host; 69 struct Scsi_Host *shost = scmd->device->host;
70 unsigned long flags; 70 unsigned long flags;
71 int ret = 0; 71 int ret = 0;
72 72
73 if (!shost->ehandler) 73 if (!shost->ehandler)
74 return 0; 74 return 0;
75 75
76 spin_lock_irqsave(shost->host_lock, flags); 76 spin_lock_irqsave(shost->host_lock, flags);
77 if (scsi_host_set_state(shost, SHOST_RECOVERY)) 77 if (scsi_host_set_state(shost, SHOST_RECOVERY))
78 if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY)) 78 if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY))
79 goto out_unlock; 79 goto out_unlock;
80 80
81 ret = 1; 81 ret = 1;
82 scmd->eh_eflags |= eh_flag; 82 scmd->eh_eflags |= eh_flag;
83 list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q); 83 list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
84 shost->host_failed++; 84 shost->host_failed++;
85 scsi_eh_wakeup(shost); 85 scsi_eh_wakeup(shost);
86 out_unlock: 86 out_unlock:
87 spin_unlock_irqrestore(shost->host_lock, flags); 87 spin_unlock_irqrestore(shost->host_lock, flags);
88 return ret; 88 return ret;
89 } 89 }
90 90
91 /** 91 /**
92 * scsi_add_timer - Start timeout timer for a single scsi command. 92 * scsi_add_timer - Start timeout timer for a single scsi command.
93 * @scmd: scsi command that is about to start running. 93 * @scmd: scsi command that is about to start running.
94 * @timeout: amount of time to allow this command to run. 94 * @timeout: amount of time to allow this command to run.
95 * @complete: timeout function to call if timer isn't canceled. 95 * @complete: timeout function to call if timer isn't canceled.
96 * 96 *
97 * Notes: 97 * Notes:
98 * This should be turned into an inline function. Each scsi command 98 * This should be turned into an inline function. Each scsi command
99 * has its own timer, and as it is added to the queue, we set up the 99 * has its own timer, and as it is added to the queue, we set up the
100 * timer. When the command completes, we cancel the timer. 100 * timer. When the command completes, we cancel the timer.
101 **/ 101 **/
102 void scsi_add_timer(struct scsi_cmnd *scmd, int timeout, 102 void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
103 void (*complete)(struct scsi_cmnd *)) 103 void (*complete)(struct scsi_cmnd *))
104 { 104 {
105 105
106 /* 106 /*
107 * If the clock was already running for this command, then 107 * If the clock was already running for this command, then
108 * first delete the timer. The timer handling code gets rather 108 * first delete the timer. The timer handling code gets rather
109 * confused if we don't do this. 109 * confused if we don't do this.
110 */ 110 */
111 if (scmd->eh_timeout.function) 111 if (scmd->eh_timeout.function)
112 del_timer(&scmd->eh_timeout); 112 del_timer(&scmd->eh_timeout);
113 113
114 scmd->eh_timeout.data = (unsigned long)scmd; 114 scmd->eh_timeout.data = (unsigned long)scmd;
115 scmd->eh_timeout.expires = jiffies + timeout; 115 scmd->eh_timeout.expires = jiffies + timeout;
116 scmd->eh_timeout.function = (void (*)(unsigned long)) complete; 116 scmd->eh_timeout.function = (void (*)(unsigned long)) complete;
117 117
118 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p, time:" 118 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p, time:"
119 " %d, (%p)\n", __FUNCTION__, 119 " %d, (%p)\n", __FUNCTION__,
120 scmd, timeout, complete)); 120 scmd, timeout, complete));
121 121
122 add_timer(&scmd->eh_timeout); 122 add_timer(&scmd->eh_timeout);
123 } 123 }
124 124
125 /** 125 /**
126 * scsi_delete_timer - Delete/cancel timer for a given function. 126 * scsi_delete_timer - Delete/cancel timer for a given function.
127 * @scmd: Cmd that we are canceling timer for 127 * @scmd: Cmd that we are canceling timer for
128 * 128 *
129 * Notes: 129 * Notes:
130 * This should be turned into an inline function. 130 * This should be turned into an inline function.
131 * 131 *
132 * Return value: 132 * Return value:
133 * 1 if we were able to detach the timer. 0 if we blew it, and the 133 * 1 if we were able to detach the timer. 0 if we blew it, and the
134 * timer function has already started to run. 134 * timer function has already started to run.
135 **/ 135 **/
136 int scsi_delete_timer(struct scsi_cmnd *scmd) 136 int scsi_delete_timer(struct scsi_cmnd *scmd)
137 { 137 {
138 int rtn; 138 int rtn;
139 139
140 rtn = del_timer(&scmd->eh_timeout); 140 rtn = del_timer(&scmd->eh_timeout);
141 141
142 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p," 142 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p,"
143 " rtn: %d\n", __FUNCTION__, 143 " rtn: %d\n", __FUNCTION__,
144 scmd, rtn)); 144 scmd, rtn));
145 145
146 scmd->eh_timeout.data = (unsigned long)NULL; 146 scmd->eh_timeout.data = (unsigned long)NULL;
147 scmd->eh_timeout.function = NULL; 147 scmd->eh_timeout.function = NULL;
148 148
149 return rtn; 149 return rtn;
150 } 150 }
151 151
152 /** 152 /**
153 * scsi_times_out - Timeout function for normal scsi commands. 153 * scsi_times_out - Timeout function for normal scsi commands.
154 * @scmd: Cmd that is timing out. 154 * @scmd: Cmd that is timing out.
155 * 155 *
156 * Notes: 156 * Notes:
157 * We do not need to lock this. There is the potential for a race 157 * We do not need to lock this. There is the potential for a race
158 * only in that the normal completion handling might run, but if the 158 * only in that the normal completion handling might run, but if the
159 * normal completion function determines that the timer has already 159 * normal completion function determines that the timer has already
160 * fired, then it mustn't do anything. 160 * fired, then it mustn't do anything.
161 **/ 161 **/
162 void scsi_times_out(struct scsi_cmnd *scmd) 162 void scsi_times_out(struct scsi_cmnd *scmd)
163 { 163 {
164 scsi_log_completion(scmd, TIMEOUT_ERROR); 164 scsi_log_completion(scmd, TIMEOUT_ERROR);
165 165
166 if (scmd->device->host->hostt->eh_timed_out) 166 if (scmd->device->host->hostt->eh_timed_out)
167 switch (scmd->device->host->hostt->eh_timed_out(scmd)) { 167 switch (scmd->device->host->hostt->eh_timed_out(scmd)) {
168 case EH_HANDLED: 168 case EH_HANDLED:
169 __scsi_done(scmd); 169 __scsi_done(scmd);
170 return; 170 return;
171 case EH_RESET_TIMER: 171 case EH_RESET_TIMER:
172 /* This allows a single retry even of a command 172 /* This allows a single retry even of a command
173 * with allowed == 0 */ 173 * with allowed == 0 */
174 if (scmd->retries++ > scmd->allowed) 174 if (scmd->retries++ > scmd->allowed)
175 break; 175 break;
176 scsi_add_timer(scmd, scmd->timeout_per_command, 176 scsi_add_timer(scmd, scmd->timeout_per_command,
177 scsi_times_out); 177 scsi_times_out);
178 return; 178 return;
179 case EH_NOT_HANDLED: 179 case EH_NOT_HANDLED:
180 break; 180 break;
181 } 181 }
182 182
183 if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) { 183 if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
184 scmd->result |= DID_TIME_OUT << 16; 184 scmd->result |= DID_TIME_OUT << 16;
185 __scsi_done(scmd); 185 __scsi_done(scmd);
186 } 186 }
187 } 187 }
188 188
189 /** 189 /**
190 * scsi_block_when_processing_errors - Prevent cmds from being queued. 190 * scsi_block_when_processing_errors - Prevent cmds from being queued.
191 * @sdev: Device on which we are performing recovery. 191 * @sdev: Device on which we are performing recovery.
192 * 192 *
193 * Description: 193 * Description:
194 * We block until the host is out of error recovery, and then check to 194 * We block until the host is out of error recovery, and then check to
195 * see whether the host or the device is offline. 195 * see whether the host or the device is offline.
196 * 196 *
197 * Return value: 197 * Return value:
198 * 0 when dev was taken offline by error recovery. 1 OK to proceed. 198 * 0 when dev was taken offline by error recovery. 1 OK to proceed.
199 **/ 199 **/
200 int scsi_block_when_processing_errors(struct scsi_device *sdev) 200 int scsi_block_when_processing_errors(struct scsi_device *sdev)
201 { 201 {
202 int online; 202 int online;
203 203
204 wait_event(sdev->host->host_wait, !scsi_host_in_recovery(sdev->host)); 204 wait_event(sdev->host->host_wait, !scsi_host_in_recovery(sdev->host));
205 205
206 online = scsi_device_online(sdev); 206 online = scsi_device_online(sdev);
207 207
208 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: rtn: %d\n", __FUNCTION__, 208 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: rtn: %d\n", __FUNCTION__,
209 online)); 209 online));
210 210
211 return online; 211 return online;
212 } 212 }
213 EXPORT_SYMBOL(scsi_block_when_processing_errors); 213 EXPORT_SYMBOL(scsi_block_when_processing_errors);
214 214
215 #ifdef CONFIG_SCSI_LOGGING 215 #ifdef CONFIG_SCSI_LOGGING
216 /** 216 /**
217 * scsi_eh_prt_fail_stats - Log info on failures. 217 * scsi_eh_prt_fail_stats - Log info on failures.
218 * @shost: scsi host being recovered. 218 * @shost: scsi host being recovered.
219 * @work_q: Queue of scsi cmds to process. 219 * @work_q: Queue of scsi cmds to process.
220 **/ 220 **/
221 static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost, 221 static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
222 struct list_head *work_q) 222 struct list_head *work_q)
223 { 223 {
224 struct scsi_cmnd *scmd; 224 struct scsi_cmnd *scmd;
225 struct scsi_device *sdev; 225 struct scsi_device *sdev;
226 int total_failures = 0; 226 int total_failures = 0;
227 int cmd_failed = 0; 227 int cmd_failed = 0;
228 int cmd_cancel = 0; 228 int cmd_cancel = 0;
229 int devices_failed = 0; 229 int devices_failed = 0;
230 230
231 shost_for_each_device(sdev, shost) { 231 shost_for_each_device(sdev, shost) {
232 list_for_each_entry(scmd, work_q, eh_entry) { 232 list_for_each_entry(scmd, work_q, eh_entry) {
233 if (scmd->device == sdev) { 233 if (scmd->device == sdev) {
234 ++total_failures; 234 ++total_failures;
235 if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD) 235 if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD)
236 ++cmd_cancel; 236 ++cmd_cancel;
237 else 237 else
238 ++cmd_failed; 238 ++cmd_failed;
239 } 239 }
240 } 240 }
241 241
242 if (cmd_cancel || cmd_failed) { 242 if (cmd_cancel || cmd_failed) {
243 SCSI_LOG_ERROR_RECOVERY(3, 243 SCSI_LOG_ERROR_RECOVERY(3,
244 sdev_printk(KERN_INFO, sdev, 244 sdev_printk(KERN_INFO, sdev,
245 "%s: cmds failed: %d, cancel: %d\n", 245 "%s: cmds failed: %d, cancel: %d\n",
246 __FUNCTION__, cmd_failed, 246 __FUNCTION__, cmd_failed,
247 cmd_cancel)); 247 cmd_cancel));
248 cmd_cancel = 0; 248 cmd_cancel = 0;
249 cmd_failed = 0; 249 cmd_failed = 0;
250 ++devices_failed; 250 ++devices_failed;
251 } 251 }
252 } 252 }
253 253
254 SCSI_LOG_ERROR_RECOVERY(2, printk("Total of %d commands on %d" 254 SCSI_LOG_ERROR_RECOVERY(2, printk("Total of %d commands on %d"
255 " devices require eh work\n", 255 " devices require eh work\n",
256 total_failures, devices_failed)); 256 total_failures, devices_failed));
257 } 257 }
258 #endif 258 #endif
259 259
260 /** 260 /**
261 * scsi_check_sense - Examine scsi cmd sense 261 * scsi_check_sense - Examine scsi cmd sense
262 * @scmd: Cmd to have sense checked. 262 * @scmd: Cmd to have sense checked.
263 * 263 *
264 * Return value: 264 * Return value:
265 * SUCCESS or FAILED or NEEDS_RETRY 265 * SUCCESS or FAILED or NEEDS_RETRY
266 * 266 *
267 * Notes: 267 * Notes:
268 * When a deferred error is detected the current command has 268 * When a deferred error is detected the current command has
269 * not been executed and needs retrying. 269 * not been executed and needs retrying.
270 **/ 270 **/
271 static int scsi_check_sense(struct scsi_cmnd *scmd) 271 static int scsi_check_sense(struct scsi_cmnd *scmd)
272 { 272 {
273 struct scsi_sense_hdr sshdr; 273 struct scsi_sense_hdr sshdr;
274 274
275 if (! scsi_command_normalize_sense(scmd, &sshdr)) 275 if (! scsi_command_normalize_sense(scmd, &sshdr))
276 return FAILED; /* no valid sense data */ 276 return FAILED; /* no valid sense data */
277 277
278 if (scsi_sense_is_deferred(&sshdr)) 278 if (scsi_sense_is_deferred(&sshdr))
279 return NEEDS_RETRY; 279 return NEEDS_RETRY;
280 280
281 /* 281 /*
282 * Previous logic looked for FILEMARK, EOM or ILI which are 282 * Previous logic looked for FILEMARK, EOM or ILI which are
283 * mainly associated with tapes and returned SUCCESS. 283 * mainly associated with tapes and returned SUCCESS.
284 */ 284 */
285 if (sshdr.response_code == 0x70) { 285 if (sshdr.response_code == 0x70) {
286 /* fixed format */ 286 /* fixed format */
287 if (scmd->sense_buffer[2] & 0xe0) 287 if (scmd->sense_buffer[2] & 0xe0)
288 return SUCCESS; 288 return SUCCESS;
289 } else { 289 } else {
290 /* 290 /*
291 * descriptor format: look for "stream commands sense data 291 * descriptor format: look for "stream commands sense data
292 * descriptor" (see SSC-3). Assume single sense data 292 * descriptor" (see SSC-3). Assume single sense data
293 * descriptor. Ignore ILI from SBC-2 READ LONG and WRITE LONG. 293 * descriptor. Ignore ILI from SBC-2 READ LONG and WRITE LONG.
294 */ 294 */
295 if ((sshdr.additional_length > 3) && 295 if ((sshdr.additional_length > 3) &&
296 (scmd->sense_buffer[8] == 0x4) && 296 (scmd->sense_buffer[8] == 0x4) &&
297 (scmd->sense_buffer[11] & 0xe0)) 297 (scmd->sense_buffer[11] & 0xe0))
298 return SUCCESS; 298 return SUCCESS;
299 } 299 }
300 300
301 switch (sshdr.sense_key) { 301 switch (sshdr.sense_key) {
302 case NO_SENSE: 302 case NO_SENSE:
303 return SUCCESS; 303 return SUCCESS;
304 case RECOVERED_ERROR: 304 case RECOVERED_ERROR:
305 return /* soft_error */ SUCCESS; 305 return /* soft_error */ SUCCESS;
306 306
307 case ABORTED_COMMAND: 307 case ABORTED_COMMAND:
308 return NEEDS_RETRY; 308 return NEEDS_RETRY;
309 case NOT_READY: 309 case NOT_READY:
310 case UNIT_ATTENTION: 310 case UNIT_ATTENTION:
311 /* 311 /*
312 * if we are expecting a cc/ua because of a bus reset that we 312 * if we are expecting a cc/ua because of a bus reset that we
313 * performed, treat this just as a retry. otherwise this is 313 * performed, treat this just as a retry. otherwise this is
314 * information that we should pass up to the upper-level driver 314 * information that we should pass up to the upper-level driver
315 * so that we can deal with it there. 315 * so that we can deal with it there.
316 */ 316 */
317 if (scmd->device->expecting_cc_ua) { 317 if (scmd->device->expecting_cc_ua) {
318 scmd->device->expecting_cc_ua = 0; 318 scmd->device->expecting_cc_ua = 0;
319 return NEEDS_RETRY; 319 return NEEDS_RETRY;
320 } 320 }
321 /* 321 /*
322 * if the device is in the process of becoming ready, we 322 * if the device is in the process of becoming ready, we
323 * should retry. 323 * should retry.
324 */ 324 */
325 if ((sshdr.asc == 0x04) && (sshdr.ascq == 0x01)) 325 if ((sshdr.asc == 0x04) && (sshdr.ascq == 0x01))
326 return NEEDS_RETRY; 326 return NEEDS_RETRY;
327 /* 327 /*
328 * if the device is not started, we need to wake 328 * if the device is not started, we need to wake
329 * the error handler to start the motor 329 * the error handler to start the motor
330 */ 330 */
331 if (scmd->device->allow_restart && 331 if (scmd->device->allow_restart &&
332 (sshdr.asc == 0x04) && (sshdr.ascq == 0x02)) 332 (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
333 return FAILED; 333 return FAILED;
334 return SUCCESS; 334 return SUCCESS;
335 335
336 /* these three are not supported */ 336 /* these three are not supported */
337 case COPY_ABORTED: 337 case COPY_ABORTED:
338 case VOLUME_OVERFLOW: 338 case VOLUME_OVERFLOW:
339 case MISCOMPARE: 339 case MISCOMPARE:
340 return SUCCESS; 340 return SUCCESS;
341 341
342 case MEDIUM_ERROR: 342 case MEDIUM_ERROR:
343 return NEEDS_RETRY; 343 return NEEDS_RETRY;
344 344
345 case HARDWARE_ERROR: 345 case HARDWARE_ERROR:
346 if (scmd->device->retry_hwerror) 346 if (scmd->device->retry_hwerror)
347 return NEEDS_RETRY; 347 return NEEDS_RETRY;
348 else 348 else
349 return SUCCESS; 349 return SUCCESS;
350 350
351 case ILLEGAL_REQUEST: 351 case ILLEGAL_REQUEST:
352 case BLANK_CHECK: 352 case BLANK_CHECK:
353 case DATA_PROTECT: 353 case DATA_PROTECT:
354 default: 354 default:
355 return SUCCESS; 355 return SUCCESS;
356 } 356 }
357 } 357 }
358 358
359 /** 359 /**
360 * scsi_eh_completed_normally - Disposition a eh cmd on return from LLD. 360 * scsi_eh_completed_normally - Disposition a eh cmd on return from LLD.
361 * @scmd: SCSI cmd to examine. 361 * @scmd: SCSI cmd to examine.
362 * 362 *
363 * Notes: 363 * Notes:
364 * This is *only* called when we are examining the status of commands 364 * This is *only* called when we are examining the status of commands
365 * queued during error recovery. the main difference here is that we 365 * queued during error recovery. the main difference here is that we
366 * don't allow for the possibility of retries here, and we are a lot 366 * don't allow for the possibility of retries here, and we are a lot
367 * more restrictive about what we consider acceptable. 367 * more restrictive about what we consider acceptable.
368 **/ 368 **/
369 static int scsi_eh_completed_normally(struct scsi_cmnd *scmd) 369 static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
370 { 370 {
371 /* 371 /*
372 * first check the host byte, to see if there is anything in there 372 * first check the host byte, to see if there is anything in there
373 * that would indicate what we need to do. 373 * that would indicate what we need to do.
374 */ 374 */
375 if (host_byte(scmd->result) == DID_RESET) { 375 if (host_byte(scmd->result) == DID_RESET) {
376 /* 376 /*
377 * rats. we are already in the error handler, so we now 377 * rats. we are already in the error handler, so we now
378 * get to try and figure out what to do next. if the sense 378 * get to try and figure out what to do next. if the sense
379 * is valid, we have a pretty good idea of what to do. 379 * is valid, we have a pretty good idea of what to do.
380 * if not, we mark it as FAILED. 380 * if not, we mark it as FAILED.
381 */ 381 */
382 return scsi_check_sense(scmd); 382 return scsi_check_sense(scmd);
383 } 383 }
384 if (host_byte(scmd->result) != DID_OK) 384 if (host_byte(scmd->result) != DID_OK)
385 return FAILED; 385 return FAILED;
386 386
387 /* 387 /*
388 * next, check the message byte. 388 * next, check the message byte.
389 */ 389 */
390 if (msg_byte(scmd->result) != COMMAND_COMPLETE) 390 if (msg_byte(scmd->result) != COMMAND_COMPLETE)
391 return FAILED; 391 return FAILED;
392 392
393 /* 393 /*
394 * now, check the status byte to see if this indicates 394 * now, check the status byte to see if this indicates
395 * anything special. 395 * anything special.
396 */ 396 */
397 switch (status_byte(scmd->result)) { 397 switch (status_byte(scmd->result)) {
398 case GOOD: 398 case GOOD:
399 case COMMAND_TERMINATED: 399 case COMMAND_TERMINATED:
400 return SUCCESS; 400 return SUCCESS;
401 case CHECK_CONDITION: 401 case CHECK_CONDITION:
402 return scsi_check_sense(scmd); 402 return scsi_check_sense(scmd);
403 case CONDITION_GOOD: 403 case CONDITION_GOOD:
404 case INTERMEDIATE_GOOD: 404 case INTERMEDIATE_GOOD:
405 case INTERMEDIATE_C_GOOD: 405 case INTERMEDIATE_C_GOOD:
406 /* 406 /*
407 * who knows? FIXME(eric) 407 * who knows? FIXME(eric)
408 */ 408 */
409 return SUCCESS; 409 return SUCCESS;
410 case BUSY: 410 case BUSY:
411 case QUEUE_FULL: 411 case QUEUE_FULL:
412 case RESERVATION_CONFLICT: 412 case RESERVATION_CONFLICT:
413 default: 413 default:
414 return FAILED; 414 return FAILED;
415 } 415 }
416 return FAILED; 416 return FAILED;
417 } 417 }
418 418
419 /** 419 /**
420 * scsi_eh_done - Completion function for error handling. 420 * scsi_eh_done - Completion function for error handling.
421 * @scmd: Cmd that is done. 421 * @scmd: Cmd that is done.
422 **/ 422 **/
423 static void scsi_eh_done(struct scsi_cmnd *scmd) 423 static void scsi_eh_done(struct scsi_cmnd *scmd)
424 { 424 {
425 struct completion *eh_action; 425 struct completion *eh_action;
426 426
427 SCSI_LOG_ERROR_RECOVERY(3, 427 SCSI_LOG_ERROR_RECOVERY(3,
428 printk("%s scmd: %p result: %x\n", 428 printk("%s scmd: %p result: %x\n",
429 __FUNCTION__, scmd, scmd->result)); 429 __FUNCTION__, scmd, scmd->result));
430 430
431 eh_action = scmd->device->host->eh_action; 431 eh_action = scmd->device->host->eh_action;
432 if (eh_action) 432 if (eh_action)
433 complete(eh_action); 433 complete(eh_action);
434 } 434 }
435 435
436 /** 436 /**
437 * scsi_send_eh_cmnd - send a cmd to a device as part of error recovery. 437 * scsi_send_eh_cmnd - send a cmd to a device as part of error recovery.
438 * @scmd: SCSI Cmd to send. 438 * @scmd: SCSI Cmd to send.
439 * @timeout: Timeout for cmd. 439 * @timeout: Timeout for cmd.
440 * 440 *
441 * Return value: 441 * Return value:
442 * SUCCESS or FAILED or NEEDS_RETRY 442 * SUCCESS or FAILED or NEEDS_RETRY
443 **/ 443 **/
444 static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout) 444 static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout)
445 { 445 {
446 struct scsi_device *sdev = scmd->device; 446 struct scsi_device *sdev = scmd->device;
447 struct Scsi_Host *shost = sdev->host; 447 struct Scsi_Host *shost = sdev->host;
448 DECLARE_COMPLETION(done); 448 DECLARE_COMPLETION(done);
449 unsigned long timeleft; 449 unsigned long timeleft;
450 unsigned long flags; 450 unsigned long flags;
451 int rtn; 451 int rtn;
452 452
453 if (sdev->scsi_level <= SCSI_2) 453 if (sdev->scsi_level <= SCSI_2)
454 scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) | 454 scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) |
455 (sdev->lun << 5 & 0xe0); 455 (sdev->lun << 5 & 0xe0);
456 456
457 shost->eh_action = &done; 457 shost->eh_action = &done;
458 scmd->request->rq_status = RQ_SCSI_BUSY; 458 scmd->request->rq_status = RQ_SCSI_BUSY;
459 459
460 spin_lock_irqsave(shost->host_lock, flags); 460 spin_lock_irqsave(shost->host_lock, flags);
461 scsi_log_send(scmd); 461 scsi_log_send(scmd);
462 shost->hostt->queuecommand(scmd, scsi_eh_done); 462 shost->hostt->queuecommand(scmd, scsi_eh_done);
463 spin_unlock_irqrestore(shost->host_lock, flags); 463 spin_unlock_irqrestore(shost->host_lock, flags);
464 464
465 timeleft = wait_for_completion_timeout(&done, timeout); 465 timeleft = wait_for_completion_timeout(&done, timeout);
466 466
467 scmd->request->rq_status = RQ_SCSI_DONE; 467 scmd->request->rq_status = RQ_SCSI_DONE;
468 shost->eh_action = NULL; 468 shost->eh_action = NULL;
469 469
470 scsi_log_completion(scmd, SUCCESS); 470 scsi_log_completion(scmd, SUCCESS);
471 471
472 SCSI_LOG_ERROR_RECOVERY(3, 472 SCSI_LOG_ERROR_RECOVERY(3,
473 printk("%s: scmd: %p, timeleft: %ld\n", 473 printk("%s: scmd: %p, timeleft: %ld\n",
474 __FUNCTION__, scmd, timeleft)); 474 __FUNCTION__, scmd, timeleft));
475 475
476 /* 476 /*
477 * If there is time left scsi_eh_done got called, and we will 477 * If there is time left scsi_eh_done got called, and we will
478 * examine the actual status codes to see whether the command 478 * examine the actual status codes to see whether the command
479 * actually did complete normally, else tell the host to forget 479 * actually did complete normally, else tell the host to forget
480 * about this command. 480 * about this command.
481 */ 481 */
482 if (timeleft) { 482 if (timeleft) {
483 rtn = scsi_eh_completed_normally(scmd); 483 rtn = scsi_eh_completed_normally(scmd);
484 SCSI_LOG_ERROR_RECOVERY(3, 484 SCSI_LOG_ERROR_RECOVERY(3,
485 printk("%s: scsi_eh_completed_normally %x\n", 485 printk("%s: scsi_eh_completed_normally %x\n",
486 __FUNCTION__, rtn)); 486 __FUNCTION__, rtn));
487 487
488 switch (rtn) { 488 switch (rtn) {
489 case SUCCESS: 489 case SUCCESS:
490 case NEEDS_RETRY: 490 case NEEDS_RETRY:
491 case FAILED: 491 case FAILED:
492 break; 492 break;
493 default: 493 default:
494 rtn = FAILED; 494 rtn = FAILED;
495 break; 495 break;
496 } 496 }
497 } else { 497 } else {
498 /* 498 /*
499 * FIXME(eric) - we are not tracking whether we could 499 * FIXME(eric) - we are not tracking whether we could
500 * abort a timed out command or not. not sure how 500 * abort a timed out command or not. not sure how
501 * we should treat them differently anyways. 501 * we should treat them differently anyways.
502 */ 502 */
503 if (shost->hostt->eh_abort_handler) 503 if (shost->hostt->eh_abort_handler)
504 shost->hostt->eh_abort_handler(scmd); 504 shost->hostt->eh_abort_handler(scmd);
505 rtn = FAILED; 505 rtn = FAILED;
506 } 506 }
507 507
508 return rtn; 508 return rtn;
509 } 509 }
510 510
511 /** 511 /**
512 * scsi_request_sense - Request sense data from a particular target. 512 * scsi_request_sense - Request sense data from a particular target.
513 * @scmd: SCSI cmd for request sense. 513 * @scmd: SCSI cmd for request sense.
514 * 514 *
515 * Notes: 515 * Notes:
516 * Some hosts automatically obtain this information, others require 516 * Some hosts automatically obtain this information, others require
517 * that we obtain it on our own. This function will *not* return until 517 * that we obtain it on our own. This function will *not* return until
518 * the command either times out, or it completes. 518 * the command either times out, or it completes.
519 **/ 519 **/
520 static int scsi_request_sense(struct scsi_cmnd *scmd) 520 static int scsi_request_sense(struct scsi_cmnd *scmd)
521 { 521 {
522 static unsigned char generic_sense[6] = 522 static unsigned char generic_sense[6] =
523 {REQUEST_SENSE, 0, 0, 0, 252, 0}; 523 {REQUEST_SENSE, 0, 0, 0, 252, 0};
524 unsigned char *scsi_result; 524 unsigned char *scsi_result;
525 int saved_result; 525 int saved_result;
526 int rtn; 526 int rtn;
527 527
528 memcpy(scmd->cmnd, generic_sense, sizeof(generic_sense)); 528 memcpy(scmd->cmnd, generic_sense, sizeof(generic_sense));
529 529
530 scsi_result = kmalloc(252, GFP_ATOMIC | ((scmd->device->host->hostt->unchecked_isa_dma) ? __GFP_DMA : 0)); 530 scsi_result = kmalloc(252, GFP_ATOMIC | ((scmd->device->host->hostt->unchecked_isa_dma) ? __GFP_DMA : 0));
531 531
532 532
533 if (unlikely(!scsi_result)) { 533 if (unlikely(!scsi_result)) {
534 printk(KERN_ERR "%s: cannot allocate scsi_result.\n", 534 printk(KERN_ERR "%s: cannot allocate scsi_result.\n",
535 __FUNCTION__); 535 __FUNCTION__);
536 return FAILED; 536 return FAILED;
537 } 537 }
538 538
539 /* 539 /*
540 * zero the sense buffer. some host adapters automatically always 540 * zero the sense buffer. some host adapters automatically always
541 * request sense, so it is not a good idea that 541 * request sense, so it is not a good idea that
542 * scmd->request_buffer and scmd->sense_buffer point to the same 542 * scmd->request_buffer and scmd->sense_buffer point to the same
543 * address (db). 0 is not a valid sense code. 543 * address (db). 0 is not a valid sense code.
544 */ 544 */
545 memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer)); 545 memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));
546 memset(scsi_result, 0, 252); 546 memset(scsi_result, 0, 252);
547 547
548 saved_result = scmd->result; 548 saved_result = scmd->result;
549 scmd->request_buffer = scsi_result; 549 scmd->request_buffer = scsi_result;
550 scmd->request_bufflen = 252; 550 scmd->request_bufflen = 252;
551 scmd->use_sg = 0; 551 scmd->use_sg = 0;
552 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); 552 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
553 scmd->sc_data_direction = DMA_FROM_DEVICE; 553 scmd->sc_data_direction = DMA_FROM_DEVICE;
554 scmd->underflow = 0; 554 scmd->underflow = 0;
555 555
556 rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT); 556 rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT);
557 557
558 /* last chance to have valid sense data */ 558 /* last chance to have valid sense data */
559 if(!SCSI_SENSE_VALID(scmd)) { 559 if(!SCSI_SENSE_VALID(scmd)) {
560 memcpy(scmd->sense_buffer, scmd->request_buffer, 560 memcpy(scmd->sense_buffer, scmd->request_buffer,
561 sizeof(scmd->sense_buffer)); 561 sizeof(scmd->sense_buffer));
562 } 562 }
563 563
564 kfree(scsi_result); 564 kfree(scsi_result);
565 565
566 /* 566 /*
567 * when we eventually call scsi_finish, we really wish to complete 567 * when we eventually call scsi_finish, we really wish to complete
568 * the original request, so let's restore the original data. (db) 568 * the original request, so let's restore the original data. (db)
569 */ 569 */
570 scsi_setup_cmd_retry(scmd); 570 scsi_setup_cmd_retry(scmd);
571 scmd->result = saved_result; 571 scmd->result = saved_result;
572 return rtn; 572 return rtn;
573 } 573 }
574 574
575 /** 575 /**
576 * scsi_eh_finish_cmd - Handle a cmd that eh is finished with. 576 * scsi_eh_finish_cmd - Handle a cmd that eh is finished with.
577 * @scmd: Original SCSI cmd that eh has finished. 577 * @scmd: Original SCSI cmd that eh has finished.
578 * @done_q: Queue for processed commands. 578 * @done_q: Queue for processed commands.
579 * 579 *
580 * Notes: 580 * Notes:
581 * We don't want to use the normal command completion while we are are 581 * We don't want to use the normal command completion while we are are
582 * still handling errors - it may cause other commands to be queued, 582 * still handling errors - it may cause other commands to be queued,
583 * and that would disturb what we are doing. thus we really want to 583 * and that would disturb what we are doing. thus we really want to
584 * keep a list of pending commands for final completion, and once we 584 * keep a list of pending commands for final completion, and once we
585 * are ready to leave error handling we handle completion for real. 585 * are ready to leave error handling we handle completion for real.
586 **/ 586 **/
587 static void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, 587 static void scsi_eh_finish_cmd(struct scsi_cmnd *scmd,
588 struct list_head *done_q) 588 struct list_head *done_q)
589 { 589 {
590 scmd->device->host->host_failed--; 590 scmd->device->host->host_failed--;
591 scmd->eh_eflags = 0; 591 scmd->eh_eflags = 0;
592 592
593 /* 593 /*
594 * set this back so that the upper level can correctly free up 594 * set this back so that the upper level can correctly free up
595 * things. 595 * things.
596 */ 596 */
597 scsi_setup_cmd_retry(scmd); 597 scsi_setup_cmd_retry(scmd);
598 list_move_tail(&scmd->eh_entry, done_q); 598 list_move_tail(&scmd->eh_entry, done_q);
599 } 599 }
600 600
/**
 * scsi_eh_get_sense - Get device sense data.
 * @work_q:	Queue of commands to process.
 * @done_q:	Queue of processed commands.
 *
 * Description:
 *    See if we need to request sense information.  if so, then get it
 *    now, so we have a better idea of what to do.
 *
 * Notes:
 *    This has the unfortunate side effect that if a shost adapter does
 *    not automatically request sense information, that we end up shutting
 *    it down before we request it.
 *
 *    All drivers should request sense information internally these days,
 *    so for now all I have to say is tough noogies if you end up in here.
 *
 *    XXX: Long term this code should go away, but that needs an audit of
 *         all LLDDs first.
 **/
static int scsi_eh_get_sense(struct list_head *work_q,
			     struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;
	int rtn;

	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
		/* skip cmds being canceled and cmds that already carry
		 * valid sense data - nothing to fetch for those */
		if ((scmd->eh_eflags & SCSI_EH_CANCEL_CMD) ||
		    SCSI_SENSE_VALID(scmd))
			continue;

		SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd,
						  "%s: requesting sense\n",
						  current->comm));
		/* leave the cmd on work_q if sense could not be obtained */
		rtn = scsi_request_sense(scmd);
		if (rtn != SUCCESS)
			continue;

		SCSI_LOG_ERROR_RECOVERY(3, printk("sense requested for %p"
						  " result %x\n", scmd,
						  scmd->result));
		SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense("bh", scmd));

		rtn = scsi_decide_disposition(scmd);

		/*
		 * if the result was normal, then just pass it along to the
		 * upper level.
		 */
		if (rtn == SUCCESS)
			/* we don't want this command reissued, just
			 * finished with the sense data, so set
			 * retries to the max allowed to ensure it
			 * won't get reissued */
			scmd->retries = scmd->allowed;
		else if (rtn != NEEDS_RETRY)
			continue;

		scsi_eh_finish_cmd(scmd, done_q);
	}

	/* non-zero when every failed cmd has been dispositioned */
	return list_empty(work_q);
}
664 664
665 /** 665 /**
666 * scsi_try_to_abort_cmd - Ask host to abort a running command. 666 * scsi_try_to_abort_cmd - Ask host to abort a running command.
667 * @scmd: SCSI cmd to abort from Lower Level. 667 * @scmd: SCSI cmd to abort from Lower Level.
668 * 668 *
669 * Notes: 669 * Notes:
670 * This function will not return until the user's completion function 670 * This function will not return until the user's completion function
671 * has been called. there is no timeout on this operation. if the 671 * has been called. there is no timeout on this operation. if the
672 * author of the low-level driver wishes this operation to be timed, 672 * author of the low-level driver wishes this operation to be timed,
673 * they can provide this facility themselves. helper functions in 673 * they can provide this facility themselves. helper functions in
674 * scsi_error.c can be supplied to make this easier to do. 674 * scsi_error.c can be supplied to make this easier to do.
675 **/ 675 **/
676 static int scsi_try_to_abort_cmd(struct scsi_cmnd *scmd) 676 static int scsi_try_to_abort_cmd(struct scsi_cmnd *scmd)
677 { 677 {
678 if (!scmd->device->host->hostt->eh_abort_handler) 678 if (!scmd->device->host->hostt->eh_abort_handler)
679 return FAILED; 679 return FAILED;
680 680
681 /* 681 /*
682 * scsi_done was called just after the command timed out and before 682 * scsi_done was called just after the command timed out and before
683 * we had a chance to process it. (db) 683 * we had a chance to process it. (db)
684 */ 684 */
685 if (scmd->serial_number == 0) 685 if (scmd->serial_number == 0)
686 return SUCCESS; 686 return SUCCESS;
687 return scmd->device->host->hostt->eh_abort_handler(scmd); 687 return scmd->device->host->hostt->eh_abort_handler(scmd);
688 } 688 }
689 689
/**
 * scsi_eh_tur - Send TUR to device.
 * @scmd:	Scsi cmd to send TUR
 *
 * Return value:
 *    0 - Device is ready. 1 - Device NOT ready.
 **/
static int scsi_eh_tur(struct scsi_cmnd *scmd)
{
	static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};
	int retry_cnt = 1, rtn;
	int saved_result;

retry_tur:
	memcpy(scmd->cmnd, tur_command, sizeof(tur_command));

	/*
	 * zero the sense buffer.  the scsi spec mandates that any
	 * untransferred sense data should be interpreted as being zero.
	 */
	memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));

	/* TUR transfers no data - set the cmd up accordingly, keeping the
	 * original result so it can be restored once the probe is done */
	saved_result = scmd->result;
	scmd->request_buffer = NULL;
	scmd->request_bufflen = 0;
	scmd->use_sg = 0;
	scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
	scmd->underflow = 0;
	scmd->sc_data_direction = DMA_NONE;

	rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT);

	/*
	 * when we eventually call scsi_finish, we really wish to complete
	 * the original request, so let's restore the original data. (db)
	 */
	scsi_setup_cmd_retry(scmd);
	scmd->result = saved_result;

	/*
	 * hey, we are done.  let's look to see what happened.
	 */
	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
		__FUNCTION__, scmd, rtn));
	if (rtn == SUCCESS)
		return 0;
	else if (rtn == NEEDS_RETRY) {
		/* retry once; after that give up and report ready so the
		 * error handler does not loop here forever */
		if (retry_cnt--)
			goto retry_tur;
		return 0;
	}
	return 1;
}
743 743
/**
 * scsi_eh_abort_cmds - abort canceled commands.
 * @work_q: list_head of commands to recover.
 * @done_q: list_head for processed commands.
 *
 * Description:
 *    Try and see whether or not it makes sense to try and abort the
 *    running command.  this only works out to be the case if we have one
 *    command that has timed out.  if the command simply failed, it makes
 *    no sense to try and abort the command, since as far as the shost
 *    adapter is concerned, it isn't running.
 **/
static int scsi_eh_abort_cmds(struct list_head *work_q,
			      struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;
	int rtn;

	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
		/* only cmds flagged for cancellation (timed out) are
		 * candidates for an abort */
		if (!(scmd->eh_eflags & SCSI_EH_CANCEL_CMD))
			continue;
		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting cmd:"
						  "0x%p\n", current->comm,
						  scmd));
		rtn = scsi_try_to_abort_cmd(scmd);
		if (rtn == SUCCESS) {
			scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
			/* abort worked - finish the cmd if the device is
			 * gone or answers a TEST UNIT READY */
			if (!scsi_device_online(scmd->device) ||
			    !scsi_eh_tur(scmd)) {
				scsi_eh_finish_cmd(scmd, done_q);
			}

		} else
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting"
							  " cmd failed:"
							  "0x%p\n",
							  current->comm,
							  scmd));
	}

	/* non-zero when all failed cmds have been dealt with */
	return list_empty(work_q);
}
786 786
787 /** 787 /**
788 * scsi_try_bus_device_reset - Ask host to perform a BDR on a dev 788 * scsi_try_bus_device_reset - Ask host to perform a BDR on a dev
789 * @scmd: SCSI cmd used to send BDR 789 * @scmd: SCSI cmd used to send BDR
790 * 790 *
791 * Notes: 791 * Notes:
792 * There is no timeout for this operation. if this operation is 792 * There is no timeout for this operation. if this operation is
793 * unreliable for a given host, then the host itself needs to put a 793 * unreliable for a given host, then the host itself needs to put a
794 * timer on it, and set the host back to a consistent state prior to 794 * timer on it, and set the host back to a consistent state prior to
795 * returning. 795 * returning.
796 **/ 796 **/
797 static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd) 797 static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
798 { 798 {
799 int rtn; 799 int rtn;
800 800
801 if (!scmd->device->host->hostt->eh_device_reset_handler) 801 if (!scmd->device->host->hostt->eh_device_reset_handler)
802 return FAILED; 802 return FAILED;
803 803
804 rtn = scmd->device->host->hostt->eh_device_reset_handler(scmd); 804 rtn = scmd->device->host->hostt->eh_device_reset_handler(scmd);
805 if (rtn == SUCCESS) { 805 if (rtn == SUCCESS) {
806 scmd->device->was_reset = 1; 806 scmd->device->was_reset = 1;
807 scmd->device->expecting_cc_ua = 1; 807 scmd->device->expecting_cc_ua = 1;
808 } 808 }
809 809
810 return rtn; 810 return rtn;
811 } 811 }
812 812
/**
 * scsi_eh_try_stu - Send START_UNIT to device.
 * @scmd:	Scsi cmd to send START_UNIT
 *
 * Return value:
 *    0 - Device is ready. 1 - Device NOT ready.
 **/
static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
{
	static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};
	int rtn;
	int saved_result;

	/* don't spin up devices the admin has not allowed to restart */
	if (!scmd->device->allow_restart)
		return 1;

	memcpy(scmd->cmnd, stu_command, sizeof(stu_command));

	/*
	 * zero the sense buffer.  the scsi spec mandates that any
	 * untransferred sense data should be interpreted as being zero.
	 */
	memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));

	/* START UNIT transfers no data - set the cmd up accordingly and
	 * keep the original result so it can be restored afterwards */
	saved_result = scmd->result;
	scmd->request_buffer = NULL;
	scmd->request_bufflen = 0;
	scmd->use_sg = 0;
	scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
	scmd->underflow = 0;
	scmd->sc_data_direction = DMA_NONE;

	rtn = scsi_send_eh_cmnd(scmd, START_UNIT_TIMEOUT);

	/*
	 * when we eventually call scsi_finish, we really wish to complete
	 * the original request, so let's restore the original data. (db)
	 */
	scsi_setup_cmd_retry(scmd);
	scmd->result = saved_result;

	/*
	 * hey, we are done.  let's look to see what happened.
	 */
	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
		__FUNCTION__, scmd, rtn));
	if (rtn == SUCCESS)
		return 0;
	return 1;
}
863 863
/**
 * scsi_eh_stu - send START_UNIT if needed
 * @shost:	scsi host being recovered.
 * @work_q:	list_head of commands to recover.
 * @done_q:	list_head for processed commands.
 *
 * Notes:
 *    If commands are failing due to not ready, initializing command required,
 *    try revalidating the device, which will end up sending a start unit.
 **/
static int scsi_eh_stu(struct Scsi_Host *shost,
			      struct list_head *work_q,
			      struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *stu_scmd, *next;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost) {
		/* find one failed cmd on this device whose sense data says
		 * recovery failed - use it to carry the START UNIT */
		stu_scmd = NULL;
		list_for_each_entry(scmd, work_q, eh_entry)
			if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) &&
			    scsi_check_sense(scmd) == FAILED ) {
				stu_scmd = scmd;
				break;
			}

		if (!stu_scmd)
			continue;

		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending START_UNIT to sdev:"
						  " 0x%p\n", current->comm, sdev));

		if (!scsi_eh_try_stu(stu_scmd)) {
			/* device started - if it is gone or now answers a
			 * TUR, finish every cmd queued against it */
			if (!scsi_device_online(sdev) ||
			    !scsi_eh_tur(stu_scmd)) {
				list_for_each_entry_safe(scmd, next,
							  work_q, eh_entry) {
					if (scmd->device == sdev)
						scsi_eh_finish_cmd(scmd, done_q);
				}
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3,
						printk("%s: START_UNIT failed to sdev:"
						       " 0x%p\n", current->comm, sdev));
		}
	}

	/* non-zero when all failed cmds have been dealt with */
	return list_empty(work_q);
}
913 913
914 914
/**
 * scsi_eh_bus_device_reset - send bdr if needed
 * @shost:	scsi host being recovered.
 * @work_q:	list_head of commands to recover.
 * @done_q:	list_head for processed commands.
 *
 * Notes:
 *    Try a bus device reset.  still, look to see whether we have multiple
 *    devices that are jammed or not - if we have multiple devices, it
 *    makes no sense to try bus_device_reset - we really would need to try
 *    a bus_reset instead.
 **/
static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
				    struct list_head *work_q,
				    struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *bdr_scmd, *next;
	struct scsi_device *sdev;
	int rtn;

	shost_for_each_device(sdev, shost) {
		/* pick any failed cmd on this device to carry the BDR */
		bdr_scmd = NULL;
		list_for_each_entry(scmd, work_q, eh_entry)
			if (scmd->device == sdev) {
				bdr_scmd = scmd;
				break;
			}

		if (!bdr_scmd)
			continue;

		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending BDR sdev:"
						  " 0x%p\n", current->comm,
						  sdev));
		rtn = scsi_try_bus_device_reset(bdr_scmd);
		if (rtn == SUCCESS) {
			/* reset worked - if the device is gone or now
			 * answers a TUR, finish all its queued cmds */
			if (!scsi_device_online(sdev) ||
			    !scsi_eh_tur(bdr_scmd)) {
				list_for_each_entry_safe(scmd, next,
							 work_q, eh_entry) {
					if (scmd->device == sdev)
						scsi_eh_finish_cmd(scmd,
								   done_q);
				}
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BDR"
							  " failed sdev:"
							  "0x%p\n",
							  current->comm,
							  sdev));
		}
	}

	/* non-zero when all failed cmds have been dealt with */
	return list_empty(work_q);
}
970 970
971 /** 971 /**
972 * scsi_try_bus_reset - ask host to perform a bus reset 972 * scsi_try_bus_reset - ask host to perform a bus reset
973 * @scmd: SCSI cmd to send bus reset. 973 * @scmd: SCSI cmd to send bus reset.
974 **/ 974 **/
975 static int scsi_try_bus_reset(struct scsi_cmnd *scmd) 975 static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
976 { 976 {
977 unsigned long flags; 977 unsigned long flags;
978 int rtn; 978 int rtn;
979 979
980 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Bus RST\n", 980 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Bus RST\n",
981 __FUNCTION__)); 981 __FUNCTION__));
982 982
983 if (!scmd->device->host->hostt->eh_bus_reset_handler) 983 if (!scmd->device->host->hostt->eh_bus_reset_handler)
984 return FAILED; 984 return FAILED;
985 985
986 rtn = scmd->device->host->hostt->eh_bus_reset_handler(scmd); 986 rtn = scmd->device->host->hostt->eh_bus_reset_handler(scmd);
987 987
988 if (rtn == SUCCESS) { 988 if (rtn == SUCCESS) {
989 if (!scmd->device->host->hostt->skip_settle_delay) 989 if (!scmd->device->host->hostt->skip_settle_delay)
990 ssleep(BUS_RESET_SETTLE_TIME); 990 ssleep(BUS_RESET_SETTLE_TIME);
991 spin_lock_irqsave(scmd->device->host->host_lock, flags); 991 spin_lock_irqsave(scmd->device->host->host_lock, flags);
992 scsi_report_bus_reset(scmd->device->host, 992 scsi_report_bus_reset(scmd->device->host,
993 scmd_channel(scmd)); 993 scmd_channel(scmd));
994 spin_unlock_irqrestore(scmd->device->host->host_lock, flags); 994 spin_unlock_irqrestore(scmd->device->host->host_lock, flags);
995 } 995 }
996 996
997 return rtn; 997 return rtn;
998 } 998 }
999 999
1000 /** 1000 /**
1001 * scsi_try_host_reset - ask host adapter to reset itself 1001 * scsi_try_host_reset - ask host adapter to reset itself
1002 * @scmd: SCSI cmd to send hsot reset. 1002 * @scmd: SCSI cmd to send hsot reset.
1003 **/ 1003 **/
1004 static int scsi_try_host_reset(struct scsi_cmnd *scmd) 1004 static int scsi_try_host_reset(struct scsi_cmnd *scmd)
1005 { 1005 {
1006 unsigned long flags; 1006 unsigned long flags;
1007 int rtn; 1007 int rtn;
1008 1008
1009 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Host RST\n", 1009 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Host RST\n",
1010 __FUNCTION__)); 1010 __FUNCTION__));
1011 1011
1012 if (!scmd->device->host->hostt->eh_host_reset_handler) 1012 if (!scmd->device->host->hostt->eh_host_reset_handler)
1013 return FAILED; 1013 return FAILED;
1014 1014
1015 rtn = scmd->device->host->hostt->eh_host_reset_handler(scmd); 1015 rtn = scmd->device->host->hostt->eh_host_reset_handler(scmd);
1016 1016
1017 if (rtn == SUCCESS) { 1017 if (rtn == SUCCESS) {
1018 if (!scmd->device->host->hostt->skip_settle_delay) 1018 if (!scmd->device->host->hostt->skip_settle_delay)
1019 ssleep(HOST_RESET_SETTLE_TIME); 1019 ssleep(HOST_RESET_SETTLE_TIME);
1020 spin_lock_irqsave(scmd->device->host->host_lock, flags); 1020 spin_lock_irqsave(scmd->device->host->host_lock, flags);
1021 scsi_report_bus_reset(scmd->device->host, 1021 scsi_report_bus_reset(scmd->device->host,
1022 scmd_channel(scmd)); 1022 scmd_channel(scmd));
1023 spin_unlock_irqrestore(scmd->device->host->host_lock, flags); 1023 spin_unlock_irqrestore(scmd->device->host->host_lock, flags);
1024 } 1024 }
1025 1025
1026 return rtn; 1026 return rtn;
1027 } 1027 }
1028 1028
/**
 * scsi_eh_bus_reset - send a bus reset
 * @shost:	scsi host being recovered.
 * @work_q:	list_head of commands to recover.
 * @done_q:	list_head for processed commands.
 **/
static int scsi_eh_bus_reset(struct Scsi_Host *shost,
			     struct list_head *work_q,
			     struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *chan_scmd, *next;
	unsigned int channel;
	int rtn;

	/*
	 * we really want to loop over the various channels, and do this on
	 * a channel by channel basis.  we should also check to see if any
	 * of the failed commands are on soft_reset devices, and if so, skip
	 * the reset.
	 */

	for (channel = 0; channel <= shost->max_channel; channel++) {
		/* pick any failed cmd on this channel to carry the reset */
		chan_scmd = NULL;
		list_for_each_entry(scmd, work_q, eh_entry) {
			if (channel == scmd_channel(scmd)) {
				chan_scmd = scmd;
				break;
				/*
				 * FIXME add back in some support for
				 * soft_reset devices.
				 */
			}
		}

		if (!chan_scmd)
			continue;
		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending BRST chan:"
						  " %d\n", current->comm,
						  channel));
		rtn = scsi_try_bus_reset(chan_scmd);
		if (rtn == SUCCESS) {
			/* reset worked - finish every cmd on this channel
			 * whose device is gone or now answers a TUR */
			list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
				if (channel == scmd_channel(scmd))
					if (!scsi_device_online(scmd->device) ||
					    !scsi_eh_tur(scmd))
						scsi_eh_finish_cmd(scmd,
								   done_q);
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BRST"
							  " failed chan: %d\n",
							  current->comm,
							  channel));
		}
	}
	/* non-zero when all failed cmds have been dealt with */
	return list_empty(work_q);
}
1085 1085
/**
 * scsi_eh_host_reset - send a host reset
 * @work_q: list_head of commands to recover.
 * @done_q: list_head for processed commands.
 **/
static int scsi_eh_host_reset(struct list_head *work_q,
			      struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;
	int rtn;

	if (!list_empty(work_q)) {
		/* any remaining cmd will do to carry the host reset */
		scmd = list_entry(work_q->next,
				  struct scsi_cmnd, eh_entry);

		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending HRST\n"
						  , current->comm));

		rtn = scsi_try_host_reset(scmd);
		if (rtn == SUCCESS) {
			/*
			 * NOTE(review): the trailing !scsi_eh_tur() term makes
			 * the (!scsi_eh_try_stu() && !scsi_eh_tur()) clause
			 * redundant for the decision, except that it issues an
			 * extra TUR - confirm whether the double TUR is
			 * intentional.
			 */
			list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
				if (!scsi_device_online(scmd->device) ||
				    (!scsi_eh_try_stu(scmd) && !scsi_eh_tur(scmd)) ||
				    !scsi_eh_tur(scmd))
					scsi_eh_finish_cmd(scmd, done_q);
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: HRST"
							  " failed\n",
							  current->comm));
		}
	}
	/* non-zero when all failed cmds have been dealt with */
	return list_empty(work_q);
}
1120 1120
1121 /** 1121 /**
1122 * scsi_eh_offline_sdevs - offline scsi devices that fail to recover 1122 * scsi_eh_offline_sdevs - offline scsi devices that fail to recover
1123 * @work_q: list_head for processed commands. 1123 * @work_q: list_head for processed commands.
1124 * @done_q: list_head for processed commands. 1124 * @done_q: list_head for processed commands.
1125 * 1125 *
1126 **/ 1126 **/
1127 static void scsi_eh_offline_sdevs(struct list_head *work_q, 1127 static void scsi_eh_offline_sdevs(struct list_head *work_q,
1128 struct list_head *done_q) 1128 struct list_head *done_q)
1129 { 1129 {
1130 struct scsi_cmnd *scmd, *next; 1130 struct scsi_cmnd *scmd, *next;
1131 1131
1132 list_for_each_entry_safe(scmd, next, work_q, eh_entry) { 1132 list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1133 sdev_printk(KERN_INFO, scmd->device, 1133 sdev_printk(KERN_INFO, scmd->device,
1134 "scsi: Device offlined - not" 1134 "scsi: Device offlined - not"
1135 " ready after error recovery\n"); 1135 " ready after error recovery\n");
1136 scsi_device_set_state(scmd->device, SDEV_OFFLINE); 1136 scsi_device_set_state(scmd->device, SDEV_OFFLINE);
1137 if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD) { 1137 if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD) {
1138 /* 1138 /*
1139 * FIXME: Handle lost cmds. 1139 * FIXME: Handle lost cmds.
1140 */ 1140 */
1141 } 1141 }
1142 scsi_eh_finish_cmd(scmd, done_q); 1142 scsi_eh_finish_cmd(scmd, done_q);
1143 } 1143 }
1144 return; 1144 return;
1145 } 1145 }
1146 1146
/**
 * scsi_decide_disposition - Disposition a cmd on return from LLD.
 * @scmd: SCSI cmd to examine.
 *
 * Notes:
 *    This is *only* called when we are examining the status after sending
 *    out the actual data command.  any commands that are queued for error
 *    recovery (e.g. test_unit_ready) do *not* come through here.
 *
 *    When this routine returns failed, it means the error handler thread
 *    is woken.  In cases where the error code indicates an error that
 *    doesn't require the error handler read (i.e. we don't need to
 *    abort/reset), this function should return SUCCESS.
 *
 * Return value:
 *    SUCCESS, FAILED, NEEDS_RETRY or ADD_TO_MLQUEUE.  The decision is
 *    made by inspecting, in order, the host byte, the message byte and
 *    finally the status byte of scmd->result.
 **/
int scsi_decide_disposition(struct scsi_cmnd *scmd)
{
	int rtn;

	/*
	 * if the device is offline, then we clearly just pass the result back
	 * up to the top level.
	 */
	if (!scsi_device_online(scmd->device)) {
		SCSI_LOG_ERROR_RECOVERY(5, printk("%s: device offline - report"
						  " as SUCCESS\n",
						  __FUNCTION__));
		return SUCCESS;
	}

	/*
	 * first check the host byte, to see if there is anything in there
	 * that would indicate what we need to do.
	 */
	switch (host_byte(scmd->result)) {
	case DID_PASSTHROUGH:
		/*
		 * no matter what, pass this through to the upper layer.
		 * nuke this special code so that it looks like we are saying
		 * did_ok.
		 */
		scmd->result &= 0xff00ffff;
		return SUCCESS;
	case DID_OK:
		/*
		 * looks good.  drop through, and check the next byte.
		 */
		break;
	case DID_NO_CONNECT:
	case DID_BAD_TARGET:
	case DID_ABORT:
		/*
		 * note - this means that we just report the status back
		 * to the top level driver, not that we actually think
		 * that it indicates SUCCESS.
		 */
		return SUCCESS;
		/*
		 * when the low level driver returns did_soft_error,
		 * it is responsible for keeping an internal retry counter
		 * in order to avoid endless loops (db)
		 *
		 * actually this is a bug in this function here.  we should
		 * be mindful of the maximum number of retries specified
		 * and not get stuck in a loop.
		 */
	case DID_SOFT_ERROR:
		goto maybe_retry;
	case DID_IMM_RETRY:
		/* retry without counting against scmd->retries */
		return NEEDS_RETRY;

	case DID_REQUEUE:
		return ADD_TO_MLQUEUE;

	case DID_ERROR:
		if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
		    status_byte(scmd->result) == RESERVATION_CONFLICT)
			/*
			 * execute reservation conflict processing code
			 * lower down
			 */
			break;
		/* fallthrough */

	case DID_BUS_BUSY:
	case DID_PARITY:
		goto maybe_retry;
	case DID_TIME_OUT:
		/*
		 * when we scan the bus, we get timeout messages for
		 * these commands if there is no device available.
		 * other hosts report did_no_connect for the same thing.
		 */
		if ((scmd->cmnd[0] == TEST_UNIT_READY ||
		     scmd->cmnd[0] == INQUIRY)) {
			return SUCCESS;
		} else {
			return FAILED;
		}
	case DID_RESET:
		return SUCCESS;
	default:
		return FAILED;
	}

	/*
	 * next, check the message byte.
	 */
	if (msg_byte(scmd->result) != COMMAND_COMPLETE)
		return FAILED;

	/*
	 * check the status byte to see if this indicates anything special.
	 */
	switch (status_byte(scmd->result)) {
	case QUEUE_FULL:
		/*
		 * the case of trying to send too many commands to a
		 * tagged queueing device.
		 */
	case BUSY:
		/*
		 * device can't talk to us at the moment.  Should only
		 * occur (SAM-3) when the task queue is empty, so will cause
		 * the empty queue handling to trigger a stall in the
		 * device.
		 */
		return ADD_TO_MLQUEUE;
	case GOOD:
	case COMMAND_TERMINATED:
	case TASK_ABORTED:
		return SUCCESS;
	case CHECK_CONDITION:
		rtn = scsi_check_sense(scmd);
		if (rtn == NEEDS_RETRY)
			goto maybe_retry;
		/* if rtn == FAILED, we have no sense information;
		 * returning FAILED will wake the error handler thread
		 * to collect the sense and redo the decide
		 * disposition */
		return rtn;
	case CONDITION_GOOD:
	case INTERMEDIATE_GOOD:
	case INTERMEDIATE_C_GOOD:
	case ACA_ACTIVE:
		/*
		 * who knows?  FIXME(eric)
		 */
		return SUCCESS;

	case RESERVATION_CONFLICT:
		sdev_printk(KERN_INFO, scmd->device,
			    "reservation conflict\n");
		return SUCCESS; /* causes immediate i/o error */
	default:
		return FAILED;
	}
	return FAILED;

      maybe_retry:

	/* we requeue for retry because the error was retryable, and
	 * the request was not marked fast fail.  Note that above,
	 * even if the request is marked fast fail, we still requeue
	 * for queue congestion conditions (QUEUE_FULL or BUSY) */
	if ((++scmd->retries) < scmd->allowed
	    && !blk_noretry_request(scmd->request)) {
		return NEEDS_RETRY;
	} else {
		/*
		 * no more retries - report this one back to upper level.
		 */
		return SUCCESS;
	}
}
1321 1321
1322 /** 1322 /**
1323 * scsi_eh_lock_door - Prevent medium removal for the specified device 1323 * scsi_eh_lock_door - Prevent medium removal for the specified device
1324 * @sdev: SCSI device to prevent medium removal 1324 * @sdev: SCSI device to prevent medium removal
1325 * 1325 *
1326 * Locking: 1326 * Locking:
1327 * We must be called from process context; scsi_allocate_request() 1327 * We must be called from process context; scsi_allocate_request()
1328 * may sleep. 1328 * may sleep.
1329 * 1329 *
1330 * Notes: 1330 * Notes:
1331 * We queue up an asynchronous "ALLOW MEDIUM REMOVAL" request on the 1331 * We queue up an asynchronous "ALLOW MEDIUM REMOVAL" request on the
1332 * head of the devices request queue, and continue. 1332 * head of the devices request queue, and continue.
1333 * 1333 *
1334 * Bugs: 1334 * Bugs:
1335 * scsi_allocate_request() may sleep waiting for existing requests to 1335 * scsi_allocate_request() may sleep waiting for existing requests to
1336 * be processed. However, since we haven't kicked off any request 1336 * be processed. However, since we haven't kicked off any request
1337 * processing for this host, this may deadlock. 1337 * processing for this host, this may deadlock.
1338 * 1338 *
1339 * If scsi_allocate_request() fails for what ever reason, we 1339 * If scsi_allocate_request() fails for what ever reason, we
1340 * completely forget to lock the door. 1340 * completely forget to lock the door.
1341 **/ 1341 **/
1342 static void scsi_eh_lock_door(struct scsi_device *sdev) 1342 static void scsi_eh_lock_door(struct scsi_device *sdev)
1343 { 1343 {
1344 unsigned char cmnd[MAX_COMMAND_SIZE]; 1344 unsigned char cmnd[MAX_COMMAND_SIZE];
1345 1345
1346 cmnd[0] = ALLOW_MEDIUM_REMOVAL; 1346 cmnd[0] = ALLOW_MEDIUM_REMOVAL;
1347 cmnd[1] = 0; 1347 cmnd[1] = 0;
1348 cmnd[2] = 0; 1348 cmnd[2] = 0;
1349 cmnd[3] = 0; 1349 cmnd[3] = 0;
1350 cmnd[4] = SCSI_REMOVAL_PREVENT; 1350 cmnd[4] = SCSI_REMOVAL_PREVENT;
1351 cmnd[5] = 0; 1351 cmnd[5] = 0;
1352 1352
1353 scsi_execute_async(sdev, cmnd, DMA_NONE, NULL, 0, 0, 10 * HZ, 1353 scsi_execute_async(sdev, cmnd, 6, DMA_NONE, NULL, 0, 0, 10 * HZ,
1354 5, NULL, NULL, GFP_KERNEL); 1354 5, NULL, NULL, GFP_KERNEL);
1355 } 1355 }
1356 1356
1357 1357
/**
 * scsi_restart_operations - restart io operations to the specified host.
 * @shost: Host we are restarting.
 *
 * Notes:
 *    When we entered the error handler, we blocked all further i/o to
 *    this device.  we need to 'reverse' this process.
 **/
static void scsi_restart_operations(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;
	unsigned long flags;

	/*
	 * If the door was locked, we need to insert a door lock request
	 * onto the head of the SCSI request queue for the device.  There
	 * is no point trying to lock the door of an off-line device.
	 */
	shost_for_each_device(sdev, shost) {
		if (scsi_device_online(sdev) && sdev->locked)
			scsi_eh_lock_door(sdev);
	}

	/*
	 * next free up anything directly waiting upon the host.  this
	 * will be requests for character device operations, and also for
	 * ioctls to queued block devices.
	 */
	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n",
					  __FUNCTION__));

	/*
	 * Move the host back to RUNNING; if that transition is illegal
	 * fall back to CANCEL, and as a last resort DEL (which must
	 * always be reachable, hence the BUG_ON).
	 */
	spin_lock_irqsave(shost->host_lock, flags);
	if (scsi_host_set_state(shost, SHOST_RUNNING))
		if (scsi_host_set_state(shost, SHOST_CANCEL))
			BUG_ON(scsi_host_set_state(shost, SHOST_DEL));
	spin_unlock_irqrestore(shost->host_lock, flags);

	wake_up(&shost->host_wait);

	/*
	 * finally we need to re-initiate requests that may be pending.  we will
	 * have had everything blocked while error handling is taking place, and
	 * now that error recovery is done, we will need to ensure that these
	 * requests are started.
	 */
	scsi_run_host_queues(shost);
}
1405 1405
/**
 * scsi_eh_ready_devs - escalate recovery until devices are usable again
 * @shost: host to be recovered.
 * @work_q: list_head of commands still awaiting recovery.
 * @done_q: list_head for recovered commands.
 *
 * Notes:
 *    Try each recovery step in order of increasing severity — start
 *    unit, device reset, bus reset, host reset — stopping as soon as
 *    one empties @work_q; if all fail, offline the devices.
 **/
static void scsi_eh_ready_devs(struct Scsi_Host *shost,
			       struct list_head *work_q,
			       struct list_head *done_q)
{
	if (!scsi_eh_stu(shost, work_q, done_q) &&
	    !scsi_eh_bus_device_reset(shost, work_q, done_q) &&
	    !scsi_eh_bus_reset(shost, work_q, done_q) &&
	    !scsi_eh_host_reset(work_q, done_q))
		scsi_eh_offline_sdevs(work_q, done_q);
}
1422 1422
/**
 * scsi_eh_flush_done_q - finish processed commands or retry them.
 * @done_q: list_head of processed commands.
 *
 * Notes:
 *    Each command is either re-queued for another attempt (device still
 *    online, retries remaining, request not marked no-retry) or finished
 *    and handed back to the upper layers.
 **/
static void scsi_eh_flush_done_q(struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;

	list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
		list_del_init(&scmd->eh_entry);
		if (scsi_device_online(scmd->device) &&
		    !blk_noretry_request(scmd->request) &&
		    (++scmd->retries < scmd->allowed)) {
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush"
							  " retry cmd: %p\n",
							  current->comm,
							  scmd));
				scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
		} else {
			/*
			 * If just we got sense for the device (called
			 * scsi_eh_get_sense), scmd->result is already
			 * set, do not set DRIVER_TIMEOUT.
			 */
			if (!scmd->result)
				scmd->result |= (DRIVER_TIMEOUT << 24);
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush finish"
							" cmd: %p\n",
							current->comm, scmd));
			scsi_finish_command(scmd);
		}
	}
}
1457 1457
1458 /** 1458 /**
1459 * scsi_unjam_host - Attempt to fix a host which has a cmd that failed. 1459 * scsi_unjam_host - Attempt to fix a host which has a cmd that failed.
1460 * @shost: Host to unjam. 1460 * @shost: Host to unjam.
1461 * 1461 *
1462 * Notes: 1462 * Notes:
1463 * When we come in here, we *know* that all commands on the bus have 1463 * When we come in here, we *know* that all commands on the bus have
1464 * either completed, failed or timed out. we also know that no further 1464 * either completed, failed or timed out. we also know that no further
1465 * commands are being sent to the host, so things are relatively quiet 1465 * commands are being sent to the host, so things are relatively quiet
1466 * and we have freedom to fiddle with things as we wish. 1466 * and we have freedom to fiddle with things as we wish.
1467 * 1467 *
1468 * This is only the *default* implementation. it is possible for 1468 * This is only the *default* implementation. it is possible for
1469 * individual drivers to supply their own version of this function, and 1469 * individual drivers to supply their own version of this function, and
1470 * if the maintainer wishes to do this, it is strongly suggested that 1470 * if the maintainer wishes to do this, it is strongly suggested that
1471 * this function be taken as a template and modified. this function 1471 * this function be taken as a template and modified. this function
1472 * was designed to correctly handle problems for about 95% of the 1472 * was designed to correctly handle problems for about 95% of the
1473 * different cases out there, and it should always provide at least a 1473 * different cases out there, and it should always provide at least a
1474 * reasonable amount of error recovery. 1474 * reasonable amount of error recovery.
1475 * 1475 *
1476 * Any command marked 'failed' or 'timeout' must eventually have 1476 * Any command marked 'failed' or 'timeout' must eventually have
1477 * scsi_finish_cmd() called for it. we do all of the retry stuff 1477 * scsi_finish_cmd() called for it. we do all of the retry stuff
1478 * here, so when we restart the host after we return it should have an 1478 * here, so when we restart the host after we return it should have an
1479 * empty queue. 1479 * empty queue.
1480 **/ 1480 **/
1481 static void scsi_unjam_host(struct Scsi_Host *shost) 1481 static void scsi_unjam_host(struct Scsi_Host *shost)
1482 { 1482 {
1483 unsigned long flags; 1483 unsigned long flags;
1484 LIST_HEAD(eh_work_q); 1484 LIST_HEAD(eh_work_q);
1485 LIST_HEAD(eh_done_q); 1485 LIST_HEAD(eh_done_q);
1486 1486
1487 spin_lock_irqsave(shost->host_lock, flags); 1487 spin_lock_irqsave(shost->host_lock, flags);
1488 list_splice_init(&shost->eh_cmd_q, &eh_work_q); 1488 list_splice_init(&shost->eh_cmd_q, &eh_work_q);
1489 spin_unlock_irqrestore(shost->host_lock, flags); 1489 spin_unlock_irqrestore(shost->host_lock, flags);
1490 1490
1491 SCSI_LOG_ERROR_RECOVERY(1, scsi_eh_prt_fail_stats(shost, &eh_work_q)); 1491 SCSI_LOG_ERROR_RECOVERY(1, scsi_eh_prt_fail_stats(shost, &eh_work_q));
1492 1492
1493 if (!scsi_eh_get_sense(&eh_work_q, &eh_done_q)) 1493 if (!scsi_eh_get_sense(&eh_work_q, &eh_done_q))
1494 if (!scsi_eh_abort_cmds(&eh_work_q, &eh_done_q)) 1494 if (!scsi_eh_abort_cmds(&eh_work_q, &eh_done_q))
1495 scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q); 1495 scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q);
1496 1496
1497 scsi_eh_flush_done_q(&eh_done_q); 1497 scsi_eh_flush_done_q(&eh_done_q);
1498 } 1498 }
1499 1499
/**
 * scsi_error_handler - SCSI error handler thread
 * @data: Host for which we are running.
 *
 * Notes:
 *    This is the main error handling loop.  This is run as a kernel thread
 *    for every SCSI host and handles all error handling activity.
 **/
int scsi_error_handler(void *data)
{
	struct Scsi_Host *shost = data;

	/* error recovery must keep running during suspend/hibernate */
	current->flags |= PF_NOFREEZE;

	/*
	 * We use TASK_INTERRUPTIBLE so that the thread is not
	 * counted against the load average as a running process.
	 * We never actually get interrupted because kthread_run
	 * disables signal delivery for the created thread.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		/*
		 * Sleep until every outstanding command on the host has
		 * failed (host_failed == host_busy): the strategy
		 * routines below assume a fully quiesced host.
		 */
		if (shost->host_failed == 0 ||
		    shost->host_failed != shost->host_busy) {
			SCSI_LOG_ERROR_RECOVERY(1,
				printk("Error handler scsi_eh_%d sleeping\n",
					shost->host_no));
			schedule();
			set_current_state(TASK_INTERRUPTIBLE);
			continue;
		}

		__set_current_state(TASK_RUNNING);
		SCSI_LOG_ERROR_RECOVERY(1,
			printk("Error handler scsi_eh_%d waking up\n",
				shost->host_no));

		/*
		 * We have a host that is failing for some reason.  Figure out
		 * what we need to do to get it up and online again (if we can).
		 * If we fail, we end up taking the thing offline.
		 */
		if (shost->hostt->eh_strategy_handler)
			shost->hostt->eh_strategy_handler(shost);
		else
			scsi_unjam_host(shost);

		/*
		 * Note - if the above fails completely, the action is to take
		 * individual devices offline and flush the queue of any
		 * outstanding requests that may have been pending.  When we
		 * restart, we restart any I/O to any other devices on the bus
		 * which are still online.
		 */
		scsi_restart_operations(shost);
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	SCSI_LOG_ERROR_RECOVERY(1,
		printk("Error handler scsi_eh_%d exiting\n", shost->host_no));
	shost->ehandler = NULL;
	return 0;
}
1564 1564
1565 /* 1565 /*
1566 * Function: scsi_report_bus_reset() 1566 * Function: scsi_report_bus_reset()
1567 * 1567 *
1568 * Purpose: Utility function used by low-level drivers to report that 1568 * Purpose: Utility function used by low-level drivers to report that
1569 * they have observed a bus reset on the bus being handled. 1569 * they have observed a bus reset on the bus being handled.
1570 * 1570 *
1571 * Arguments: shost - Host in question 1571 * Arguments: shost - Host in question
1572 * channel - channel on which reset was observed. 1572 * channel - channel on which reset was observed.
1573 * 1573 *
1574 * Returns: Nothing 1574 * Returns: Nothing
1575 * 1575 *
1576 * Lock status: Host lock must be held. 1576 * Lock status: Host lock must be held.
1577 * 1577 *
1578 * Notes: This only needs to be called if the reset is one which 1578 * Notes: This only needs to be called if the reset is one which
1579 * originates from an unknown location. Resets originated 1579 * originates from an unknown location. Resets originated
1580 * by the mid-level itself don't need to call this, but there 1580 * by the mid-level itself don't need to call this, but there
1581 * should be no harm. 1581 * should be no harm.
1582 * 1582 *
1583 * The main purpose of this is to make sure that a CHECK_CONDITION 1583 * The main purpose of this is to make sure that a CHECK_CONDITION
1584 * is properly treated. 1584 * is properly treated.
1585 */ 1585 */
1586 void scsi_report_bus_reset(struct Scsi_Host *shost, int channel) 1586 void scsi_report_bus_reset(struct Scsi_Host *shost, int channel)
1587 { 1587 {
1588 struct scsi_device *sdev; 1588 struct scsi_device *sdev;
1589 1589
1590 __shost_for_each_device(sdev, shost) { 1590 __shost_for_each_device(sdev, shost) {
1591 if (channel == sdev_channel(sdev)) { 1591 if (channel == sdev_channel(sdev)) {
1592 sdev->was_reset = 1; 1592 sdev->was_reset = 1;
1593 sdev->expecting_cc_ua = 1; 1593 sdev->expecting_cc_ua = 1;
1594 } 1594 }
1595 } 1595 }
1596 } 1596 }
1597 EXPORT_SYMBOL(scsi_report_bus_reset); 1597 EXPORT_SYMBOL(scsi_report_bus_reset);
1598 1598
1599 /* 1599 /*
1600 * Function: scsi_report_device_reset() 1600 * Function: scsi_report_device_reset()
1601 * 1601 *
1602 * Purpose: Utility function used by low-level drivers to report that 1602 * Purpose: Utility function used by low-level drivers to report that
1603 * they have observed a device reset on the device being handled. 1603 * they have observed a device reset on the device being handled.
1604 * 1604 *
1605 * Arguments: shost - Host in question 1605 * Arguments: shost - Host in question
1606 * channel - channel on which reset was observed 1606 * channel - channel on which reset was observed
1607 * target - target on which reset was observed 1607 * target - target on which reset was observed
1608 * 1608 *
1609 * Returns: Nothing 1609 * Returns: Nothing
1610 * 1610 *
1611 * Lock status: Host lock must be held 1611 * Lock status: Host lock must be held
1612 * 1612 *
1613 * Notes: This only needs to be called if the reset is one which 1613 * Notes: This only needs to be called if the reset is one which
1614 * originates from an unknown location. Resets originated 1614 * originates from an unknown location. Resets originated
1615 * by the mid-level itself don't need to call this, but there 1615 * by the mid-level itself don't need to call this, but there
1616 * should be no harm. 1616 * should be no harm.
1617 * 1617 *
1618 * The main purpose of this is to make sure that a CHECK_CONDITION 1618 * The main purpose of this is to make sure that a CHECK_CONDITION
1619 * is properly treated. 1619 * is properly treated.
1620 */ 1620 */
1621 void scsi_report_device_reset(struct Scsi_Host *shost, int channel, int target) 1621 void scsi_report_device_reset(struct Scsi_Host *shost, int channel, int target)
1622 { 1622 {
1623 struct scsi_device *sdev; 1623 struct scsi_device *sdev;
1624 1624
1625 __shost_for_each_device(sdev, shost) { 1625 __shost_for_each_device(sdev, shost) {
1626 if (channel == sdev_channel(sdev) && 1626 if (channel == sdev_channel(sdev) &&
1627 target == sdev_id(sdev)) { 1627 target == sdev_id(sdev)) {
1628 sdev->was_reset = 1; 1628 sdev->was_reset = 1;
1629 sdev->expecting_cc_ua = 1; 1629 sdev->expecting_cc_ua = 1;
1630 } 1630 }
1631 } 1631 }
1632 } 1632 }
1633 EXPORT_SYMBOL(scsi_report_device_reset); 1633 EXPORT_SYMBOL(scsi_report_device_reset);
1634 1634
1635 static void 1635 static void
1636 scsi_reset_provider_done_command(struct scsi_cmnd *scmd) 1636 scsi_reset_provider_done_command(struct scsi_cmnd *scmd)
1637 { 1637 {
1638 } 1638 }
1639 1639
1640 /* 1640 /*
1641 * Function: scsi_reset_provider 1641 * Function: scsi_reset_provider
1642 * 1642 *
1643 * Purpose: Send requested reset to a bus or device at any phase. 1643 * Purpose: Send requested reset to a bus or device at any phase.
1644 * 1644 *
1645 * Arguments: device - device to send reset to 1645 * Arguments: device - device to send reset to
1646 * flag - reset type (see scsi.h) 1646 * flag - reset type (see scsi.h)
1647 * 1647 *
1648 * Returns: SUCCESS/FAILURE. 1648 * Returns: SUCCESS/FAILURE.
1649 * 1649 *
1650 * Notes: This is used by the SCSI Generic driver to provide 1650 * Notes: This is used by the SCSI Generic driver to provide
1651 * Bus/Device reset capability. 1651 * Bus/Device reset capability.
1652 */ 1652 */
1653 int 1653 int
1654 scsi_reset_provider(struct scsi_device *dev, int flag) 1654 scsi_reset_provider(struct scsi_device *dev, int flag)
1655 { 1655 {
1656 struct scsi_cmnd *scmd = scsi_get_command(dev, GFP_KERNEL); 1656 struct scsi_cmnd *scmd = scsi_get_command(dev, GFP_KERNEL);
1657 struct request req; 1657 struct request req;
1658 int rtn; 1658 int rtn;
1659 1659
1660 scmd->request = &req; 1660 scmd->request = &req;
1661 memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout)); 1661 memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout));
1662 scmd->request->rq_status = RQ_SCSI_BUSY; 1662 scmd->request->rq_status = RQ_SCSI_BUSY;
1663 1663
1664 memset(&scmd->cmnd, '\0', sizeof(scmd->cmnd)); 1664 memset(&scmd->cmnd, '\0', sizeof(scmd->cmnd));
1665 1665
1666 scmd->scsi_done = scsi_reset_provider_done_command; 1666 scmd->scsi_done = scsi_reset_provider_done_command;
1667 scmd->done = NULL; 1667 scmd->done = NULL;
1668 scmd->buffer = NULL; 1668 scmd->buffer = NULL;
1669 scmd->bufflen = 0; 1669 scmd->bufflen = 0;
1670 scmd->request_buffer = NULL; 1670 scmd->request_buffer = NULL;
1671 scmd->request_bufflen = 0; 1671 scmd->request_bufflen = 0;
1672 1672
1673 scmd->cmd_len = 0; 1673 scmd->cmd_len = 0;
1674 1674
1675 scmd->sc_data_direction = DMA_BIDIRECTIONAL; 1675 scmd->sc_data_direction = DMA_BIDIRECTIONAL;
1676 scmd->sc_request = NULL; 1676 scmd->sc_request = NULL;
1677 scmd->sc_magic = SCSI_CMND_MAGIC; 1677 scmd->sc_magic = SCSI_CMND_MAGIC;
1678 1678
1679 init_timer(&scmd->eh_timeout); 1679 init_timer(&scmd->eh_timeout);
1680 1680
1681 /* 1681 /*
1682 * Sometimes the command can get back into the timer chain, 1682 * Sometimes the command can get back into the timer chain,
1683 * so use the pid as an identifier. 1683 * so use the pid as an identifier.
1684 */ 1684 */
1685 scmd->pid = 0; 1685 scmd->pid = 0;
1686 1686
1687 switch (flag) { 1687 switch (flag) {
1688 case SCSI_TRY_RESET_DEVICE: 1688 case SCSI_TRY_RESET_DEVICE:
1689 rtn = scsi_try_bus_device_reset(scmd); 1689 rtn = scsi_try_bus_device_reset(scmd);
1690 if (rtn == SUCCESS) 1690 if (rtn == SUCCESS)
1691 break; 1691 break;
1692 /* FALLTHROUGH */ 1692 /* FALLTHROUGH */
1693 case SCSI_TRY_RESET_BUS: 1693 case SCSI_TRY_RESET_BUS:
1694 rtn = scsi_try_bus_reset(scmd); 1694 rtn = scsi_try_bus_reset(scmd);
1695 if (rtn == SUCCESS) 1695 if (rtn == SUCCESS)
1696 break; 1696 break;
1697 /* FALLTHROUGH */ 1697 /* FALLTHROUGH */
1698 case SCSI_TRY_RESET_HOST: 1698 case SCSI_TRY_RESET_HOST:
1699 rtn = scsi_try_host_reset(scmd); 1699 rtn = scsi_try_host_reset(scmd);
1700 break; 1700 break;
1701 default: 1701 default:
1702 rtn = FAILED; 1702 rtn = FAILED;
1703 } 1703 }
1704 1704
1705 scsi_next_command(scmd); 1705 scsi_next_command(scmd);
1706 return rtn; 1706 return rtn;
1707 } 1707 }
1708 EXPORT_SYMBOL(scsi_reset_provider); 1708 EXPORT_SYMBOL(scsi_reset_provider);
1709 1709
1710 /** 1710 /**
1711 * scsi_normalize_sense - normalize main elements from either fixed or 1711 * scsi_normalize_sense - normalize main elements from either fixed or
1712 * descriptor sense data format into a common format. 1712 * descriptor sense data format into a common format.
1713 * 1713 *
1714 * @sense_buffer: byte array containing sense data returned by device 1714 * @sense_buffer: byte array containing sense data returned by device
1715 * @sb_len: number of valid bytes in sense_buffer 1715 * @sb_len: number of valid bytes in sense_buffer
1716 * @sshdr: pointer to instance of structure that common 1716 * @sshdr: pointer to instance of structure that common
1717 * elements are written to. 1717 * elements are written to.
1718 * 1718 *
1719 * Notes: 1719 * Notes:
1720 * The "main elements" from sense data are: response_code, sense_key, 1720 * The "main elements" from sense data are: response_code, sense_key,
1721 * asc, ascq and additional_length (only for descriptor format). 1721 * asc, ascq and additional_length (only for descriptor format).
1722 * 1722 *
1723 * Typically this function can be called after a device has 1723 * Typically this function can be called after a device has
1724 * responded to a SCSI command with the CHECK_CONDITION status. 1724 * responded to a SCSI command with the CHECK_CONDITION status.
1725 * 1725 *
1726 * Return value: 1726 * Return value:
1727 * 1 if valid sense data information found, else 0; 1727 * 1 if valid sense data information found, else 0;
1728 **/ 1728 **/
1729 int scsi_normalize_sense(const u8 *sense_buffer, int sb_len, 1729 int scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
1730 struct scsi_sense_hdr *sshdr) 1730 struct scsi_sense_hdr *sshdr)
1731 { 1731 {
1732 if (!sense_buffer || !sb_len) 1732 if (!sense_buffer || !sb_len)
1733 return 0; 1733 return 0;
1734 1734
1735 memset(sshdr, 0, sizeof(struct scsi_sense_hdr)); 1735 memset(sshdr, 0, sizeof(struct scsi_sense_hdr));
1736 1736
1737 sshdr->response_code = (sense_buffer[0] & 0x7f); 1737 sshdr->response_code = (sense_buffer[0] & 0x7f);
1738 1738
1739 if (!scsi_sense_valid(sshdr)) 1739 if (!scsi_sense_valid(sshdr))
1740 return 0; 1740 return 0;
1741 1741
1742 if (sshdr->response_code >= 0x72) { 1742 if (sshdr->response_code >= 0x72) {
1743 /* 1743 /*
1744 * descriptor format 1744 * descriptor format
1745 */ 1745 */
1746 if (sb_len > 1) 1746 if (sb_len > 1)
1747 sshdr->sense_key = (sense_buffer[1] & 0xf); 1747 sshdr->sense_key = (sense_buffer[1] & 0xf);
1748 if (sb_len > 2) 1748 if (sb_len > 2)
1749 sshdr->asc = sense_buffer[2]; 1749 sshdr->asc = sense_buffer[2];
1750 if (sb_len > 3) 1750 if (sb_len > 3)
1751 sshdr->ascq = sense_buffer[3]; 1751 sshdr->ascq = sense_buffer[3];
1752 if (sb_len > 7) 1752 if (sb_len > 7)
1753 sshdr->additional_length = sense_buffer[7]; 1753 sshdr->additional_length = sense_buffer[7];
1754 } else { 1754 } else {
1755 /* 1755 /*
1756 * fixed format 1756 * fixed format
1757 */ 1757 */
1758 if (sb_len > 2) 1758 if (sb_len > 2)
1759 sshdr->sense_key = (sense_buffer[2] & 0xf); 1759 sshdr->sense_key = (sense_buffer[2] & 0xf);
1760 if (sb_len > 7) { 1760 if (sb_len > 7) {
1761 sb_len = (sb_len < (sense_buffer[7] + 8)) ? 1761 sb_len = (sb_len < (sense_buffer[7] + 8)) ?
1762 sb_len : (sense_buffer[7] + 8); 1762 sb_len : (sense_buffer[7] + 8);
1763 if (sb_len > 12) 1763 if (sb_len > 12)
1764 sshdr->asc = sense_buffer[12]; 1764 sshdr->asc = sense_buffer[12];
1765 if (sb_len > 13) 1765 if (sb_len > 13)
1766 sshdr->ascq = sense_buffer[13]; 1766 sshdr->ascq = sense_buffer[13];
1767 } 1767 }
1768 } 1768 }
1769 1769
1770 return 1; 1770 return 1;
1771 } 1771 }
1772 EXPORT_SYMBOL(scsi_normalize_sense); 1772 EXPORT_SYMBOL(scsi_normalize_sense);
1773 1773
1774 int scsi_request_normalize_sense(struct scsi_request *sreq, 1774 int scsi_request_normalize_sense(struct scsi_request *sreq,
1775 struct scsi_sense_hdr *sshdr) 1775 struct scsi_sense_hdr *sshdr)
1776 { 1776 {
1777 return scsi_normalize_sense(sreq->sr_sense_buffer, 1777 return scsi_normalize_sense(sreq->sr_sense_buffer,
1778 sizeof(sreq->sr_sense_buffer), sshdr); 1778 sizeof(sreq->sr_sense_buffer), sshdr);
1779 } 1779 }
1780 EXPORT_SYMBOL(scsi_request_normalize_sense); 1780 EXPORT_SYMBOL(scsi_request_normalize_sense);
1781 1781
1782 int scsi_command_normalize_sense(struct scsi_cmnd *cmd, 1782 int scsi_command_normalize_sense(struct scsi_cmnd *cmd,
1783 struct scsi_sense_hdr *sshdr) 1783 struct scsi_sense_hdr *sshdr)
1784 { 1784 {
1785 return scsi_normalize_sense(cmd->sense_buffer, 1785 return scsi_normalize_sense(cmd->sense_buffer,
1786 sizeof(cmd->sense_buffer), sshdr); 1786 sizeof(cmd->sense_buffer), sshdr);
1787 } 1787 }
1788 EXPORT_SYMBOL(scsi_command_normalize_sense); 1788 EXPORT_SYMBOL(scsi_command_normalize_sense);
1789 1789
1790 /** 1790 /**
1791 * scsi_sense_desc_find - search for a given descriptor type in 1791 * scsi_sense_desc_find - search for a given descriptor type in
1792 * descriptor sense data format. 1792 * descriptor sense data format.
1793 * 1793 *
1794 * @sense_buffer: byte array of descriptor format sense data 1794 * @sense_buffer: byte array of descriptor format sense data
1795 * @sb_len: number of valid bytes in sense_buffer 1795 * @sb_len: number of valid bytes in sense_buffer
1796 * @desc_type: value of descriptor type to find 1796 * @desc_type: value of descriptor type to find
1797 * (e.g. 0 -> information) 1797 * (e.g. 0 -> information)
1798 * 1798 *
1799 * Notes: 1799 * Notes:
1800 * only valid when sense data is in descriptor format 1800 * only valid when sense data is in descriptor format
1801 * 1801 *
1802 * Return value: 1802 * Return value:
1803 * pointer to start of (first) descriptor if found else NULL 1803 * pointer to start of (first) descriptor if found else NULL
1804 **/ 1804 **/
1805 const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len, 1805 const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len,
1806 int desc_type) 1806 int desc_type)
1807 { 1807 {
1808 int add_sen_len, add_len, desc_len, k; 1808 int add_sen_len, add_len, desc_len, k;
1809 const u8 * descp; 1809 const u8 * descp;
1810 1810
1811 if ((sb_len < 8) || (0 == (add_sen_len = sense_buffer[7]))) 1811 if ((sb_len < 8) || (0 == (add_sen_len = sense_buffer[7])))
1812 return NULL; 1812 return NULL;
1813 if ((sense_buffer[0] < 0x72) || (sense_buffer[0] > 0x73)) 1813 if ((sense_buffer[0] < 0x72) || (sense_buffer[0] > 0x73))
1814 return NULL; 1814 return NULL;
1815 add_sen_len = (add_sen_len < (sb_len - 8)) ? 1815 add_sen_len = (add_sen_len < (sb_len - 8)) ?
1816 add_sen_len : (sb_len - 8); 1816 add_sen_len : (sb_len - 8);
1817 descp = &sense_buffer[8]; 1817 descp = &sense_buffer[8];
1818 for (desc_len = 0, k = 0; k < add_sen_len; k += desc_len) { 1818 for (desc_len = 0, k = 0; k < add_sen_len; k += desc_len) {
1819 descp += desc_len; 1819 descp += desc_len;
1820 add_len = (k < (add_sen_len - 1)) ? descp[1]: -1; 1820 add_len = (k < (add_sen_len - 1)) ? descp[1]: -1;
1821 desc_len = add_len + 2; 1821 desc_len = add_len + 2;
1822 if (descp[0] == desc_type) 1822 if (descp[0] == desc_type)
1823 return descp; 1823 return descp;
1824 if (add_len < 0) // short descriptor ?? 1824 if (add_len < 0) // short descriptor ??
1825 break; 1825 break;
1826 } 1826 }
1827 return NULL; 1827 return NULL;
1828 } 1828 }
1829 EXPORT_SYMBOL(scsi_sense_desc_find); 1829 EXPORT_SYMBOL(scsi_sense_desc_find);
1830 1830
1831 /** 1831 /**
1832 * scsi_get_sense_info_fld - attempts to get information field from 1832 * scsi_get_sense_info_fld - attempts to get information field from
1833 * sense data (either fixed or descriptor format) 1833 * sense data (either fixed or descriptor format)
1834 * 1834 *
1835 * @sense_buffer: byte array of sense data 1835 * @sense_buffer: byte array of sense data
1836 * @sb_len: number of valid bytes in sense_buffer 1836 * @sb_len: number of valid bytes in sense_buffer
1837 * @info_out: pointer to 64 integer where 8 or 4 byte information 1837 * @info_out: pointer to 64 integer where 8 or 4 byte information
1838 * field will be placed if found. 1838 * field will be placed if found.
1839 * 1839 *
1840 * Return value: 1840 * Return value:
1841 * 1 if information field found, 0 if not found. 1841 * 1 if information field found, 0 if not found.
1842 **/ 1842 **/
1843 int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len, 1843 int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
1844 u64 * info_out) 1844 u64 * info_out)
1845 { 1845 {
1846 int j; 1846 int j;
1847 const u8 * ucp; 1847 const u8 * ucp;
1848 u64 ull; 1848 u64 ull;
1849 1849
1850 if (sb_len < 7) 1850 if (sb_len < 7)
1851 return 0; 1851 return 0;
1852 switch (sense_buffer[0] & 0x7f) { 1852 switch (sense_buffer[0] & 0x7f) {
1853 case 0x70: 1853 case 0x70:
1854 case 0x71: 1854 case 0x71:
1855 if (sense_buffer[0] & 0x80) { 1855 if (sense_buffer[0] & 0x80) {
1856 *info_out = (sense_buffer[3] << 24) + 1856 *info_out = (sense_buffer[3] << 24) +
1857 (sense_buffer[4] << 16) + 1857 (sense_buffer[4] << 16) +
1858 (sense_buffer[5] << 8) + sense_buffer[6]; 1858 (sense_buffer[5] << 8) + sense_buffer[6];
1859 return 1; 1859 return 1;
1860 } else 1860 } else
1861 return 0; 1861 return 0;
1862 case 0x72: 1862 case 0x72:
1863 case 0x73: 1863 case 0x73:
1864 ucp = scsi_sense_desc_find(sense_buffer, sb_len, 1864 ucp = scsi_sense_desc_find(sense_buffer, sb_len,
1865 0 /* info desc */); 1865 0 /* info desc */);
1866 if (ucp && (0xa == ucp[1])) { 1866 if (ucp && (0xa == ucp[1])) {
1867 ull = 0; 1867 ull = 0;
1868 for (j = 0; j < 8; ++j) { 1868 for (j = 0; j < 8; ++j) {
1869 if (j > 0) 1869 if (j > 0)
1870 ull <<= 8; 1870 ull <<= 8;
1871 ull |= ucp[4 + j]; 1871 ull |= ucp[4 + j];
1872 } 1872 }
1873 *info_out = ull; 1873 *info_out = ull;
1874 return 1; 1874 return 1;
1875 } else 1875 } else
1876 return 0; 1876 return 0;
1877 default: 1877 default:
1878 return 0; 1878 return 0;
1879 } 1879 }
1880 } 1880 }
1881 EXPORT_SYMBOL(scsi_get_sense_info_fld); 1881 EXPORT_SYMBOL(scsi_get_sense_info_fld);
1882 1882
drivers/scsi/scsi_lib.c
1 /* 1 /*
2 * scsi_lib.c Copyright (C) 1999 Eric Youngdale 2 * scsi_lib.c Copyright (C) 1999 Eric Youngdale
3 * 3 *
4 * SCSI queueing library. 4 * SCSI queueing library.
5 * Initial versions: Eric Youngdale (eric@andante.org). 5 * Initial versions: Eric Youngdale (eric@andante.org).
6 * Based upon conversations with large numbers 6 * Based upon conversations with large numbers
7 * of people at Linux Expo. 7 * of people at Linux Expo.
8 */ 8 */
9 9
10 #include <linux/bio.h> 10 #include <linux/bio.h>
11 #include <linux/blkdev.h> 11 #include <linux/blkdev.h>
12 #include <linux/completion.h> 12 #include <linux/completion.h>
13 #include <linux/kernel.h> 13 #include <linux/kernel.h>
14 #include <linux/mempool.h> 14 #include <linux/mempool.h>
15 #include <linux/slab.h> 15 #include <linux/slab.h>
16 #include <linux/init.h> 16 #include <linux/init.h>
17 #include <linux/pci.h> 17 #include <linux/pci.h>
18 #include <linux/delay.h> 18 #include <linux/delay.h>
19 19
20 #include <scsi/scsi.h> 20 #include <scsi/scsi.h>
21 #include <scsi/scsi_dbg.h> 21 #include <scsi/scsi_dbg.h>
22 #include <scsi/scsi_device.h> 22 #include <scsi/scsi_device.h>
23 #include <scsi/scsi_driver.h> 23 #include <scsi/scsi_driver.h>
24 #include <scsi/scsi_eh.h> 24 #include <scsi/scsi_eh.h>
25 #include <scsi/scsi_host.h> 25 #include <scsi/scsi_host.h>
26 #include <scsi/scsi_request.h> 26 #include <scsi/scsi_request.h>
27 27
28 #include "scsi_priv.h" 28 #include "scsi_priv.h"
29 #include "scsi_logging.h" 29 #include "scsi_logging.h"
30 30
31 31
32 #define SG_MEMPOOL_NR (sizeof(scsi_sg_pools)/sizeof(struct scsi_host_sg_pool)) 32 #define SG_MEMPOOL_NR (sizeof(scsi_sg_pools)/sizeof(struct scsi_host_sg_pool))
33 #define SG_MEMPOOL_SIZE 32 33 #define SG_MEMPOOL_SIZE 32
34 34
35 struct scsi_host_sg_pool { 35 struct scsi_host_sg_pool {
36 size_t size; 36 size_t size;
37 char *name; 37 char *name;
38 kmem_cache_t *slab; 38 kmem_cache_t *slab;
39 mempool_t *pool; 39 mempool_t *pool;
40 }; 40 };
41 41
42 #if (SCSI_MAX_PHYS_SEGMENTS < 32) 42 #if (SCSI_MAX_PHYS_SEGMENTS < 32)
43 #error SCSI_MAX_PHYS_SEGMENTS is too small 43 #error SCSI_MAX_PHYS_SEGMENTS is too small
44 #endif 44 #endif
45 45
46 #define SP(x) { x, "sgpool-" #x } 46 #define SP(x) { x, "sgpool-" #x }
47 static struct scsi_host_sg_pool scsi_sg_pools[] = { 47 static struct scsi_host_sg_pool scsi_sg_pools[] = {
48 SP(8), 48 SP(8),
49 SP(16), 49 SP(16),
50 SP(32), 50 SP(32),
51 #if (SCSI_MAX_PHYS_SEGMENTS > 32) 51 #if (SCSI_MAX_PHYS_SEGMENTS > 32)
52 SP(64), 52 SP(64),
53 #if (SCSI_MAX_PHYS_SEGMENTS > 64) 53 #if (SCSI_MAX_PHYS_SEGMENTS > 64)
54 SP(128), 54 SP(128),
55 #if (SCSI_MAX_PHYS_SEGMENTS > 128) 55 #if (SCSI_MAX_PHYS_SEGMENTS > 128)
56 SP(256), 56 SP(256),
57 #if (SCSI_MAX_PHYS_SEGMENTS > 256) 57 #if (SCSI_MAX_PHYS_SEGMENTS > 256)
58 #error SCSI_MAX_PHYS_SEGMENTS is too large 58 #error SCSI_MAX_PHYS_SEGMENTS is too large
59 #endif 59 #endif
60 #endif 60 #endif
61 #endif 61 #endif
62 #endif 62 #endif
63 }; 63 };
64 #undef SP 64 #undef SP
65 65
66 static void scsi_run_queue(struct request_queue *q); 66 static void scsi_run_queue(struct request_queue *q);
67 67
68 /* 68 /*
69 * Function: scsi_unprep_request() 69 * Function: scsi_unprep_request()
70 * 70 *
71 * Purpose: Remove all preparation done for a request, including its 71 * Purpose: Remove all preparation done for a request, including its
72 * associated scsi_cmnd, so that it can be requeued. 72 * associated scsi_cmnd, so that it can be requeued.
73 * 73 *
74 * Arguments: req - request to unprepare 74 * Arguments: req - request to unprepare
75 * 75 *
76 * Lock status: Assumed that no locks are held upon entry. 76 * Lock status: Assumed that no locks are held upon entry.
77 * 77 *
78 * Returns: Nothing. 78 * Returns: Nothing.
79 */ 79 */
80 static void scsi_unprep_request(struct request *req) 80 static void scsi_unprep_request(struct request *req)
81 { 81 {
82 struct scsi_cmnd *cmd = req->special; 82 struct scsi_cmnd *cmd = req->special;
83 83
84 req->flags &= ~REQ_DONTPREP; 84 req->flags &= ~REQ_DONTPREP;
85 req->special = (req->flags & REQ_SPECIAL) ? cmd->sc_request : NULL; 85 req->special = (req->flags & REQ_SPECIAL) ? cmd->sc_request : NULL;
86 86
87 scsi_put_command(cmd); 87 scsi_put_command(cmd);
88 } 88 }
89 89
90 /* 90 /*
91 * Function: scsi_queue_insert() 91 * Function: scsi_queue_insert()
92 * 92 *
93 * Purpose: Insert a command in the midlevel queue. 93 * Purpose: Insert a command in the midlevel queue.
94 * 94 *
95 * Arguments: cmd - command that we are adding to queue. 95 * Arguments: cmd - command that we are adding to queue.
96 * reason - why we are inserting command to queue. 96 * reason - why we are inserting command to queue.
97 * 97 *
98 * Lock status: Assumed that lock is not held upon entry. 98 * Lock status: Assumed that lock is not held upon entry.
99 * 99 *
100 * Returns: Nothing. 100 * Returns: Nothing.
101 * 101 *
102 * Notes: We do this for one of two cases. Either the host is busy 102 * Notes: We do this for one of two cases. Either the host is busy
103 * and it cannot accept any more commands for the time being, 103 * and it cannot accept any more commands for the time being,
104 * or the device returned QUEUE_FULL and can accept no more 104 * or the device returned QUEUE_FULL and can accept no more
105 * commands. 105 * commands.
106 * Notes: This could be called either from an interrupt context or a 106 * Notes: This could be called either from an interrupt context or a
107 * normal process context. 107 * normal process context.
108 */ 108 */
109 int scsi_queue_insert(struct scsi_cmnd *cmd, int reason) 109 int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
110 { 110 {
111 struct Scsi_Host *host = cmd->device->host; 111 struct Scsi_Host *host = cmd->device->host;
112 struct scsi_device *device = cmd->device; 112 struct scsi_device *device = cmd->device;
113 struct request_queue *q = device->request_queue; 113 struct request_queue *q = device->request_queue;
114 unsigned long flags; 114 unsigned long flags;
115 115
116 SCSI_LOG_MLQUEUE(1, 116 SCSI_LOG_MLQUEUE(1,
117 printk("Inserting command %p into mlqueue\n", cmd)); 117 printk("Inserting command %p into mlqueue\n", cmd));
118 118
119 /* 119 /*
120 * Set the appropriate busy bit for the device/host. 120 * Set the appropriate busy bit for the device/host.
121 * 121 *
122 * If the host/device isn't busy, assume that something actually 122 * If the host/device isn't busy, assume that something actually
123 * completed, and that we should be able to queue a command now. 123 * completed, and that we should be able to queue a command now.
124 * 124 *
125 * Note that the prior mid-layer assumption that any host could 125 * Note that the prior mid-layer assumption that any host could
126 * always queue at least one command is now broken. The mid-layer 126 * always queue at least one command is now broken. The mid-layer
127 * will implement a user specifiable stall (see 127 * will implement a user specifiable stall (see
128 * scsi_host.max_host_blocked and scsi_device.max_device_blocked) 128 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
129 * if a command is requeued with no other commands outstanding 129 * if a command is requeued with no other commands outstanding
130 * either for the device or for the host. 130 * either for the device or for the host.
131 */ 131 */
132 if (reason == SCSI_MLQUEUE_HOST_BUSY) 132 if (reason == SCSI_MLQUEUE_HOST_BUSY)
133 host->host_blocked = host->max_host_blocked; 133 host->host_blocked = host->max_host_blocked;
134 else if (reason == SCSI_MLQUEUE_DEVICE_BUSY) 134 else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
135 device->device_blocked = device->max_device_blocked; 135 device->device_blocked = device->max_device_blocked;
136 136
137 /* 137 /*
138 * Decrement the counters, since these commands are no longer 138 * Decrement the counters, since these commands are no longer
139 * active on the host/device. 139 * active on the host/device.
140 */ 140 */
141 scsi_device_unbusy(device); 141 scsi_device_unbusy(device);
142 142
143 /* 143 /*
144 * Requeue this command. It will go before all other commands 144 * Requeue this command. It will go before all other commands
145 * that are already in the queue. 145 * that are already in the queue.
146 * 146 *
147 * NOTE: there is magic here about the way the queue is plugged if 147 * NOTE: there is magic here about the way the queue is plugged if
148 * we have no outstanding commands. 148 * we have no outstanding commands.
149 * 149 *
150 * Although we *don't* plug the queue, we call the request 150 * Although we *don't* plug the queue, we call the request
151 * function. The SCSI request function detects the blocked condition 151 * function. The SCSI request function detects the blocked condition
152 * and plugs the queue appropriately. 152 * and plugs the queue appropriately.
153 */ 153 */
154 spin_lock_irqsave(q->queue_lock, flags); 154 spin_lock_irqsave(q->queue_lock, flags);
155 blk_requeue_request(q, cmd->request); 155 blk_requeue_request(q, cmd->request);
156 spin_unlock_irqrestore(q->queue_lock, flags); 156 spin_unlock_irqrestore(q->queue_lock, flags);
157 157
158 scsi_run_queue(q); 158 scsi_run_queue(q);
159 159
160 return 0; 160 return 0;
161 } 161 }
162 162
163 /* 163 /*
164 * Function: scsi_do_req 164 * Function: scsi_do_req
165 * 165 *
166 * Purpose: Queue a SCSI request 166 * Purpose: Queue a SCSI request
167 * 167 *
168 * Arguments: sreq - command descriptor. 168 * Arguments: sreq - command descriptor.
169 * cmnd - actual SCSI command to be performed. 169 * cmnd - actual SCSI command to be performed.
170 * buffer - data buffer. 170 * buffer - data buffer.
171 * bufflen - size of data buffer. 171 * bufflen - size of data buffer.
172 * done - completion function to be run. 172 * done - completion function to be run.
173 * timeout - how long to let it run before timeout. 173 * timeout - how long to let it run before timeout.
174 * retries - number of retries we allow. 174 * retries - number of retries we allow.
175 * 175 *
176 * Lock status: No locks held upon entry. 176 * Lock status: No locks held upon entry.
177 * 177 *
178 * Returns: Nothing. 178 * Returns: Nothing.
179 * 179 *
180 * Notes: This function is only used for queueing requests for things 180 * Notes: This function is only used for queueing requests for things
181 * like ioctls and character device requests - this is because 181 * like ioctls and character device requests - this is because
182 * we essentially just inject a request into the queue for the 182 * we essentially just inject a request into the queue for the
183 * device. 183 * device.
184 * 184 *
185 * In order to support the scsi_device_quiesce function, we 185 * In order to support the scsi_device_quiesce function, we
186 * now inject requests on the *head* of the device queue 186 * now inject requests on the *head* of the device queue
187 * rather than the tail. 187 * rather than the tail.
188 */ 188 */
189 void scsi_do_req(struct scsi_request *sreq, const void *cmnd, 189 void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
190 void *buffer, unsigned bufflen, 190 void *buffer, unsigned bufflen,
191 void (*done)(struct scsi_cmnd *), 191 void (*done)(struct scsi_cmnd *),
192 int timeout, int retries) 192 int timeout, int retries)
193 { 193 {
194 /* 194 /*
195 * If the upper level driver is reusing these things, then 195 * If the upper level driver is reusing these things, then
196 * we should release the low-level block now. Another one will 196 * we should release the low-level block now. Another one will
197 * be allocated later when this request is getting queued. 197 * be allocated later when this request is getting queued.
198 */ 198 */
199 __scsi_release_request(sreq); 199 __scsi_release_request(sreq);
200 200
201 /* 201 /*
202 * Our own function scsi_done (which marks the host as not busy, 202 * Our own function scsi_done (which marks the host as not busy,
203 * disables the timeout counter, etc) will be called by us or by the 203 * disables the timeout counter, etc) will be called by us or by the
204 * scsi_hosts[host].queuecommand() function needs to also call 204 * scsi_hosts[host].queuecommand() function needs to also call
205 * the completion function for the high level driver. 205 * the completion function for the high level driver.
206 */ 206 */
207 memcpy(sreq->sr_cmnd, cmnd, sizeof(sreq->sr_cmnd)); 207 memcpy(sreq->sr_cmnd, cmnd, sizeof(sreq->sr_cmnd));
208 sreq->sr_bufflen = bufflen; 208 sreq->sr_bufflen = bufflen;
209 sreq->sr_buffer = buffer; 209 sreq->sr_buffer = buffer;
210 sreq->sr_allowed = retries; 210 sreq->sr_allowed = retries;
211 sreq->sr_done = done; 211 sreq->sr_done = done;
212 sreq->sr_timeout_per_command = timeout; 212 sreq->sr_timeout_per_command = timeout;
213 213
214 if (sreq->sr_cmd_len == 0) 214 if (sreq->sr_cmd_len == 0)
215 sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]); 215 sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);
216 216
217 /* 217 /*
218 * head injection *required* here otherwise quiesce won't work 218 * head injection *required* here otherwise quiesce won't work
219 * 219 *
220 * Because users of this function are apt to reuse requests with no 220 * Because users of this function are apt to reuse requests with no
221 * modification, we have to sanitise the request flags here 221 * modification, we have to sanitise the request flags here
222 */ 222 */
223 sreq->sr_request->flags &= ~REQ_DONTPREP; 223 sreq->sr_request->flags &= ~REQ_DONTPREP;
224 blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request, 224 blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
225 1, sreq); 225 1, sreq);
226 } 226 }
227 EXPORT_SYMBOL(scsi_do_req); 227 EXPORT_SYMBOL(scsi_do_req);
228 228
229 /** 229 /**
230 * scsi_execute - insert request and wait for the result 230 * scsi_execute - insert request and wait for the result
231 * @sdev: scsi device 231 * @sdev: scsi device
232 * @cmd: scsi command 232 * @cmd: scsi command
233 * @data_direction: data direction 233 * @data_direction: data direction
234 * @buffer: data buffer 234 * @buffer: data buffer
235 * @bufflen: len of buffer 235 * @bufflen: len of buffer
236 * @sense: optional sense buffer 236 * @sense: optional sense buffer
237 * @timeout: request timeout in seconds 237 * @timeout: request timeout in seconds
238 * @retries: number of times to retry request 238 * @retries: number of times to retry request
239 * @flags: or into request flags; 239 * @flags: or into request flags;
240 * 240 *
241 * returns the req->errors value which is the the scsi_cmnd result 241 * returns the req->errors value which is the the scsi_cmnd result
242 * field. 242 * field.
243 **/ 243 **/
244 int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, 244 int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
245 int data_direction, void *buffer, unsigned bufflen, 245 int data_direction, void *buffer, unsigned bufflen,
246 unsigned char *sense, int timeout, int retries, int flags) 246 unsigned char *sense, int timeout, int retries, int flags)
247 { 247 {
248 struct request *req; 248 struct request *req;
249 int write = (data_direction == DMA_TO_DEVICE); 249 int write = (data_direction == DMA_TO_DEVICE);
250 int ret = DRIVER_ERROR << 24; 250 int ret = DRIVER_ERROR << 24;
251 251
252 req = blk_get_request(sdev->request_queue, write, __GFP_WAIT); 252 req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
253 253
254 if (bufflen && blk_rq_map_kern(sdev->request_queue, req, 254 if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
255 buffer, bufflen, __GFP_WAIT)) 255 buffer, bufflen, __GFP_WAIT))
256 goto out; 256 goto out;
257 257
258 req->cmd_len = COMMAND_SIZE(cmd[0]); 258 req->cmd_len = COMMAND_SIZE(cmd[0]);
259 memcpy(req->cmd, cmd, req->cmd_len); 259 memcpy(req->cmd, cmd, req->cmd_len);
260 req->sense = sense; 260 req->sense = sense;
261 req->sense_len = 0; 261 req->sense_len = 0;
262 req->retries = retries; 262 req->retries = retries;
263 req->timeout = timeout; 263 req->timeout = timeout;
264 req->flags |= flags | REQ_BLOCK_PC | REQ_SPECIAL | REQ_QUIET; 264 req->flags |= flags | REQ_BLOCK_PC | REQ_SPECIAL | REQ_QUIET;
265 265
266 /* 266 /*
267 * head injection *required* here otherwise quiesce won't work 267 * head injection *required* here otherwise quiesce won't work
268 */ 268 */
269 blk_execute_rq(req->q, NULL, req, 1); 269 blk_execute_rq(req->q, NULL, req, 1);
270 270
271 ret = req->errors; 271 ret = req->errors;
272 out: 272 out:
273 blk_put_request(req); 273 blk_put_request(req);
274 274
275 return ret; 275 return ret;
276 } 276 }
277 EXPORT_SYMBOL(scsi_execute); 277 EXPORT_SYMBOL(scsi_execute);
278 278
279 279
280 int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd, 280 int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
281 int data_direction, void *buffer, unsigned bufflen, 281 int data_direction, void *buffer, unsigned bufflen,
282 struct scsi_sense_hdr *sshdr, int timeout, int retries) 282 struct scsi_sense_hdr *sshdr, int timeout, int retries)
283 { 283 {
284 char *sense = NULL; 284 char *sense = NULL;
285 int result; 285 int result;
286 286
287 if (sshdr) { 287 if (sshdr) {
288 sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO); 288 sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
289 if (!sense) 289 if (!sense)
290 return DRIVER_ERROR << 24; 290 return DRIVER_ERROR << 24;
291 memset(sense, 0, SCSI_SENSE_BUFFERSIZE); 291 memset(sense, 0, SCSI_SENSE_BUFFERSIZE);
292 } 292 }
293 result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen, 293 result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
294 sense, timeout, retries, 0); 294 sense, timeout, retries, 0);
295 if (sshdr) 295 if (sshdr)
296 scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr); 296 scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
297 297
298 kfree(sense); 298 kfree(sense);
299 return result; 299 return result;
300 } 300 }
301 EXPORT_SYMBOL(scsi_execute_req); 301 EXPORT_SYMBOL(scsi_execute_req);
302 302
/*
 * Per-request context for scsi_execute_async(): carries the caller's
 * completion callback, its opaque cookie, and the sense buffer that
 * req->sense points into for the lifetime of the request.
 */
struct scsi_io_context {
	void *data;	/* caller's private cookie, handed back to done() */
	void (*done)(void *data, char *sense, int result, int resid);
	char sense[SCSI_SENSE_BUFFERSIZE];
};

/* slab cache the contexts are allocated from (see scsi_end_async) */
static kmem_cache_t *scsi_io_context_cache;
310 310
/*
 * Completion handler for scsi_execute_async() requests: invoke the
 * caller's callback (if any) with the sense data, error status and
 * residual count, then release the context and the request.
 */
static void scsi_end_async(struct request *req, int uptodate)
{
	struct scsi_io_context *sioc = req->end_io_data;

	if (sioc->done)
		sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);

	kmem_cache_free(scsi_io_context_cache, sioc);
	/* __blk_put_request: the queue lock is already held here */
	__blk_put_request(req->q, req);
}
321 321
322 static int scsi_merge_bio(struct request *rq, struct bio *bio) 322 static int scsi_merge_bio(struct request *rq, struct bio *bio)
323 { 323 {
324 struct request_queue *q = rq->q; 324 struct request_queue *q = rq->q;
325 325
326 bio->bi_flags &= ~(1 << BIO_SEG_VALID); 326 bio->bi_flags &= ~(1 << BIO_SEG_VALID);
327 if (rq_data_dir(rq) == WRITE) 327 if (rq_data_dir(rq) == WRITE)
328 bio->bi_rw |= (1 << BIO_RW); 328 bio->bi_rw |= (1 << BIO_RW);
329 blk_queue_bounce(q, &bio); 329 blk_queue_bounce(q, &bio);
330 330
331 if (!rq->bio) 331 if (!rq->bio)
332 blk_rq_bio_prep(q, rq, bio); 332 blk_rq_bio_prep(q, rq, bio);
333 else if (!q->back_merge_fn(q, rq, bio)) 333 else if (!q->back_merge_fn(q, rq, bio))
334 return -EINVAL; 334 return -EINVAL;
335 else { 335 else {
336 rq->biotail->bi_next = bio; 336 rq->biotail->bi_next = bio;
337 rq->biotail = bio; 337 rq->biotail = bio;
338 rq->hard_nr_sectors += bio_sectors(bio); 338 rq->hard_nr_sectors += bio_sectors(bio);
339 rq->nr_sectors = rq->hard_nr_sectors; 339 rq->nr_sectors = rq->hard_nr_sectors;
340 } 340 }
341 341
342 return 0; 342 return 0;
343 } 343 }
344 344
345 static int scsi_bi_endio(struct bio *bio, unsigned int bytes_done, int error) 345 static int scsi_bi_endio(struct bio *bio, unsigned int bytes_done, int error)
346 { 346 {
347 if (bio->bi_size) 347 if (bio->bi_size)
348 return 1; 348 return 1;
349 349
350 bio_put(bio); 350 bio_put(bio);
351 return 0; 351 return 0;
352 } 352 }
353 353
/**
 * scsi_req_map_sg - map a scatterlist into a request
 * @rq:		request to fill
 * @sgl:	scatterlist
 * @nsegs:	number of elements
 * @bufflen:	len of buffer
 * @gfp:	memory allocation flags
 *
 * scsi_req_map_sg maps a scatterlist into a request so that the
 * request can be sent to the block layer. We do not trust the scatterlist
 * sent to use, as some ULDs use that struct to only organize the pages.
 *
 * Returns 0 on success or a negative errno; on failure every bio already
 * attached to @rq is completed/freed via the free_bios path.
 */
static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
			   int nsegs, unsigned bufflen, gfp_t gfp)
{
	struct request_queue *q = rq->q;
	int nr_pages = (bufflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned int data_len = 0, len, bytes, off;
	struct page *page;
	struct bio *bio = NULL;
	int i, err, nr_vecs = 0;

	/* walk each sg element page by page, packing pages into bios */
	for (i = 0; i < nsegs; i++) {
		page = sgl[i].page;
		off = sgl[i].offset;
		len = sgl[i].length;
		data_len += len;

		while (len > 0) {
			/* never cross a page boundary in one bio_vec */
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);

			if (!bio) {
				/* size the next bio for the remaining pages */
				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
				nr_pages -= nr_vecs;

				bio = bio_alloc(gfp, nr_vecs);
				if (!bio) {
					err = -ENOMEM;
					goto free_bios;
				}
				bio->bi_end_io = scsi_bi_endio;
			}

			if (bio_add_pc_page(q, bio, page, bytes, off) !=
			    bytes) {
				/* page rejected: drop this bio, unwind */
				bio_put(bio);
				err = -EINVAL;
				goto free_bios;
			}

			if (bio->bi_vcnt >= nr_vecs) {
				/* bio is full: merge it into the request */
				err = scsi_merge_bio(rq, bio);
				if (err) {
					bio_endio(bio, bio->bi_size, 0);
					goto free_bios;
				}
				bio = NULL;
			}

			page++;
			len -= bytes;
			off = 0;	/* only the first chunk has an offset */
		}
	}

	rq->buffer = rq->data = NULL;
	rq->data_len = data_len;
	return 0;

free_bios:
	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;
		/*
		 * call endio instead of bio_put incase it was bounced
		 */
		bio_endio(bio, bio->bi_size, 0);
	}

	return err;
}
434 434
435 /** 435 /**
436 * scsi_execute_async - insert request 436 * scsi_execute_async - insert request
437 * @sdev: scsi device 437 * @sdev: scsi device
438 * @cmd: scsi command 438 * @cmd: scsi command
439 * @cmd_len: length of scsi cdb
439 * @data_direction: data direction 440 * @data_direction: data direction
440 * @buffer: data buffer (this can be a kernel buffer or scatterlist) 441 * @buffer: data buffer (this can be a kernel buffer or scatterlist)
441 * @bufflen: len of buffer 442 * @bufflen: len of buffer
442 * @use_sg: if buffer is a scatterlist this is the number of elements 443 * @use_sg: if buffer is a scatterlist this is the number of elements
443 * @timeout: request timeout in seconds 444 * @timeout: request timeout in seconds
444 * @retries: number of times to retry request 445 * @retries: number of times to retry request
445 * @flags: or into request flags 446 * @flags: or into request flags
446 **/ 447 **/
447 int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd, 448 int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
448 int data_direction, void *buffer, unsigned bufflen, 449 int cmd_len, int data_direction, void *buffer, unsigned bufflen,
449 int use_sg, int timeout, int retries, void *privdata, 450 int use_sg, int timeout, int retries, void *privdata,
450 void (*done)(void *, char *, int, int), gfp_t gfp) 451 void (*done)(void *, char *, int, int), gfp_t gfp)
451 { 452 {
452 struct request *req; 453 struct request *req;
453 struct scsi_io_context *sioc; 454 struct scsi_io_context *sioc;
454 int err = 0; 455 int err = 0;
455 int write = (data_direction == DMA_TO_DEVICE); 456 int write = (data_direction == DMA_TO_DEVICE);
456 457
457 sioc = kmem_cache_alloc(scsi_io_context_cache, gfp); 458 sioc = kmem_cache_alloc(scsi_io_context_cache, gfp);
458 if (!sioc) 459 if (!sioc)
459 return DRIVER_ERROR << 24; 460 return DRIVER_ERROR << 24;
460 memset(sioc, 0, sizeof(*sioc)); 461 memset(sioc, 0, sizeof(*sioc));
461 462
462 req = blk_get_request(sdev->request_queue, write, gfp); 463 req = blk_get_request(sdev->request_queue, write, gfp);
463 if (!req) 464 if (!req)
464 goto free_sense; 465 goto free_sense;
465 req->flags |= REQ_BLOCK_PC | REQ_QUIET; 466 req->flags |= REQ_BLOCK_PC | REQ_QUIET;
466 467
467 if (use_sg) 468 if (use_sg)
468 err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp); 469 err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
469 else if (bufflen) 470 else if (bufflen)
470 err = blk_rq_map_kern(req->q, req, buffer, bufflen, gfp); 471 err = blk_rq_map_kern(req->q, req, buffer, bufflen, gfp);
471 472
472 if (err) 473 if (err)
473 goto free_req; 474 goto free_req;
474 475
475 req->cmd_len = COMMAND_SIZE(cmd[0]); 476 req->cmd_len = cmd_len;
476 memcpy(req->cmd, cmd, req->cmd_len); 477 memcpy(req->cmd, cmd, req->cmd_len);
477 req->sense = sioc->sense; 478 req->sense = sioc->sense;
478 req->sense_len = 0; 479 req->sense_len = 0;
479 req->timeout = timeout; 480 req->timeout = timeout;
480 req->retries = retries; 481 req->retries = retries;
481 req->end_io_data = sioc; 482 req->end_io_data = sioc;
482 483
483 sioc->data = privdata; 484 sioc->data = privdata;
484 sioc->done = done; 485 sioc->done = done;
485 486
486 blk_execute_rq_nowait(req->q, NULL, req, 1, scsi_end_async); 487 blk_execute_rq_nowait(req->q, NULL, req, 1, scsi_end_async);
487 return 0; 488 return 0;
488 489
489 free_req: 490 free_req:
490 blk_put_request(req); 491 blk_put_request(req);
491 free_sense: 492 free_sense:
492 kfree(sioc); 493 kfree(sioc);
493 return DRIVER_ERROR << 24; 494 return DRIVER_ERROR << 24;
494 } 495 }
495 EXPORT_SYMBOL_GPL(scsi_execute_async); 496 EXPORT_SYMBOL_GPL(scsi_execute_async);
496 497
/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Returns:     Nothing
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.   Typically this will
 *              be called once for each command, as required.
 */
static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;

	memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);

	/* fall back to the table-derived length only if none was set */
	if (cmd->cmd_len == 0)
		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);

	/*
	 * We need saved copies of a number of fields - this is because
	 * error handling may need to overwrite these with different values
	 * to run different commands, and once error handling is complete,
	 * we will need to restore these values prior to running the actual
	 * command.
	 */
	cmd->old_use_sg = cmd->use_sg;
	cmd->old_cmd_len = cmd->cmd_len;
	cmd->sc_old_data_direction = cmd->sc_data_direction;
	cmd->old_underflow = cmd->underflow;
	memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
	cmd->buffer = cmd->request_buffer;
	cmd->bufflen = cmd->request_bufflen;

	/* always succeeds; callers treat non-zero as "ok to proceed" */
	return 1;
}
536 537
/*
 * Function:   scsi_setup_cmd_retry()
 *
 * Purpose:    Restore the command state for a retry
 *
 * Arguments:  cmd	- command to be restored
 *
 * Returns:    Nothing
 *
 * Notes:      Immediately prior to retrying a command, we need
 *             to restore certain fields that we saved above.
 */
void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
{
	/* undo exactly what scsi_init_cmd_errh() saved */
	memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
	cmd->request_buffer = cmd->buffer;
	cmd->request_bufflen = cmd->bufflen;
	cmd->use_sg = cmd->old_use_sg;
	cmd->cmd_len = cmd->old_cmd_len;
	cmd->sc_data_direction = cmd->sc_old_data_direction;
	cmd->underflow = cmd->old_underflow;
}
559 560
/*
 * Mark one outstanding command as finished on @sdev: drop the host and
 * device busy counts, and kick the error handler if the host is in
 * recovery waiting for in-flight commands to drain.
 */
void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	if (unlikely(scsi_host_in_recovery(shost) &&
		     shost->host_failed))
		scsi_eh_wakeup(shost);
	/*
	 * Hand-over locking: irqs stay disabled across both locks; only
	 * the final unlock restores the saved flags.
	 */
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}
575 576
/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;	/* someone else claimed the target meanwhile */
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		/* hold a reference across the unlocked blk_run_queue call */
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);
	
		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}
621 622
/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (sdev->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	/* drain the starved list while the host can still take commands */
	while (!list_empty(&shost->starved_list) &&
	       !shost->host_blocked && !shost->host_self_blocked &&
		!((shost->can_queue > 0) &&
		  (shost->host_busy >= shost->can_queue))) {
		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		sdev = list_entry(shost->starved_list.next,
					  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		spin_unlock_irqrestore(shost->host_lock, flags);

		blk_run_queue(sdev->request_queue);

		spin_lock_irqsave(shost->host_lock, flags);
		if (unlikely(!list_empty(&sdev->starved_entry)))
			/*
			 * sdev lost a race, and was put back on the
			 * starved list. This is unlikely but without this
			 * in theory we could loop forever.
			 */
			break;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}
678 679
/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned long flags;

	/* unprep first: the request will be re-prepared when re-issued */
	scsi_unprep_request(req);
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
}
709 710
/*
 * Release @cmd and restart its device's queue.  A temporary reference
 * on the device keeps it (and its request_queue) alive while the queue
 * is being run after the command has been put.
 */
void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	/* need to hold a reference on the device before we let go of the cmd */
	get_device(&sdev->sdev_gendev);

	scsi_put_command(cmd);
	scsi_run_queue(q);

	/* ok to remove device now */
	put_device(&sdev->sdev_gendev);
}
724 725
/* Run the request queue of every device attached to @shost. */
void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}
732 733
/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              uptodate - 1 if I/O indicates success, <= 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 * 
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
					  int bytes, int requeue)
{
	request_queue_t *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	unsigned long flags;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (end_that_request_chunk(req, uptodate, bytes)) {
		int leftover = (req->hard_nr_sectors << 9);

		/* for BLOCK_PC, the residual is byte-, not sector-based */
		if (blk_pc_request(req))
			leftover = req->data_len;

		/* kill remainder if no retrys */
		if (!uptodate && blk_noretry_request(req))
			end_that_request_chunk(req, 0, leftover);
		else {
			if (requeue) {
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_requeue_command(q, cmd);
				cmd = NULL;
			}
			return cmd;
		}
	}

	add_disk_randomness(req->rq_disk);

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_rq_tagged(req))
		blk_queue_end_tag(q, req);
	end_that_request_last(req, uptodate);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	scsi_next_command(cmd);
	return NULL;
}
804 805
/*
 * Allocate a scatter-gather table sized for cmd->use_sg entries from the
 * appropriate power-of-two mempool.  Records the chosen pool index in
 * cmd->sglist_len so scsi_free_sgtable() can return it to the same pool.
 * Returns NULL if use_sg exceeds SCSI_MAX_PHYS_SEGMENTS.
 */
static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;
	struct scatterlist *sgl;

	BUG_ON(!cmd->use_sg);

	/* map the segment count onto a pool index (8, 16, 32, ... entries) */
	switch (cmd->use_sg) {
	case 1 ... 8:
		cmd->sglist_len = 0;
		break;
	case 9 ... 16:
		cmd->sglist_len = 1;
		break;
	case 17 ... 32:
		cmd->sglist_len = 2;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
	case 33 ... 64:
		cmd->sglist_len = 3;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
	case 65 ... 128:
		cmd->sglist_len = 4;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS  > 128)
	case 129 ... 256:
		cmd->sglist_len = 5;
		break;
#endif
#endif
#endif
	default:
		return NULL;
	}

	sgp = scsi_sg_pools + cmd->sglist_len;
	sgl = mempool_alloc(sgp->pool, gfp_mask);
	return sgl;
}
845 846
/*
 * Return a scatter-gather table to the mempool it came from; @index is
 * the cmd->sglist_len recorded by scsi_alloc_sgtable().
 */
static void scsi_free_sgtable(struct scatterlist *sgl, int index)
{
	struct scsi_host_sg_pool *sgp;

	BUG_ON(index >= SG_MEMPOOL_NR);

	sgp = scsi_sg_pools + index;
	mempool_free(sgl, sgp->pool);
}
855 856
856 /* 857 /*
857 * Function: scsi_release_buffers() 858 * Function: scsi_release_buffers()
858 * 859 *
859 * Purpose: Completion processing for block device I/O requests. 860 * Purpose: Completion processing for block device I/O requests.
860 * 861 *
861 * Arguments: cmd - command that we are bailing. 862 * Arguments: cmd - command that we are bailing.
862 * 863 *
863 * Lock status: Assumed that no lock is held upon entry. 864 * Lock status: Assumed that no lock is held upon entry.
864 * 865 *
865 * Returns: Nothing 866 * Returns: Nothing
866 * 867 *
867 * Notes: In the event that an upper level driver rejects a 868 * Notes: In the event that an upper level driver rejects a
868 * command, we must release resources allocated during 869 * command, we must release resources allocated during
869 * the __init_io() function. Primarily this would involve 870 * the __init_io() function. Primarily this would involve
870 * the scatter-gather table, and potentially any bounce 871 * the scatter-gather table, and potentially any bounce
871 * buffers. 872 * buffers.
872 */ 873 */
873 static void scsi_release_buffers(struct scsi_cmnd *cmd) 874 static void scsi_release_buffers(struct scsi_cmnd *cmd)
874 { 875 {
875 struct request *req = cmd->request; 876 struct request *req = cmd->request;
876 877
877 /* 878 /*
878 * Free up any indirection buffers we allocated for DMA purposes. 879 * Free up any indirection buffers we allocated for DMA purposes.
879 */ 880 */
880 if (cmd->use_sg) 881 if (cmd->use_sg)
881 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len); 882 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
882 else if (cmd->request_buffer != req->buffer) 883 else if (cmd->request_buffer != req->buffer)
883 kfree(cmd->request_buffer); 884 kfree(cmd->request_buffer);
884 885
885 /* 886 /*
886 * Zero these out. They now point to freed memory, and it is 887 * Zero these out. They now point to freed memory, and it is
887 * dangerous to hang onto the pointers. 888 * dangerous to hang onto the pointers.
888 */ 889 */
889 cmd->buffer = NULL; 890 cmd->buffer = NULL;
890 cmd->bufflen = 0; 891 cmd->bufflen = 0;
891 cmd->request_buffer = NULL; 892 cmd->request_buffer = NULL;
892 cmd->request_bufflen = 0; 893 cmd->request_bufflen = 0;
893 } 894 }
894 895
895 /* 896 /*
896 * Function: scsi_io_completion() 897 * Function: scsi_io_completion()
897 * 898 *
898 * Purpose: Completion processing for block device I/O requests. 899 * Purpose: Completion processing for block device I/O requests.
899 * 900 *
900 * Arguments: cmd - command that is finished. 901 * Arguments: cmd - command that is finished.
901 * 902 *
902 * Lock status: Assumed that no lock is held upon entry. 903 * Lock status: Assumed that no lock is held upon entry.
903 * 904 *
904 * Returns: Nothing 905 * Returns: Nothing
905 * 906 *
906 * Notes: This function is matched in terms of capabilities to 907 * Notes: This function is matched in terms of capabilities to
907 * the function that created the scatter-gather list. 908 * the function that created the scatter-gather list.
908 * In other words, if there are no bounce buffers 909 * In other words, if there are no bounce buffers
909 * (the normal case for most drivers), we don't need 910 * (the normal case for most drivers), we don't need
910 * the logic to deal with cleaning up afterwards. 911 * the logic to deal with cleaning up afterwards.
911 * 912 *
912 * We must do one of several things here: 913 * We must do one of several things here:
913 * 914 *
914 * a) Call scsi_end_request. This will finish off the 915 * a) Call scsi_end_request. This will finish off the
915 * specified number of sectors. If we are done, the 916 * specified number of sectors. If we are done, the
916 * command block will be released, and the queue 917 * command block will be released, and the queue
917 * function will be goosed. If we are not done, then 918 * function will be goosed. If we are not done, then
918 * scsi_end_request will directly goose the queue. 919 * scsi_end_request will directly goose the queue.
919 * 920 *
920 * b) We can just use scsi_requeue_command() here. This would 921 * b) We can just use scsi_requeue_command() here. This would
921 * be used if we just wanted to retry, for example. 922 * be used if we just wanted to retry, for example.
922 */ 923 */
923 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes, 924 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
924 unsigned int block_bytes) 925 unsigned int block_bytes)
925 { 926 {
926 int result = cmd->result; 927 int result = cmd->result;
927 int this_count = cmd->bufflen; 928 int this_count = cmd->bufflen;
928 request_queue_t *q = cmd->device->request_queue; 929 request_queue_t *q = cmd->device->request_queue;
929 struct request *req = cmd->request; 930 struct request *req = cmd->request;
930 int clear_errors = 1; 931 int clear_errors = 1;
931 struct scsi_sense_hdr sshdr; 932 struct scsi_sense_hdr sshdr;
932 int sense_valid = 0; 933 int sense_valid = 0;
933 int sense_deferred = 0; 934 int sense_deferred = 0;
934 935
935 /* 936 /*
936 * Free up any indirection buffers we allocated for DMA purposes. 937 * Free up any indirection buffers we allocated for DMA purposes.
937 * For the case of a READ, we need to copy the data out of the 938 * For the case of a READ, we need to copy the data out of the
938 * bounce buffer and into the real buffer. 939 * bounce buffer and into the real buffer.
939 */ 940 */
940 if (cmd->use_sg) 941 if (cmd->use_sg)
941 scsi_free_sgtable(cmd->buffer, cmd->sglist_len); 942 scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
942 else if (cmd->buffer != req->buffer) { 943 else if (cmd->buffer != req->buffer) {
943 if (rq_data_dir(req) == READ) { 944 if (rq_data_dir(req) == READ) {
944 unsigned long flags; 945 unsigned long flags;
945 char *to = bio_kmap_irq(req->bio, &flags); 946 char *to = bio_kmap_irq(req->bio, &flags);
946 memcpy(to, cmd->buffer, cmd->bufflen); 947 memcpy(to, cmd->buffer, cmd->bufflen);
947 bio_kunmap_irq(to, &flags); 948 bio_kunmap_irq(to, &flags);
948 } 949 }
949 kfree(cmd->buffer); 950 kfree(cmd->buffer);
950 } 951 }
951 952
952 if (result) { 953 if (result) {
953 sense_valid = scsi_command_normalize_sense(cmd, &sshdr); 954 sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
954 if (sense_valid) 955 if (sense_valid)
955 sense_deferred = scsi_sense_is_deferred(&sshdr); 956 sense_deferred = scsi_sense_is_deferred(&sshdr);
956 } 957 }
957 if (blk_pc_request(req)) { /* SG_IO ioctl from block level */ 958 if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
958 req->errors = result; 959 req->errors = result;
959 if (result) { 960 if (result) {
960 clear_errors = 0; 961 clear_errors = 0;
961 if (sense_valid && req->sense) { 962 if (sense_valid && req->sense) {
962 /* 963 /*
963 * SG_IO wants current and deferred errors 964 * SG_IO wants current and deferred errors
964 */ 965 */
965 int len = 8 + cmd->sense_buffer[7]; 966 int len = 8 + cmd->sense_buffer[7];
966 967
967 if (len > SCSI_SENSE_BUFFERSIZE) 968 if (len > SCSI_SENSE_BUFFERSIZE)
968 len = SCSI_SENSE_BUFFERSIZE; 969 len = SCSI_SENSE_BUFFERSIZE;
969 memcpy(req->sense, cmd->sense_buffer, len); 970 memcpy(req->sense, cmd->sense_buffer, len);
970 req->sense_len = len; 971 req->sense_len = len;
971 } 972 }
972 } else 973 } else
973 req->data_len = cmd->resid; 974 req->data_len = cmd->resid;
974 } 975 }
975 976
976 /* 977 /*
977 * Zero these out. They now point to freed memory, and it is 978 * Zero these out. They now point to freed memory, and it is
978 * dangerous to hang onto the pointers. 979 * dangerous to hang onto the pointers.
979 */ 980 */
980 cmd->buffer = NULL; 981 cmd->buffer = NULL;
981 cmd->bufflen = 0; 982 cmd->bufflen = 0;
982 cmd->request_buffer = NULL; 983 cmd->request_buffer = NULL;
983 cmd->request_bufflen = 0; 984 cmd->request_bufflen = 0;
984 985
985 /* 986 /*
986 * Next deal with any sectors which we were able to correctly 987 * Next deal with any sectors which we were able to correctly
987 * handle. 988 * handle.
988 */ 989 */
989 if (good_bytes >= 0) { 990 if (good_bytes >= 0) {
990 SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n", 991 SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n",
991 req->nr_sectors, good_bytes)); 992 req->nr_sectors, good_bytes));
992 SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg)); 993 SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));
993 994
994 if (clear_errors) 995 if (clear_errors)
995 req->errors = 0; 996 req->errors = 0;
996 /* 997 /*
997 * If multiple sectors are requested in one buffer, then 998 * If multiple sectors are requested in one buffer, then
998 * they will have been finished off by the first command. 999 * they will have been finished off by the first command.
999 * If not, then we have a multi-buffer command. 1000 * If not, then we have a multi-buffer command.
1000 * 1001 *
1001 * If block_bytes != 0, it means we had a medium error 1002 * If block_bytes != 0, it means we had a medium error
1002 * of some sort, and that we want to mark some number of 1003 * of some sort, and that we want to mark some number of
1003 * sectors as not uptodate. Thus we want to inhibit 1004 * sectors as not uptodate. Thus we want to inhibit
1004 * requeueing right here - we will requeue down below 1005 * requeueing right here - we will requeue down below
1005 * when we handle the bad sectors. 1006 * when we handle the bad sectors.
1006 */ 1007 */
1007 1008
1008 /* 1009 /*
1009 * If the command completed without error, then either 1010 * If the command completed without error, then either
1010 * finish off the rest of the command, or start a new one. 1011 * finish off the rest of the command, or start a new one.
1011 */ 1012 */
1012 if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL) 1013 if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL)
1013 return; 1014 return;
1014 } 1015 }
1015 /* 1016 /*
1016 * Now, if we were good little boys and girls, Santa left us a request 1017 * Now, if we were good little boys and girls, Santa left us a request
1017 * sense buffer. We can extract information from this, so we 1018 * sense buffer. We can extract information from this, so we
1018 * can choose a block to remap, etc. 1019 * can choose a block to remap, etc.
1019 */ 1020 */
1020 if (sense_valid && !sense_deferred) { 1021 if (sense_valid && !sense_deferred) {
1021 switch (sshdr.sense_key) { 1022 switch (sshdr.sense_key) {
1022 case UNIT_ATTENTION: 1023 case UNIT_ATTENTION:
1023 if (cmd->device->removable) { 1024 if (cmd->device->removable) {
1024 /* detected disc change. set a bit 1025 /* detected disc change. set a bit
1025 * and quietly refuse further access. 1026 * and quietly refuse further access.
1026 */ 1027 */
1027 cmd->device->changed = 1; 1028 cmd->device->changed = 1;
1028 scsi_end_request(cmd, 0, 1029 scsi_end_request(cmd, 0,
1029 this_count, 1); 1030 this_count, 1);
1030 return; 1031 return;
1031 } else { 1032 } else {
1032 /* 1033 /*
1033 * Must have been a power glitch, or a 1034 * Must have been a power glitch, or a
1034 * bus reset. Could not have been a 1035 * bus reset. Could not have been a
1035 * media change, so we just retry the 1036 * media change, so we just retry the
1036 * request and see what happens. 1037 * request and see what happens.
1037 */ 1038 */
1038 scsi_requeue_command(q, cmd); 1039 scsi_requeue_command(q, cmd);
1039 return; 1040 return;
1040 } 1041 }
1041 break; 1042 break;
1042 case ILLEGAL_REQUEST: 1043 case ILLEGAL_REQUEST:
1043 /* 1044 /*
1044 * If we had an ILLEGAL REQUEST returned, then we may 1045 * If we had an ILLEGAL REQUEST returned, then we may
1045 * have performed an unsupported command. The only 1046 * have performed an unsupported command. The only
1046 * thing this should be would be a ten byte read where 1047 * thing this should be would be a ten byte read where
1047 * only a six byte read was supported. Also, on a 1048 * only a six byte read was supported. Also, on a
1048 * system where READ CAPACITY failed, we may have read 1049 * system where READ CAPACITY failed, we may have read
1049 * past the end of the disk. 1050 * past the end of the disk.
1050 */ 1051 */
1051 if ((cmd->device->use_10_for_rw && 1052 if ((cmd->device->use_10_for_rw &&
1052 sshdr.asc == 0x20 && sshdr.ascq == 0x00) && 1053 sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
1053 (cmd->cmnd[0] == READ_10 || 1054 (cmd->cmnd[0] == READ_10 ||
1054 cmd->cmnd[0] == WRITE_10)) { 1055 cmd->cmnd[0] == WRITE_10)) {
1055 cmd->device->use_10_for_rw = 0; 1056 cmd->device->use_10_for_rw = 0;
1056 /* 1057 /*
1057 * This will cause a retry with a 6-byte 1058 * This will cause a retry with a 6-byte
1058 * command. 1059 * command.
1059 */ 1060 */
1060 scsi_requeue_command(q, cmd); 1061 scsi_requeue_command(q, cmd);
1061 result = 0; 1062 result = 0;
1062 } else { 1063 } else {
1063 scsi_end_request(cmd, 0, this_count, 1); 1064 scsi_end_request(cmd, 0, this_count, 1);
1064 return; 1065 return;
1065 } 1066 }
1066 break; 1067 break;
1067 case NOT_READY: 1068 case NOT_READY:
1068 /* 1069 /*
1069 * If the device is in the process of becoming ready, 1070 * If the device is in the process of becoming ready,
1070 * retry. 1071 * retry.
1071 */ 1072 */
1072 if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) { 1073 if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) {
1073 scsi_requeue_command(q, cmd); 1074 scsi_requeue_command(q, cmd);
1074 return; 1075 return;
1075 } 1076 }
1076 if (!(req->flags & REQ_QUIET)) 1077 if (!(req->flags & REQ_QUIET))
1077 scmd_printk(KERN_INFO, cmd, 1078 scmd_printk(KERN_INFO, cmd,
1078 "Device not ready.\n"); 1079 "Device not ready.\n");
1079 scsi_end_request(cmd, 0, this_count, 1); 1080 scsi_end_request(cmd, 0, this_count, 1);
1080 return; 1081 return;
1081 case VOLUME_OVERFLOW: 1082 case VOLUME_OVERFLOW:
1082 if (!(req->flags & REQ_QUIET)) { 1083 if (!(req->flags & REQ_QUIET)) {
1083 scmd_printk(KERN_INFO, cmd, 1084 scmd_printk(KERN_INFO, cmd,
1084 "Volume overflow, CDB: "); 1085 "Volume overflow, CDB: ");
1085 __scsi_print_command(cmd->data_cmnd); 1086 __scsi_print_command(cmd->data_cmnd);
1086 scsi_print_sense("", cmd); 1087 scsi_print_sense("", cmd);
1087 } 1088 }
1088 scsi_end_request(cmd, 0, block_bytes, 1); 1089 scsi_end_request(cmd, 0, block_bytes, 1);
1089 return; 1090 return;
1090 default: 1091 default:
1091 break; 1092 break;
1092 } 1093 }
1093 } /* driver byte != 0 */ 1094 } /* driver byte != 0 */
1094 if (host_byte(result) == DID_RESET) { 1095 if (host_byte(result) == DID_RESET) {
1095 /* 1096 /*
1096 * Third party bus reset or reset for error 1097 * Third party bus reset or reset for error
1097 * recovery reasons. Just retry the request 1098 * recovery reasons. Just retry the request
1098 * and see what happens. 1099 * and see what happens.
1099 */ 1100 */
1100 scsi_requeue_command(q, cmd); 1101 scsi_requeue_command(q, cmd);
1101 return; 1102 return;
1102 } 1103 }
1103 if (result) { 1104 if (result) {
1104 if (!(req->flags & REQ_QUIET)) { 1105 if (!(req->flags & REQ_QUIET)) {
1105 scmd_printk(KERN_INFO, cmd, 1106 scmd_printk(KERN_INFO, cmd,
1106 "SCSI error: return code = 0x%x\n", result); 1107 "SCSI error: return code = 0x%x\n", result);
1107 1108
1108 if (driver_byte(result) & DRIVER_SENSE) 1109 if (driver_byte(result) & DRIVER_SENSE)
1109 scsi_print_sense("", cmd); 1110 scsi_print_sense("", cmd);
1110 } 1111 }
1111 /* 1112 /*
1112 * Mark a single buffer as not uptodate. Queue the remainder. 1113 * Mark a single buffer as not uptodate. Queue the remainder.
1113 * We sometimes get this cruft in the event that a medium error 1114 * We sometimes get this cruft in the event that a medium error
1114 * isn't properly reported. 1115 * isn't properly reported.
1115 */ 1116 */
1116 block_bytes = req->hard_cur_sectors << 9; 1117 block_bytes = req->hard_cur_sectors << 9;
1117 if (!block_bytes) 1118 if (!block_bytes)
1118 block_bytes = req->data_len; 1119 block_bytes = req->data_len;
1119 scsi_end_request(cmd, 0, block_bytes, 1); 1120 scsi_end_request(cmd, 0, block_bytes, 1);
1120 } 1121 }
1121 } 1122 }
1122 EXPORT_SYMBOL(scsi_io_completion); 1123 EXPORT_SYMBOL(scsi_io_completion);
1123 1124
1124 /* 1125 /*
1125 * Function: scsi_init_io() 1126 * Function: scsi_init_io()
1126 * 1127 *
1127 * Purpose: SCSI I/O initialize function. 1128 * Purpose: SCSI I/O initialize function.
1128 * 1129 *
1129 * Arguments: cmd - Command descriptor we wish to initialize 1130 * Arguments: cmd - Command descriptor we wish to initialize
1130 * 1131 *
1131 * Returns: 0 on success 1132 * Returns: 0 on success
1132 * BLKPREP_DEFER if the failure is retryable 1133 * BLKPREP_DEFER if the failure is retryable
1133 * BLKPREP_KILL if the failure is fatal 1134 * BLKPREP_KILL if the failure is fatal
1134 */ 1135 */
1135 static int scsi_init_io(struct scsi_cmnd *cmd) 1136 static int scsi_init_io(struct scsi_cmnd *cmd)
1136 { 1137 {
1137 struct request *req = cmd->request; 1138 struct request *req = cmd->request;
1138 struct scatterlist *sgpnt; 1139 struct scatterlist *sgpnt;
1139 int count; 1140 int count;
1140 1141
1141 /* 1142 /*
1142 * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer 1143 * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
1143 */ 1144 */
1144 if ((req->flags & REQ_BLOCK_PC) && !req->bio) { 1145 if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
1145 cmd->request_bufflen = req->data_len; 1146 cmd->request_bufflen = req->data_len;
1146 cmd->request_buffer = req->data; 1147 cmd->request_buffer = req->data;
1147 req->buffer = req->data; 1148 req->buffer = req->data;
1148 cmd->use_sg = 0; 1149 cmd->use_sg = 0;
1149 return 0; 1150 return 0;
1150 } 1151 }
1151 1152
1152 /* 1153 /*
1153 * we used to not use scatter-gather for single segment request, 1154 * we used to not use scatter-gather for single segment request,
1154 * but now we do (it makes highmem I/O easier to support without 1155 * but now we do (it makes highmem I/O easier to support without
1155 * kmapping pages) 1156 * kmapping pages)
1156 */ 1157 */
1157 cmd->use_sg = req->nr_phys_segments; 1158 cmd->use_sg = req->nr_phys_segments;
1158 1159
1159 /* 1160 /*
1160 * if sg table allocation fails, requeue request later. 1161 * if sg table allocation fails, requeue request later.
1161 */ 1162 */
1162 sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC); 1163 sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
1163 if (unlikely(!sgpnt)) { 1164 if (unlikely(!sgpnt)) {
1164 scsi_unprep_request(req); 1165 scsi_unprep_request(req);
1165 return BLKPREP_DEFER; 1166 return BLKPREP_DEFER;
1166 } 1167 }
1167 1168
1168 cmd->request_buffer = (char *) sgpnt; 1169 cmd->request_buffer = (char *) sgpnt;
1169 cmd->request_bufflen = req->nr_sectors << 9; 1170 cmd->request_bufflen = req->nr_sectors << 9;
1170 if (blk_pc_request(req)) 1171 if (blk_pc_request(req))
1171 cmd->request_bufflen = req->data_len; 1172 cmd->request_bufflen = req->data_len;
1172 req->buffer = NULL; 1173 req->buffer = NULL;
1173 1174
1174 /* 1175 /*
1175 * Next, walk the list, and fill in the addresses and sizes of 1176 * Next, walk the list, and fill in the addresses and sizes of
1176 * each segment. 1177 * each segment.
1177 */ 1178 */
1178 count = blk_rq_map_sg(req->q, req, cmd->request_buffer); 1179 count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
1179 1180
1180 /* 1181 /*
1181 * mapped well, send it off 1182 * mapped well, send it off
1182 */ 1183 */
1183 if (likely(count <= cmd->use_sg)) { 1184 if (likely(count <= cmd->use_sg)) {
1184 cmd->use_sg = count; 1185 cmd->use_sg = count;
1185 return 0; 1186 return 0;
1186 } 1187 }
1187 1188
1188 printk(KERN_ERR "Incorrect number of segments after building list\n"); 1189 printk(KERN_ERR "Incorrect number of segments after building list\n");
1189 printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg); 1190 printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
1190 printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors, 1191 printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
1191 req->current_nr_sectors); 1192 req->current_nr_sectors);
1192 1193
1193 /* release the command and kill it */ 1194 /* release the command and kill it */
1194 scsi_release_buffers(cmd); 1195 scsi_release_buffers(cmd);
1195 scsi_put_command(cmd); 1196 scsi_put_command(cmd);
1196 return BLKPREP_KILL; 1197 return BLKPREP_KILL;
1197 } 1198 }
1198 1199
1199 static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk, 1200 static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
1200 sector_t *error_sector) 1201 sector_t *error_sector)
1201 { 1202 {
1202 struct scsi_device *sdev = q->queuedata; 1203 struct scsi_device *sdev = q->queuedata;
1203 struct scsi_driver *drv; 1204 struct scsi_driver *drv;
1204 1205
1205 if (sdev->sdev_state != SDEV_RUNNING) 1206 if (sdev->sdev_state != SDEV_RUNNING)
1206 return -ENXIO; 1207 return -ENXIO;
1207 1208
1208 drv = *(struct scsi_driver **) disk->private_data; 1209 drv = *(struct scsi_driver **) disk->private_data;
1209 if (drv->issue_flush) 1210 if (drv->issue_flush)
1210 return drv->issue_flush(&sdev->sdev_gendev, error_sector); 1211 return drv->issue_flush(&sdev->sdev_gendev, error_sector);
1211 1212
1212 return -EOPNOTSUPP; 1213 return -EOPNOTSUPP;
1213 } 1214 }
1214 1215
/* Completion callback for REQ_BLOCK_PC commands set up by
 * scsi_setup_blk_pc_cmnd(). */
static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
{
	BUG_ON(!blk_pc_request(cmd->request));
	/*
	 * This will complete the whole command with uptodate=1 so
	 * as far as the block layer is concerned the command completed
	 * successfully. Since this is a REQ_BLOCK_PC command the
	 * caller should check the request's errors value
	 */
	scsi_io_completion(cmd, cmd->bufflen, 0);
}
1226 1227
1227 static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd) 1228 static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
1228 { 1229 {
1229 struct request *req = cmd->request; 1230 struct request *req = cmd->request;
1230 1231
1231 BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd)); 1232 BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
1232 memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd)); 1233 memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
1233 cmd->cmd_len = req->cmd_len; 1234 cmd->cmd_len = req->cmd_len;
1234 if (!req->data_len) 1235 if (!req->data_len)
1235 cmd->sc_data_direction = DMA_NONE; 1236 cmd->sc_data_direction = DMA_NONE;
1236 else if (rq_data_dir(req) == WRITE) 1237 else if (rq_data_dir(req) == WRITE)
1237 cmd->sc_data_direction = DMA_TO_DEVICE; 1238 cmd->sc_data_direction = DMA_TO_DEVICE;
1238 else 1239 else
1239 cmd->sc_data_direction = DMA_FROM_DEVICE; 1240 cmd->sc_data_direction = DMA_FROM_DEVICE;
1240 1241
1241 cmd->transfersize = req->data_len; 1242 cmd->transfersize = req->data_len;
1242 cmd->allowed = req->retries; 1243 cmd->allowed = req->retries;
1243 cmd->timeout_per_command = req->timeout; 1244 cmd->timeout_per_command = req->timeout;
1244 cmd->done = scsi_blk_pc_done; 1245 cmd->done = scsi_blk_pc_done;
1245 } 1246 }
1246 1247
/*
 * Block-layer prep_rq_fn for SCSI queues: turn a struct request into a
 * fully initialized struct scsi_cmnd.  Returns BLKPREP_OK when the
 * request is ready to issue, BLKPREP_DEFER to retry later (queue is
 * plugged if idle), or BLKPREP_KILL to fail the request with
 * DID_NO_CONNECT.  Called with the queue lock held.
 */
static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_cmnd *cmd;
	int specials_only = 0;

	/*
	 * Just check to see if the device is online.  If it isn't, we
	 * refuse to process any commands.  The device must be brought
	 * online before trying any recovery commands
	 */
	if (unlikely(!scsi_device_online(sdev))) {
		sdev_printk(KERN_ERR, sdev,
			    "rejecting I/O to offline device\n");
		goto kill;
	}
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		/* OK, we're not in a running state don't prep
		 * user commands */
		if (sdev->sdev_state == SDEV_DEL) {
			/* Device is fully deleted, no commands
			 * at all allowed down */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			goto kill;
		}
		/* OK, we only allow special commands (i.e. not
		 * user initiated ones */
		specials_only = sdev->sdev_state;
	}

	/*
	 * Find the actual device driver associated with this command.
	 * The SPECIAL requests are things like character device or
	 * ioctls, which did not originate from ll_rw_blk.  Note that
	 * the special field is also used to indicate the cmd for
	 * the remainder of a partially fulfilled request that can 
	 * come up when there is a medium error.  We have to treat
	 * these two cases differently.  We differentiate by looking
	 * at request->cmd, as this tells us the real story.
	 */
	if (req->flags & REQ_SPECIAL && req->special) {
		struct scsi_request *sreq = req->special;

		/* legacy scsi_request path: build the cmd from the sreq */
		if (sreq->sr_magic == SCSI_REQ_MAGIC) {
			cmd = scsi_get_command(sreq->sr_device, GFP_ATOMIC);
			if (unlikely(!cmd))
				goto defer;
			scsi_init_cmd_from_req(cmd, sreq);
		} else
			cmd = req->special;
	} else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {

		/* device is quiescing/blocked/being removed: only
		 * REQ_SPECIAL requests may pass */
		if(unlikely(specials_only) && !(req->flags & REQ_SPECIAL)) {
			if(specials_only == SDEV_QUIESCE ||
					specials_only == SDEV_BLOCK)
				goto defer;
			
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to device being removed\n");
			goto kill;
		}
			
			
		/*
		 * Now try and find a command block that we can use.
		 */
		if (!req->special) {
			cmd = scsi_get_command(sdev, GFP_ATOMIC);
			if (unlikely(!cmd))
				goto defer;
		} else
			cmd = req->special;
		
		/* pull a tag out of the request if we have one */
		cmd->tag = req->tag;
	} else {
		blk_dump_rq_flags(req, "SCSI bad req");
		goto kill;
	}
	
	/* note the overloading of req->special.  When the tag
	 * is active it always means cmd.  If the tag goes
	 * back for re-queueing, it may be reset */
	req->special = cmd;
	cmd->request = req;
	
	/*
	 * FIXME: drop the lock here because the functions below
	 * expect to be called without the queue lock held.  Also,
	 * previously, we dequeued the request before dropping the
	 * lock.  We hope REQ_STARTED prevents anything untoward from
	 * happening now.
	 */
	if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
		int ret;

		/*
		 * This will do a couple of things:
		 *  1) Fill in the actual SCSI command.
		 *  2) Fill in any other upper-level specific fields
		 * (timeout).
		 *
		 * If this returns 0, it means that the request failed
		 * (reading past end of disk, reading offline device,
		 * etc).   This won't actually talk to the device, but
		 * some kinds of consistency checking may cause the	
		 * request to be rejected immediately.
		 */

		/* 
		 * This sets up the scatter-gather table (allocating if
		 * required).
		 */
		ret = scsi_init_io(cmd);
		switch(ret) {
			/* For BLKPREP_KILL/DEFER the cmd was released */
		case BLKPREP_KILL:
			goto kill;
		case BLKPREP_DEFER:
			goto defer;
		}
		
		/*
		 * Initialize the actual SCSI command for this request.
		 */
		if (req->flags & REQ_BLOCK_PC) {
			scsi_setup_blk_pc_cmnd(cmd);
		} else if (req->rq_disk) {
			struct scsi_driver *drv;

			drv = *(struct scsi_driver **)req->rq_disk->private_data;
			if (unlikely(!drv->init_command(cmd))) {
				scsi_release_buffers(cmd);
				scsi_put_command(cmd);
				goto kill;
			}
		}
	}

	/*
	 * The request is now prepped, no need to come back here
	 */
	req->flags |= REQ_DONTPREP;
	return BLKPREP_OK;

 defer:
	/* If we defer, the elv_next_request() returns NULL, but the
	 * queue must be restarted, so we plug here if no returning
	 * command will automatically do that. */
	if (sdev->device_busy == 0)
		blk_plug_device(q);
	return BLKPREP_DEFER;
 kill:
	req->errors = DID_NO_CONNECT << 16;
	return BLKPREP_KILL;
}
1404 1405
1405 /* 1406 /*
1406 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else 1407 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
1407 * return 0. 1408 * return 0.
1408 * 1409 *
1409 * Called with the queue_lock held. 1410 * Called with the queue_lock held.
1410 */ 1411 */
1411 static inline int scsi_dev_queue_ready(struct request_queue *q, 1412 static inline int scsi_dev_queue_ready(struct request_queue *q,
1412 struct scsi_device *sdev) 1413 struct scsi_device *sdev)
1413 { 1414 {
1414 if (sdev->device_busy >= sdev->queue_depth) 1415 if (sdev->device_busy >= sdev->queue_depth)
1415 return 0; 1416 return 0;
1416 if (sdev->device_busy == 0 && sdev->device_blocked) { 1417 if (sdev->device_busy == 0 && sdev->device_blocked) {
1417 /* 1418 /*
1418 * unblock after device_blocked iterates to zero 1419 * unblock after device_blocked iterates to zero
1419 */ 1420 */
1420 if (--sdev->device_blocked == 0) { 1421 if (--sdev->device_blocked == 0) {
1421 SCSI_LOG_MLQUEUE(3, 1422 SCSI_LOG_MLQUEUE(3,
1422 sdev_printk(KERN_INFO, sdev, 1423 sdev_printk(KERN_INFO, sdev,
1423 "unblocking device at zero depth\n")); 1424 "unblocking device at zero depth\n"));
1424 } else { 1425 } else {
1425 blk_plug_device(q); 1426 blk_plug_device(q);
1426 return 0; 1427 return 0;
1427 } 1428 }
1428 } 1429 }
1429 if (sdev->device_blocked) 1430 if (sdev->device_blocked)
1430 return 0; 1431 return 0;
1431 1432
1432 return 1; 1433 return 1;
1433 } 1434 }
1434 1435
1435 /* 1436 /*
1436 * scsi_host_queue_ready: if we can send requests to shost, return 1 else 1437 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1437 * return 0. We must end up running the queue again whenever 0 is 1438 * return 0. We must end up running the queue again whenever 0 is
1438 * returned, else IO can hang. 1439 * returned, else IO can hang.
1439 * 1440 *
1440 * Called with host_lock held. 1441 * Called with host_lock held.
1441 */ 1442 */
1442 static inline int scsi_host_queue_ready(struct request_queue *q, 1443 static inline int scsi_host_queue_ready(struct request_queue *q,
1443 struct Scsi_Host *shost, 1444 struct Scsi_Host *shost,
1444 struct scsi_device *sdev) 1445 struct scsi_device *sdev)
1445 { 1446 {
1446 if (scsi_host_in_recovery(shost)) 1447 if (scsi_host_in_recovery(shost))
1447 return 0; 1448 return 0;
1448 if (shost->host_busy == 0 && shost->host_blocked) { 1449 if (shost->host_busy == 0 && shost->host_blocked) {
1449 /* 1450 /*
1450 * unblock after host_blocked iterates to zero 1451 * unblock after host_blocked iterates to zero
1451 */ 1452 */
1452 if (--shost->host_blocked == 0) { 1453 if (--shost->host_blocked == 0) {
1453 SCSI_LOG_MLQUEUE(3, 1454 SCSI_LOG_MLQUEUE(3,
1454 printk("scsi%d unblocking host at zero depth\n", 1455 printk("scsi%d unblocking host at zero depth\n",
1455 shost->host_no)); 1456 shost->host_no));
1456 } else { 1457 } else {
1457 blk_plug_device(q); 1458 blk_plug_device(q);
1458 return 0; 1459 return 0;
1459 } 1460 }
1460 } 1461 }
1461 if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) || 1462 if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
1462 shost->host_blocked || shost->host_self_blocked) { 1463 shost->host_blocked || shost->host_self_blocked) {
1463 if (list_empty(&sdev->starved_entry)) 1464 if (list_empty(&sdev->starved_entry))
1464 list_add_tail(&sdev->starved_entry, &shost->starved_list); 1465 list_add_tail(&sdev->starved_entry, &shost->starved_list);
1465 return 0; 1466 return 0;
1466 } 1467 }
1467 1468
1468 /* We're OK to process the command, so we can't be starved */ 1469 /* We're OK to process the command, so we can't be starved */
1469 if (!list_empty(&sdev->starved_entry)) 1470 if (!list_empty(&sdev->starved_entry))
1470 list_del_init(&sdev->starved_entry); 1471 list_del_init(&sdev->starved_entry);
1471 1472
1472 return 1; 1473 return 1;
1473 } 1474 }
1474 1475
1475 /* 1476 /*
1476 * Kill a request for a dead device 1477 * Kill a request for a dead device
1477 */ 1478 */
1478 static void scsi_kill_request(struct request *req, request_queue_t *q) 1479 static void scsi_kill_request(struct request *req, request_queue_t *q)
1479 { 1480 {
1480 struct scsi_cmnd *cmd = req->special; 1481 struct scsi_cmnd *cmd = req->special;
1481 1482
1482 blkdev_dequeue_request(req); 1483 blkdev_dequeue_request(req);
1483 1484
1484 if (unlikely(cmd == NULL)) { 1485 if (unlikely(cmd == NULL)) {
1485 printk(KERN_CRIT "impossible request in %s.\n", 1486 printk(KERN_CRIT "impossible request in %s.\n",
1486 __FUNCTION__); 1487 __FUNCTION__);
1487 BUG(); 1488 BUG();
1488 } 1489 }
1489 1490
1490 scsi_init_cmd_errh(cmd); 1491 scsi_init_cmd_errh(cmd);
1491 cmd->result = DID_NO_CONNECT << 16; 1492 cmd->result = DID_NO_CONNECT << 16;
1492 atomic_inc(&cmd->device->iorequest_cnt); 1493 atomic_inc(&cmd->device->iorequest_cnt);
1493 __scsi_done(cmd); 1494 __scsi_done(cmd);
1494 } 1495 }
1495 1496
1496 static void scsi_softirq_done(struct request *rq) 1497 static void scsi_softirq_done(struct request *rq)
1497 { 1498 {
1498 struct scsi_cmnd *cmd = rq->completion_data; 1499 struct scsi_cmnd *cmd = rq->completion_data;
1499 unsigned long wait_for = cmd->allowed * cmd->timeout_per_command; 1500 unsigned long wait_for = cmd->allowed * cmd->timeout_per_command;
1500 int disposition; 1501 int disposition;
1501 1502
1502 INIT_LIST_HEAD(&cmd->eh_entry); 1503 INIT_LIST_HEAD(&cmd->eh_entry);
1503 1504
1504 disposition = scsi_decide_disposition(cmd); 1505 disposition = scsi_decide_disposition(cmd);
1505 if (disposition != SUCCESS && 1506 if (disposition != SUCCESS &&
1506 time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) { 1507 time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
1507 sdev_printk(KERN_ERR, cmd->device, 1508 sdev_printk(KERN_ERR, cmd->device,
1508 "timing out command, waited %lus\n", 1509 "timing out command, waited %lus\n",
1509 wait_for/HZ); 1510 wait_for/HZ);
1510 disposition = SUCCESS; 1511 disposition = SUCCESS;
1511 } 1512 }
1512 1513
1513 scsi_log_completion(cmd, disposition); 1514 scsi_log_completion(cmd, disposition);
1514 1515
1515 switch (disposition) { 1516 switch (disposition) {
1516 case SUCCESS: 1517 case SUCCESS:
1517 scsi_finish_command(cmd); 1518 scsi_finish_command(cmd);
1518 break; 1519 break;
1519 case NEEDS_RETRY: 1520 case NEEDS_RETRY:
1520 scsi_retry_command(cmd); 1521 scsi_retry_command(cmd);
1521 break; 1522 break;
1522 case ADD_TO_MLQUEUE: 1523 case ADD_TO_MLQUEUE:
1523 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); 1524 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
1524 break; 1525 break;
1525 default: 1526 default:
1526 if (!scsi_eh_scmd_add(cmd, 0)) 1527 if (!scsi_eh_scmd_add(cmd, 0))
1527 scsi_finish_command(cmd); 1528 scsi_finish_command(cmd);
1528 } 1529 }
1529 } 1530 }
1530 1531
1531 /* 1532 /*
1532 * Function: scsi_request_fn() 1533 * Function: scsi_request_fn()
1533 * 1534 *
1534 * Purpose: Main strategy routine for SCSI. 1535 * Purpose: Main strategy routine for SCSI.
1535 * 1536 *
1536 * Arguments: q - Pointer to actual queue. 1537 * Arguments: q - Pointer to actual queue.
1537 * 1538 *
1538 * Returns: Nothing 1539 * Returns: Nothing
1539 * 1540 *
1540 * Lock status: IO request lock assumed to be held when called. 1541 * Lock status: IO request lock assumed to be held when called.
1541 */ 1542 */
1542 static void scsi_request_fn(struct request_queue *q) 1543 static void scsi_request_fn(struct request_queue *q)
1543 { 1544 {
1544 struct scsi_device *sdev = q->queuedata; 1545 struct scsi_device *sdev = q->queuedata;
1545 struct Scsi_Host *shost; 1546 struct Scsi_Host *shost;
1546 struct scsi_cmnd *cmd; 1547 struct scsi_cmnd *cmd;
1547 struct request *req; 1548 struct request *req;
1548 1549
1549 if (!sdev) { 1550 if (!sdev) {
1550 printk("scsi: killing requests for dead queue\n"); 1551 printk("scsi: killing requests for dead queue\n");
1551 while ((req = elv_next_request(q)) != NULL) 1552 while ((req = elv_next_request(q)) != NULL)
1552 scsi_kill_request(req, q); 1553 scsi_kill_request(req, q);
1553 return; 1554 return;
1554 } 1555 }
1555 1556
1556 if(!get_device(&sdev->sdev_gendev)) 1557 if(!get_device(&sdev->sdev_gendev))
1557 /* We must be tearing the block queue down already */ 1558 /* We must be tearing the block queue down already */
1558 return; 1559 return;
1559 1560
1560 /* 1561 /*
1561 * To start with, we keep looping until the queue is empty, or until 1562 * To start with, we keep looping until the queue is empty, or until
1562 * the host is no longer able to accept any more requests. 1563 * the host is no longer able to accept any more requests.
1563 */ 1564 */
1564 shost = sdev->host; 1565 shost = sdev->host;
1565 while (!blk_queue_plugged(q)) { 1566 while (!blk_queue_plugged(q)) {
1566 int rtn; 1567 int rtn;
1567 /* 1568 /*
1568 * get next queueable request. We do this early to make sure 1569 * get next queueable request. We do this early to make sure
1569 * that the request is fully prepared even if we cannot 1570 * that the request is fully prepared even if we cannot
1570 * accept it. 1571 * accept it.
1571 */ 1572 */
1572 req = elv_next_request(q); 1573 req = elv_next_request(q);
1573 if (!req || !scsi_dev_queue_ready(q, sdev)) 1574 if (!req || !scsi_dev_queue_ready(q, sdev))
1574 break; 1575 break;
1575 1576
1576 if (unlikely(!scsi_device_online(sdev))) { 1577 if (unlikely(!scsi_device_online(sdev))) {
1577 sdev_printk(KERN_ERR, sdev, 1578 sdev_printk(KERN_ERR, sdev,
1578 "rejecting I/O to offline device\n"); 1579 "rejecting I/O to offline device\n");
1579 scsi_kill_request(req, q); 1580 scsi_kill_request(req, q);
1580 continue; 1581 continue;
1581 } 1582 }
1582 1583
1583 1584
1584 /* 1585 /*
1585 * Remove the request from the request list. 1586 * Remove the request from the request list.
1586 */ 1587 */
1587 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req))) 1588 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1588 blkdev_dequeue_request(req); 1589 blkdev_dequeue_request(req);
1589 sdev->device_busy++; 1590 sdev->device_busy++;
1590 1591
1591 spin_unlock(q->queue_lock); 1592 spin_unlock(q->queue_lock);
1592 cmd = req->special; 1593 cmd = req->special;
1593 if (unlikely(cmd == NULL)) { 1594 if (unlikely(cmd == NULL)) {
1594 printk(KERN_CRIT "impossible request in %s.\n" 1595 printk(KERN_CRIT "impossible request in %s.\n"
1595 "please mail a stack trace to " 1596 "please mail a stack trace to "
1596 "linux-scsi@vger.kernel.org", 1597 "linux-scsi@vger.kernel.org",
1597 __FUNCTION__); 1598 __FUNCTION__);
1598 BUG(); 1599 BUG();
1599 } 1600 }
1600 spin_lock(shost->host_lock); 1601 spin_lock(shost->host_lock);
1601 1602
1602 if (!scsi_host_queue_ready(q, shost, sdev)) 1603 if (!scsi_host_queue_ready(q, shost, sdev))
1603 goto not_ready; 1604 goto not_ready;
1604 if (sdev->single_lun) { 1605 if (sdev->single_lun) {
1605 if (scsi_target(sdev)->starget_sdev_user && 1606 if (scsi_target(sdev)->starget_sdev_user &&
1606 scsi_target(sdev)->starget_sdev_user != sdev) 1607 scsi_target(sdev)->starget_sdev_user != sdev)
1607 goto not_ready; 1608 goto not_ready;
1608 scsi_target(sdev)->starget_sdev_user = sdev; 1609 scsi_target(sdev)->starget_sdev_user = sdev;
1609 } 1610 }
1610 shost->host_busy++; 1611 shost->host_busy++;
1611 1612
1612 /* 1613 /*
1613 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will 1614 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
1614 * take the lock again. 1615 * take the lock again.
1615 */ 1616 */
1616 spin_unlock_irq(shost->host_lock); 1617 spin_unlock_irq(shost->host_lock);
1617 1618
1618 /* 1619 /*
1619 * Finally, initialize any error handling parameters, and set up 1620 * Finally, initialize any error handling parameters, and set up
1620 * the timers for timeouts. 1621 * the timers for timeouts.
1621 */ 1622 */
1622 scsi_init_cmd_errh(cmd); 1623 scsi_init_cmd_errh(cmd);
1623 1624
1624 /* 1625 /*
1625 * Dispatch the command to the low-level driver. 1626 * Dispatch the command to the low-level driver.
1626 */ 1627 */
1627 rtn = scsi_dispatch_cmd(cmd); 1628 rtn = scsi_dispatch_cmd(cmd);
1628 spin_lock_irq(q->queue_lock); 1629 spin_lock_irq(q->queue_lock);
1629 if(rtn) { 1630 if(rtn) {
1630 /* we're refusing the command; because of 1631 /* we're refusing the command; because of
1631 * the way locks get dropped, we need to 1632 * the way locks get dropped, we need to
1632 * check here if plugging is required */ 1633 * check here if plugging is required */
1633 if(sdev->device_busy == 0) 1634 if(sdev->device_busy == 0)
1634 blk_plug_device(q); 1635 blk_plug_device(q);
1635 1636
1636 break; 1637 break;
1637 } 1638 }
1638 } 1639 }
1639 1640
1640 goto out; 1641 goto out;
1641 1642
1642 not_ready: 1643 not_ready:
1643 spin_unlock_irq(shost->host_lock); 1644 spin_unlock_irq(shost->host_lock);
1644 1645
1645 /* 1646 /*
1646 * lock q, handle tag, requeue req, and decrement device_busy. We 1647 * lock q, handle tag, requeue req, and decrement device_busy. We
1647 * must return with queue_lock held. 1648 * must return with queue_lock held.
1648 * 1649 *
1649 * Decrementing device_busy without checking it is OK, as all such 1650 * Decrementing device_busy without checking it is OK, as all such
1650 * cases (host limits or settings) should run the queue at some 1651 * cases (host limits or settings) should run the queue at some
1651 * later time. 1652 * later time.
1652 */ 1653 */
1653 spin_lock_irq(q->queue_lock); 1654 spin_lock_irq(q->queue_lock);
1654 blk_requeue_request(q, req); 1655 blk_requeue_request(q, req);
1655 sdev->device_busy--; 1656 sdev->device_busy--;
1656 if(sdev->device_busy == 0) 1657 if(sdev->device_busy == 0)
1657 blk_plug_device(q); 1658 blk_plug_device(q);
1658 out: 1659 out:
1659 /* must be careful here...if we trigger the ->remove() function 1660 /* must be careful here...if we trigger the ->remove() function
1660 * we cannot be holding the q lock */ 1661 * we cannot be holding the q lock */
1661 spin_unlock_irq(q->queue_lock); 1662 spin_unlock_irq(q->queue_lock);
1662 put_device(&sdev->sdev_gendev); 1663 put_device(&sdev->sdev_gendev);
1663 spin_lock_irq(q->queue_lock); 1664 spin_lock_irq(q->queue_lock);
1664 } 1665 }
1665 1666
1666 u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) 1667 u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1667 { 1668 {
1668 struct device *host_dev; 1669 struct device *host_dev;
1669 u64 bounce_limit = 0xffffffff; 1670 u64 bounce_limit = 0xffffffff;
1670 1671
1671 if (shost->unchecked_isa_dma) 1672 if (shost->unchecked_isa_dma)
1672 return BLK_BOUNCE_ISA; 1673 return BLK_BOUNCE_ISA;
1673 /* 1674 /*
1674 * Platforms with virtual-DMA translation 1675 * Platforms with virtual-DMA translation
1675 * hardware have no practical limit. 1676 * hardware have no practical limit.
1676 */ 1677 */
1677 if (!PCI_DMA_BUS_IS_PHYS) 1678 if (!PCI_DMA_BUS_IS_PHYS)
1678 return BLK_BOUNCE_ANY; 1679 return BLK_BOUNCE_ANY;
1679 1680
1680 host_dev = scsi_get_device(shost); 1681 host_dev = scsi_get_device(shost);
1681 if (host_dev && host_dev->dma_mask) 1682 if (host_dev && host_dev->dma_mask)
1682 bounce_limit = *host_dev->dma_mask; 1683 bounce_limit = *host_dev->dma_mask;
1683 1684
1684 return bounce_limit; 1685 return bounce_limit;
1685 } 1686 }
1686 EXPORT_SYMBOL(scsi_calculate_bounce_limit); 1687 EXPORT_SYMBOL(scsi_calculate_bounce_limit);
1687 1688
1688 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) 1689 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1689 { 1690 {
1690 struct Scsi_Host *shost = sdev->host; 1691 struct Scsi_Host *shost = sdev->host;
1691 struct request_queue *q; 1692 struct request_queue *q;
1692 1693
1693 q = blk_init_queue(scsi_request_fn, NULL); 1694 q = blk_init_queue(scsi_request_fn, NULL);
1694 if (!q) 1695 if (!q)
1695 return NULL; 1696 return NULL;
1696 1697
1697 blk_queue_prep_rq(q, scsi_prep_fn); 1698 blk_queue_prep_rq(q, scsi_prep_fn);
1698 1699
1699 blk_queue_max_hw_segments(q, shost->sg_tablesize); 1700 blk_queue_max_hw_segments(q, shost->sg_tablesize);
1700 blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS); 1701 blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
1701 blk_queue_max_sectors(q, shost->max_sectors); 1702 blk_queue_max_sectors(q, shost->max_sectors);
1702 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); 1703 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1703 blk_queue_segment_boundary(q, shost->dma_boundary); 1704 blk_queue_segment_boundary(q, shost->dma_boundary);
1704 blk_queue_issue_flush_fn(q, scsi_issue_flush_fn); 1705 blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
1705 blk_queue_softirq_done(q, scsi_softirq_done); 1706 blk_queue_softirq_done(q, scsi_softirq_done);
1706 1707
1707 if (!shost->use_clustering) 1708 if (!shost->use_clustering)
1708 clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); 1709 clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
1709 return q; 1710 return q;
1710 } 1711 }
1711 1712
1712 void scsi_free_queue(struct request_queue *q) 1713 void scsi_free_queue(struct request_queue *q)
1713 { 1714 {
1714 blk_cleanup_queue(q); 1715 blk_cleanup_queue(q);
1715 } 1716 }
1716 1717
1717 /* 1718 /*
1718 * Function: scsi_block_requests() 1719 * Function: scsi_block_requests()
1719 * 1720 *
1720 * Purpose: Utility function used by low-level drivers to prevent further 1721 * Purpose: Utility function used by low-level drivers to prevent further
1721 * commands from being queued to the device. 1722 * commands from being queued to the device.
1722 * 1723 *
1723 * Arguments: shost - Host in question 1724 * Arguments: shost - Host in question
1724 * 1725 *
1725 * Returns: Nothing 1726 * Returns: Nothing
1726 * 1727 *
1727 * Lock status: No locks are assumed held. 1728 * Lock status: No locks are assumed held.
1728 * 1729 *
1729 * Notes: There is no timer nor any other means by which the requests 1730 * Notes: There is no timer nor any other means by which the requests
1730 * get unblocked other than the low-level driver calling 1731 * get unblocked other than the low-level driver calling
1731 * scsi_unblock_requests(). 1732 * scsi_unblock_requests().
1732 */ 1733 */
1733 void scsi_block_requests(struct Scsi_Host *shost) 1734 void scsi_block_requests(struct Scsi_Host *shost)
1734 { 1735 {
1735 shost->host_self_blocked = 1; 1736 shost->host_self_blocked = 1;
1736 } 1737 }
1737 EXPORT_SYMBOL(scsi_block_requests); 1738 EXPORT_SYMBOL(scsi_block_requests);
1738 1739
1739 /* 1740 /*
1740 * Function: scsi_unblock_requests() 1741 * Function: scsi_unblock_requests()
1741 * 1742 *
1742 * Purpose: Utility function used by low-level drivers to allow further 1743 * Purpose: Utility function used by low-level drivers to allow further
1743 * commands from being queued to the device. 1744 * commands from being queued to the device.
1744 * 1745 *
1745 * Arguments: shost - Host in question 1746 * Arguments: shost - Host in question
1746 * 1747 *
1747 * Returns: Nothing 1748 * Returns: Nothing
1748 * 1749 *
1749 * Lock status: No locks are assumed held. 1750 * Lock status: No locks are assumed held.
1750 * 1751 *
1751 * Notes: There is no timer nor any other means by which the requests 1752 * Notes: There is no timer nor any other means by which the requests
1752 * get unblocked other than the low-level driver calling 1753 * get unblocked other than the low-level driver calling
1753 * scsi_unblock_requests(). 1754 * scsi_unblock_requests().
1754 * 1755 *
1755 * This is done as an API function so that changes to the 1756 * This is done as an API function so that changes to the
1756 * internals of the scsi mid-layer won't require wholesale 1757 * internals of the scsi mid-layer won't require wholesale
1757 * changes to drivers that use this feature. 1758 * changes to drivers that use this feature.
1758 */ 1759 */
1759 void scsi_unblock_requests(struct Scsi_Host *shost) 1760 void scsi_unblock_requests(struct Scsi_Host *shost)
1760 { 1761 {
1761 shost->host_self_blocked = 0; 1762 shost->host_self_blocked = 0;
1762 scsi_run_host_queues(shost); 1763 scsi_run_host_queues(shost);
1763 } 1764 }
1764 EXPORT_SYMBOL(scsi_unblock_requests); 1765 EXPORT_SYMBOL(scsi_unblock_requests);
1765 1766
1766 int __init scsi_init_queue(void) 1767 int __init scsi_init_queue(void)
1767 { 1768 {
1768 int i; 1769 int i;
1769 1770
1770 scsi_io_context_cache = kmem_cache_create("scsi_io_context", 1771 scsi_io_context_cache = kmem_cache_create("scsi_io_context",
1771 sizeof(struct scsi_io_context), 1772 sizeof(struct scsi_io_context),
1772 0, 0, NULL, NULL); 1773 0, 0, NULL, NULL);
1773 if (!scsi_io_context_cache) { 1774 if (!scsi_io_context_cache) {
1774 printk(KERN_ERR "SCSI: can't init scsi io context cache\n"); 1775 printk(KERN_ERR "SCSI: can't init scsi io context cache\n");
1775 return -ENOMEM; 1776 return -ENOMEM;
1776 } 1777 }
1777 1778
1778 for (i = 0; i < SG_MEMPOOL_NR; i++) { 1779 for (i = 0; i < SG_MEMPOOL_NR; i++) {
1779 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 1780 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1780 int size = sgp->size * sizeof(struct scatterlist); 1781 int size = sgp->size * sizeof(struct scatterlist);
1781 1782
1782 sgp->slab = kmem_cache_create(sgp->name, size, 0, 1783 sgp->slab = kmem_cache_create(sgp->name, size, 0,
1783 SLAB_HWCACHE_ALIGN, NULL, NULL); 1784 SLAB_HWCACHE_ALIGN, NULL, NULL);
1784 if (!sgp->slab) { 1785 if (!sgp->slab) {
1785 printk(KERN_ERR "SCSI: can't init sg slab %s\n", 1786 printk(KERN_ERR "SCSI: can't init sg slab %s\n",
1786 sgp->name); 1787 sgp->name);
1787 } 1788 }
1788 1789
1789 sgp->pool = mempool_create(SG_MEMPOOL_SIZE, 1790 sgp->pool = mempool_create(SG_MEMPOOL_SIZE,
1790 mempool_alloc_slab, mempool_free_slab, 1791 mempool_alloc_slab, mempool_free_slab,
1791 sgp->slab); 1792 sgp->slab);
1792 if (!sgp->pool) { 1793 if (!sgp->pool) {
1793 printk(KERN_ERR "SCSI: can't init sg mempool %s\n", 1794 printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
1794 sgp->name); 1795 sgp->name);
1795 } 1796 }
1796 } 1797 }
1797 1798
1798 return 0; 1799 return 0;
1799 } 1800 }
1800 1801
1801 void scsi_exit_queue(void) 1802 void scsi_exit_queue(void)
1802 { 1803 {
1803 int i; 1804 int i;
1804 1805
1805 kmem_cache_destroy(scsi_io_context_cache); 1806 kmem_cache_destroy(scsi_io_context_cache);
1806 1807
1807 for (i = 0; i < SG_MEMPOOL_NR; i++) { 1808 for (i = 0; i < SG_MEMPOOL_NR; i++) {
1808 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; 1809 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1809 mempool_destroy(sgp->pool); 1810 mempool_destroy(sgp->pool);
1810 kmem_cache_destroy(sgp->slab); 1811 kmem_cache_destroy(sgp->slab);
1811 } 1812 }
1812 } 1813 }
1813 /** 1814 /**
1814 * scsi_mode_sense - issue a mode sense, falling back from 10 to 1815 * scsi_mode_sense - issue a mode sense, falling back from 10 to
1815 * six bytes if necessary. 1816 * six bytes if necessary.
1816 * @sdev: SCSI device to be queried 1817 * @sdev: SCSI device to be queried
1817 * @dbd: set if mode sense will allow block descriptors to be returned 1818 * @dbd: set if mode sense will allow block descriptors to be returned
1818 * @modepage: mode page being requested 1819 * @modepage: mode page being requested
1819 * @buffer: request buffer (may not be smaller than eight bytes) 1820 * @buffer: request buffer (may not be smaller than eight bytes)
1820 * @len: length of request buffer. 1821 * @len: length of request buffer.
1821 * @timeout: command timeout 1822 * @timeout: command timeout
1822 * @retries: number of retries before failing 1823 * @retries: number of retries before failing
1823 * @data: returns a structure abstracting the mode header data 1824 * @data: returns a structure abstracting the mode header data
1824 * @sense: place to put sense data (or NULL if no sense to be collected). 1825 * @sense: place to put sense data (or NULL if no sense to be collected).
1825 * must be SCSI_SENSE_BUFFERSIZE big. 1826 * must be SCSI_SENSE_BUFFERSIZE big.
1826 * 1827 *
1827 * Returns zero if unsuccessful, or the header offset (either 4 1828 * Returns zero if unsuccessful, or the header offset (either 4
1828 * or 8 depending on whether a six or ten byte command was 1829 * or 8 depending on whether a six or ten byte command was
1829 * issued) if successful. 1830 * issued) if successful.
1830 **/ 1831 **/
1831 int 1832 int
1832 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, 1833 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1833 unsigned char *buffer, int len, int timeout, int retries, 1834 unsigned char *buffer, int len, int timeout, int retries,
1834 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) { 1835 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) {
1835 unsigned char cmd[12]; 1836 unsigned char cmd[12];
1836 int use_10_for_ms; 1837 int use_10_for_ms;
1837 int header_length; 1838 int header_length;
1838 int result; 1839 int result;
1839 struct scsi_sense_hdr my_sshdr; 1840 struct scsi_sense_hdr my_sshdr;
1840 1841
1841 memset(data, 0, sizeof(*data)); 1842 memset(data, 0, sizeof(*data));
1842 memset(&cmd[0], 0, 12); 1843 memset(&cmd[0], 0, 12);
1843 cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */ 1844 cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */
1844 cmd[2] = modepage; 1845 cmd[2] = modepage;
1845 1846
1846 /* caller might not be interested in sense, but we need it */ 1847 /* caller might not be interested in sense, but we need it */
1847 if (!sshdr) 1848 if (!sshdr)
1848 sshdr = &my_sshdr; 1849 sshdr = &my_sshdr;
1849 1850
1850 retry: 1851 retry:
1851 use_10_for_ms = sdev->use_10_for_ms; 1852 use_10_for_ms = sdev->use_10_for_ms;
1852 1853
1853 if (use_10_for_ms) { 1854 if (use_10_for_ms) {
1854 if (len < 8) 1855 if (len < 8)
1855 len = 8; 1856 len = 8;
1856 1857
1857 cmd[0] = MODE_SENSE_10; 1858 cmd[0] = MODE_SENSE_10;
1858 cmd[8] = len; 1859 cmd[8] = len;
1859 header_length = 8; 1860 header_length = 8;
1860 } else { 1861 } else {
1861 if (len < 4) 1862 if (len < 4)
1862 len = 4; 1863 len = 4;
1863 1864
1864 cmd[0] = MODE_SENSE; 1865 cmd[0] = MODE_SENSE;
1865 cmd[4] = len; 1866 cmd[4] = len;
1866 header_length = 4; 1867 header_length = 4;
1867 } 1868 }
1868 1869
1869 memset(buffer, 0, len); 1870 memset(buffer, 0, len);
1870 1871
1871 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len, 1872 result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
1872 sshdr, timeout, retries); 1873 sshdr, timeout, retries);
1873 1874
1874 /* This code looks awful: what it's doing is making sure an 1875 /* This code looks awful: what it's doing is making sure an
1875 * ILLEGAL REQUEST sense return identifies the actual command 1876 * ILLEGAL REQUEST sense return identifies the actual command
1876 * byte as the problem. MODE_SENSE commands can return 1877 * byte as the problem. MODE_SENSE commands can return
1877 * ILLEGAL REQUEST if the code page isn't supported */ 1878 * ILLEGAL REQUEST if the code page isn't supported */
1878 1879
1879 if (use_10_for_ms && !scsi_status_is_good(result) && 1880 if (use_10_for_ms && !scsi_status_is_good(result) &&
1880 (driver_byte(result) & DRIVER_SENSE)) { 1881 (driver_byte(result) & DRIVER_SENSE)) {
1881 if (scsi_sense_valid(sshdr)) { 1882 if (scsi_sense_valid(sshdr)) {
1882 if ((sshdr->sense_key == ILLEGAL_REQUEST) && 1883 if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
1883 (sshdr->asc == 0x20) && (sshdr->ascq == 0)) { 1884 (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
1884 /* 1885 /*
1885 * Invalid command operation code 1886 * Invalid command operation code
1886 */ 1887 */
1887 sdev->use_10_for_ms = 0; 1888 sdev->use_10_for_ms = 0;
1888 goto retry; 1889 goto retry;
1889 } 1890 }
1890 } 1891 }
1891 } 1892 }
1892 1893
1893 if(scsi_status_is_good(result)) { 1894 if(scsi_status_is_good(result)) {
1894 data->header_length = header_length; 1895 data->header_length = header_length;
1895 if(use_10_for_ms) { 1896 if(use_10_for_ms) {
1896 data->length = buffer[0]*256 + buffer[1] + 2; 1897 data->length = buffer[0]*256 + buffer[1] + 2;
1897 data->medium_type = buffer[2]; 1898 data->medium_type = buffer[2];
1898 data->device_specific = buffer[3]; 1899 data->device_specific = buffer[3];
1899 data->longlba = buffer[4] & 0x01; 1900 data->longlba = buffer[4] & 0x01;
1900 data->block_descriptor_length = buffer[6]*256 1901 data->block_descriptor_length = buffer[6]*256
1901 + buffer[7]; 1902 + buffer[7];
1902 } else { 1903 } else {
1903 data->length = buffer[0] + 1; 1904 data->length = buffer[0] + 1;
1904 data->medium_type = buffer[1]; 1905 data->medium_type = buffer[1];
1905 data->device_specific = buffer[2]; 1906 data->device_specific = buffer[2];
1906 data->block_descriptor_length = buffer[3]; 1907 data->block_descriptor_length = buffer[3];
1907 } 1908 }
1908 } 1909 }
1909 1910
1910 return result; 1911 return result;
1911 } 1912 }
1912 EXPORT_SYMBOL(scsi_mode_sense); 1913 EXPORT_SYMBOL(scsi_mode_sense);
1913 1914
1914 int 1915 int
1915 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries) 1916 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
1916 { 1917 {
1917 char cmd[] = { 1918 char cmd[] = {
1918 TEST_UNIT_READY, 0, 0, 0, 0, 0, 1919 TEST_UNIT_READY, 0, 0, 0, 0, 0,
1919 }; 1920 };
1920 struct scsi_sense_hdr sshdr; 1921 struct scsi_sense_hdr sshdr;
1921 int result; 1922 int result;
1922 1923
1923 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr, 1924 result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
1924 timeout, retries); 1925 timeout, retries);
1925 1926
1926 if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) { 1927 if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {
1927 1928
1928 if ((scsi_sense_valid(&sshdr)) && 1929 if ((scsi_sense_valid(&sshdr)) &&
1929 ((sshdr.sense_key == UNIT_ATTENTION) || 1930 ((sshdr.sense_key == UNIT_ATTENTION) ||
1930 (sshdr.sense_key == NOT_READY))) { 1931 (sshdr.sense_key == NOT_READY))) {
1931 sdev->changed = 1; 1932 sdev->changed = 1;
1932 result = 0; 1933 result = 0;
1933 } 1934 }
1934 } 1935 }
1935 return result; 1936 return result;
1936 } 1937 }
1937 EXPORT_SYMBOL(scsi_test_unit_ready); 1938 EXPORT_SYMBOL(scsi_test_unit_ready);
1938 1939
/**
 * scsi_device_set_state - Take the given device through the device
 *		state model.
 * @sdev:	scsi device to change the state of.
 * @state:	state to change to.
 *
 * Returns zero if successful or -EINVAL if the requested
 * transition is illegal.
 **/
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	/* Transitioning to the current state is always a no-op. */
	if (state == oldstate)
		return 0;

	/*
	 * Validate the oldstate -> state transition.  Each outer case is
	 * the requested new state; the inner switch enumerates the states
	 * a device may legally come FROM.
	 */
	switch (state) {
	case SDEV_CREATED:
		/* There are no legal states that come back to
		 * created.  This is the manually initialised start
		 * state */
		goto illegal;

	case SDEV_RUNNING:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_OFFLINE:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_QUIESCE:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_OFFLINE:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_BLOCK:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CANCEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_DEL:
		/* Deletion is only reachable via CANCEL. */
		switch (oldstate) {
		case SDEV_CANCEL:
			break;
		default:
			goto illegal;
		}
		break;

	}
	sdev->sdev_state = state;
	return 0;

 illegal:
	/* Log the rejected transition at error-recovery log level 1. */
	SCSI_LOG_ERROR_RECOVERY(1,
				sdev_printk(KERN_ERR, sdev,
					    "Illegal state transition %s->%s\n",
					    scsi_device_state_name(oldstate),
					    scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);
2042 2043
2043 /** 2044 /**
2044 * scsi_device_quiesce - Block user issued commands. 2045 * scsi_device_quiesce - Block user issued commands.
2045 * @sdev: scsi device to quiesce. 2046 * @sdev: scsi device to quiesce.
2046 * 2047 *
2047 * This works by trying to transition to the SDEV_QUIESCE state 2048 * This works by trying to transition to the SDEV_QUIESCE state
2048 * (which must be a legal transition). When the device is in this 2049 * (which must be a legal transition). When the device is in this
2049 * state, only special requests will be accepted, all others will 2050 * state, only special requests will be accepted, all others will
2050 * be deferred. Since special requests may also be requeued requests, 2051 * be deferred. Since special requests may also be requeued requests,
2051 * a successful return doesn't guarantee the device will be 2052 * a successful return doesn't guarantee the device will be
2052 * totally quiescent. 2053 * totally quiescent.
2053 * 2054 *
2054 * Must be called with user context, may sleep. 2055 * Must be called with user context, may sleep.
2055 * 2056 *
2056 * Returns zero if unsuccessful or an error if not. 2057 * Returns zero if unsuccessful or an error if not.
2057 **/ 2058 **/
2058 int 2059 int
2059 scsi_device_quiesce(struct scsi_device *sdev) 2060 scsi_device_quiesce(struct scsi_device *sdev)
2060 { 2061 {
2061 int err = scsi_device_set_state(sdev, SDEV_QUIESCE); 2062 int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2062 if (err) 2063 if (err)
2063 return err; 2064 return err;
2064 2065
2065 scsi_run_queue(sdev->request_queue); 2066 scsi_run_queue(sdev->request_queue);
2066 while (sdev->device_busy) { 2067 while (sdev->device_busy) {
2067 msleep_interruptible(200); 2068 msleep_interruptible(200);
2068 scsi_run_queue(sdev->request_queue); 2069 scsi_run_queue(sdev->request_queue);
2069 } 2070 }
2070 return 0; 2071 return 0;
2071 } 2072 }
2072 EXPORT_SYMBOL(scsi_device_quiesce); 2073 EXPORT_SYMBOL(scsi_device_quiesce);
2073 2074
2074 /** 2075 /**
2075 * scsi_device_resume - Restart user issued commands to a quiesced device. 2076 * scsi_device_resume - Restart user issued commands to a quiesced device.
2076 * @sdev: scsi device to resume. 2077 * @sdev: scsi device to resume.
2077 * 2078 *
2078 * Moves the device from quiesced back to running and restarts the 2079 * Moves the device from quiesced back to running and restarts the
2079 * queues. 2080 * queues.
2080 * 2081 *
2081 * Must be called with user context, may sleep. 2082 * Must be called with user context, may sleep.
2082 **/ 2083 **/
2083 void 2084 void
2084 scsi_device_resume(struct scsi_device *sdev) 2085 scsi_device_resume(struct scsi_device *sdev)
2085 { 2086 {
2086 if(scsi_device_set_state(sdev, SDEV_RUNNING)) 2087 if(scsi_device_set_state(sdev, SDEV_RUNNING))
2087 return; 2088 return;
2088 scsi_run_queue(sdev->request_queue); 2089 scsi_run_queue(sdev->request_queue);
2089 } 2090 }
2090 EXPORT_SYMBOL(scsi_device_resume); 2091 EXPORT_SYMBOL(scsi_device_resume);
2091 2092
/* starget_for_each_device() callback: quiesce one device; @data unused. */
static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}
2097 2098
/**
 * scsi_target_quiesce - quiesce every device attached to a scsi target
 * @starget:	target whose devices to quiesce
 **/
void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);
2104 2105
/* starget_for_each_device() callback: resume one device; @data unused. */
static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}
2110 2111
/**
 * scsi_target_resume - resume every device attached to a scsi target
 * @starget:	target whose devices to resume
 **/
void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);
2117 2118
2118 /** 2119 /**
2119 * scsi_internal_device_block - internal function to put a device 2120 * scsi_internal_device_block - internal function to put a device
2120 * temporarily into the SDEV_BLOCK state 2121 * temporarily into the SDEV_BLOCK state
2121 * @sdev: device to block 2122 * @sdev: device to block
2122 * 2123 *
2123 * Block request made by scsi lld's to temporarily stop all 2124 * Block request made by scsi lld's to temporarily stop all
2124 * scsi commands on the specified device. Called from interrupt 2125 * scsi commands on the specified device. Called from interrupt
2125 * or normal process context. 2126 * or normal process context.
2126 * 2127 *
2127 * Returns zero if successful or error if not 2128 * Returns zero if successful or error if not
2128 * 2129 *
2129 * Notes: 2130 * Notes:
2130 * This routine transitions the device to the SDEV_BLOCK state 2131 * This routine transitions the device to the SDEV_BLOCK state
2131 * (which must be a legal transition). When the device is in this 2132 * (which must be a legal transition). When the device is in this
2132 * state, all commands are deferred until the scsi lld reenables 2133 * state, all commands are deferred until the scsi lld reenables
2133 * the device with scsi_device_unblock or device_block_tmo fires. 2134 * the device with scsi_device_unblock or device_block_tmo fires.
2134 * This routine assumes the host_lock is held on entry. 2135 * This routine assumes the host_lock is held on entry.
2135 **/ 2136 **/
2136 int 2137 int
2137 scsi_internal_device_block(struct scsi_device *sdev) 2138 scsi_internal_device_block(struct scsi_device *sdev)
2138 { 2139 {
2139 request_queue_t *q = sdev->request_queue; 2140 request_queue_t *q = sdev->request_queue;
2140 unsigned long flags; 2141 unsigned long flags;
2141 int err = 0; 2142 int err = 0;
2142 2143
2143 err = scsi_device_set_state(sdev, SDEV_BLOCK); 2144 err = scsi_device_set_state(sdev, SDEV_BLOCK);
2144 if (err) 2145 if (err)
2145 return err; 2146 return err;
2146 2147
2147 /* 2148 /*
2148 * The device has transitioned to SDEV_BLOCK. Stop the 2149 * The device has transitioned to SDEV_BLOCK. Stop the
2149 * block layer from calling the midlayer with this device's 2150 * block layer from calling the midlayer with this device's
2150 * request queue. 2151 * request queue.
2151 */ 2152 */
2152 spin_lock_irqsave(q->queue_lock, flags); 2153 spin_lock_irqsave(q->queue_lock, flags);
2153 blk_stop_queue(q); 2154 blk_stop_queue(q);
2154 spin_unlock_irqrestore(q->queue_lock, flags); 2155 spin_unlock_irqrestore(q->queue_lock, flags);
2155 2156
2156 return 0; 2157 return 0;
2157 } 2158 }
2158 EXPORT_SYMBOL_GPL(scsi_internal_device_block); 2159 EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2159 2160
2160 /** 2161 /**
2161 * scsi_internal_device_unblock - resume a device after a block request 2162 * scsi_internal_device_unblock - resume a device after a block request
2162 * @sdev: device to resume 2163 * @sdev: device to resume
2163 * 2164 *
2164 * Called by scsi lld's or the midlayer to restart the device queue 2165 * Called by scsi lld's or the midlayer to restart the device queue
2165 * for the previously suspended scsi device. Called from interrupt or 2166 * for the previously suspended scsi device. Called from interrupt or
2166 * normal process context. 2167 * normal process context.
2167 * 2168 *
2168 * Returns zero if successful or error if not. 2169 * Returns zero if successful or error if not.
2169 * 2170 *
2170 * Notes: 2171 * Notes:
2171 * This routine transitions the device to the SDEV_RUNNING state 2172 * This routine transitions the device to the SDEV_RUNNING state
2172 * (which must be a legal transition) allowing the midlayer to 2173 * (which must be a legal transition) allowing the midlayer to
2173 * goose the queue for this device. This routine assumes the 2174 * goose the queue for this device. This routine assumes the
2174 * host_lock is held upon entry. 2175 * host_lock is held upon entry.
2175 **/ 2176 **/
2176 int 2177 int
2177 scsi_internal_device_unblock(struct scsi_device *sdev) 2178 scsi_internal_device_unblock(struct scsi_device *sdev)
2178 { 2179 {
2179 request_queue_t *q = sdev->request_queue; 2180 request_queue_t *q = sdev->request_queue;
2180 int err; 2181 int err;
2181 unsigned long flags; 2182 unsigned long flags;
2182 2183
2183 /* 2184 /*
2184 * Try to transition the scsi device to SDEV_RUNNING 2185 * Try to transition the scsi device to SDEV_RUNNING
2185 * and goose the device queue if successful. 2186 * and goose the device queue if successful.
2186 */ 2187 */
2187 err = scsi_device_set_state(sdev, SDEV_RUNNING); 2188 err = scsi_device_set_state(sdev, SDEV_RUNNING);
2188 if (err) 2189 if (err)
2189 return err; 2190 return err;
2190 2191
2191 spin_lock_irqsave(q->queue_lock, flags); 2192 spin_lock_irqsave(q->queue_lock, flags);
2192 blk_start_queue(q); 2193 blk_start_queue(q);
2193 spin_unlock_irqrestore(q->queue_lock, flags); 2194 spin_unlock_irqrestore(q->queue_lock, flags);
2194 2195
2195 return 0; 2196 return 0;
2196 } 2197 }
2197 EXPORT_SYMBOL_GPL(scsi_internal_device_unblock); 2198 EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
2198 2199
/* starget_for_each_device() callback: block one device; @data unused. */
static void
device_block(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_block(sdev);
}
2204 2205
2205 static int 2206 static int
2206 target_block(struct device *dev, void *data) 2207 target_block(struct device *dev, void *data)
2207 { 2208 {
2208 if (scsi_is_target_device(dev)) 2209 if (scsi_is_target_device(dev))
2209 starget_for_each_device(to_scsi_target(dev), NULL, 2210 starget_for_each_device(to_scsi_target(dev), NULL,
2210 device_block); 2211 device_block);
2211 return 0; 2212 return 0;
2212 } 2213 }
2213 2214
2214 void 2215 void
2215 scsi_target_block(struct device *dev) 2216 scsi_target_block(struct device *dev)
2216 { 2217 {
2217 if (scsi_is_target_device(dev)) 2218 if (scsi_is_target_device(dev))
2218 starget_for_each_device(to_scsi_target(dev), NULL, 2219 starget_for_each_device(to_scsi_target(dev), NULL,
2219 device_block); 2220 device_block);
2220 else 2221 else
2221 device_for_each_child(dev, NULL, target_block); 2222 device_for_each_child(dev, NULL, target_block);
2222 } 2223 }
2223 EXPORT_SYMBOL_GPL(scsi_target_block); 2224 EXPORT_SYMBOL_GPL(scsi_target_block);
2224 2225
/* starget_for_each_device() callback: unblock one device; @data unused. */
static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev);
}
2230 2231
2231 static int 2232 static int
2232 target_unblock(struct device *dev, void *data) 2233 target_unblock(struct device *dev, void *data)
2233 { 2234 {
2234 if (scsi_is_target_device(dev)) 2235 if (scsi_is_target_device(dev))
2235 starget_for_each_device(to_scsi_target(dev), NULL, 2236 starget_for_each_device(to_scsi_target(dev), NULL,
2236 device_unblock); 2237 device_unblock);
2237 return 0; 2238 return 0;
2238 } 2239 }
2239 2240
2240 void 2241 void
2241 scsi_target_unblock(struct device *dev) 2242 scsi_target_unblock(struct device *dev)
2242 { 2243 {
2243 if (scsi_is_target_device(dev)) 2244 if (scsi_is_target_device(dev))
2244 starget_for_each_device(to_scsi_target(dev), NULL, 2245 starget_for_each_device(to_scsi_target(dev), NULL,
2245 device_unblock); 2246 device_unblock);
2246 else 2247 else
2247 device_for_each_child(dev, NULL, target_unblock); 2248 device_for_each_child(dev, NULL, target_unblock);
2248 } 2249 }
2249 EXPORT_SYMBOL_GPL(scsi_target_unblock); 2250 EXPORT_SYMBOL_GPL(scsi_target_unblock);
2250 2251
1 /* 1 /*
2 * History: 2 * History:
3 * Started: Aug 9 by Lawrence Foard (entropy@world.std.com), 3 * Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
4 * to allow user process control of SCSI devices. 4 * to allow user process control of SCSI devices.
5 * Development Sponsored by Killy Corp. NY NY 5 * Development Sponsored by Killy Corp. NY NY
6 * 6 *
7 * Original driver (sg.c): 7 * Original driver (sg.c):
8 * Copyright (C) 1992 Lawrence Foard 8 * Copyright (C) 1992 Lawrence Foard
9 * Version 2 and 3 extensions to driver: 9 * Version 2 and 3 extensions to driver:
10 * Copyright (C) 1998 - 2005 Douglas Gilbert 10 * Copyright (C) 1998 - 2005 Douglas Gilbert
11 * 11 *
12 * Modified 19-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support 12 * Modified 19-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
13 * 13 *
14 * This program is free software; you can redistribute it and/or modify 14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by 15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option) 16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version. 17 * any later version.
18 * 18 *
19 */ 19 */
20 20
21 static int sg_version_num = 30533; /* 2 digits for each component */ 21 static int sg_version_num = 30533; /* 2 digits for each component */
22 #define SG_VERSION_STR "3.5.33" 22 #define SG_VERSION_STR "3.5.33"
23 23
24 /* 24 /*
25 * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes: 25 * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
26 * - scsi logging is available via SCSI_LOG_TIMEOUT macros. First 26 * - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
27 * the kernel/module needs to be built with CONFIG_SCSI_LOGGING 27 * the kernel/module needs to be built with CONFIG_SCSI_LOGGING
28 * (otherwise the macros compile to empty statements). 28 * (otherwise the macros compile to empty statements).
29 * 29 *
30 */ 30 */
31 #include <linux/config.h> 31 #include <linux/config.h>
32 #include <linux/module.h> 32 #include <linux/module.h>
33 33
34 #include <linux/fs.h> 34 #include <linux/fs.h>
35 #include <linux/kernel.h> 35 #include <linux/kernel.h>
36 #include <linux/sched.h> 36 #include <linux/sched.h>
37 #include <linux/string.h> 37 #include <linux/string.h>
38 #include <linux/mm.h> 38 #include <linux/mm.h>
39 #include <linux/errno.h> 39 #include <linux/errno.h>
40 #include <linux/mtio.h> 40 #include <linux/mtio.h>
41 #include <linux/ioctl.h> 41 #include <linux/ioctl.h>
42 #include <linux/fcntl.h> 42 #include <linux/fcntl.h>
43 #include <linux/init.h> 43 #include <linux/init.h>
44 #include <linux/poll.h> 44 #include <linux/poll.h>
45 #include <linux/smp_lock.h> 45 #include <linux/smp_lock.h>
46 #include <linux/moduleparam.h> 46 #include <linux/moduleparam.h>
47 #include <linux/devfs_fs_kernel.h> 47 #include <linux/devfs_fs_kernel.h>
48 #include <linux/cdev.h> 48 #include <linux/cdev.h>
49 #include <linux/seq_file.h> 49 #include <linux/seq_file.h>
50 #include <linux/blkdev.h> 50 #include <linux/blkdev.h>
51 #include <linux/delay.h> 51 #include <linux/delay.h>
52 #include <linux/scatterlist.h> 52 #include <linux/scatterlist.h>
53 53
54 #include "scsi.h" 54 #include "scsi.h"
55 #include <scsi/scsi_dbg.h> 55 #include <scsi/scsi_dbg.h>
56 #include <scsi/scsi_host.h> 56 #include <scsi/scsi_host.h>
57 #include <scsi/scsi_driver.h> 57 #include <scsi/scsi_driver.h>
58 #include <scsi/scsi_ioctl.h> 58 #include <scsi/scsi_ioctl.h>
59 #include <scsi/sg.h> 59 #include <scsi/sg.h>
60 60
61 #include "scsi_logging.h" 61 #include "scsi_logging.h"
62 62
63 #ifdef CONFIG_SCSI_PROC_FS 63 #ifdef CONFIG_SCSI_PROC_FS
64 #include <linux/proc_fs.h> 64 #include <linux/proc_fs.h>
65 static char *sg_version_date = "20050908"; 65 static char *sg_version_date = "20050908";
66 66
67 static int sg_proc_init(void); 67 static int sg_proc_init(void);
68 static void sg_proc_cleanup(void); 68 static void sg_proc_cleanup(void);
69 #endif 69 #endif
70 70
71 #define SG_ALLOW_DIO_DEF 0 71 #define SG_ALLOW_DIO_DEF 0
72 #define SG_ALLOW_DIO_CODE /* compile out by commenting this define */ 72 #define SG_ALLOW_DIO_CODE /* compile out by commenting this define */
73 73
74 #define SG_MAX_DEVS 32768 74 #define SG_MAX_DEVS 32768
75 75
76 /* 76 /*
77 * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d) 77 * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d)
78 * Then when using 32 bit integers x * m may overflow during the calculation. 78 * Then when using 32 bit integers x * m may overflow during the calculation.
79 * Replacing muldiv(x) by muldiv(x)=((x % d) * m) / d + int(x / d) * m 79 * Replacing muldiv(x) by muldiv(x)=((x % d) * m) / d + int(x / d) * m
80 * calculates the same, but prevents the overflow when both m and d 80 * calculates the same, but prevents the overflow when both m and d
81 * are "small" numbers (like HZ and USER_HZ). 81 * are "small" numbers (like HZ and USER_HZ).
82 * Of course an overflow is inavoidable if the result of muldiv doesn't fit 82 * Of course an overflow is inavoidable if the result of muldiv doesn't fit
83 * in 32 bits. 83 * in 32 bits.
84 */ 84 */
85 #define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL)) 85 #define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))
86 86
87 #define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ) 87 #define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
88 88
89 int sg_big_buff = SG_DEF_RESERVED_SIZE; 89 int sg_big_buff = SG_DEF_RESERVED_SIZE;
90 /* N.B. This variable is readable and writeable via 90 /* N.B. This variable is readable and writeable via
91 /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer 91 /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
92 of this size (or less if there is not enough memory) will be reserved 92 of this size (or less if there is not enough memory) will be reserved
93 for use by this file descriptor. [Deprecated usage: this variable is also 93 for use by this file descriptor. [Deprecated usage: this variable is also
94 readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into 94 readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
95 the kernel (i.e. it is not a module).] */ 95 the kernel (i.e. it is not a module).] */
96 static int def_reserved_size = -1; /* picks up init parameter */ 96 static int def_reserved_size = -1; /* picks up init parameter */
97 static int sg_allow_dio = SG_ALLOW_DIO_DEF; 97 static int sg_allow_dio = SG_ALLOW_DIO_DEF;
98 98
99 #define SG_SECTOR_SZ 512 99 #define SG_SECTOR_SZ 512
100 #define SG_SECTOR_MSK (SG_SECTOR_SZ - 1) 100 #define SG_SECTOR_MSK (SG_SECTOR_SZ - 1)
101 101
102 #define SG_DEV_ARR_LUMP 32 /* amount to over allocate sg_dev_arr by */ 102 #define SG_DEV_ARR_LUMP 32 /* amount to over allocate sg_dev_arr by */
103 103
104 static int sg_add(struct class_device *, struct class_interface *); 104 static int sg_add(struct class_device *, struct class_interface *);
105 static void sg_remove(struct class_device *, struct class_interface *); 105 static void sg_remove(struct class_device *, struct class_interface *);
106 106
107 static DEFINE_RWLOCK(sg_dev_arr_lock); /* Also used to lock 107 static DEFINE_RWLOCK(sg_dev_arr_lock); /* Also used to lock
108 file descriptor list for device */ 108 file descriptor list for device */
109 109
110 static struct class_interface sg_interface = { 110 static struct class_interface sg_interface = {
111 .add = sg_add, 111 .add = sg_add,
112 .remove = sg_remove, 112 .remove = sg_remove,
113 }; 113 };
114 114
/* Holding area for the scsi scatter-gather info of one data buffer. */
typedef struct sg_scatter_hold {
	unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
	unsigned short sglist_len; /* size of malloc'd scatter-gather list ++ */
	unsigned bufflen;	/* Size of (aggregate) data buffer */
	unsigned b_malloc_len;	/* actual len malloc'ed in buffer */
	struct scatterlist *buffer; /* scatter list */
	char dio_in_use;	/* 0->indirect IO (or mmap), 1->dio */
	unsigned char cmd_opcode; /* first byte of command */
} Sg_scatter_hold;
124 124
125 struct sg_device; /* forward declarations */ 125 struct sg_device; /* forward declarations */
126 struct sg_fd; 126 struct sg_fd;
127 127
/* One outstanding request; SG_MAX_QUEUE of these per file descriptor. */
typedef struct sg_request {
	struct sg_request *nextrp; /* NULL -> tail request (slist) */
	struct sg_fd *parentfp;	/* NULL -> not in use */
	Sg_scatter_hold data;	/* hold buffer, perhaps scatter list */
	sg_io_hdr_t header;	/* scsi command+info, see <scsi/sg.h> */
	unsigned char sense_b[SCSI_SENSE_BUFFERSIZE]; /* sense data copy */
	char res_used;		/* 1 -> using reserve buffer, 0 -> not ... */
	char orphan;		/* 1 -> drop on sight, 0 -> normal */
	char sg_io_owned;	/* 1 -> packet belongs to SG_IO */
	volatile char done;	/* 0->before bh, 1->before read, 2->read */
} Sg_request;
139 139
/* Per-open-file-descriptor state of the sg driver. */
typedef struct sg_fd {
	struct sg_fd *nextfp;	/* NULL when last opened fd on this device */
	struct sg_device *parentdp; /* owning device */
	wait_queue_head_t read_wait; /* queue read until command done */
	rwlock_t rq_list_lock;	/* protect access to list in req_arr */
	int timeout;		/* defaults to SG_DEFAULT_TIMEOUT */
	int timeout_user;	/* defaults to SG_DEFAULT_TIMEOUT_USER */
	Sg_scatter_hold reserve; /* buffer held for this file descriptor */
	unsigned save_scat_len;	/* original length of trunc. scat. element */
	Sg_request *headrp;	/* head of request slist, NULL->empty */
	struct fasync_struct *async_qp;	/* used by asynchronous notification */
	Sg_request req_arr[SG_MAX_QUEUE];  /* used as singly-linked list */
	char low_dma;		/* as in parent but possibly overridden to 1 */
	char force_packid;	/* 1 -> pack_id input to read(), 0 -> ignored */
	volatile char closed;	/* 1 -> fd closed but request(s) outstanding */
	char cmd_q;		/* 1 -> allow command queuing, 0 -> don't */
	char next_cmd_len;	/* 0 -> automatic (def), >0 -> use on next write() */
	char keep_orphan;	/* 0 -> drop orphan (def), 1 -> keep for read() */
	char mmap_called;	/* 0 -> mmap() never called on this fd */
} Sg_fd;
160 160
/* Per scsi-generic-device state; one per bound scsi device. */
typedef struct sg_device {
	struct scsi_device *device; /* underlying mid-layer device */
	wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */
	int sg_tablesize;	/* adapter's max scatter-gather table size */
	Sg_fd *headfp;		/* first open fd belonging to this device */
	volatile char detached;	/* 0->attached, 1->detached pending removal */
	volatile char exclude;	/* opened for exclusive access */
	char sgdebug;		/* 0->off, 1->sense, 9->dump dev, 10-> all devs */
	struct gendisk *disk;	/* backing gendisk (name, major/minor) */
	struct cdev *cdev;	/* char_dev [sysfs: /sys/cdev/major/sg<n>] */
} Sg_device;
172 172
173 static int sg_fasync(int fd, struct file *filp, int mode); 173 static int sg_fasync(int fd, struct file *filp, int mode);
174 /* tasklet or soft irq callback */ 174 /* tasklet or soft irq callback */
175 static void sg_cmd_done(void *data, char *sense, int result, int resid); 175 static void sg_cmd_done(void *data, char *sense, int result, int resid);
176 static int sg_start_req(Sg_request * srp); 176 static int sg_start_req(Sg_request * srp);
177 static void sg_finish_rem_req(Sg_request * srp); 177 static void sg_finish_rem_req(Sg_request * srp);
178 static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size); 178 static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
179 static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, 179 static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
180 int tablesize); 180 int tablesize);
181 static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, 181 static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
182 Sg_request * srp); 182 Sg_request * srp);
183 static ssize_t sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count, 183 static ssize_t sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count,
184 int blocking, int read_only, Sg_request ** o_srp); 184 int blocking, int read_only, Sg_request ** o_srp);
185 static int sg_common_write(Sg_fd * sfp, Sg_request * srp, 185 static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
186 unsigned char *cmnd, int timeout, int blocking); 186 unsigned char *cmnd, int timeout, int blocking);
187 static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind, 187 static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
188 int wr_xf, int *countp, unsigned char __user **up); 188 int wr_xf, int *countp, unsigned char __user **up);
189 static int sg_write_xfer(Sg_request * srp); 189 static int sg_write_xfer(Sg_request * srp);
190 static int sg_read_xfer(Sg_request * srp); 190 static int sg_read_xfer(Sg_request * srp);
191 static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer); 191 static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
192 static void sg_remove_scat(Sg_scatter_hold * schp); 192 static void sg_remove_scat(Sg_scatter_hold * schp);
193 static void sg_build_reserve(Sg_fd * sfp, int req_size); 193 static void sg_build_reserve(Sg_fd * sfp, int req_size);
194 static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size); 194 static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
195 static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp); 195 static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
196 static struct page *sg_page_malloc(int rqSz, int lowDma, int *retSzp); 196 static struct page *sg_page_malloc(int rqSz, int lowDma, int *retSzp);
197 static void sg_page_free(struct page *page, int size); 197 static void sg_page_free(struct page *page, int size);
198 static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev); 198 static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
199 static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp); 199 static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
200 static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp); 200 static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
201 static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id); 201 static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
202 static Sg_request *sg_add_request(Sg_fd * sfp); 202 static Sg_request *sg_add_request(Sg_fd * sfp);
203 static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); 203 static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
204 static int sg_res_in_use(Sg_fd * sfp); 204 static int sg_res_in_use(Sg_fd * sfp);
205 static int sg_allow_access(unsigned char opcode, char dev_type); 205 static int sg_allow_access(unsigned char opcode, char dev_type);
206 static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len); 206 static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len);
207 static Sg_device *sg_get_dev(int dev); 207 static Sg_device *sg_get_dev(int dev);
208 #ifdef CONFIG_SCSI_PROC_FS 208 #ifdef CONFIG_SCSI_PROC_FS
209 static int sg_last_dev(void); 209 static int sg_last_dev(void);
210 #endif 210 #endif
211 211
212 static Sg_device **sg_dev_arr = NULL; 212 static Sg_device **sg_dev_arr = NULL;
213 static int sg_dev_max; 213 static int sg_dev_max;
214 static int sg_nr_dev; 214 static int sg_nr_dev;
215 215
216 #define SZ_SG_HEADER sizeof(struct sg_header) 216 #define SZ_SG_HEADER sizeof(struct sg_header)
217 #define SZ_SG_IO_HDR sizeof(sg_io_hdr_t) 217 #define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
218 #define SZ_SG_IOVEC sizeof(sg_iovec_t) 218 #define SZ_SG_IOVEC sizeof(sg_iovec_t)
219 #define SZ_SG_REQ_INFO sizeof(sg_req_info_t) 219 #define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
220 220
221 static int 221 static int
222 sg_open(struct inode *inode, struct file *filp) 222 sg_open(struct inode *inode, struct file *filp)
223 { 223 {
224 int dev = iminor(inode); 224 int dev = iminor(inode);
225 int flags = filp->f_flags; 225 int flags = filp->f_flags;
226 struct request_queue *q; 226 struct request_queue *q;
227 Sg_device *sdp; 227 Sg_device *sdp;
228 Sg_fd *sfp; 228 Sg_fd *sfp;
229 int res; 229 int res;
230 int retval; 230 int retval;
231 231
232 nonseekable_open(inode, filp); 232 nonseekable_open(inode, filp);
233 SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags)); 233 SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
234 sdp = sg_get_dev(dev); 234 sdp = sg_get_dev(dev);
235 if ((!sdp) || (!sdp->device)) 235 if ((!sdp) || (!sdp->device))
236 return -ENXIO; 236 return -ENXIO;
237 if (sdp->detached) 237 if (sdp->detached)
238 return -ENODEV; 238 return -ENODEV;
239 239
240 /* This driver's module count bumped by fops_get in <linux/fs.h> */ 240 /* This driver's module count bumped by fops_get in <linux/fs.h> */
241 /* Prevent the device driver from vanishing while we sleep */ 241 /* Prevent the device driver from vanishing while we sleep */
242 retval = scsi_device_get(sdp->device); 242 retval = scsi_device_get(sdp->device);
243 if (retval) 243 if (retval)
244 return retval; 244 return retval;
245 245
246 if (!((flags & O_NONBLOCK) || 246 if (!((flags & O_NONBLOCK) ||
247 scsi_block_when_processing_errors(sdp->device))) { 247 scsi_block_when_processing_errors(sdp->device))) {
248 retval = -ENXIO; 248 retval = -ENXIO;
249 /* we are in error recovery for this device */ 249 /* we are in error recovery for this device */
250 goto error_out; 250 goto error_out;
251 } 251 }
252 252
253 if (flags & O_EXCL) { 253 if (flags & O_EXCL) {
254 if (O_RDONLY == (flags & O_ACCMODE)) { 254 if (O_RDONLY == (flags & O_ACCMODE)) {
255 retval = -EPERM; /* Can't lock it with read only access */ 255 retval = -EPERM; /* Can't lock it with read only access */
256 goto error_out; 256 goto error_out;
257 } 257 }
258 if (sdp->headfp && (flags & O_NONBLOCK)) { 258 if (sdp->headfp && (flags & O_NONBLOCK)) {
259 retval = -EBUSY; 259 retval = -EBUSY;
260 goto error_out; 260 goto error_out;
261 } 261 }
262 res = 0; 262 res = 0;
263 __wait_event_interruptible(sdp->o_excl_wait, 263 __wait_event_interruptible(sdp->o_excl_wait,
264 ((sdp->headfp || sdp->exclude) ? 0 : (sdp->exclude = 1)), res); 264 ((sdp->headfp || sdp->exclude) ? 0 : (sdp->exclude = 1)), res);
265 if (res) { 265 if (res) {
266 retval = res; /* -ERESTARTSYS because signal hit process */ 266 retval = res; /* -ERESTARTSYS because signal hit process */
267 goto error_out; 267 goto error_out;
268 } 268 }
269 } else if (sdp->exclude) { /* some other fd has an exclusive lock on dev */ 269 } else if (sdp->exclude) { /* some other fd has an exclusive lock on dev */
270 if (flags & O_NONBLOCK) { 270 if (flags & O_NONBLOCK) {
271 retval = -EBUSY; 271 retval = -EBUSY;
272 goto error_out; 272 goto error_out;
273 } 273 }
274 res = 0; 274 res = 0;
275 __wait_event_interruptible(sdp->o_excl_wait, (!sdp->exclude), 275 __wait_event_interruptible(sdp->o_excl_wait, (!sdp->exclude),
276 res); 276 res);
277 if (res) { 277 if (res) {
278 retval = res; /* -ERESTARTSYS because signal hit process */ 278 retval = res; /* -ERESTARTSYS because signal hit process */
279 goto error_out; 279 goto error_out;
280 } 280 }
281 } 281 }
282 if (sdp->detached) { 282 if (sdp->detached) {
283 retval = -ENODEV; 283 retval = -ENODEV;
284 goto error_out; 284 goto error_out;
285 } 285 }
286 if (!sdp->headfp) { /* no existing opens on this device */ 286 if (!sdp->headfp) { /* no existing opens on this device */
287 sdp->sgdebug = 0; 287 sdp->sgdebug = 0;
288 q = sdp->device->request_queue; 288 q = sdp->device->request_queue;
289 sdp->sg_tablesize = min(q->max_hw_segments, 289 sdp->sg_tablesize = min(q->max_hw_segments,
290 q->max_phys_segments); 290 q->max_phys_segments);
291 } 291 }
292 if ((sfp = sg_add_sfp(sdp, dev))) 292 if ((sfp = sg_add_sfp(sdp, dev)))
293 filp->private_data = sfp; 293 filp->private_data = sfp;
294 else { 294 else {
295 if (flags & O_EXCL) 295 if (flags & O_EXCL)
296 sdp->exclude = 0; /* undo if error */ 296 sdp->exclude = 0; /* undo if error */
297 retval = -ENOMEM; 297 retval = -ENOMEM;
298 goto error_out; 298 goto error_out;
299 } 299 }
300 return 0; 300 return 0;
301 301
302 error_out: 302 error_out:
303 scsi_device_put(sdp->device); 303 scsi_device_put(sdp->device);
304 return retval; 304 return retval;
305 } 305 }
306 306
307 /* Following function was formerly called 'sg_close' */ 307 /* Following function was formerly called 'sg_close' */
308 static int 308 static int
309 sg_release(struct inode *inode, struct file *filp) 309 sg_release(struct inode *inode, struct file *filp)
310 { 310 {
311 Sg_device *sdp; 311 Sg_device *sdp;
312 Sg_fd *sfp; 312 Sg_fd *sfp;
313 313
314 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) 314 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
315 return -ENXIO; 315 return -ENXIO;
316 SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name)); 316 SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
317 sg_fasync(-1, filp, 0); /* remove filp from async notification list */ 317 sg_fasync(-1, filp, 0); /* remove filp from async notification list */
318 if (0 == sg_remove_sfp(sdp, sfp)) { /* Returns 1 when sdp gone */ 318 if (0 == sg_remove_sfp(sdp, sfp)) { /* Returns 1 when sdp gone */
319 if (!sdp->detached) { 319 if (!sdp->detached) {
320 scsi_device_put(sdp->device); 320 scsi_device_put(sdp->device);
321 } 321 }
322 sdp->exclude = 0; 322 sdp->exclude = 0;
323 wake_up_interruptible(&sdp->o_excl_wait); 323 wake_up_interruptible(&sdp->o_excl_wait);
324 } 324 }
325 return 0; 325 return 0;
326 } 326 }
327 327
328 static ssize_t 328 static ssize_t
329 sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) 329 sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
330 { 330 {
331 Sg_device *sdp; 331 Sg_device *sdp;
332 Sg_fd *sfp; 332 Sg_fd *sfp;
333 Sg_request *srp; 333 Sg_request *srp;
334 int req_pack_id = -1; 334 int req_pack_id = -1;
335 sg_io_hdr_t *hp; 335 sg_io_hdr_t *hp;
336 struct sg_header *old_hdr = NULL; 336 struct sg_header *old_hdr = NULL;
337 int retval = 0; 337 int retval = 0;
338 338
339 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) 339 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
340 return -ENXIO; 340 return -ENXIO;
341 SCSI_LOG_TIMEOUT(3, printk("sg_read: %s, count=%d\n", 341 SCSI_LOG_TIMEOUT(3, printk("sg_read: %s, count=%d\n",
342 sdp->disk->disk_name, (int) count)); 342 sdp->disk->disk_name, (int) count));
343 343
344 if (!access_ok(VERIFY_WRITE, buf, count)) 344 if (!access_ok(VERIFY_WRITE, buf, count))
345 return -EFAULT; 345 return -EFAULT;
346 if (sfp->force_packid && (count >= SZ_SG_HEADER)) { 346 if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
347 old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL); 347 old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
348 if (!old_hdr) 348 if (!old_hdr)
349 return -ENOMEM; 349 return -ENOMEM;
350 if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) { 350 if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
351 retval = -EFAULT; 351 retval = -EFAULT;
352 goto free_old_hdr; 352 goto free_old_hdr;
353 } 353 }
354 if (old_hdr->reply_len < 0) { 354 if (old_hdr->reply_len < 0) {
355 if (count >= SZ_SG_IO_HDR) { 355 if (count >= SZ_SG_IO_HDR) {
356 sg_io_hdr_t *new_hdr; 356 sg_io_hdr_t *new_hdr;
357 new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL); 357 new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
358 if (!new_hdr) { 358 if (!new_hdr) {
359 retval = -ENOMEM; 359 retval = -ENOMEM;
360 goto free_old_hdr; 360 goto free_old_hdr;
361 } 361 }
362 retval =__copy_from_user 362 retval =__copy_from_user
363 (new_hdr, buf, SZ_SG_IO_HDR); 363 (new_hdr, buf, SZ_SG_IO_HDR);
364 req_pack_id = new_hdr->pack_id; 364 req_pack_id = new_hdr->pack_id;
365 kfree(new_hdr); 365 kfree(new_hdr);
366 if (retval) { 366 if (retval) {
367 retval = -EFAULT; 367 retval = -EFAULT;
368 goto free_old_hdr; 368 goto free_old_hdr;
369 } 369 }
370 } 370 }
371 } else 371 } else
372 req_pack_id = old_hdr->pack_id; 372 req_pack_id = old_hdr->pack_id;
373 } 373 }
374 srp = sg_get_rq_mark(sfp, req_pack_id); 374 srp = sg_get_rq_mark(sfp, req_pack_id);
375 if (!srp) { /* now wait on packet to arrive */ 375 if (!srp) { /* now wait on packet to arrive */
376 if (sdp->detached) { 376 if (sdp->detached) {
377 retval = -ENODEV; 377 retval = -ENODEV;
378 goto free_old_hdr; 378 goto free_old_hdr;
379 } 379 }
380 if (filp->f_flags & O_NONBLOCK) { 380 if (filp->f_flags & O_NONBLOCK) {
381 retval = -EAGAIN; 381 retval = -EAGAIN;
382 goto free_old_hdr; 382 goto free_old_hdr;
383 } 383 }
384 while (1) { 384 while (1) {
385 retval = 0; /* following macro beats race condition */ 385 retval = 0; /* following macro beats race condition */
386 __wait_event_interruptible(sfp->read_wait, 386 __wait_event_interruptible(sfp->read_wait,
387 (sdp->detached || 387 (sdp->detached ||
388 (srp = sg_get_rq_mark(sfp, req_pack_id))), 388 (srp = sg_get_rq_mark(sfp, req_pack_id))),
389 retval); 389 retval);
390 if (sdp->detached) { 390 if (sdp->detached) {
391 retval = -ENODEV; 391 retval = -ENODEV;
392 goto free_old_hdr; 392 goto free_old_hdr;
393 } 393 }
394 if (0 == retval) 394 if (0 == retval)
395 break; 395 break;
396 396
397 /* -ERESTARTSYS as signal hit process */ 397 /* -ERESTARTSYS as signal hit process */
398 goto free_old_hdr; 398 goto free_old_hdr;
399 } 399 }
400 } 400 }
401 if (srp->header.interface_id != '\0') { 401 if (srp->header.interface_id != '\0') {
402 retval = sg_new_read(sfp, buf, count, srp); 402 retval = sg_new_read(sfp, buf, count, srp);
403 goto free_old_hdr; 403 goto free_old_hdr;
404 } 404 }
405 405
406 hp = &srp->header; 406 hp = &srp->header;
407 if (old_hdr == NULL) { 407 if (old_hdr == NULL) {
408 old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL); 408 old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
409 if (! old_hdr) { 409 if (! old_hdr) {
410 retval = -ENOMEM; 410 retval = -ENOMEM;
411 goto free_old_hdr; 411 goto free_old_hdr;
412 } 412 }
413 } 413 }
414 memset(old_hdr, 0, SZ_SG_HEADER); 414 memset(old_hdr, 0, SZ_SG_HEADER);
415 old_hdr->reply_len = (int) hp->timeout; 415 old_hdr->reply_len = (int) hp->timeout;
416 old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */ 416 old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */
417 old_hdr->pack_id = hp->pack_id; 417 old_hdr->pack_id = hp->pack_id;
418 old_hdr->twelve_byte = 418 old_hdr->twelve_byte =
419 ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0; 419 ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
420 old_hdr->target_status = hp->masked_status; 420 old_hdr->target_status = hp->masked_status;
421 old_hdr->host_status = hp->host_status; 421 old_hdr->host_status = hp->host_status;
422 old_hdr->driver_status = hp->driver_status; 422 old_hdr->driver_status = hp->driver_status;
423 if ((CHECK_CONDITION & hp->masked_status) || 423 if ((CHECK_CONDITION & hp->masked_status) ||
424 (DRIVER_SENSE & hp->driver_status)) 424 (DRIVER_SENSE & hp->driver_status))
425 memcpy(old_hdr->sense_buffer, srp->sense_b, 425 memcpy(old_hdr->sense_buffer, srp->sense_b,
426 sizeof (old_hdr->sense_buffer)); 426 sizeof (old_hdr->sense_buffer));
427 switch (hp->host_status) { 427 switch (hp->host_status) {
428 /* This setup of 'result' is for backward compatibility and is best 428 /* This setup of 'result' is for backward compatibility and is best
429 ignored by the user who should use target, host + driver status */ 429 ignored by the user who should use target, host + driver status */
430 case DID_OK: 430 case DID_OK:
431 case DID_PASSTHROUGH: 431 case DID_PASSTHROUGH:
432 case DID_SOFT_ERROR: 432 case DID_SOFT_ERROR:
433 old_hdr->result = 0; 433 old_hdr->result = 0;
434 break; 434 break;
435 case DID_NO_CONNECT: 435 case DID_NO_CONNECT:
436 case DID_BUS_BUSY: 436 case DID_BUS_BUSY:
437 case DID_TIME_OUT: 437 case DID_TIME_OUT:
438 old_hdr->result = EBUSY; 438 old_hdr->result = EBUSY;
439 break; 439 break;
440 case DID_BAD_TARGET: 440 case DID_BAD_TARGET:
441 case DID_ABORT: 441 case DID_ABORT:
442 case DID_PARITY: 442 case DID_PARITY:
443 case DID_RESET: 443 case DID_RESET:
444 case DID_BAD_INTR: 444 case DID_BAD_INTR:
445 old_hdr->result = EIO; 445 old_hdr->result = EIO;
446 break; 446 break;
447 case DID_ERROR: 447 case DID_ERROR:
448 old_hdr->result = (srp->sense_b[0] == 0 && 448 old_hdr->result = (srp->sense_b[0] == 0 &&
449 hp->masked_status == GOOD) ? 0 : EIO; 449 hp->masked_status == GOOD) ? 0 : EIO;
450 break; 450 break;
451 default: 451 default:
452 old_hdr->result = EIO; 452 old_hdr->result = EIO;
453 break; 453 break;
454 } 454 }
455 455
456 /* Now copy the result back to the user buffer. */ 456 /* Now copy the result back to the user buffer. */
457 if (count >= SZ_SG_HEADER) { 457 if (count >= SZ_SG_HEADER) {
458 if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) { 458 if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
459 retval = -EFAULT; 459 retval = -EFAULT;
460 goto free_old_hdr; 460 goto free_old_hdr;
461 } 461 }
462 buf += SZ_SG_HEADER; 462 buf += SZ_SG_HEADER;
463 if (count > old_hdr->reply_len) 463 if (count > old_hdr->reply_len)
464 count = old_hdr->reply_len; 464 count = old_hdr->reply_len;
465 if (count > SZ_SG_HEADER) { 465 if (count > SZ_SG_HEADER) {
466 if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) { 466 if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) {
467 retval = -EFAULT; 467 retval = -EFAULT;
468 goto free_old_hdr; 468 goto free_old_hdr;
469 } 469 }
470 } 470 }
471 } else 471 } else
472 count = (old_hdr->result == 0) ? 0 : -EIO; 472 count = (old_hdr->result == 0) ? 0 : -EIO;
473 sg_finish_rem_req(srp); 473 sg_finish_rem_req(srp);
474 retval = count; 474 retval = count;
475 free_old_hdr: 475 free_old_hdr:
476 kfree(old_hdr); 476 kfree(old_hdr);
477 return retval; 477 return retval;
478 } 478 }
479 479
480 static ssize_t 480 static ssize_t
481 sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp) 481 sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
482 { 482 {
483 sg_io_hdr_t *hp = &srp->header; 483 sg_io_hdr_t *hp = &srp->header;
484 int err = 0; 484 int err = 0;
485 int len; 485 int len;
486 486
487 if (count < SZ_SG_IO_HDR) { 487 if (count < SZ_SG_IO_HDR) {
488 err = -EINVAL; 488 err = -EINVAL;
489 goto err_out; 489 goto err_out;
490 } 490 }
491 hp->sb_len_wr = 0; 491 hp->sb_len_wr = 0;
492 if ((hp->mx_sb_len > 0) && hp->sbp) { 492 if ((hp->mx_sb_len > 0) && hp->sbp) {
493 if ((CHECK_CONDITION & hp->masked_status) || 493 if ((CHECK_CONDITION & hp->masked_status) ||
494 (DRIVER_SENSE & hp->driver_status)) { 494 (DRIVER_SENSE & hp->driver_status)) {
495 int sb_len = SCSI_SENSE_BUFFERSIZE; 495 int sb_len = SCSI_SENSE_BUFFERSIZE;
496 sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len; 496 sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
497 len = 8 + (int) srp->sense_b[7]; /* Additional sense length field */ 497 len = 8 + (int) srp->sense_b[7]; /* Additional sense length field */
498 len = (len > sb_len) ? sb_len : len; 498 len = (len > sb_len) ? sb_len : len;
499 if (copy_to_user(hp->sbp, srp->sense_b, len)) { 499 if (copy_to_user(hp->sbp, srp->sense_b, len)) {
500 err = -EFAULT; 500 err = -EFAULT;
501 goto err_out; 501 goto err_out;
502 } 502 }
503 hp->sb_len_wr = len; 503 hp->sb_len_wr = len;
504 } 504 }
505 } 505 }
506 if (hp->masked_status || hp->host_status || hp->driver_status) 506 if (hp->masked_status || hp->host_status || hp->driver_status)
507 hp->info |= SG_INFO_CHECK; 507 hp->info |= SG_INFO_CHECK;
508 if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) { 508 if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) {
509 err = -EFAULT; 509 err = -EFAULT;
510 goto err_out; 510 goto err_out;
511 } 511 }
512 err = sg_read_xfer(srp); 512 err = sg_read_xfer(srp);
513 err_out: 513 err_out:
514 sg_finish_rem_req(srp); 514 sg_finish_rem_req(srp);
515 return (0 == err) ? count : err; 515 return (0 == err) ? count : err;
516 } 516 }
517 517
518 static ssize_t 518 static ssize_t
519 sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) 519 sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
520 { 520 {
521 int mxsize, cmd_size, k; 521 int mxsize, cmd_size, k;
522 int input_size, blocking; 522 int input_size, blocking;
523 unsigned char opcode; 523 unsigned char opcode;
524 Sg_device *sdp; 524 Sg_device *sdp;
525 Sg_fd *sfp; 525 Sg_fd *sfp;
526 Sg_request *srp; 526 Sg_request *srp;
527 struct sg_header old_hdr; 527 struct sg_header old_hdr;
528 sg_io_hdr_t *hp; 528 sg_io_hdr_t *hp;
529 unsigned char cmnd[MAX_COMMAND_SIZE]; 529 unsigned char cmnd[MAX_COMMAND_SIZE];
530 530
531 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) 531 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
532 return -ENXIO; 532 return -ENXIO;
533 SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n", 533 SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n",
534 sdp->disk->disk_name, (int) count)); 534 sdp->disk->disk_name, (int) count));
535 if (sdp->detached) 535 if (sdp->detached)
536 return -ENODEV; 536 return -ENODEV;
537 if (!((filp->f_flags & O_NONBLOCK) || 537 if (!((filp->f_flags & O_NONBLOCK) ||
538 scsi_block_when_processing_errors(sdp->device))) 538 scsi_block_when_processing_errors(sdp->device)))
539 return -ENXIO; 539 return -ENXIO;
540 540
541 if (!access_ok(VERIFY_READ, buf, count)) 541 if (!access_ok(VERIFY_READ, buf, count))
542 return -EFAULT; /* protects following copy_from_user()s + get_user()s */ 542 return -EFAULT; /* protects following copy_from_user()s + get_user()s */
543 if (count < SZ_SG_HEADER) 543 if (count < SZ_SG_HEADER)
544 return -EIO; 544 return -EIO;
545 if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER)) 545 if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
546 return -EFAULT; 546 return -EFAULT;
547 blocking = !(filp->f_flags & O_NONBLOCK); 547 blocking = !(filp->f_flags & O_NONBLOCK);
548 if (old_hdr.reply_len < 0) 548 if (old_hdr.reply_len < 0)
549 return sg_new_write(sfp, buf, count, blocking, 0, NULL); 549 return sg_new_write(sfp, buf, count, blocking, 0, NULL);
550 if (count < (SZ_SG_HEADER + 6)) 550 if (count < (SZ_SG_HEADER + 6))
551 return -EIO; /* The minimum scsi command length is 6 bytes. */ 551 return -EIO; /* The minimum scsi command length is 6 bytes. */
552 552
553 if (!(srp = sg_add_request(sfp))) { 553 if (!(srp = sg_add_request(sfp))) {
554 SCSI_LOG_TIMEOUT(1, printk("sg_write: queue full\n")); 554 SCSI_LOG_TIMEOUT(1, printk("sg_write: queue full\n"));
555 return -EDOM; 555 return -EDOM;
556 } 556 }
557 buf += SZ_SG_HEADER; 557 buf += SZ_SG_HEADER;
558 __get_user(opcode, buf); 558 __get_user(opcode, buf);
559 if (sfp->next_cmd_len > 0) { 559 if (sfp->next_cmd_len > 0) {
560 if (sfp->next_cmd_len > MAX_COMMAND_SIZE) { 560 if (sfp->next_cmd_len > MAX_COMMAND_SIZE) {
561 SCSI_LOG_TIMEOUT(1, printk("sg_write: command length too long\n")); 561 SCSI_LOG_TIMEOUT(1, printk("sg_write: command length too long\n"));
562 sfp->next_cmd_len = 0; 562 sfp->next_cmd_len = 0;
563 sg_remove_request(sfp, srp); 563 sg_remove_request(sfp, srp);
564 return -EIO; 564 return -EIO;
565 } 565 }
566 cmd_size = sfp->next_cmd_len; 566 cmd_size = sfp->next_cmd_len;
567 sfp->next_cmd_len = 0; /* reset so only this write() effected */ 567 sfp->next_cmd_len = 0; /* reset so only this write() effected */
568 } else { 568 } else {
569 cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */ 569 cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */
570 if ((opcode >= 0xc0) && old_hdr.twelve_byte) 570 if ((opcode >= 0xc0) && old_hdr.twelve_byte)
571 cmd_size = 12; 571 cmd_size = 12;
572 } 572 }
573 SCSI_LOG_TIMEOUT(4, printk( 573 SCSI_LOG_TIMEOUT(4, printk(
574 "sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size)); 574 "sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
575 /* Determine buffer size. */ 575 /* Determine buffer size. */
576 input_size = count - cmd_size; 576 input_size = count - cmd_size;
577 mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len; 577 mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
578 mxsize -= SZ_SG_HEADER; 578 mxsize -= SZ_SG_HEADER;
579 input_size -= SZ_SG_HEADER; 579 input_size -= SZ_SG_HEADER;
580 if (input_size < 0) { 580 if (input_size < 0) {
581 sg_remove_request(sfp, srp); 581 sg_remove_request(sfp, srp);
582 return -EIO; /* User did not pass enough bytes for this command. */ 582 return -EIO; /* User did not pass enough bytes for this command. */
583 } 583 }
584 hp = &srp->header; 584 hp = &srp->header;
585 hp->interface_id = '\0'; /* indicator of old interface tunnelled */ 585 hp->interface_id = '\0'; /* indicator of old interface tunnelled */
586 hp->cmd_len = (unsigned char) cmd_size; 586 hp->cmd_len = (unsigned char) cmd_size;
587 hp->iovec_count = 0; 587 hp->iovec_count = 0;
588 hp->mx_sb_len = 0; 588 hp->mx_sb_len = 0;
589 if (input_size > 0) 589 if (input_size > 0)
590 hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ? 590 hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
591 SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV; 591 SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
592 else 592 else
593 hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE; 593 hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
594 hp->dxfer_len = mxsize; 594 hp->dxfer_len = mxsize;
595 hp->dxferp = (char __user *)buf + cmd_size; 595 hp->dxferp = (char __user *)buf + cmd_size;
596 hp->sbp = NULL; 596 hp->sbp = NULL;
597 hp->timeout = old_hdr.reply_len; /* structure abuse ... */ 597 hp->timeout = old_hdr.reply_len; /* structure abuse ... */
598 hp->flags = input_size; /* structure abuse ... */ 598 hp->flags = input_size; /* structure abuse ... */
599 hp->pack_id = old_hdr.pack_id; 599 hp->pack_id = old_hdr.pack_id;
600 hp->usr_ptr = NULL; 600 hp->usr_ptr = NULL;
601 if (__copy_from_user(cmnd, buf, cmd_size)) 601 if (__copy_from_user(cmnd, buf, cmd_size))
602 return -EFAULT; 602 return -EFAULT;
603 /* 603 /*
604 * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV, 604 * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
605 * but is is possible that the app intended SG_DXFER_TO_DEV, because there 605 * but is is possible that the app intended SG_DXFER_TO_DEV, because there
606 * is a non-zero input_size, so emit a warning. 606 * is a non-zero input_size, so emit a warning.
607 */ 607 */
608 if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) 608 if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV)
609 if (printk_ratelimit()) 609 if (printk_ratelimit())
610 printk(KERN_WARNING 610 printk(KERN_WARNING
611 "sg_write: data in/out %d/%d bytes for SCSI command 0x%x--" 611 "sg_write: data in/out %d/%d bytes for SCSI command 0x%x--"
612 "guessing data in;\n" KERN_WARNING " " 612 "guessing data in;\n" KERN_WARNING " "
613 "program %s not setting count and/or reply_len properly\n", 613 "program %s not setting count and/or reply_len properly\n",
614 old_hdr.reply_len - (int)SZ_SG_HEADER, 614 old_hdr.reply_len - (int)SZ_SG_HEADER,
615 input_size, (unsigned int) cmnd[0], 615 input_size, (unsigned int) cmnd[0],
616 current->comm); 616 current->comm);
617 k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking); 617 k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
618 return (k < 0) ? k : count; 618 return (k < 0) ? k : count;
619 } 619 }
620 620
621 static ssize_t 621 static ssize_t
622 sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count, 622 sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count,
623 int blocking, int read_only, Sg_request ** o_srp) 623 int blocking, int read_only, Sg_request ** o_srp)
624 { 624 {
625 int k; 625 int k;
626 Sg_request *srp; 626 Sg_request *srp;
627 sg_io_hdr_t *hp; 627 sg_io_hdr_t *hp;
628 unsigned char cmnd[MAX_COMMAND_SIZE]; 628 unsigned char cmnd[MAX_COMMAND_SIZE];
629 int timeout; 629 int timeout;
630 unsigned long ul_timeout; 630 unsigned long ul_timeout;
631 631
632 if (count < SZ_SG_IO_HDR) 632 if (count < SZ_SG_IO_HDR)
633 return -EINVAL; 633 return -EINVAL;
634 if (!access_ok(VERIFY_READ, buf, count)) 634 if (!access_ok(VERIFY_READ, buf, count))
635 return -EFAULT; /* protects following copy_from_user()s + get_user()s */ 635 return -EFAULT; /* protects following copy_from_user()s + get_user()s */
636 636
637 sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */ 637 sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */
638 if (!(srp = sg_add_request(sfp))) { 638 if (!(srp = sg_add_request(sfp))) {
639 SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n")); 639 SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n"));
640 return -EDOM; 640 return -EDOM;
641 } 641 }
642 hp = &srp->header; 642 hp = &srp->header;
643 if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) { 643 if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
644 sg_remove_request(sfp, srp); 644 sg_remove_request(sfp, srp);
645 return -EFAULT; 645 return -EFAULT;
646 } 646 }
647 if (hp->interface_id != 'S') { 647 if (hp->interface_id != 'S') {
648 sg_remove_request(sfp, srp); 648 sg_remove_request(sfp, srp);
649 return -ENOSYS; 649 return -ENOSYS;
650 } 650 }
651 if (hp->flags & SG_FLAG_MMAP_IO) { 651 if (hp->flags & SG_FLAG_MMAP_IO) {
652 if (hp->dxfer_len > sfp->reserve.bufflen) { 652 if (hp->dxfer_len > sfp->reserve.bufflen) {
653 sg_remove_request(sfp, srp); 653 sg_remove_request(sfp, srp);
654 return -ENOMEM; /* MMAP_IO size must fit in reserve buffer */ 654 return -ENOMEM; /* MMAP_IO size must fit in reserve buffer */
655 } 655 }
656 if (hp->flags & SG_FLAG_DIRECT_IO) { 656 if (hp->flags & SG_FLAG_DIRECT_IO) {
657 sg_remove_request(sfp, srp); 657 sg_remove_request(sfp, srp);
658 return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */ 658 return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */
659 } 659 }
660 if (sg_res_in_use(sfp)) { 660 if (sg_res_in_use(sfp)) {
661 sg_remove_request(sfp, srp); 661 sg_remove_request(sfp, srp);
662 return -EBUSY; /* reserve buffer already being used */ 662 return -EBUSY; /* reserve buffer already being used */
663 } 663 }
664 } 664 }
665 ul_timeout = msecs_to_jiffies(srp->header.timeout); 665 ul_timeout = msecs_to_jiffies(srp->header.timeout);
666 timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX; 666 timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX;
667 if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) { 667 if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) {
668 sg_remove_request(sfp, srp); 668 sg_remove_request(sfp, srp);
669 return -EMSGSIZE; 669 return -EMSGSIZE;
670 } 670 }
671 if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) { 671 if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) {
672 sg_remove_request(sfp, srp); 672 sg_remove_request(sfp, srp);
673 return -EFAULT; /* protects following copy_from_user()s + get_user()s */ 673 return -EFAULT; /* protects following copy_from_user()s + get_user()s */
674 } 674 }
675 if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) { 675 if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
676 sg_remove_request(sfp, srp); 676 sg_remove_request(sfp, srp);
677 return -EFAULT; 677 return -EFAULT;
678 } 678 }
679 if (read_only && 679 if (read_only &&
680 (!sg_allow_access(cmnd[0], sfp->parentdp->device->type))) { 680 (!sg_allow_access(cmnd[0], sfp->parentdp->device->type))) {
681 sg_remove_request(sfp, srp); 681 sg_remove_request(sfp, srp);
682 return -EPERM; 682 return -EPERM;
683 } 683 }
684 k = sg_common_write(sfp, srp, cmnd, timeout, blocking); 684 k = sg_common_write(sfp, srp, cmnd, timeout, blocking);
685 if (k < 0) 685 if (k < 0)
686 return k; 686 return k;
687 if (o_srp) 687 if (o_srp)
688 *o_srp = srp; 688 *o_srp = srp;
689 return count; 689 return count;
690 } 690 }
691 691
/*
 * sg_common_write() - common back end for sg write()/SG_IO submission.
 *
 * Resets the per-request status fields in the header, builds the data
 * buffer (sg_start_req/sg_write_xfer), maps the sg dxfer_direction onto
 * a DMA data direction, and hands the command to the SCSI mid-level via
 * scsi_execute_async().  The caller-supplied hp->cmd_len is passed
 * through so the mid-level does not have to guess the CDB length.
 *
 * Returns 0 on successful submission (completion is reported later via
 * the sg_cmd_done() callback), otherwise a negative errno; on any error
 * the partially-built request is torn down with sg_finish_rem_req().
 */
static int
sg_common_write(Sg_fd * sfp, Sg_request * srp,
		unsigned char *cmnd, int timeout, int blocking)
{
	int k, data_dir;
	Sg_device *sdp = sfp->parentdp;
	sg_io_hdr_t *hp = &srp->header;

	srp->data.cmd_opcode = cmnd[0];	/* hold opcode of command */
	/* Clear all result fields so stale data from a reused header
	   cannot leak back to user space. */
	hp->status = 0;
	hp->masked_status = 0;
	hp->msg_status = 0;
	hp->info = 0;
	hp->host_status = 0;
	hp->driver_status = 0;
	hp->resid = 0;
	SCSI_LOG_TIMEOUT(4, printk("sg_common_write:  scsi opcode=0x%02x, cmd_size=%d\n",
			  (int) cmnd[0], (int) hp->cmd_len));

	if ((k = sg_start_req(srp))) {
		SCSI_LOG_TIMEOUT(1, printk("sg_write: start_req err=%d\n", k));
		sg_finish_rem_req(srp);
		return k;	/* probably out of space --> ENOMEM */
	}
	if ((k = sg_write_xfer(srp))) {
		SCSI_LOG_TIMEOUT(1, printk("sg_write: write_xfer, bad address\n"));
		sg_finish_rem_req(srp);
		return k;
	}
	/* Device may have been hot-unplugged while we were building the
	   request; bail out before touching sdp->device. */
	if (sdp->detached) {
		sg_finish_rem_req(srp);
		return -ENODEV;
	}

	/* Translate the sg interface transfer direction into the DMA
	   direction expected by the mid-level. */
	switch (hp->dxfer_direction) {
	case SG_DXFER_TO_FROM_DEV:
	case SG_DXFER_FROM_DEV:
		data_dir = DMA_FROM_DEVICE;
		break;
	case SG_DXFER_TO_DEV:
		data_dir = DMA_TO_DEVICE;
		break;
	case SG_DXFER_UNKNOWN:
		data_dir = DMA_BIDIRECTIONAL;
		break;
	default:
		data_dir = DMA_NONE;
		break;
	}
	/* Record submission time (ms); completion path turns this into
	   the request duration. */
	hp->duration = jiffies_to_msecs(jiffies);
/* Now send everything of to mid-level. The next time we hear about this
   packet is when sg_cmd_done() is called (i.e. a callback). */
	if (scsi_execute_async(sdp->device, cmnd, hp->cmd_len, data_dir, srp->data.buffer,
				hp->dxfer_len, srp->data.k_use_sg, timeout,
				SG_DEFAULT_RETRIES, srp, sg_cmd_done,
				GFP_ATOMIC)) {
		SCSI_LOG_TIMEOUT(1, printk("sg_write: scsi_execute_async failed\n"));
		/*
		 * most likely out of mem, but could also be a bad map
		 */
		return -ENOMEM;
	} else
		return 0;
}
756 756
757 static int 757 static int
758 sg_srp_done(Sg_request *srp, Sg_fd *sfp) 758 sg_srp_done(Sg_request *srp, Sg_fd *sfp)
759 { 759 {
760 unsigned long iflags; 760 unsigned long iflags;
761 int done; 761 int done;
762 762
763 read_lock_irqsave(&sfp->rq_list_lock, iflags); 763 read_lock_irqsave(&sfp->rq_list_lock, iflags);
764 done = srp->done; 764 done = srp->done;
765 read_unlock_irqrestore(&sfp->rq_list_lock, iflags); 765 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
766 return done; 766 return done;
767 } 767 }
768 768
/*
 * sg_ioctl() - ioctl entry point for /dev/sg* file descriptors.
 *
 * cmd_in selects the operation; arg is interpreted as either an int or
 * a user-space pointer depending on the command.  read_only is set for
 * descriptors not opened O_RDWR and restricts commands that could
 * modify the device.  Returns 0 or a command-specific non-negative
 * value on success, negative errno on failure.
 */
static int
sg_ioctl(struct inode *inode, struct file *filp,
	 unsigned int cmd_in, unsigned long arg)
{
	void __user *p = (void __user *)arg;
	int __user *ip = p;
	int result, val, read_only;
	Sg_device *sdp;
	Sg_fd *sfp;
	Sg_request *srp;
	unsigned long iflags;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n",
				   sdp->disk->disk_name, (int) cmd_in));
	read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));

	switch (cmd_in) {
	case SG_IO:
		/* Synchronous request: submit via sg_new_write(), sleep
		   until completion (or detach/signal), then copy the
		   result header back with sg_new_read(). */
		{
			int blocking = 1;	/* ignore O_NONBLOCK flag */

			if (sdp->detached)
				return -ENODEV;
			if (!scsi_block_when_processing_errors(sdp->device))
				return -ENXIO;
			if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
				return -EFAULT;
			result =
			    sg_new_write(sfp, p, SZ_SG_IO_HDR,
					 blocking, read_only, &srp);
			if (result < 0)
				return result;
			srp->sg_io_owned = 1;
			while (1) {
				result = 0;	/* following macro to beat race condition */
				__wait_event_interruptible(sfp->read_wait,
					(sdp->detached || sfp->closed || sg_srp_done(srp, sfp)),
					result);
				if (sdp->detached)
					return -ENODEV;
				if (sfp->closed)
					return 0;	/* request packet dropped already */
				if (0 == result)
					break;
				/* Interrupted by a signal: leave the request
				   in flight as an orphan and return. */
				srp->orphan = 1;
				return result;	/* -ERESTARTSYS because signal hit process */
			}
			write_lock_irqsave(&sfp->rq_list_lock, iflags);
			srp->done = 2;	/* mark consumed by this SG_IO */
			write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
			result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
			return (result < 0) ? result : 0;
		}
	case SG_SET_TIMEOUT:
		result = get_user(val, ip);
		if (result)
			return result;
		if (val < 0)
			return -EIO;
		/* Clamp so the USER_HZ -> HZ conversion cannot overflow. */
		if (val >= MULDIV (INT_MAX, USER_HZ, HZ))
			val = MULDIV (INT_MAX, USER_HZ, HZ);
		sfp->timeout_user = val;
		sfp->timeout = MULDIV (val, HZ, USER_HZ);

		return 0;
	case SG_GET_TIMEOUT:	/* N.B. User receives timeout as return value */
				/* strange ..., for backward compatibility */
		return sfp->timeout_user;
	case SG_SET_FORCE_LOW_DMA:
		result = get_user(val, ip);
		if (result)
			return result;
		if (val) {
			sfp->low_dma = 1;
			if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
				val = (int) sfp->reserve.bufflen;
				sg_remove_scat(&sfp->reserve);
				sg_build_reserve(sfp, val);
			}
		} else {
			if (sdp->detached)
				return -ENODEV;
			sfp->low_dma = sdp->device->host->unchecked_isa_dma;
		}
		return 0;
	case SG_GET_LOW_DMA:
		return put_user((int) sfp->low_dma, ip);
	case SG_GET_SCSI_ID:
		if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
			return -EFAULT;
		else {
			sg_scsi_id_t __user *sg_idp = p;

			if (sdp->detached)
				return -ENODEV;
			/* __put_user is safe here: the whole struct was
			   verified with access_ok above. */
			__put_user((int) sdp->device->host->host_no,
				   &sg_idp->host_no);
			__put_user((int) sdp->device->channel,
				   &sg_idp->channel);
			__put_user((int) sdp->device->id, &sg_idp->scsi_id);
			__put_user((int) sdp->device->lun, &sg_idp->lun);
			__put_user((int) sdp->device->type, &sg_idp->scsi_type);
			__put_user((short) sdp->device->host->cmd_per_lun,
				   &sg_idp->h_cmd_per_lun);
			__put_user((short) sdp->device->queue_depth,
				   &sg_idp->d_queue_depth);
			__put_user(0, &sg_idp->unused[0]);
			__put_user(0, &sg_idp->unused[1]);
			return 0;
		}
	case SG_SET_FORCE_PACK_ID:
		result = get_user(val, ip);
		if (result)
			return result;
		sfp->force_packid = val ? 1 : 0;
		return 0;
	case SG_GET_PACK_ID:
		/* Return the pack_id of the first completed, unclaimed
		   request, or -1 if none is waiting. */
		if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
			return -EFAULT;
		read_lock_irqsave(&sfp->rq_list_lock, iflags);
		for (srp = sfp->headrp; srp; srp = srp->nextrp) {
			if ((1 == srp->done) && (!srp->sg_io_owned)) {
				read_unlock_irqrestore(&sfp->rq_list_lock,
						       iflags);
				__put_user(srp->header.pack_id, ip);
				return 0;
			}
		}
		read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
		__put_user(-1, ip);
		return 0;
	case SG_GET_NUM_WAITING:
		read_lock_irqsave(&sfp->rq_list_lock, iflags);
		for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
			if ((1 == srp->done) && (!srp->sg_io_owned))
				++val;
		}
		read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
		return put_user(val, ip);
	case SG_GET_SG_TABLESIZE:
		return put_user(sdp->sg_tablesize, ip);
	case SG_SET_RESERVED_SIZE:
		result = get_user(val, ip);
		if (result)
			return result;
		if (val < 0)
			return -EINVAL;
		if (val != sfp->reserve.bufflen) {
			/* Cannot resize while the reserve buffer is in use
			   or mapped into user space. */
			if (sg_res_in_use(sfp) || sfp->mmap_called)
				return -EBUSY;
			sg_remove_scat(&sfp->reserve);
			sg_build_reserve(sfp, val);
		}
		return 0;
	case SG_GET_RESERVED_SIZE:
		val = (int) sfp->reserve.bufflen;
		return put_user(val, ip);
	case SG_SET_COMMAND_Q:
		result = get_user(val, ip);
		if (result)
			return result;
		sfp->cmd_q = val ? 1 : 0;
		return 0;
	case SG_GET_COMMAND_Q:
		return put_user((int) sfp->cmd_q, ip);
	case SG_SET_KEEP_ORPHAN:
		result = get_user(val, ip);
		if (result)
			return result;
		sfp->keep_orphan = val;
		return 0;
	case SG_GET_KEEP_ORPHAN:
		return put_user((int) sfp->keep_orphan, ip);
	case SG_NEXT_CMD_LEN:
		result = get_user(val, ip);
		if (result)
			return result;
		sfp->next_cmd_len = (val > 0) ? val : 0;
		return 0;
	case SG_GET_VERSION_NUM:
		return put_user(sg_version_num, ip);
	case SG_GET_ACCESS_COUNT:
		/* faked - we don't have a real access count anymore */
		val = (sdp->device ? 1 : 0);
		return put_user(val, ip);
	case SG_GET_REQUEST_TABLE:
		/* Snapshot the state of all queued requests into a
		   kernel buffer under the lock, then copy it out. */
		if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
			return -EFAULT;
		else {
			sg_req_info_t *rinfo;
			unsigned int ms;

			rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
					GFP_KERNEL);
			if (!rinfo)
				return -ENOMEM;
			read_lock_irqsave(&sfp->rq_list_lock, iflags);
			for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
			     ++val, srp = srp ? srp->nextrp : srp) {
				memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
				if (srp) {
					rinfo[val].req_state = srp->done + 1;
					rinfo[val].problem =
					    srp->header.masked_status & 
					    srp->header.host_status & 
					    srp->header.driver_status;
					if (srp->done)
						rinfo[val].duration =
							srp->header.duration;
					else {
						/* still running: report
						   elapsed time so far */
						ms = jiffies_to_msecs(jiffies);
						rinfo[val].duration =
						    (ms > srp->header.duration) ?
						    (ms - srp->header.duration) : 0;
					}
					rinfo[val].orphan = srp->orphan;
					rinfo[val].sg_io_owned =
						srp->sg_io_owned;
					rinfo[val].pack_id =
						srp->header.pack_id;
					rinfo[val].usr_ptr =
						srp->header.usr_ptr;
				}
			}
			read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
			result = __copy_to_user(p, rinfo,
						SZ_SG_REQ_INFO * SG_MAX_QUEUE);
			result = result ? -EFAULT : 0;
			kfree(rinfo);
			return result;
		}
	case SG_EMULATED_HOST:
		if (sdp->detached)
			return -ENODEV;
		return put_user(sdp->device->host->hostt->emulated, ip);
	case SG_SCSI_RESET:
		if (sdp->detached)
			return -ENODEV;
		if (filp->f_flags & O_NONBLOCK) {
			if (scsi_host_in_recovery(sdp->device->host))
				return -EBUSY;
		} else if (!scsi_block_when_processing_errors(sdp->device))
			return -EBUSY;
		result = get_user(val, ip);
		if (result)
			return result;
		if (SG_SCSI_RESET_NOTHING == val)
			return 0;
		switch (val) {
		case SG_SCSI_RESET_DEVICE:
			val = SCSI_TRY_RESET_DEVICE;
			break;
		case SG_SCSI_RESET_BUS:
			val = SCSI_TRY_RESET_BUS;
			break;
		case SG_SCSI_RESET_HOST:
			val = SCSI_TRY_RESET_HOST;
			break;
		default:
			return -EINVAL;
		}
		/* Resets are privileged regardless of open mode. */
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
			return -EACCES;
		return (scsi_reset_provider(sdp->device, val) ==
			SUCCESS) ? 0 : -EIO;
	case SCSI_IOCTL_SEND_COMMAND:
		if (sdp->detached)
			return -ENODEV;
		if (read_only) {
			/* Peek at the opcode so read-only fds cannot send
			   commands that modify the medium. */
			unsigned char opcode = WRITE_6;
			Scsi_Ioctl_Command __user *siocp = p;

			if (copy_from_user(&opcode, siocp->data, 1))
				return -EFAULT;
			if (!sg_allow_access(opcode, sdp->device->type))
				return -EPERM;
		}
		return scsi_ioctl_send_command(sdp->device, p);
	case SG_SET_DEBUG:
		result = get_user(val, ip);
		if (result)
			return result;
		sdp->sgdebug = (char) val;
		return 0;
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SCSI_IOCTL_PROBE_HOST:
	case SG_GET_TRANSFORM:
		if (sdp->detached)
			return -ENODEV;
		return scsi_ioctl(sdp->device, cmd_in, p);
	default:
		if (read_only)
			return -EPERM;	/* don't know so take safe approach */
		return scsi_ioctl(sdp->device, cmd_in, p);
	}
}
1068 1068
#ifdef CONFIG_COMPAT
/*
 * sg_compat_ioctl() - 32-bit compat ioctl entry point; forwards to the
 * host template's compat_ioctl handler when one is provided, otherwise
 * reports that the command is not handled here.
 */
static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
	Sg_fd *sfp = (Sg_fd *) filp->private_data;
	Sg_device *sdp;
	struct scsi_device *sdev;

	if (!sfp)
		return -ENXIO;
	sdp = sfp->parentdp;
	if (!sdp)
		return -ENXIO;

	sdev = sdp->device;
	if (!sdev->host->hostt->compat_ioctl)
		return -ENOIOCTLCMD;

	return sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
}
#endif
1091 1091
/*
 * sg_poll() - poll/select support for /dev/sg* descriptors.
 *
 * Reports POLLIN|POLLRDNORM when at least one completed, unclaimed
 * request is waiting to be read, POLLOUT|POLLWRNORM when another
 * command may be queued (always limited to SG_MAX_QUEUE; a single slot
 * when command queuing is off), and POLLHUP if the device detached.
 */
static unsigned int
sg_poll(struct file *filp, poll_table * wait)
{
	unsigned int res = 0;
	Sg_device *sdp;
	Sg_fd *sfp;
	Sg_request *srp;
	int count = 0;
	unsigned long iflags;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))
	    || sfp->closed)
		return POLLERR;
	poll_wait(filp, &sfp->read_wait, wait);
	/* Walk the request list under the read lock, counting queued
	   requests and noting any that are ready to be read. */
	read_lock_irqsave(&sfp->rq_list_lock, iflags);
	for (srp = sfp->headrp; srp; srp = srp->nextrp) {
		/* if any read waiting, flag it */
		if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
			res = POLLIN | POLLRDNORM;
		++count;
	}
	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);

	if (sdp->detached)
		res |= POLLHUP;
	else if (!sfp->cmd_q) {
		/* no command queuing: writable only when nothing queued */
		if (0 == count)
			res |= POLLOUT | POLLWRNORM;
	} else if (count < SG_MAX_QUEUE)
		res |= POLLOUT | POLLWRNORM;
	SCSI_LOG_TIMEOUT(3, printk("sg_poll: %s, res=0x%x\n",
				   sdp->disk->disk_name, (int) res));
	return res;
}
1126 1126
1127 static int 1127 static int
1128 sg_fasync(int fd, struct file *filp, int mode) 1128 sg_fasync(int fd, struct file *filp, int mode)
1129 { 1129 {
1130 int retval; 1130 int retval;
1131 Sg_device *sdp; 1131 Sg_device *sdp;
1132 Sg_fd *sfp; 1132 Sg_fd *sfp;
1133 1133
1134 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) 1134 if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
1135 return -ENXIO; 1135 return -ENXIO;
1136 SCSI_LOG_TIMEOUT(3, printk("sg_fasync: %s, mode=%d\n", 1136 SCSI_LOG_TIMEOUT(3, printk("sg_fasync: %s, mode=%d\n",
1137 sdp->disk->disk_name, mode)); 1137 sdp->disk->disk_name, mode));
1138 1138
1139 retval = fasync_helper(fd, filp, mode, &sfp->async_qp); 1139 retval = fasync_helper(fd, filp, mode, &sfp->async_qp);
1140 return (retval < 0) ? retval : 0; 1140 return (retval < 0) ? retval : 0;
1141 } 1141 }
1142 1142
/* When startFinish==1 increments page counts for pages other than the
   first of scatter gather elements obtained from alloc_pages().
   When startFinish==0 decrements ... */
/*
 * NOTE(review): this biases refcounts of the non-head pages of each
 * higher-order allocation so that the VM does not free them while they
 * are mmap()ed; sg_mmap() applies the increment exactly once per fd
 * lifetime and the release path presumably undoes it — confirm the
 * matching startFinish==0 caller before changing this.
 */
static void
sg_rb_correct4mmap(Sg_scatter_hold * rsv_schp, int startFinish)
{
	struct scatterlist *sg = rsv_schp->buffer;
	struct page *page;
	int k, m;

	SCSI_LOG_TIMEOUT(3, printk("sg_rb_correct4mmap: startFinish=%d, scatg=%d\n", 
				   startFinish, rsv_schp->k_use_sg));
	/* N.B. correction _not_ applied to base page of each allocation */
	for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sg) {
		/* m starts at PAGE_SIZE so the first (base) page of the
		   element is skipped. */
		for (m = PAGE_SIZE; m < sg->length; m += PAGE_SIZE) {
			page = sg->page;
			if (startFinish)
				get_page(page);
			else {
				/* guard against dropping a count that was
				   never taken */
				if (page_count(page) > 0)
					__put_page(page);
			}
		}
	}
}
1168 1168
/*
 * sg_vma_nopage() - fault handler for sg mmap()ed areas.
 *
 * Translates a faulting user address inside the mapped reserve buffer
 * into the backing page of the corresponding scatter-gather element,
 * taking a page reference before returning it.  Addresses outside the
 * reserve buffer (or a missing private_data) yield NOPAGE_SIGBUS.
 */
static struct page *
sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
{
	Sg_fd *sfp;
	struct page *page = NOPAGE_SIGBUS;
	unsigned long offset, len, sa;
	Sg_scatter_hold *rsv_schp;
	struct scatterlist *sg;
	int k;

	if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
		return page;
	rsv_schp = &sfp->reserve;
	offset = addr - vma->vm_start;
	if (offset >= rsv_schp->bufflen)
		return page;
	SCSI_LOG_TIMEOUT(3, printk("sg_vma_nopage: offset=%lu, scatg=%d\n",
				   offset, rsv_schp->k_use_sg));
	/* Walk the scatter list, reducing offset by each element's
	   mapped length until the element containing it is found. */
	sg = rsv_schp->buffer;
	sa = vma->vm_start;
	for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
	     ++k, ++sg) {
		len = vma->vm_end - sa;
		len = (len < sg->length) ? len : sg->length;
		if (offset < len) {
			page = sg->page;
			get_page(page);	/* increment page count */
			break;
		}
		sa += len;
		offset -= len;
	}

	if (type)
		*type = VM_FAULT_MINOR;
	return page;
}
1206 1206
/* VM operations for sg mmap()ed reserve buffers; pages are supplied
   on demand by sg_vma_nopage(). */
static struct vm_operations_struct sg_mmap_vm_ops = {
	.nopage = sg_vma_nopage,
};
1210 1210
/*
 * sg_mmap() - map this fd's reserve buffer into user space.
 *
 * Only offset-0 mappings no larger than the reserve buffer are allowed.
 * On first mmap of the fd the non-head pages of the reserve buffer get
 * their refcounts corrected (sg_rb_correct4mmap) so the VM will not
 * free them; faults are then serviced lazily via sg_mmap_vm_ops.
 * Returns 0 on success, negative errno on failure.
 */
static int
sg_mmap(struct file *filp, struct vm_area_struct *vma)
{
	Sg_fd *sfp;
	unsigned long req_sz, len, sa;
	Sg_scatter_hold *rsv_schp;
	int k;
	struct scatterlist *sg;

	if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
		return -ENXIO;
	req_sz = vma->vm_end - vma->vm_start;
	SCSI_LOG_TIMEOUT(3, printk("sg_mmap starting, vm_start=%p, len=%d\n",
				   (void *) vma->vm_start, (int) req_sz));
	if (vma->vm_pgoff)
		return -EINVAL;	/* want no offset */
	rsv_schp = &sfp->reserve;
	if (req_sz > rsv_schp->bufflen)
		return -ENOMEM;	/* cannot map more than reserved buffer */

	/* Walk the scatter list across the requested range; this loop
	   only advances sa and has no other visible effect here —
	   NOTE(review): presumably a sanity walk, confirm intent. */
	sa = vma->vm_start;
	sg = rsv_schp->buffer;
	for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
	     ++k, ++sg) {
		len = vma->vm_end - sa;
		len = (len < sg->length) ? len : sg->length;
		sa += len;
	}

	if (0 == sfp->mmap_called) {
		sg_rb_correct4mmap(rsv_schp, 1);	/* do only once per fd lifetime */
		sfp->mmap_called = 1;
	}
	vma->vm_flags |= VM_RESERVED;
	vma->vm_private_data = sfp;
	vma->vm_ops = &sg_mmap_vm_ops;
	return 0;
}
1249 1249
/* This function is a "bottom half" handler that is called by the
 * mid level when a command is completed (or has failed).
 *
 * @data:   opaque pointer registered at submit time; really the Sg_request.
 * @sense:  sense buffer supplied by the mid layer (valid when result != 0).
 * @result: SCSI result word (status/msg/host/driver bytes packed together).
 * @resid:  residual byte count of the data transfer.
 *
 * Fills in srp->header from the completion, then either wakes the reader
 * waiting in sg_read()/poll(), or — when the fd has already been released —
 * frees the request (and possibly the whole Sg_fd) itself.
 */
static void
sg_cmd_done(void *data, char *sense, int result, int resid)
{
	Sg_request *srp = data;
	Sg_device *sdp = NULL;
	Sg_fd *sfp;
	unsigned long iflags;
	unsigned int ms;

	if (NULL == srp) {
		printk(KERN_ERR "sg_cmd_done: NULL request\n");
		return;
	}
	sfp = srp->parentfp;
	if (sfp)
		sdp = sfp->parentdp;
	/* Device may have been hot-unplugged while the command was in flight. */
	if ((NULL == sdp) || sdp->detached) {
		printk(KERN_INFO "sg_cmd_done: device detached\n");
		return;
	}


	SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
		sdp->disk->disk_name, srp->header.pack_id, result));
	srp->header.resid = resid;
	/* header.duration held the submit timestamp (ms); convert to elapsed. */
	ms = jiffies_to_msecs(jiffies);
	srp->header.duration = (ms > srp->header.duration) ?
				(ms - srp->header.duration) : 0;
	if (0 != result) {
		struct scsi_sense_hdr sshdr;

		/* Preserve sense data for user space and unpack result word. */
		memcpy(srp->sense_b, sense, sizeof (srp->sense_b));
		srp->header.status = 0xff & result;
		srp->header.masked_status = status_byte(result);
		srp->header.msg_status = msg_byte(result);
		srp->header.host_status = host_byte(result);
		srp->header.driver_status = driver_byte(result);
		if ((sdp->sgdebug > 0) &&
		    ((CHECK_CONDITION == srp->header.masked_status) ||
		     (COMMAND_TERMINATED == srp->header.masked_status)))
			__scsi_print_sense("sg_cmd_done", sense,
					   SCSI_SENSE_BUFFERSIZE);

		/* Following if statement is a patch supplied by Eric Youngdale */
		if (driver_byte(result) != 0
		    && scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)
		    && !scsi_sense_is_deferred(&sshdr)
		    && sshdr.sense_key == UNIT_ATTENTION
		    && sdp->device->removable) {
			/* Detected possible disc change. Set the bit - this */
			/* may be used if there are filesystems using this device */
			sdp->device->changed = 1;
		}
	}
	/* Rely on write phase to clean out srp status values, so no "else" */

	if (sfp->closed) {	/* whoops this fd already released, cleanup */
		SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, freeing ...\n"));
		sg_finish_rem_req(srp);
		srp = NULL;
		if (NULL == sfp->headrp) {
			/* Last outstanding request on a closed fd: tear down
			 * the Sg_fd as well, dropping the device reference
			 * unless sg_remove_sfp() reports the device gone. */
			SCSI_LOG_TIMEOUT(1, printk("sg...bh: already closed, final cleanup\n"));
			if (0 == sg_remove_sfp(sdp, sfp)) {	/* device still present */
				scsi_device_put(sdp->device);
			}
			sfp = NULL;
		}
	} else if (srp && srp->orphan) {
		/* Caller gave up on this request (e.g. interrupted SG_IO). */
		if (sfp->keep_orphan)
			srp->sg_io_owned = 0;	/* let sg_read() pick it up */
		else {
			sg_finish_rem_req(srp);
			srp = NULL;
		}
	}
	if (sfp && srp) {
		/* Now wake up any sg_read() that is waiting for this packet. */
		kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
		write_lock_irqsave(&sfp->rq_list_lock, iflags);
		srp->done = 1;
		wake_up_interruptible(&sfp->read_wait);
		write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	}
}
1336 1336
/* File operations for /dev/sg* character device nodes. */
static struct file_operations sg_fops = {
	.owner = THIS_MODULE,
	.read = sg_read,
	.write = sg_write,
	.poll = sg_poll,
	.ioctl = sg_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = sg_compat_ioctl,
#endif
	.open = sg_open,
	.mmap = sg_mmap,
	.release = sg_release,
	.fasync = sg_fasync,
};
1351 1351
/* sysfs class ("scsi_generic") under which sg class devices are created. */
static struct class *sg_sysfs_class;

/* Non-zero once sg_sysfs_class has been created successfully in init_sg(). */
static int sg_sysfs_valid = 0;
1355 1355
1356 static int sg_alloc(struct gendisk *disk, struct scsi_device *scsidp) 1356 static int sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
1357 { 1357 {
1358 struct request_queue *q = scsidp->request_queue; 1358 struct request_queue *q = scsidp->request_queue;
1359 Sg_device *sdp; 1359 Sg_device *sdp;
1360 unsigned long iflags; 1360 unsigned long iflags;
1361 void *old_sg_dev_arr = NULL; 1361 void *old_sg_dev_arr = NULL;
1362 int k, error; 1362 int k, error;
1363 1363
1364 sdp = kmalloc(sizeof(Sg_device), GFP_KERNEL); 1364 sdp = kmalloc(sizeof(Sg_device), GFP_KERNEL);
1365 if (!sdp) { 1365 if (!sdp) {
1366 printk(KERN_WARNING "kmalloc Sg_device failure\n"); 1366 printk(KERN_WARNING "kmalloc Sg_device failure\n");
1367 return -ENOMEM; 1367 return -ENOMEM;
1368 } 1368 }
1369 1369
1370 write_lock_irqsave(&sg_dev_arr_lock, iflags); 1370 write_lock_irqsave(&sg_dev_arr_lock, iflags);
1371 if (unlikely(sg_nr_dev >= sg_dev_max)) { /* try to resize */ 1371 if (unlikely(sg_nr_dev >= sg_dev_max)) { /* try to resize */
1372 Sg_device **tmp_da; 1372 Sg_device **tmp_da;
1373 int tmp_dev_max = sg_nr_dev + SG_DEV_ARR_LUMP; 1373 int tmp_dev_max = sg_nr_dev + SG_DEV_ARR_LUMP;
1374 write_unlock_irqrestore(&sg_dev_arr_lock, iflags); 1374 write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1375 1375
1376 tmp_da = kmalloc(tmp_dev_max * sizeof(Sg_device *), GFP_KERNEL); 1376 tmp_da = kmalloc(tmp_dev_max * sizeof(Sg_device *), GFP_KERNEL);
1377 if (unlikely(!tmp_da)) 1377 if (unlikely(!tmp_da))
1378 goto expand_failed; 1378 goto expand_failed;
1379 1379
1380 write_lock_irqsave(&sg_dev_arr_lock, iflags); 1380 write_lock_irqsave(&sg_dev_arr_lock, iflags);
1381 memset(tmp_da, 0, tmp_dev_max * sizeof(Sg_device *)); 1381 memset(tmp_da, 0, tmp_dev_max * sizeof(Sg_device *));
1382 memcpy(tmp_da, sg_dev_arr, sg_dev_max * sizeof(Sg_device *)); 1382 memcpy(tmp_da, sg_dev_arr, sg_dev_max * sizeof(Sg_device *));
1383 old_sg_dev_arr = sg_dev_arr; 1383 old_sg_dev_arr = sg_dev_arr;
1384 sg_dev_arr = tmp_da; 1384 sg_dev_arr = tmp_da;
1385 sg_dev_max = tmp_dev_max; 1385 sg_dev_max = tmp_dev_max;
1386 } 1386 }
1387 1387
1388 for (k = 0; k < sg_dev_max; k++) 1388 for (k = 0; k < sg_dev_max; k++)
1389 if (!sg_dev_arr[k]) 1389 if (!sg_dev_arr[k])
1390 break; 1390 break;
1391 if (unlikely(k >= SG_MAX_DEVS)) 1391 if (unlikely(k >= SG_MAX_DEVS))
1392 goto overflow; 1392 goto overflow;
1393 1393
1394 memset(sdp, 0, sizeof(*sdp)); 1394 memset(sdp, 0, sizeof(*sdp));
1395 SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d \n", k)); 1395 SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d \n", k));
1396 sprintf(disk->disk_name, "sg%d", k); 1396 sprintf(disk->disk_name, "sg%d", k);
1397 disk->first_minor = k; 1397 disk->first_minor = k;
1398 sdp->disk = disk; 1398 sdp->disk = disk;
1399 sdp->device = scsidp; 1399 sdp->device = scsidp;
1400 init_waitqueue_head(&sdp->o_excl_wait); 1400 init_waitqueue_head(&sdp->o_excl_wait);
1401 sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments); 1401 sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments);
1402 1402
1403 sg_nr_dev++; 1403 sg_nr_dev++;
1404 sg_dev_arr[k] = sdp; 1404 sg_dev_arr[k] = sdp;
1405 write_unlock_irqrestore(&sg_dev_arr_lock, iflags); 1405 write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1406 error = k; 1406 error = k;
1407 1407
1408 out: 1408 out:
1409 if (error < 0) 1409 if (error < 0)
1410 kfree(sdp); 1410 kfree(sdp);
1411 kfree(old_sg_dev_arr); 1411 kfree(old_sg_dev_arr);
1412 return error; 1412 return error;
1413 1413
1414 expand_failed: 1414 expand_failed:
1415 printk(KERN_WARNING "sg_alloc: device array cannot be resized\n"); 1415 printk(KERN_WARNING "sg_alloc: device array cannot be resized\n");
1416 error = -ENOMEM; 1416 error = -ENOMEM;
1417 goto out; 1417 goto out;
1418 1418
1419 overflow: 1419 overflow:
1420 write_unlock_irqrestore(&sg_dev_arr_lock, iflags); 1420 write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
1421 sdev_printk(KERN_WARNING, scsidp, 1421 sdev_printk(KERN_WARNING, scsidp,
1422 "Unable to attach sg device type=%d, minor " 1422 "Unable to attach sg device type=%d, minor "
1423 "number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1); 1423 "number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1);
1424 error = -ENODEV; 1424 error = -ENODEV;
1425 goto out; 1425 goto out;
1426 } 1426 }
1427 1427
1428 static int 1428 static int
1429 sg_add(struct class_device *cl_dev, struct class_interface *cl_intf) 1429 sg_add(struct class_device *cl_dev, struct class_interface *cl_intf)
1430 { 1430 {
1431 struct scsi_device *scsidp = to_scsi_device(cl_dev->dev); 1431 struct scsi_device *scsidp = to_scsi_device(cl_dev->dev);
1432 struct gendisk *disk; 1432 struct gendisk *disk;
1433 Sg_device *sdp = NULL; 1433 Sg_device *sdp = NULL;
1434 struct cdev * cdev = NULL; 1434 struct cdev * cdev = NULL;
1435 int error, k; 1435 int error, k;
1436 1436
1437 disk = alloc_disk(1); 1437 disk = alloc_disk(1);
1438 if (!disk) { 1438 if (!disk) {
1439 printk(KERN_WARNING "alloc_disk failed\n"); 1439 printk(KERN_WARNING "alloc_disk failed\n");
1440 return -ENOMEM; 1440 return -ENOMEM;
1441 } 1441 }
1442 disk->major = SCSI_GENERIC_MAJOR; 1442 disk->major = SCSI_GENERIC_MAJOR;
1443 1443
1444 error = -ENOMEM; 1444 error = -ENOMEM;
1445 cdev = cdev_alloc(); 1445 cdev = cdev_alloc();
1446 if (!cdev) { 1446 if (!cdev) {
1447 printk(KERN_WARNING "cdev_alloc failed\n"); 1447 printk(KERN_WARNING "cdev_alloc failed\n");
1448 goto out; 1448 goto out;
1449 } 1449 }
1450 cdev->owner = THIS_MODULE; 1450 cdev->owner = THIS_MODULE;
1451 cdev->ops = &sg_fops; 1451 cdev->ops = &sg_fops;
1452 1452
1453 error = sg_alloc(disk, scsidp); 1453 error = sg_alloc(disk, scsidp);
1454 if (error < 0) { 1454 if (error < 0) {
1455 printk(KERN_WARNING "sg_alloc failed\n"); 1455 printk(KERN_WARNING "sg_alloc failed\n");
1456 goto out; 1456 goto out;
1457 } 1457 }
1458 k = error; 1458 k = error;
1459 sdp = sg_dev_arr[k]; 1459 sdp = sg_dev_arr[k];
1460 1460
1461 devfs_mk_cdev(MKDEV(SCSI_GENERIC_MAJOR, k), 1461 devfs_mk_cdev(MKDEV(SCSI_GENERIC_MAJOR, k),
1462 S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 1462 S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP,
1463 "%s/generic", scsidp->devfs_name); 1463 "%s/generic", scsidp->devfs_name);
1464 error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, k), 1); 1464 error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, k), 1);
1465 if (error) { 1465 if (error) {
1466 devfs_remove("%s/generic", scsidp->devfs_name); 1466 devfs_remove("%s/generic", scsidp->devfs_name);
1467 goto out; 1467 goto out;
1468 } 1468 }
1469 sdp->cdev = cdev; 1469 sdp->cdev = cdev;
1470 if (sg_sysfs_valid) { 1470 if (sg_sysfs_valid) {
1471 struct class_device * sg_class_member; 1471 struct class_device * sg_class_member;
1472 1472
1473 sg_class_member = class_device_create(sg_sysfs_class, NULL, 1473 sg_class_member = class_device_create(sg_sysfs_class, NULL,
1474 MKDEV(SCSI_GENERIC_MAJOR, k), 1474 MKDEV(SCSI_GENERIC_MAJOR, k),
1475 cl_dev->dev, "%s", 1475 cl_dev->dev, "%s",
1476 disk->disk_name); 1476 disk->disk_name);
1477 if (IS_ERR(sg_class_member)) 1477 if (IS_ERR(sg_class_member))
1478 printk(KERN_WARNING "sg_add: " 1478 printk(KERN_WARNING "sg_add: "
1479 "class_device_create failed\n"); 1479 "class_device_create failed\n");
1480 class_set_devdata(sg_class_member, sdp); 1480 class_set_devdata(sg_class_member, sdp);
1481 error = sysfs_create_link(&scsidp->sdev_gendev.kobj, 1481 error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
1482 &sg_class_member->kobj, "generic"); 1482 &sg_class_member->kobj, "generic");
1483 if (error) 1483 if (error)
1484 printk(KERN_ERR "sg_add: unable to make symlink " 1484 printk(KERN_ERR "sg_add: unable to make symlink "
1485 "'generic' back to sg%d\n", k); 1485 "'generic' back to sg%d\n", k);
1486 } else 1486 } else
1487 printk(KERN_WARNING "sg_add: sg_sys INvalid\n"); 1487 printk(KERN_WARNING "sg_add: sg_sys INvalid\n");
1488 1488
1489 sdev_printk(KERN_NOTICE, scsidp, 1489 sdev_printk(KERN_NOTICE, scsidp,
1490 "Attached scsi generic sg%d type %d\n", k,scsidp->type); 1490 "Attached scsi generic sg%d type %d\n", k,scsidp->type);
1491 1491
1492 return 0; 1492 return 0;
1493 1493
1494 out: 1494 out:
1495 put_disk(disk); 1495 put_disk(disk);
1496 if (cdev) 1496 if (cdev)
1497 cdev_del(cdev); 1497 cdev_del(cdev);
1498 return error; 1498 return error;
1499 } 1499 }
1500 1500
/*
 * scsi_interface_handler hook: called when a SCSI device goes away.
 * Locates the matching Sg_device in sg_dev_arr, marks it detached,
 * completes or orphans every outstanding request on every open fd,
 * then unregisters the sysfs/devfs/cdev pieces.  The Sg_device itself
 * is only freed once no fds remain open on it.
 */
static void
sg_remove(struct class_device *cl_dev, struct class_interface *cl_intf)
{
	struct scsi_device *scsidp = to_scsi_device(cl_dev->dev);
	Sg_device *sdp = NULL;
	unsigned long iflags;
	Sg_fd *sfp;
	Sg_fd *tsfp;
	Sg_request *srp;
	Sg_request *tsrp;
	int k, delay;

	if (NULL == sg_dev_arr)
		return;
	delay = 0;
	write_lock_irqsave(&sg_dev_arr_lock, iflags);
	/* NOTE(review): if no entry matches scsidp the loop falls through
	 * with sdp pointing at the last slot inspected; the teardown below
	 * assumes a match always exists — verify against mid-layer usage. */
	for (k = 0; k < sg_dev_max; k++) {
		sdp = sg_dev_arr[k];
		if ((NULL == sdp) || (sdp->device != scsidp))
			continue;	/* dirty but lowers nesting */
		if (sdp->headfp) {
			/* Open fds remain: flag detach and sweep requests. */
			sdp->detached = 1;
			for (sfp = sdp->headfp; sfp; sfp = tsfp) {
				tsfp = sfp->nextfp;
				for (srp = sfp->headrp; srp; srp = tsrp) {
					tsrp = srp->nextrp;
					if (sfp->closed || (0 == sg_srp_done(srp, sfp)))
						sg_finish_rem_req(srp);
				}
				if (sfp->closed) {
					scsi_device_put(sdp->device);
					__sg_remove_sfp(sdp, sfp);
				} else {
					/* fd still open: wake its waiters and
					 * signal hangup; delay final cleanup. */
					delay = 1;
					wake_up_interruptible(&sfp->read_wait);
					kill_fasync(&sfp->async_qp, SIGPOLL,
						    POLL_HUP);
				}
			}
			SCSI_LOG_TIMEOUT(3, printk("sg_detach: dev=%d, dirty\n", k));
			if (NULL == sdp->headfp) {
				sg_dev_arr[k] = NULL;
			}
		} else {	/* nothing active, simple case */
			SCSI_LOG_TIMEOUT(3, printk("sg_detach: dev=%d\n", k));
			sg_dev_arr[k] = NULL;
		}
		sg_nr_dev--;
		break;
	}
	write_unlock_irqrestore(&sg_dev_arr_lock, iflags);

	if (sdp) {
		/* Unregister user-visible interfaces outside the spinlock. */
		sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
		class_device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, k));
		cdev_del(sdp->cdev);
		sdp->cdev = NULL;
		devfs_remove("%s/generic", scsidp->devfs_name);
		put_disk(sdp->disk);
		sdp->disk = NULL;
		/* Free only if no fd still references this Sg_device. */
		if (NULL == sdp->headfp)
			kfree((char *) sdp);
	}

	if (delay)
		msleep(10);	/* dirty detach so delay device destruction */
}
1568 1568
/* Set 'perm' (4th argument) to 0 to disable module_param's definition
 * of sysfs parameters (which module_param doesn't yet support).
 * Sysfs parameters defined explicitly below.
 */
module_param_named(def_reserved_size, def_reserved_size, int, S_IRUGO);
module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);

MODULE_AUTHOR("Douglas Gilbert");
MODULE_DESCRIPTION("SCSI generic (sg) driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SG_VERSION_STR);

MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
1583 1583
1584 static int __init 1584 static int __init
1585 init_sg(void) 1585 init_sg(void)
1586 { 1586 {
1587 int rc; 1587 int rc;
1588 1588
1589 if (def_reserved_size >= 0) 1589 if (def_reserved_size >= 0)
1590 sg_big_buff = def_reserved_size; 1590 sg_big_buff = def_reserved_size;
1591 1591
1592 rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), 1592 rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
1593 SG_MAX_DEVS, "sg"); 1593 SG_MAX_DEVS, "sg");
1594 if (rc) 1594 if (rc)
1595 return rc; 1595 return rc;
1596 sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic"); 1596 sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic");
1597 if ( IS_ERR(sg_sysfs_class) ) { 1597 if ( IS_ERR(sg_sysfs_class) ) {
1598 rc = PTR_ERR(sg_sysfs_class); 1598 rc = PTR_ERR(sg_sysfs_class);
1599 goto err_out; 1599 goto err_out;
1600 } 1600 }
1601 sg_sysfs_valid = 1; 1601 sg_sysfs_valid = 1;
1602 rc = scsi_register_interface(&sg_interface); 1602 rc = scsi_register_interface(&sg_interface);
1603 if (0 == rc) { 1603 if (0 == rc) {
1604 #ifdef CONFIG_SCSI_PROC_FS 1604 #ifdef CONFIG_SCSI_PROC_FS
1605 sg_proc_init(); 1605 sg_proc_init();
1606 #endif /* CONFIG_SCSI_PROC_FS */ 1606 #endif /* CONFIG_SCSI_PROC_FS */
1607 return 0; 1607 return 0;
1608 } 1608 }
1609 class_destroy(sg_sysfs_class); 1609 class_destroy(sg_sysfs_class);
1610 err_out: 1610 err_out:
1611 unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS); 1611 unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
1612 return rc; 1612 return rc;
1613 } 1613 }
1614 1614
/*
 * Module exit: tear down in the reverse order of init_sg() — procfs
 * entries, mid-layer interface, sysfs class, char-device region —
 * then release the global device array.
 */
static void __exit
exit_sg(void)
{
#ifdef CONFIG_SCSI_PROC_FS
	sg_proc_cleanup();
#endif				/* CONFIG_SCSI_PROC_FS */
	scsi_unregister_interface(&sg_interface);
	class_destroy(sg_sysfs_class);
	sg_sysfs_valid = 0;
	unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
				 SG_MAX_DEVS);
	kfree((char *)sg_dev_arr);
	sg_dev_arr = NULL;
	sg_dev_max = 0;
}
1630 1630
1631 static int 1631 static int
1632 sg_start_req(Sg_request * srp) 1632 sg_start_req(Sg_request * srp)
1633 { 1633 {
1634 int res; 1634 int res;
1635 Sg_fd *sfp = srp->parentfp; 1635 Sg_fd *sfp = srp->parentfp;
1636 sg_io_hdr_t *hp = &srp->header; 1636 sg_io_hdr_t *hp = &srp->header;
1637 int dxfer_len = (int) hp->dxfer_len; 1637 int dxfer_len = (int) hp->dxfer_len;
1638 int dxfer_dir = hp->dxfer_direction; 1638 int dxfer_dir = hp->dxfer_direction;
1639 Sg_scatter_hold *req_schp = &srp->data; 1639 Sg_scatter_hold *req_schp = &srp->data;
1640 Sg_scatter_hold *rsv_schp = &sfp->reserve; 1640 Sg_scatter_hold *rsv_schp = &sfp->reserve;
1641 1641
1642 SCSI_LOG_TIMEOUT(4, printk("sg_start_req: dxfer_len=%d\n", dxfer_len)); 1642 SCSI_LOG_TIMEOUT(4, printk("sg_start_req: dxfer_len=%d\n", dxfer_len));
1643 if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE)) 1643 if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
1644 return 0; 1644 return 0;
1645 if (sg_allow_dio && (hp->flags & SG_FLAG_DIRECT_IO) && 1645 if (sg_allow_dio && (hp->flags & SG_FLAG_DIRECT_IO) &&
1646 (dxfer_dir != SG_DXFER_UNKNOWN) && (0 == hp->iovec_count) && 1646 (dxfer_dir != SG_DXFER_UNKNOWN) && (0 == hp->iovec_count) &&
1647 (!sfp->parentdp->device->host->unchecked_isa_dma)) { 1647 (!sfp->parentdp->device->host->unchecked_isa_dma)) {
1648 res = sg_build_direct(srp, sfp, dxfer_len); 1648 res = sg_build_direct(srp, sfp, dxfer_len);
1649 if (res <= 0) /* -ve -> error, 0 -> done, 1 -> try indirect */ 1649 if (res <= 0) /* -ve -> error, 0 -> done, 1 -> try indirect */
1650 return res; 1650 return res;
1651 } 1651 }
1652 if ((!sg_res_in_use(sfp)) && (dxfer_len <= rsv_schp->bufflen)) 1652 if ((!sg_res_in_use(sfp)) && (dxfer_len <= rsv_schp->bufflen))
1653 sg_link_reserve(sfp, srp, dxfer_len); 1653 sg_link_reserve(sfp, srp, dxfer_len);
1654 else { 1654 else {
1655 res = sg_build_indirect(req_schp, sfp, dxfer_len); 1655 res = sg_build_indirect(req_schp, sfp, dxfer_len);
1656 if (res) { 1656 if (res) {
1657 sg_remove_scat(req_schp); 1657 sg_remove_scat(req_schp);
1658 return res; 1658 return res;
1659 } 1659 }
1660 } 1660 }
1661 return 0; 1661 return 0;
1662 } 1662 }
1663 1663
1664 static void 1664 static void
1665 sg_finish_rem_req(Sg_request * srp) 1665 sg_finish_rem_req(Sg_request * srp)
1666 { 1666 {
1667 Sg_fd *sfp = srp->parentfp; 1667 Sg_fd *sfp = srp->parentfp;
1668 Sg_scatter_hold *req_schp = &srp->data; 1668 Sg_scatter_hold *req_schp = &srp->data;
1669 1669
1670 SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n", (int) srp->res_used)); 1670 SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n", (int) srp->res_used));
1671 if (srp->res_used) 1671 if (srp->res_used)
1672 sg_unlink_reserve(sfp, srp); 1672 sg_unlink_reserve(sfp, srp);
1673 else 1673 else
1674 sg_remove_scat(req_schp); 1674 sg_remove_scat(req_schp);
1675 sg_remove_request(sfp, srp); 1675 sg_remove_request(sfp, srp);
1676 } 1676 }
1677 1677
1678 static int 1678 static int
1679 sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize) 1679 sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
1680 { 1680 {
1681 int sg_bufflen = tablesize * sizeof(struct scatterlist); 1681 int sg_bufflen = tablesize * sizeof(struct scatterlist);
1682 unsigned int gfp_flags = GFP_ATOMIC | __GFP_NOWARN; 1682 unsigned int gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
1683 1683
1684 /* 1684 /*
1685 * TODO: test without low_dma, we should not need it since 1685 * TODO: test without low_dma, we should not need it since
1686 * the block layer will bounce the buffer for us 1686 * the block layer will bounce the buffer for us
1687 * 1687 *
1688 * XXX(hch): we shouldn't need GFP_DMA for the actual S/G list. 1688 * XXX(hch): we shouldn't need GFP_DMA for the actual S/G list.
1689 */ 1689 */
1690 if (sfp->low_dma) 1690 if (sfp->low_dma)
1691 gfp_flags |= GFP_DMA; 1691 gfp_flags |= GFP_DMA;
1692 schp->buffer = kzalloc(sg_bufflen, gfp_flags); 1692 schp->buffer = kzalloc(sg_bufflen, gfp_flags);
1693 if (!schp->buffer) 1693 if (!schp->buffer)
1694 return -ENOMEM; 1694 return -ENOMEM;
1695 schp->sglist_len = sg_bufflen; 1695 schp->sglist_len = sg_bufflen;
1696 return tablesize; /* number of scat_gath elements allocated */ 1696 return tablesize; /* number of scat_gath elements allocated */
1697 } 1697 }
1698 1698
1699 #ifdef SG_ALLOW_DIO_CODE 1699 #ifdef SG_ALLOW_DIO_CODE
1700 /* vvvvvvvv following code borrowed from st driver's direct IO vvvvvvvvv */ 1700 /* vvvvvvvv following code borrowed from st driver's direct IO vvvvvvvvv */
1701 /* TODO: hopefully we can use the generic block layer code */ 1701 /* TODO: hopefully we can use the generic block layer code */
1702 1702
1703 /* Pin down user pages and put them into a scatter gather list. Returns <= 0 if 1703 /* Pin down user pages and put them into a scatter gather list. Returns <= 0 if
1704 - mapping of all pages not successful 1704 - mapping of all pages not successful
1705 (i.e., either completely successful or fails) 1705 (i.e., either completely successful or fails)
1706 */ 1706 */
/*
 * Pin the user buffer [@uaddr, @uaddr + @count) into @sgl via
 * get_user_pages().  On success returns the number of pages pinned
 * (all-or-nothing); on failure returns <= 0 with every partially
 * pinned page released.  @rw == READ means the device will write
 * into this memory.
 */
static int
st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
		  unsigned long uaddr, size_t count, int rw)
{
	unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = uaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	int res, i, j;
	struct page **pages;

	/* User attempted Overflow! */
	if ((uaddr + count) < uaddr)
		return -EINVAL;

	/* Too big */
	if (nr_pages > max_pages)
		return -ENOMEM;

	/* Hmm? */
	if (count == 0)
		return 0;

	if ((pages = kmalloc(max_pages * sizeof(*pages), GFP_ATOMIC)) == NULL)
		return -ENOMEM;

	/* Try to fault in all of the necessary pages */
	down_read(&current->mm->mmap_sem);
	/* rw==READ means read from drive, write into memory area */
	res = get_user_pages(
		current,
		current->mm,
		uaddr,
		nr_pages,
		rw == READ,
		0, /* don't force */
		pages,
		NULL);
	up_read(&current->mm->mmap_sem);

	/* Errors and no page mapped should return here */
	if (res < nr_pages)
		goto out_unmap;

	for (i=0; i < nr_pages; i++) {
		/* FIXME: flush superflous for rw==READ,
		 * probably wrong function for rw==WRITE
		 */
		flush_dcache_page(pages[i]);
		/* ?? Is locking needed? I don't think so */
		/* if (TestSetPageLocked(pages[i]))
		   goto out_unlock; */
	}

	/* First entry may start mid-page; subsequent entries are full
	 * pages except possibly the last, which takes the remainder. */
	sgl[0].page = pages[0];
	sgl[0].offset = uaddr & ~PAGE_MASK;
	if (nr_pages > 1) {
		sgl[0].length = PAGE_SIZE - sgl[0].offset;
		count -= sgl[0].length;
		for (i=1; i < nr_pages ; i++) {
			sgl[i].page = pages[i];
			sgl[i].length = count < PAGE_SIZE ? count : PAGE_SIZE;
			count -= PAGE_SIZE;
		}
	}
	else {
		sgl[0].length = count;
	}

	kfree(pages);
	return nr_pages;

 out_unmap:
	/* Release whatever subset of pages did get pinned. */
	if (res > 0) {
		for (j=0; j < res; j++)
			page_cache_release(pages[j]);
		res = 0;
	}
	kfree(pages);
	return res;
}
1787 1787
1788 1788
1789 /* And unmap them... */ 1789 /* And unmap them... */
1790 static int 1790 static int
1791 st_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages, 1791 st_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages,
1792 int dirtied) 1792 int dirtied)
1793 { 1793 {
1794 int i; 1794 int i;
1795 1795
1796 for (i=0; i < nr_pages; i++) { 1796 for (i=0; i < nr_pages; i++) {
1797 struct page *page = sgl[i].page; 1797 struct page *page = sgl[i].page;
1798 1798
1799 if (dirtied) 1799 if (dirtied)
1800 SetPageDirty(page); 1800 SetPageDirty(page);
1801 /* unlock_page(page); */ 1801 /* unlock_page(page); */
1802 /* FIXME: cache flush missing for rw==READ 1802 /* FIXME: cache flush missing for rw==READ
1803 * FIXME: call the correct reference counting function 1803 * FIXME: call the correct reference counting function
1804 */ 1804 */
1805 page_cache_release(page); 1805 page_cache_release(page);
1806 } 1806 }
1807 1807
1808 return 0; 1808 return 0;
1809 } 1809 }
1810 1810
1811 /* ^^^^^^^^ above code borrowed from st driver's direct IO ^^^^^^^^^ */ 1811 /* ^^^^^^^^ above code borrowed from st driver's direct IO ^^^^^^^^^ */
1812 #endif 1812 #endif
1813 1813
1814 1814
1815 /* Returns: -ve -> error, 0 -> done, 1 -> try indirect */ 1815 /* Returns: -ve -> error, 0 -> done, 1 -> try indirect */
1816 static int 1816 static int
1817 sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len) 1817 sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len)
1818 { 1818 {
1819 #ifdef SG_ALLOW_DIO_CODE 1819 #ifdef SG_ALLOW_DIO_CODE
1820 sg_io_hdr_t *hp = &srp->header; 1820 sg_io_hdr_t *hp = &srp->header;
1821 Sg_scatter_hold *schp = &srp->data; 1821 Sg_scatter_hold *schp = &srp->data;
1822 int sg_tablesize = sfp->parentdp->sg_tablesize; 1822 int sg_tablesize = sfp->parentdp->sg_tablesize;
1823 int mx_sc_elems, res; 1823 int mx_sc_elems, res;
1824 struct scsi_device *sdev = sfp->parentdp->device; 1824 struct scsi_device *sdev = sfp->parentdp->device;
1825 1825
1826 if (((unsigned long)hp->dxferp & 1826 if (((unsigned long)hp->dxferp &
1827 queue_dma_alignment(sdev->request_queue)) != 0) 1827 queue_dma_alignment(sdev->request_queue)) != 0)
1828 return 1; 1828 return 1;
1829 1829
1830 mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize); 1830 mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
1831 if (mx_sc_elems <= 0) { 1831 if (mx_sc_elems <= 0) {
1832 return 1; 1832 return 1;
1833 } 1833 }
1834 res = st_map_user_pages(schp->buffer, mx_sc_elems, 1834 res = st_map_user_pages(schp->buffer, mx_sc_elems,
1835 (unsigned long)hp->dxferp, dxfer_len, 1835 (unsigned long)hp->dxferp, dxfer_len,
1836 (SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0); 1836 (SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0);
1837 if (res <= 0) 1837 if (res <= 0)
1838 return 1; 1838 return 1;
1839 schp->k_use_sg = res; 1839 schp->k_use_sg = res;
1840 schp->dio_in_use = 1; 1840 schp->dio_in_use = 1;
1841 hp->info |= SG_INFO_DIRECT_IO; 1841 hp->info |= SG_INFO_DIRECT_IO;
1842 return 0; 1842 return 0;
1843 #else 1843 #else
1844 return 1; 1844 return 1;
1845 #endif 1845 #endif
1846 } 1846 }
1847 1847
/*
 * Allocate a kernel-resident scatter list holding roughly buff_size
 * bytes for indirect I/O.  Returns 0 on success, -EFAULT on bad
 * arguments, -ENOMEM if the scatter table or its pages cannot all be
 * allocated.
 */
static int
sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
{
	struct scatterlist *sg;
	int ret_sz = 0, k, rem_sz, num, mx_sc_elems;
	int sg_tablesize = sfp->parentdp->sg_tablesize;
	int blk_size = buff_size;
	struct page *p = NULL;

	if ((blk_size < 0) || (!sfp))
		return -EFAULT;
	if (0 == blk_size)
		++blk_size;	/* don't know why */
	/* round request up to next highest SG_SECTOR_SZ byte boundary */
	blk_size = (blk_size + SG_SECTOR_MSK) & (~SG_SECTOR_MSK);
	SCSI_LOG_TIMEOUT(4, printk("sg_build_indirect: buff_size=%d, blk_size=%d\n",
				   buff_size, blk_size));

	/* N.B. ret_sz carried into this block ... */
	mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
	if (mx_sc_elems < 0)
		return mx_sc_elems;	/* most likely -ENOMEM */

	for (k = 0, sg = schp->buffer, rem_sz = blk_size;
	     (rem_sz > 0) && (k < mx_sc_elems);
	     ++k, rem_sz -= ret_sz, ++sg) {
		/* ask for at most SG_SCATTER_SZ per chunk; the allocator
		 * reports the size actually obtained through ret_sz */
		num = (rem_sz > SG_SCATTER_SZ) ? SG_SCATTER_SZ : rem_sz;
		p = sg_page_malloc(num, sfp->low_dma, &ret_sz);
		if (!p)
			return -ENOMEM;

		sg->page = p;
		sg->length = ret_sz;

		SCSI_LOG_TIMEOUT(5, printk("sg_build_build: k=%d, a=0x%p, len=%d\n",
					   k, p, ret_sz));
	}			/* end of for loop */

	schp->k_use_sg = k;
	SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, rem_sz=%d\n", k, rem_sz));

	schp->bufflen = blk_size;
	if (rem_sz > 0)		/* must have failed */
		return -ENOMEM;

	return 0;
}
1896 1896
/*
 * Copy the user-supplied data (flat buffer or iovec list) into the
 * request's kernel scatter list before the command is issued.  Only
 * runs for "to device" transfer directions; skipped entirely for
 * direct IO, mmap IO and SG_FLAG_NO_DXFER requests.  Returns 0 or
 * -EFAULT.
 */
static int
sg_write_xfer(Sg_request * srp)
{
	sg_io_hdr_t *hp = &srp->header;
	Sg_scatter_hold *schp = &srp->data;
	struct scatterlist *sg = schp->buffer;
	int num_xfer = 0;
	int j, k, onum, usglen, ksglen, res;
	int iovec_count = (int) hp->iovec_count;
	int dxfer_dir = hp->dxfer_direction;
	unsigned char *p;
	unsigned char __user *up;
	int new_interface = ('\0' == hp->interface_id) ? 0 : 1;

	if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_TO_DEV == dxfer_dir) ||
	    (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
		/* the old (pre-v3) interface keeps the input size in hp->flags */
		num_xfer = (int) (new_interface ? hp->dxfer_len : hp->flags);
		if (schp->bufflen < num_xfer)
			num_xfer = schp->bufflen;
	}
	if ((num_xfer <= 0) || (schp->dio_in_use) ||
	    (new_interface
	     && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
		return 0;

	SCSI_LOG_TIMEOUT(4, printk("sg_write_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
			  num_xfer, iovec_count, schp->k_use_sg));
	if (iovec_count) {
		onum = iovec_count;
		if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
			return -EFAULT;
	} else
		onum = 1;

	ksglen = sg->length;
	p = page_address(sg->page);
	for (j = 0, k = 0; j < onum; ++j) {
		/* resolve user pointer and length of iovec element j */
		res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
		if (res)
			return res;

		/* walk kernel scatter entries until this iovec is consumed;
		 * p/ksglen track the cursor within the current entry */
		for (; p; ++sg, ksglen = sg->length,
		     p = page_address(sg->page)) {
			if (usglen <= 0)
				break;
			if (ksglen > usglen) {
				if (usglen >= num_xfer) {
					if (__copy_from_user(p, up, num_xfer))
						return -EFAULT;
					return 0;
				}
				if (__copy_from_user(p, up, usglen))
					return -EFAULT;
				p += usglen;
				ksglen -= usglen;
				/* kernel entry not yet full: next iovec */
				break;
			} else {
				if (ksglen >= num_xfer) {
					if (__copy_from_user(p, up, num_xfer))
						return -EFAULT;
					return 0;
				}
				if (__copy_from_user(p, up, ksglen))
					return -EFAULT;
				up += ksglen;
				usglen -= ksglen;
			}
			++k;
			if (k >= schp->k_use_sg)
				return 0;
		}
	}

	return 0;
}
1972 1972
1973 static int 1973 static int
1974 sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind, 1974 sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
1975 int wr_xf, int *countp, unsigned char __user **up) 1975 int wr_xf, int *countp, unsigned char __user **up)
1976 { 1976 {
1977 int num_xfer = (int) hp->dxfer_len; 1977 int num_xfer = (int) hp->dxfer_len;
1978 unsigned char __user *p = hp->dxferp; 1978 unsigned char __user *p = hp->dxferp;
1979 int count; 1979 int count;
1980 1980
1981 if (0 == sg_num) { 1981 if (0 == sg_num) {
1982 if (wr_xf && ('\0' == hp->interface_id)) 1982 if (wr_xf && ('\0' == hp->interface_id))
1983 count = (int) hp->flags; /* holds "old" input_size */ 1983 count = (int) hp->flags; /* holds "old" input_size */
1984 else 1984 else
1985 count = num_xfer; 1985 count = num_xfer;
1986 } else { 1986 } else {
1987 sg_iovec_t iovec; 1987 sg_iovec_t iovec;
1988 if (__copy_from_user(&iovec, p + ind*SZ_SG_IOVEC, SZ_SG_IOVEC)) 1988 if (__copy_from_user(&iovec, p + ind*SZ_SG_IOVEC, SZ_SG_IOVEC))
1989 return -EFAULT; 1989 return -EFAULT;
1990 p = iovec.iov_base; 1990 p = iovec.iov_base;
1991 count = (int) iovec.iov_len; 1991 count = (int) iovec.iov_len;
1992 } 1992 }
1993 if (!access_ok(wr_xf ? VERIFY_READ : VERIFY_WRITE, p, count)) 1993 if (!access_ok(wr_xf ? VERIFY_READ : VERIFY_WRITE, p, count))
1994 return -EFAULT; 1994 return -EFAULT;
1995 if (up) 1995 if (up)
1996 *up = p; 1996 *up = p;
1997 if (countp) 1997 if (countp)
1998 *countp = count; 1998 *countp = count;
1999 return 0; 1999 return 0;
2000 } 2000 }
2001 2001
2002 static void 2002 static void
2003 sg_remove_scat(Sg_scatter_hold * schp) 2003 sg_remove_scat(Sg_scatter_hold * schp)
2004 { 2004 {
2005 SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg)); 2005 SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
2006 if (schp->buffer && (schp->sglist_len > 0)) { 2006 if (schp->buffer && (schp->sglist_len > 0)) {
2007 struct scatterlist *sg = schp->buffer; 2007 struct scatterlist *sg = schp->buffer;
2008 2008
2009 if (schp->dio_in_use) { 2009 if (schp->dio_in_use) {
2010 #ifdef SG_ALLOW_DIO_CODE 2010 #ifdef SG_ALLOW_DIO_CODE
2011 st_unmap_user_pages(sg, schp->k_use_sg, TRUE); 2011 st_unmap_user_pages(sg, schp->k_use_sg, TRUE);
2012 #endif 2012 #endif
2013 } else { 2013 } else {
2014 int k; 2014 int k;
2015 2015
2016 for (k = 0; (k < schp->k_use_sg) && sg->page; 2016 for (k = 0; (k < schp->k_use_sg) && sg->page;
2017 ++k, ++sg) { 2017 ++k, ++sg) {
2018 SCSI_LOG_TIMEOUT(5, printk( 2018 SCSI_LOG_TIMEOUT(5, printk(
2019 "sg_remove_scat: k=%d, a=0x%p, len=%d\n", 2019 "sg_remove_scat: k=%d, a=0x%p, len=%d\n",
2020 k, sg->page, sg->length)); 2020 k, sg->page, sg->length));
2021 sg_page_free(sg->page, sg->length); 2021 sg_page_free(sg->page, sg->length);
2022 } 2022 }
2023 } 2023 }
2024 kfree(schp->buffer); 2024 kfree(schp->buffer);
2025 } 2025 }
2026 memset(schp, 0, sizeof (*schp)); 2026 memset(schp, 0, sizeof (*schp));
2027 } 2027 }
2028 2028
/*
 * Copy data from the request's kernel scatter list back to the user
 * buffer (flat or iovec) after command completion.  Only runs for
 * "from device" transfer directions; skipped for direct IO, mmap IO
 * and SG_FLAG_NO_DXFER requests.  Returns 0 or -EFAULT.
 */
static int
sg_read_xfer(Sg_request * srp)
{
	sg_io_hdr_t *hp = &srp->header;
	Sg_scatter_hold *schp = &srp->data;
	struct scatterlist *sg = schp->buffer;
	int num_xfer = 0;
	int j, k, onum, usglen, ksglen, res;
	int iovec_count = (int) hp->iovec_count;
	int dxfer_dir = hp->dxfer_direction;
	unsigned char *p;
	unsigned char __user *up;
	int new_interface = ('\0' == hp->interface_id) ? 0 : 1;

	if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_FROM_DEV == dxfer_dir)
	    || (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
		num_xfer = hp->dxfer_len;
		if (schp->bufflen < num_xfer)
			num_xfer = schp->bufflen;
	}
	if ((num_xfer <= 0) || (schp->dio_in_use) ||
	    (new_interface
	     && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
		return 0;

	SCSI_LOG_TIMEOUT(4, printk("sg_read_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
			  num_xfer, iovec_count, schp->k_use_sg));
	if (iovec_count) {
		onum = iovec_count;
		if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
			return -EFAULT;
	} else
		onum = 1;

	p = page_address(sg->page);
	ksglen = sg->length;
	for (j = 0, k = 0; j < onum; ++j) {
		/* resolve user pointer and length of iovec element j */
		res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
		if (res)
			return res;

		/* walk kernel scatter entries until this iovec is filled;
		 * p/ksglen track the cursor within the current entry */
		for (; p; ++sg, ksglen = sg->length,
		     p = page_address(sg->page)) {
			if (usglen <= 0)
				break;
			if (ksglen > usglen) {
				if (usglen >= num_xfer) {
					if (__copy_to_user(up, p, num_xfer))
						return -EFAULT;
					return 0;
				}
				if (__copy_to_user(up, p, usglen))
					return -EFAULT;
				p += usglen;
				ksglen -= usglen;
				/* kernel entry not yet drained: next iovec */
				break;
			} else {
				if (ksglen >= num_xfer) {
					if (__copy_to_user(up, p, num_xfer))
						return -EFAULT;
					return 0;
				}
				if (__copy_to_user(up, p, ksglen))
					return -EFAULT;
				up += ksglen;
				usglen -= ksglen;
			}
			++k;
			if (k >= schp->k_use_sg)
				return 0;
		}
	}

	return 0;
}
2104 2104
2105 static int 2105 static int
2106 sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer) 2106 sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
2107 { 2107 {
2108 Sg_scatter_hold *schp = &srp->data; 2108 Sg_scatter_hold *schp = &srp->data;
2109 struct scatterlist *sg = schp->buffer; 2109 struct scatterlist *sg = schp->buffer;
2110 int k, num; 2110 int k, num;
2111 2111
2112 SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n", 2112 SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
2113 num_read_xfer)); 2113 num_read_xfer));
2114 if ((!outp) || (num_read_xfer <= 0)) 2114 if ((!outp) || (num_read_xfer <= 0))
2115 return 0; 2115 return 0;
2116 2116
2117 for (k = 0; (k < schp->k_use_sg) && sg->page; ++k, ++sg) { 2117 for (k = 0; (k < schp->k_use_sg) && sg->page; ++k, ++sg) {
2118 num = sg->length; 2118 num = sg->length;
2119 if (num > num_read_xfer) { 2119 if (num > num_read_xfer) {
2120 if (__copy_to_user(outp, page_address(sg->page), 2120 if (__copy_to_user(outp, page_address(sg->page),
2121 num_read_xfer)) 2121 num_read_xfer))
2122 return -EFAULT; 2122 return -EFAULT;
2123 break; 2123 break;
2124 } else { 2124 } else {
2125 if (__copy_to_user(outp, page_address(sg->page), 2125 if (__copy_to_user(outp, page_address(sg->page),
2126 num)) 2126 num))
2127 return -EFAULT; 2127 return -EFAULT;
2128 num_read_xfer -= num; 2128 num_read_xfer -= num;
2129 if (num_read_xfer <= 0) 2129 if (num_read_xfer <= 0)
2130 break; 2130 break;
2131 outp += num; 2131 outp += num;
2132 } 2132 }
2133 } 2133 }
2134 2134
2135 return 0; 2135 return 0;
2136 } 2136 }
2137 2137
2138 static void 2138 static void
2139 sg_build_reserve(Sg_fd * sfp, int req_size) 2139 sg_build_reserve(Sg_fd * sfp, int req_size)
2140 { 2140 {
2141 Sg_scatter_hold *schp = &sfp->reserve; 2141 Sg_scatter_hold *schp = &sfp->reserve;
2142 2142
2143 SCSI_LOG_TIMEOUT(4, printk("sg_build_reserve: req_size=%d\n", req_size)); 2143 SCSI_LOG_TIMEOUT(4, printk("sg_build_reserve: req_size=%d\n", req_size));
2144 do { 2144 do {
2145 if (req_size < PAGE_SIZE) 2145 if (req_size < PAGE_SIZE)
2146 req_size = PAGE_SIZE; 2146 req_size = PAGE_SIZE;
2147 if (0 == sg_build_indirect(schp, sfp, req_size)) 2147 if (0 == sg_build_indirect(schp, sfp, req_size))
2148 return; 2148 return;
2149 else 2149 else
2150 sg_remove_scat(schp); 2150 sg_remove_scat(schp);
2151 req_size >>= 1; /* divide by 2 */ 2151 req_size >>= 1; /* divide by 2 */
2152 } while (req_size > (PAGE_SIZE / 2)); 2152 } while (req_size > (PAGE_SIZE / 2));
2153 } 2153 }
2154 2154
/*
 * Attach the fd's reserve buffer to request srp for a transfer of
 * "size" bytes.  The last scatter entry needed is truncated to the
 * exact remainder; its original length is stashed in save_scat_len so
 * sg_unlink_reserve() can restore it later.
 */
static void
sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
{
	Sg_scatter_hold *req_schp = &srp->data;
	Sg_scatter_hold *rsv_schp = &sfp->reserve;
	struct scatterlist *sg = rsv_schp->buffer;
	int k, num, rem;

	srp->res_used = 1;
	SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
	rem = size = (size + 1) & (~1);	/* round to even for aha1542 */

	for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sg) {
		num = sg->length;
		if (rem <= num) {
			/* this entry covers the tail of the request */
			sfp->save_scat_len = num;
			sg->length = rem;
			req_schp->k_use_sg = k + 1;
			req_schp->sglist_len = rsv_schp->sglist_len;
			req_schp->buffer = rsv_schp->buffer;

			req_schp->bufflen = size;
			req_schp->b_malloc_len = rsv_schp->b_malloc_len;
			break;
		} else
			rem -= num;
	}

	if (k >= rsv_schp->k_use_sg)
		SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n"));
}
2186 2186
2187 static void 2187 static void
2188 sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp) 2188 sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
2189 { 2189 {
2190 Sg_scatter_hold *req_schp = &srp->data; 2190 Sg_scatter_hold *req_schp = &srp->data;
2191 Sg_scatter_hold *rsv_schp = &sfp->reserve; 2191 Sg_scatter_hold *rsv_schp = &sfp->reserve;
2192 2192
2193 SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n", 2193 SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
2194 (int) req_schp->k_use_sg)); 2194 (int) req_schp->k_use_sg));
2195 if ((rsv_schp->k_use_sg > 0) && (req_schp->k_use_sg > 0)) { 2195 if ((rsv_schp->k_use_sg > 0) && (req_schp->k_use_sg > 0)) {
2196 struct scatterlist *sg = rsv_schp->buffer; 2196 struct scatterlist *sg = rsv_schp->buffer;
2197 2197
2198 if (sfp->save_scat_len > 0) 2198 if (sfp->save_scat_len > 0)
2199 (sg + (req_schp->k_use_sg - 1))->length = 2199 (sg + (req_schp->k_use_sg - 1))->length =
2200 (unsigned) sfp->save_scat_len; 2200 (unsigned) sfp->save_scat_len;
2201 else 2201 else
2202 SCSI_LOG_TIMEOUT(1, printk ("sg_unlink_reserve: BAD save_scat_len\n")); 2202 SCSI_LOG_TIMEOUT(1, printk ("sg_unlink_reserve: BAD save_scat_len\n"));
2203 } 2203 }
2204 req_schp->k_use_sg = 0; 2204 req_schp->k_use_sg = 0;
2205 req_schp->bufflen = 0; 2205 req_schp->bufflen = 0;
2206 req_schp->buffer = NULL; 2206 req_schp->buffer = NULL;
2207 req_schp->sglist_len = 0; 2207 req_schp->sglist_len = 0;
2208 sfp->save_scat_len = 0; 2208 sfp->save_scat_len = 0;
2209 srp->res_used = 0; 2209 srp->res_used = 0;
2210 } 2210 }
2211 2211
2212 static Sg_request * 2212 static Sg_request *
2213 sg_get_rq_mark(Sg_fd * sfp, int pack_id) 2213 sg_get_rq_mark(Sg_fd * sfp, int pack_id)
2214 { 2214 {
2215 Sg_request *resp; 2215 Sg_request *resp;
2216 unsigned long iflags; 2216 unsigned long iflags;
2217 2217
2218 write_lock_irqsave(&sfp->rq_list_lock, iflags); 2218 write_lock_irqsave(&sfp->rq_list_lock, iflags);
2219 for (resp = sfp->headrp; resp; resp = resp->nextrp) { 2219 for (resp = sfp->headrp; resp; resp = resp->nextrp) {
2220 /* look for requests that are ready + not SG_IO owned */ 2220 /* look for requests that are ready + not SG_IO owned */
2221 if ((1 == resp->done) && (!resp->sg_io_owned) && 2221 if ((1 == resp->done) && (!resp->sg_io_owned) &&
2222 ((-1 == pack_id) || (resp->header.pack_id == pack_id))) { 2222 ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
2223 resp->done = 2; /* guard against other readers */ 2223 resp->done = 2; /* guard against other readers */
2224 break; 2224 break;
2225 } 2225 }
2226 } 2226 }
2227 write_unlock_irqrestore(&sfp->rq_list_lock, iflags); 2227 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2228 return resp; 2228 return resp;
2229 } 2229 }
2230 2230
#ifdef CONFIG_SCSI_PROC_FS
/* Walk the fd's request list under the read lock and return the nth
 * entry (0-based), or NULL if the list is shorter than that. */
static Sg_request *
sg_get_nth_request(Sg_fd * sfp, int nth)
{
	Sg_request *srp;
	unsigned long iflags;
	int n = 0;

	read_lock_irqsave(&sfp->rq_list_lock, iflags);
	srp = sfp->headrp;
	while (srp && (n < nth)) {
		++n;
		srp = srp->nextrp;
	}
	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	return srp;
}
#endif
2246 2246
/* always adds to end of list */
/*
 * Allocate a request slot from sfp->req_arr and append it to the fd's
 * request list.  The first request reuses the head slot directly;
 * further requests require command queuing (sfp->cmd_q) and a free
 * slot among the SG_MAX_QUEUE array entries.  Returns the new request
 * or NULL when no slot is available.
 */
static Sg_request *
sg_add_request(Sg_fd * sfp)
{
	int k;
	unsigned long iflags;
	Sg_request *resp;
	Sg_request *rp = sfp->req_arr;

	write_lock_irqsave(&sfp->rq_list_lock, iflags);
	resp = sfp->headrp;
	if (!resp) {
		/* list empty: first array slot becomes the head */
		memset(rp, 0, sizeof (Sg_request));
		rp->parentfp = sfp;
		resp = rp;
		sfp->headrp = resp;
	} else {
		if (0 == sfp->cmd_q)
			resp = NULL;	/* command queuing disallowed */
		else {
			/* find a free slot (parentfp == NULL marks free) */
			for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
				if (!rp->parentfp)
					break;
			}
			if (k < SG_MAX_QUEUE) {
				memset(rp, 0, sizeof (Sg_request));
				rp->parentfp = sfp;
				while (resp->nextrp)
					resp = resp->nextrp;
				resp->nextrp = rp;
				resp = rp;
			} else
				resp = NULL;
		}
	}
	if (resp) {
		resp->nextrp = NULL;
		/* duration temporarily holds the start timestamp (ms) */
		resp->header.duration = jiffies_to_msecs(jiffies);
	}
	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	return resp;
}
2289 2289
2290 /* Return of 1 for found; 0 for not found */ 2290 /* Return of 1 for found; 0 for not found */
2291 static int 2291 static int
2292 sg_remove_request(Sg_fd * sfp, Sg_request * srp) 2292 sg_remove_request(Sg_fd * sfp, Sg_request * srp)
2293 { 2293 {
2294 Sg_request *prev_rp; 2294 Sg_request *prev_rp;
2295 Sg_request *rp; 2295 Sg_request *rp;
2296 unsigned long iflags; 2296 unsigned long iflags;
2297 int res = 0; 2297 int res = 0;
2298 2298
2299 if ((!sfp) || (!srp) || (!sfp->headrp)) 2299 if ((!sfp) || (!srp) || (!sfp->headrp))
2300 return res; 2300 return res;
2301 write_lock_irqsave(&sfp->rq_list_lock, iflags); 2301 write_lock_irqsave(&sfp->rq_list_lock, iflags);
2302 prev_rp = sfp->headrp; 2302 prev_rp = sfp->headrp;
2303 if (srp == prev_rp) { 2303 if (srp == prev_rp) {
2304 sfp->headrp = prev_rp->nextrp; 2304 sfp->headrp = prev_rp->nextrp;
2305 prev_rp->parentfp = NULL; 2305 prev_rp->parentfp = NULL;
2306 res = 1; 2306 res = 1;
2307 } else { 2307 } else {
2308 while ((rp = prev_rp->nextrp)) { 2308 while ((rp = prev_rp->nextrp)) {
2309 if (srp == rp) { 2309 if (srp == rp) {
2310 prev_rp->nextrp = rp->nextrp; 2310 prev_rp->nextrp = rp->nextrp;
2311 rp->parentfp = NULL; 2311 rp->parentfp = NULL;
2312 res = 1; 2312 res = 1;
2313 break; 2313 break;
2314 } 2314 }
2315 prev_rp = rp; 2315 prev_rp = rp;
2316 } 2316 }
2317 } 2317 }
2318 write_unlock_irqrestore(&sfp->rq_list_lock, iflags); 2318 write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2319 return res; 2319 return res;
2320 } 2320 }
2321 2321
#ifdef CONFIG_SCSI_PROC_FS
/* Return the nth (0-based) file descriptor attached to device sdp, or
 * NULL if there are fewer than nth+1 open descriptors. */
static Sg_fd *
sg_get_nth_sfp(Sg_device * sdp, int nth)
{
	Sg_fd *fp;
	unsigned long iflags;
	int n = 0;

	read_lock_irqsave(&sg_dev_arr_lock, iflags);
	fp = sdp->headfp;
	while (fp && (n < nth)) {
		++n;
		fp = fp->nextfp;
	}
	read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
	return fp;
}
#endif
2337 2337
/*
 * Create a new per-open file descriptor object for device sdp, apply
 * the SG_DEF_* defaults, link it onto the tail of the device's fd list
 * and build its reserve buffer.  Returns the new Sg_fd or NULL on
 * allocation failure.
 */
static Sg_fd *
sg_add_sfp(Sg_device * sdp, int dev)
{
	Sg_fd *sfp;
	unsigned long iflags;

	sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
	if (!sfp)
		return NULL;

	init_waitqueue_head(&sfp->read_wait);
	rwlock_init(&sfp->rq_list_lock);

	sfp->timeout = SG_DEFAULT_TIMEOUT;
	sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
	sfp->force_packid = SG_DEF_FORCE_PACK_ID;
	/* honour the host's ISA DMA restriction unless forced low */
	sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
	    sdp->device->host->unchecked_isa_dma : 1;
	sfp->cmd_q = SG_DEF_COMMAND_Q;
	sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
	sfp->parentdp = sdp;
	write_lock_irqsave(&sg_dev_arr_lock, iflags);
	if (!sdp->headfp)
		sdp->headfp = sfp;
	else {			/* add to tail of existing list */
		Sg_fd *pfp = sdp->headfp;
		while (pfp->nextfp)
			pfp = pfp->nextfp;
		pfp->nextfp = sfp;
	}
	write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
	SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
	sg_build_reserve(sfp, sg_big_buff);
	SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
			   sfp->reserve.bufflen, sfp->reserve.k_use_sg));
	return sfp;
}
2375 2375
2376 static void 2376 static void
2377 __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp) 2377 __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
2378 { 2378 {
2379 Sg_fd *fp; 2379 Sg_fd *fp;
2380 Sg_fd *prev_fp; 2380 Sg_fd *prev_fp;
2381 2381
2382 prev_fp = sdp->headfp; 2382 prev_fp = sdp->headfp;
2383 if (sfp == prev_fp) 2383 if (sfp == prev_fp)
2384 sdp->headfp = prev_fp->nextfp; 2384 sdp->headfp = prev_fp->nextfp;
2385 else { 2385 else {
2386 while ((fp = prev_fp->nextfp)) { 2386 while ((fp = prev_fp->nextfp)) {
2387 if (sfp == fp) { 2387 if (sfp == fp) {
2388 prev_fp->nextfp = fp->nextfp; 2388 prev_fp->nextfp = fp->nextfp;
2389 break; 2389 break;
2390 } 2390 }
2391 prev_fp = fp; 2391 prev_fp = fp;
2392 } 2392 }
2393 } 2393 }
2394 if (sfp->reserve.bufflen > 0) { 2394 if (sfp->reserve.bufflen > 0) {
2395 SCSI_LOG_TIMEOUT(6, 2395 SCSI_LOG_TIMEOUT(6,
2396 printk("__sg_remove_sfp: bufflen=%d, k_use_sg=%d\n", 2396 printk("__sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
2397 (int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg)); 2397 (int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg));
2398 if (sfp->mmap_called) 2398 if (sfp->mmap_called)
2399 sg_rb_correct4mmap(&sfp->reserve, 0); /* undo correction */ 2399 sg_rb_correct4mmap(&sfp->reserve, 0); /* undo correction */
2400 sg_remove_scat(&sfp->reserve); 2400 sg_remove_scat(&sfp->reserve);
2401 } 2401 }
2402 sfp->parentdp = NULL; 2402 sfp->parentdp = NULL;
2403 SCSI_LOG_TIMEOUT(6, printk("__sg_remove_sfp: sfp=0x%p\n", sfp)); 2403 SCSI_LOG_TIMEOUT(6, printk("__sg_remove_sfp: sfp=0x%p\n", sfp));
2404 kfree(sfp); 2404 kfree(sfp);
2405 } 2405 }
2406 2406
2407 /* Returns 0 in normal case, 1 when detached and sdp object removed */ 2407 /* Returns 0 in normal case, 1 when detached and sdp object removed */
2408 static int 2408 static int
2409 sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp) 2409 sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
2410 { 2410 {
2411 Sg_request *srp; 2411 Sg_request *srp;
2412 Sg_request *tsrp; 2412 Sg_request *tsrp;
2413 int dirty = 0; 2413 int dirty = 0;
2414 int res = 0; 2414 int res = 0;
2415 2415
2416 for (srp = sfp->headrp; srp; srp = tsrp) { 2416 for (srp = sfp->headrp; srp; srp = tsrp) {
2417 tsrp = srp->nextrp; 2417 tsrp = srp->nextrp;
2418 if (sg_srp_done(srp, sfp)) 2418 if (sg_srp_done(srp, sfp))
2419 sg_finish_rem_req(srp); 2419 sg_finish_rem_req(srp);
2420 else 2420 else
2421 ++dirty; 2421 ++dirty;
2422 } 2422 }
2423 if (0 == dirty) { 2423 if (0 == dirty) {
2424 unsigned long iflags; 2424 unsigned long iflags;
2425 2425
2426 write_lock_irqsave(&sg_dev_arr_lock, iflags); 2426 write_lock_irqsave(&sg_dev_arr_lock, iflags);
2427 __sg_remove_sfp(sdp, sfp); 2427 __sg_remove_sfp(sdp, sfp);
2428 if (sdp->detached && (NULL == sdp->headfp)) { 2428 if (sdp->detached && (NULL == sdp->headfp)) {
2429 int k, maxd; 2429 int k, maxd;
2430 2430
2431 maxd = sg_dev_max; 2431 maxd = sg_dev_max;
2432 for (k = 0; k < maxd; ++k) { 2432 for (k = 0; k < maxd; ++k) {
2433 if (sdp == sg_dev_arr[k]) 2433 if (sdp == sg_dev_arr[k])
2434 break; 2434 break;
2435 } 2435 }
2436 if (k < maxd) 2436 if (k < maxd)
2437 sg_dev_arr[k] = NULL; 2437 sg_dev_arr[k] = NULL;
2438 kfree((char *) sdp); 2438 kfree((char *) sdp);
2439 res = 1; 2439 res = 1;
2440 } 2440 }
2441 write_unlock_irqrestore(&sg_dev_arr_lock, iflags); 2441 write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2442 } else { 2442 } else {
2443 /* MOD_INC's to inhibit unloading sg and associated adapter driver */ 2443 /* MOD_INC's to inhibit unloading sg and associated adapter driver */
2444 /* only bump the access_count if we actually succeeded in 2444 /* only bump the access_count if we actually succeeded in
2445 * throwing another counter on the host module */ 2445 * throwing another counter on the host module */
2446 scsi_device_get(sdp->device); /* XXX: retval ignored? */ 2446 scsi_device_get(sdp->device); /* XXX: retval ignored? */
2447 sfp->closed = 1; /* flag dirty state on this fd */ 2447 sfp->closed = 1; /* flag dirty state on this fd */
2448 SCSI_LOG_TIMEOUT(1, printk("sg_remove_sfp: worrisome, %d writes pending\n", 2448 SCSI_LOG_TIMEOUT(1, printk("sg_remove_sfp: worrisome, %d writes pending\n",
2449 dirty)); 2449 dirty));
2450 } 2450 }
2451 return res; 2451 return res;
2452 } 2452 }
2453 2453
2454 static int 2454 static int
2455 sg_res_in_use(Sg_fd * sfp) 2455 sg_res_in_use(Sg_fd * sfp)
2456 { 2456 {
2457 const Sg_request *srp; 2457 const Sg_request *srp;
2458 unsigned long iflags; 2458 unsigned long iflags;
2459 2459
2460 read_lock_irqsave(&sfp->rq_list_lock, iflags); 2460 read_lock_irqsave(&sfp->rq_list_lock, iflags);
2461 for (srp = sfp->headrp; srp; srp = srp->nextrp) 2461 for (srp = sfp->headrp; srp; srp = srp->nextrp)
2462 if (srp->res_used) 2462 if (srp->res_used)
2463 break; 2463 break;
2464 read_unlock_irqrestore(&sfp->rq_list_lock, iflags); 2464 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2465 return srp ? 1 : 0; 2465 return srp ? 1 : 0;
2466 } 2466 }
2467 2467
2468 /* If retSzp==NULL want exact size or fail */ 2468 /* If retSzp==NULL want exact size or fail */
2469 static struct page * 2469 static struct page *
2470 sg_page_malloc(int rqSz, int lowDma, int *retSzp) 2470 sg_page_malloc(int rqSz, int lowDma, int *retSzp)
2471 { 2471 {
2472 struct page *resp = NULL; 2472 struct page *resp = NULL;
2473 gfp_t page_mask; 2473 gfp_t page_mask;
2474 int order, a_size; 2474 int order, a_size;
2475 int resSz = rqSz; 2475 int resSz = rqSz;
2476 2476
2477 if (rqSz <= 0) 2477 if (rqSz <= 0)
2478 return resp; 2478 return resp;
2479 2479
2480 if (lowDma) 2480 if (lowDma)
2481 page_mask = GFP_ATOMIC | GFP_DMA | __GFP_NOWARN; 2481 page_mask = GFP_ATOMIC | GFP_DMA | __GFP_NOWARN;
2482 else 2482 else
2483 page_mask = GFP_ATOMIC | __GFP_NOWARN; 2483 page_mask = GFP_ATOMIC | __GFP_NOWARN;
2484 2484
2485 for (order = 0, a_size = PAGE_SIZE; a_size < rqSz; 2485 for (order = 0, a_size = PAGE_SIZE; a_size < rqSz;
2486 order++, a_size <<= 1) ; 2486 order++, a_size <<= 1) ;
2487 resp = alloc_pages(page_mask, order); 2487 resp = alloc_pages(page_mask, order);
2488 while ((!resp) && order && retSzp) { 2488 while ((!resp) && order && retSzp) {
2489 --order; 2489 --order;
2490 a_size >>= 1; /* divide by 2, until PAGE_SIZE */ 2490 a_size >>= 1; /* divide by 2, until PAGE_SIZE */
2491 resp = alloc_pages(page_mask, order); /* try half */ 2491 resp = alloc_pages(page_mask, order); /* try half */
2492 resSz = a_size; 2492 resSz = a_size;
2493 } 2493 }
2494 if (resp) { 2494 if (resp) {
2495 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 2495 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2496 memset(page_address(resp), 0, resSz); 2496 memset(page_address(resp), 0, resSz);
2497 if (retSzp) 2497 if (retSzp)
2498 *retSzp = resSz; 2498 *retSzp = resSz;
2499 } 2499 }
2500 return resp; 2500 return resp;
2501 } 2501 }
2502 2502
2503 static void 2503 static void
2504 sg_page_free(struct page *page, int size) 2504 sg_page_free(struct page *page, int size)
2505 { 2505 {
2506 int order, a_size; 2506 int order, a_size;
2507 2507
2508 if (!page) 2508 if (!page)
2509 return; 2509 return;
2510 for (order = 0, a_size = PAGE_SIZE; a_size < size; 2510 for (order = 0, a_size = PAGE_SIZE; a_size < size;
2511 order++, a_size <<= 1) ; 2511 order++, a_size <<= 1) ;
2512 __free_pages(page, order); 2512 __free_pages(page, order);
2513 } 2513 }
2514 2514
2515 #ifndef MAINTENANCE_IN_CMD 2515 #ifndef MAINTENANCE_IN_CMD
2516 #define MAINTENANCE_IN_CMD 0xa3 2516 #define MAINTENANCE_IN_CMD 0xa3
2517 #endif 2517 #endif
2518 2518
2519 static unsigned char allow_ops[] = { TEST_UNIT_READY, REQUEST_SENSE, 2519 static unsigned char allow_ops[] = { TEST_UNIT_READY, REQUEST_SENSE,
2520 INQUIRY, READ_CAPACITY, READ_BUFFER, READ_6, READ_10, READ_12, 2520 INQUIRY, READ_CAPACITY, READ_BUFFER, READ_6, READ_10, READ_12,
2521 READ_16, MODE_SENSE, MODE_SENSE_10, LOG_SENSE, REPORT_LUNS, 2521 READ_16, MODE_SENSE, MODE_SENSE_10, LOG_SENSE, REPORT_LUNS,
2522 SERVICE_ACTION_IN, RECEIVE_DIAGNOSTIC, READ_LONG, MAINTENANCE_IN_CMD 2522 SERVICE_ACTION_IN, RECEIVE_DIAGNOSTIC, READ_LONG, MAINTENANCE_IN_CMD
2523 }; 2523 };
2524 2524
2525 static int 2525 static int
2526 sg_allow_access(unsigned char opcode, char dev_type) 2526 sg_allow_access(unsigned char opcode, char dev_type)
2527 { 2527 {
2528 int k; 2528 int k;
2529 2529
2530 if (TYPE_SCANNER == dev_type) /* TYPE_ROM maybe burner */ 2530 if (TYPE_SCANNER == dev_type) /* TYPE_ROM maybe burner */
2531 return 1; 2531 return 1;
2532 for (k = 0; k < sizeof (allow_ops); ++k) { 2532 for (k = 0; k < sizeof (allow_ops); ++k) {
2533 if (opcode == allow_ops[k]) 2533 if (opcode == allow_ops[k])
2534 return 1; 2534 return 1;
2535 } 2535 }
2536 return 0; 2536 return 0;
2537 } 2537 }
2538 2538
#ifdef CONFIG_SCSI_PROC_FS
/*
 * Highest active sg device number plus one (origin 1), for /proc
 * output.  Takes sg_dev_arr_lock for reading.
 */
static int
sg_last_dev(void)
{
	int idx;
	unsigned long flags;

	read_lock_irqsave(&sg_dev_arr_lock, flags);
	for (idx = sg_dev_max - 1; idx >= 0; --idx)
		if (sg_dev_arr[idx] && sg_dev_arr[idx]->device)
			break;
	read_unlock_irqrestore(&sg_dev_arr_lock, flags);
	return idx + 1;		/* origin 1 */
}
#endif
2554 2554
2555 static Sg_device * 2555 static Sg_device *
2556 sg_get_dev(int dev) 2556 sg_get_dev(int dev)
2557 { 2557 {
2558 Sg_device *sdp = NULL; 2558 Sg_device *sdp = NULL;
2559 unsigned long iflags; 2559 unsigned long iflags;
2560 2560
2561 if (sg_dev_arr && (dev >= 0)) { 2561 if (sg_dev_arr && (dev >= 0)) {
2562 read_lock_irqsave(&sg_dev_arr_lock, iflags); 2562 read_lock_irqsave(&sg_dev_arr_lock, iflags);
2563 if (dev < sg_dev_max) 2563 if (dev < sg_dev_max)
2564 sdp = sg_dev_arr[dev]; 2564 sdp = sg_dev_arr[dev];
2565 read_unlock_irqrestore(&sg_dev_arr_lock, iflags); 2565 read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
2566 } 2566 }
2567 return sdp; 2567 return sdp;
2568 } 2568 }
2569 2569
#ifdef CONFIG_SCSI_PROC_FS

/* root of the /proc/scsi/sg directory tree */
static struct proc_dir_entry *sg_proc_sgp = NULL;

static char sg_proc_sg_dirname[] = "scsi/sg";

/* shared show routine for the single-integer entries below */
static int sg_proc_seq_show_int(struct seq_file *s, void *v);

/* /proc/scsi/sg/allow_dio (read/write) */
static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
			          size_t count, loff_t *off);
static struct file_operations adio_fops = {
	/* .owner, .read and .llseek added in sg_proc_init() */
	.open = sg_proc_single_open_adio,
	.write = sg_proc_write_adio,
	.release = single_release,
};

/* /proc/scsi/sg/def_reserved_size (read/write) */
static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_dressz(struct file *filp, 
		const char __user *buffer, size_t count, loff_t *off);
static struct file_operations dressz_fops = {
	.open = sg_proc_single_open_dressz,
	.write = sg_proc_write_dressz,
	.release = single_release,
};

/* /proc/scsi/sg/version (read-only) */
static int sg_proc_seq_show_version(struct seq_file *s, void *v);
static int sg_proc_single_open_version(struct inode *inode, struct file *file);
static struct file_operations version_fops = {
	.open = sg_proc_single_open_version,
	.release = single_release,
};

/* /proc/scsi/sg/device_hdr (read-only, column headings) */
static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
static struct file_operations devhdr_fops = {
	.open = sg_proc_single_open_devhdr,
	.release = single_release,
};

/* /proc/scsi/sg/devices (read-only, iterates over all devices) */
static int sg_proc_seq_show_dev(struct seq_file *s, void *v);
static int sg_proc_open_dev(struct inode *inode, struct file *file);
static void * dev_seq_start(struct seq_file *s, loff_t *pos);
static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
static void dev_seq_stop(struct seq_file *s, void *v);
static struct file_operations dev_fops = {
	.open = sg_proc_open_dev,
	.release = seq_release,
};
static struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next = dev_seq_next,
	.stop = dev_seq_stop,
	.show = sg_proc_seq_show_dev,
};

/* /proc/scsi/sg/device_strs (read-only, vendor/model/rev strings) */
static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
static struct file_operations devstrs_fops = {
	.open = sg_proc_open_devstrs,
	.release = seq_release,
};
static struct seq_operations devstrs_seq_ops = {
	.start = dev_seq_start,
	.next = dev_seq_next,
	.stop = dev_seq_stop,
	.show = sg_proc_seq_show_devstrs,
};

/* /proc/scsi/sg/debug (read-only, detailed per-fd/request state) */
static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
static int sg_proc_open_debug(struct inode *inode, struct file *file);
static struct file_operations debug_fops = {
	.open = sg_proc_open_debug,
	.release = seq_release,
};
static struct seq_operations debug_seq_ops = {
	.start = dev_seq_start,
	.next = dev_seq_next,
	.stop = dev_seq_stop,
	.show = sg_proc_seq_show_debug,
};


/* name + fops pair describing one /proc/scsi/sg entry */
struct sg_proc_leaf {
	const char * name;
	struct file_operations * fops;
};

/* table walked by sg_proc_init()/sg_proc_cleanup() */
static struct sg_proc_leaf sg_proc_leaf_arr[] = {
	{"allow_dio", &adio_fops},
	{"debug", &debug_fops},
	{"def_reserved_size", &dressz_fops},
	{"device_hdr", &devhdr_fops},
	{"devices", &dev_fops},
	{"device_strs", &devstrs_fops},
	{"version", &version_fops}
};
2668 2668
2669 static int 2669 static int
2670 sg_proc_init(void) 2670 sg_proc_init(void)
2671 { 2671 {
2672 int k, mask; 2672 int k, mask;
2673 int num_leaves = 2673 int num_leaves =
2674 sizeof (sg_proc_leaf_arr) / sizeof (sg_proc_leaf_arr[0]); 2674 sizeof (sg_proc_leaf_arr) / sizeof (sg_proc_leaf_arr[0]);
2675 struct proc_dir_entry *pdep; 2675 struct proc_dir_entry *pdep;
2676 struct sg_proc_leaf * leaf; 2676 struct sg_proc_leaf * leaf;
2677 2677
2678 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL); 2678 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
2679 if (!sg_proc_sgp) 2679 if (!sg_proc_sgp)
2680 return 1; 2680 return 1;
2681 for (k = 0; k < num_leaves; ++k) { 2681 for (k = 0; k < num_leaves; ++k) {
2682 leaf = &sg_proc_leaf_arr[k]; 2682 leaf = &sg_proc_leaf_arr[k];
2683 mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO; 2683 mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
2684 pdep = create_proc_entry(leaf->name, mask, sg_proc_sgp); 2684 pdep = create_proc_entry(leaf->name, mask, sg_proc_sgp);
2685 if (pdep) { 2685 if (pdep) {
2686 leaf->fops->owner = THIS_MODULE, 2686 leaf->fops->owner = THIS_MODULE,
2687 leaf->fops->read = seq_read, 2687 leaf->fops->read = seq_read,
2688 leaf->fops->llseek = seq_lseek, 2688 leaf->fops->llseek = seq_lseek,
2689 pdep->proc_fops = leaf->fops; 2689 pdep->proc_fops = leaf->fops;
2690 } 2690 }
2691 } 2691 }
2692 return 0; 2692 return 0;
2693 } 2693 }
2694 2694
2695 static void 2695 static void
2696 sg_proc_cleanup(void) 2696 sg_proc_cleanup(void)
2697 { 2697 {
2698 int k; 2698 int k;
2699 int num_leaves = 2699 int num_leaves =
2700 sizeof (sg_proc_leaf_arr) / sizeof (sg_proc_leaf_arr[0]); 2700 sizeof (sg_proc_leaf_arr) / sizeof (sg_proc_leaf_arr[0]);
2701 2701
2702 if (!sg_proc_sgp) 2702 if (!sg_proc_sgp)
2703 return; 2703 return;
2704 for (k = 0; k < num_leaves; ++k) 2704 for (k = 0; k < num_leaves; ++k)
2705 remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp); 2705 remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp);
2706 remove_proc_entry(sg_proc_sg_dirname, NULL); 2706 remove_proc_entry(sg_proc_sg_dirname, NULL);
2707 } 2707 }
2708 2708
2709 2709
2710 static int sg_proc_seq_show_int(struct seq_file *s, void *v) 2710 static int sg_proc_seq_show_int(struct seq_file *s, void *v)
2711 { 2711 {
2712 seq_printf(s, "%d\n", *((int *)s->private)); 2712 seq_printf(s, "%d\n", *((int *)s->private));
2713 return 0; 2713 return 0;
2714 } 2714 }
2715 2715
/* open handler for /proc/scsi/sg/allow_dio */
static int sg_proc_single_open_adio(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
}
2720 2720
2721 static ssize_t 2721 static ssize_t
2722 sg_proc_write_adio(struct file *filp, const char __user *buffer, 2722 sg_proc_write_adio(struct file *filp, const char __user *buffer,
2723 size_t count, loff_t *off) 2723 size_t count, loff_t *off)
2724 { 2724 {
2725 int num; 2725 int num;
2726 char buff[11]; 2726 char buff[11];
2727 2727
2728 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 2728 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2729 return -EACCES; 2729 return -EACCES;
2730 num = (count < 10) ? count : 10; 2730 num = (count < 10) ? count : 10;
2731 if (copy_from_user(buff, buffer, num)) 2731 if (copy_from_user(buff, buffer, num))
2732 return -EFAULT; 2732 return -EFAULT;
2733 buff[num] = '\0'; 2733 buff[num] = '\0';
2734 sg_allow_dio = simple_strtoul(buff, NULL, 10) ? 1 : 0; 2734 sg_allow_dio = simple_strtoul(buff, NULL, 10) ? 1 : 0;
2735 return count; 2735 return count;
2736 } 2736 }
2737 2737
/* open handler for /proc/scsi/sg/def_reserved_size */
static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
}
2742 2742
2743 static ssize_t 2743 static ssize_t
2744 sg_proc_write_dressz(struct file *filp, const char __user *buffer, 2744 sg_proc_write_dressz(struct file *filp, const char __user *buffer,
2745 size_t count, loff_t *off) 2745 size_t count, loff_t *off)
2746 { 2746 {
2747 int num; 2747 int num;
2748 unsigned long k = ULONG_MAX; 2748 unsigned long k = ULONG_MAX;
2749 char buff[11]; 2749 char buff[11];
2750 2750
2751 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 2751 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2752 return -EACCES; 2752 return -EACCES;
2753 num = (count < 10) ? count : 10; 2753 num = (count < 10) ? count : 10;
2754 if (copy_from_user(buff, buffer, num)) 2754 if (copy_from_user(buff, buffer, num))
2755 return -EFAULT; 2755 return -EFAULT;
2756 buff[num] = '\0'; 2756 buff[num] = '\0';
2757 k = simple_strtoul(buff, NULL, 10); 2757 k = simple_strtoul(buff, NULL, 10);
2758 if (k <= 1048576) { /* limit "big buff" to 1 MB */ 2758 if (k <= 1048576) { /* limit "big buff" to 1 MB */
2759 sg_big_buff = k; 2759 sg_big_buff = k;
2760 return count; 2760 return count;
2761 } 2761 }
2762 return -ERANGE; 2762 return -ERANGE;
2763 } 2763 }
2764 2764
/* /proc/scsi/sg/version: driver version number, string and date */
static int sg_proc_seq_show_version(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR,
		   sg_version_date);
	return 0;
}
2771 2771
/* open handler for /proc/scsi/sg/version */
static int sg_proc_single_open_version(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_version, NULL);
}
2776 2776
/* /proc/scsi/sg/device_hdr: column headings for the devices entry */
static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
{
	seq_printf(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\t"
		   "online\n");
	return 0;
}
2783 2783
/* open handler for /proc/scsi/sg/device_hdr */
static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_devhdr, NULL);
}
2788 2788
/* iterator state for the device-table seq_file entries */
struct sg_proc_deviter {
	loff_t index;	/* current slot in sg_dev_arr */
	size_t max;	/* one past the highest active device */
};
2793 2793
2794 static void * dev_seq_start(struct seq_file *s, loff_t *pos) 2794 static void * dev_seq_start(struct seq_file *s, loff_t *pos)
2795 { 2795 {
2796 struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL); 2796 struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);
2797 2797
2798 s->private = it; 2798 s->private = it;
2799 if (! it) 2799 if (! it)
2800 return NULL; 2800 return NULL;
2801 2801
2802 if (NULL == sg_dev_arr) 2802 if (NULL == sg_dev_arr)
2803 return NULL; 2803 return NULL;
2804 it->index = *pos; 2804 it->index = *pos;
2805 it->max = sg_last_dev(); 2805 it->max = sg_last_dev();
2806 if (it->index >= it->max) 2806 if (it->index >= it->max)
2807 return NULL; 2807 return NULL;
2808 return it; 2808 return it;
2809 } 2809 }
2810 2810
2811 static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos) 2811 static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
2812 { 2812 {
2813 struct sg_proc_deviter * it = s->private; 2813 struct sg_proc_deviter * it = s->private;
2814 2814
2815 *pos = ++it->index; 2815 *pos = ++it->index;
2816 return (it->index < it->max) ? it : NULL; 2816 return (it->index < it->max) ? it : NULL;
2817 } 2817 }
2818 2818
/* seq_file stop op: free the iterator allocated by dev_seq_start() */
static void dev_seq_stop(struct seq_file *s, void *v)
{
	kfree(s->private);
}
2823 2823
/* open handler for /proc/scsi/sg/devices */
static int sg_proc_open_dev(struct inode *inode, struct file *file)
{
	return seq_open(file, &dev_seq_ops);
}
2828 2828
2829 static int sg_proc_seq_show_dev(struct seq_file *s, void *v) 2829 static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
2830 { 2830 {
2831 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; 2831 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2832 Sg_device *sdp; 2832 Sg_device *sdp;
2833 struct scsi_device *scsidp; 2833 struct scsi_device *scsidp;
2834 2834
2835 sdp = it ? sg_get_dev(it->index) : NULL; 2835 sdp = it ? sg_get_dev(it->index) : NULL;
2836 if (sdp && (scsidp = sdp->device) && (!sdp->detached)) 2836 if (sdp && (scsidp = sdp->device) && (!sdp->detached))
2837 seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n", 2837 seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
2838 scsidp->host->host_no, scsidp->channel, 2838 scsidp->host->host_no, scsidp->channel,
2839 scsidp->id, scsidp->lun, (int) scsidp->type, 2839 scsidp->id, scsidp->lun, (int) scsidp->type,
2840 1, 2840 1,
2841 (int) scsidp->queue_depth, 2841 (int) scsidp->queue_depth,
2842 (int) scsidp->device_busy, 2842 (int) scsidp->device_busy,
2843 (int) scsi_device_online(scsidp)); 2843 (int) scsi_device_online(scsidp));
2844 else 2844 else
2845 seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n"); 2845 seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
2846 return 0; 2846 return 0;
2847 } 2847 }
2848 2848
/* open handler for /proc/scsi/sg/device_strs */
static int sg_proc_open_devstrs(struct inode *inode, struct file *file)
{
	return seq_open(file, &devstrs_seq_ops);
}
2853 2853
2854 static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v) 2854 static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
2855 { 2855 {
2856 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; 2856 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2857 Sg_device *sdp; 2857 Sg_device *sdp;
2858 struct scsi_device *scsidp; 2858 struct scsi_device *scsidp;
2859 2859
2860 sdp = it ? sg_get_dev(it->index) : NULL; 2860 sdp = it ? sg_get_dev(it->index) : NULL;
2861 if (sdp && (scsidp = sdp->device) && (!sdp->detached)) 2861 if (sdp && (scsidp = sdp->device) && (!sdp->detached))
2862 seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n", 2862 seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
2863 scsidp->vendor, scsidp->model, scsidp->rev); 2863 scsidp->vendor, scsidp->model, scsidp->rev);
2864 else 2864 else
2865 seq_printf(s, "<no active device>\n"); 2865 seq_printf(s, "<no active device>\n");
2866 return 0; 2866 return 0;
2867 } 2867 }
2868 2868
2869 static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) 2869 static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
2870 { 2870 {
2871 int k, m, new_interface, blen, usg; 2871 int k, m, new_interface, blen, usg;
2872 Sg_request *srp; 2872 Sg_request *srp;
2873 Sg_fd *fp; 2873 Sg_fd *fp;
2874 const sg_io_hdr_t *hp; 2874 const sg_io_hdr_t *hp;
2875 const char * cp; 2875 const char * cp;
2876 unsigned int ms; 2876 unsigned int ms;
2877 2877
2878 for (k = 0; (fp = sg_get_nth_sfp(sdp, k)); ++k) { 2878 for (k = 0; (fp = sg_get_nth_sfp(sdp, k)); ++k) {
2879 seq_printf(s, " FD(%d): timeout=%dms bufflen=%d " 2879 seq_printf(s, " FD(%d): timeout=%dms bufflen=%d "
2880 "(res)sgat=%d low_dma=%d\n", k + 1, 2880 "(res)sgat=%d low_dma=%d\n", k + 1,
2881 jiffies_to_msecs(fp->timeout), 2881 jiffies_to_msecs(fp->timeout),
2882 fp->reserve.bufflen, 2882 fp->reserve.bufflen,
2883 (int) fp->reserve.k_use_sg, 2883 (int) fp->reserve.k_use_sg,
2884 (int) fp->low_dma); 2884 (int) fp->low_dma);
2885 seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n", 2885 seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n",
2886 (int) fp->cmd_q, (int) fp->force_packid, 2886 (int) fp->cmd_q, (int) fp->force_packid,
2887 (int) fp->keep_orphan, (int) fp->closed); 2887 (int) fp->keep_orphan, (int) fp->closed);
2888 for (m = 0; (srp = sg_get_nth_request(fp, m)); ++m) { 2888 for (m = 0; (srp = sg_get_nth_request(fp, m)); ++m) {
2889 hp = &srp->header; 2889 hp = &srp->header;
2890 new_interface = (hp->interface_id == '\0') ? 0 : 1; 2890 new_interface = (hp->interface_id == '\0') ? 0 : 1;
2891 if (srp->res_used) { 2891 if (srp->res_used) {
2892 if (new_interface && 2892 if (new_interface &&
2893 (SG_FLAG_MMAP_IO & hp->flags)) 2893 (SG_FLAG_MMAP_IO & hp->flags))
2894 cp = " mmap>> "; 2894 cp = " mmap>> ";
2895 else 2895 else
2896 cp = " rb>> "; 2896 cp = " rb>> ";
2897 } else { 2897 } else {
2898 if (SG_INFO_DIRECT_IO_MASK & hp->info) 2898 if (SG_INFO_DIRECT_IO_MASK & hp->info)
2899 cp = " dio>> "; 2899 cp = " dio>> ";
2900 else 2900 else
2901 cp = " "; 2901 cp = " ";
2902 } 2902 }
2903 seq_printf(s, cp); 2903 seq_printf(s, cp);
2904 blen = srp->data.bufflen; 2904 blen = srp->data.bufflen;
2905 usg = srp->data.k_use_sg; 2905 usg = srp->data.k_use_sg;
2906 seq_printf(s, srp->done ? 2906 seq_printf(s, srp->done ?
2907 ((1 == srp->done) ? "rcv:" : "fin:") 2907 ((1 == srp->done) ? "rcv:" : "fin:")
2908 : "act:"); 2908 : "act:");
2909 seq_printf(s, " id=%d blen=%d", 2909 seq_printf(s, " id=%d blen=%d",
2910 srp->header.pack_id, blen); 2910 srp->header.pack_id, blen);
2911 if (srp->done) 2911 if (srp->done)
2912 seq_printf(s, " dur=%d", hp->duration); 2912 seq_printf(s, " dur=%d", hp->duration);
2913 else { 2913 else {
2914 ms = jiffies_to_msecs(jiffies); 2914 ms = jiffies_to_msecs(jiffies);
2915 seq_printf(s, " t_o/elap=%d/%d", 2915 seq_printf(s, " t_o/elap=%d/%d",
2916 (new_interface ? hp->timeout : 2916 (new_interface ? hp->timeout :
2917 jiffies_to_msecs(fp->timeout)), 2917 jiffies_to_msecs(fp->timeout)),
2918 (ms > hp->duration ? ms - hp->duration : 0)); 2918 (ms > hp->duration ? ms - hp->duration : 0));
2919 } 2919 }
2920 seq_printf(s, "ms sgat=%d op=0x%02x\n", usg, 2920 seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
2921 (int) srp->data.cmd_opcode); 2921 (int) srp->data.cmd_opcode);
2922 } 2922 }
2923 if (0 == m) 2923 if (0 == m)
2924 seq_printf(s, " No requests active\n"); 2924 seq_printf(s, " No requests active\n");
2925 } 2925 }
2926 } 2926 }
2927 2927
/* open handler for /proc/scsi/sg/debug */
static int sg_proc_open_debug(struct inode *inode, struct file *file)
{
	return seq_open(file, &debug_seq_ops);
}
2932 2932
2933 static int sg_proc_seq_show_debug(struct seq_file *s, void *v) 2933 static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
2934 { 2934 {
2935 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; 2935 struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2936 Sg_device *sdp; 2936 Sg_device *sdp;
2937 2937
2938 if (it && (0 == it->index)) { 2938 if (it && (0 == it->index)) {
2939 seq_printf(s, "dev_max(currently)=%d max_active_device=%d " 2939 seq_printf(s, "dev_max(currently)=%d max_active_device=%d "
2940 "(origin 1)\n", sg_dev_max, (int)it->max); 2940 "(origin 1)\n", sg_dev_max, (int)it->max);
2941 seq_printf(s, " def_reserved_size=%d\n", sg_big_buff); 2941 seq_printf(s, " def_reserved_size=%d\n", sg_big_buff);
2942 } 2942 }
2943 sdp = it ? sg_get_dev(it->index) : NULL; 2943 sdp = it ? sg_get_dev(it->index) : NULL;
2944 if (sdp) { 2944 if (sdp) {
2945 struct scsi_device *scsidp = sdp->device; 2945 struct scsi_device *scsidp = sdp->device;
2946 2946
2947 if (NULL == scsidp) { 2947 if (NULL == scsidp) {
2948 seq_printf(s, "device %d detached ??\n", 2948 seq_printf(s, "device %d detached ??\n",
2949 (int)it->index); 2949 (int)it->index);
2950 return 0; 2950 return 0;
2951 } 2951 }
2952 2952
2953 if (sg_get_nth_sfp(sdp, 0)) { 2953 if (sg_get_nth_sfp(sdp, 0)) {
2954 seq_printf(s, " >>> device=%s ", 2954 seq_printf(s, " >>> device=%s ",
2955 sdp->disk->disk_name); 2955 sdp->disk->disk_name);
2956 if (sdp->detached) 2956 if (sdp->detached)
2957 seq_printf(s, "detached pending close "); 2957 seq_printf(s, "detached pending close ");
2958 else 2958 else
2959 seq_printf 2959 seq_printf
2960 (s, "scsi%d chan=%d id=%d lun=%d em=%d", 2960 (s, "scsi%d chan=%d id=%d lun=%d em=%d",
2961 scsidp->host->host_no, 2961 scsidp->host->host_no,
2962 scsidp->channel, scsidp->id, 2962 scsidp->channel, scsidp->id,
2963 scsidp->lun, 2963 scsidp->lun,
2964 scsidp->host->hostt->emulated); 2964 scsidp->host->hostt->emulated);
2965 seq_printf(s, " sg_tablesize=%d excl=%d\n", 2965 seq_printf(s, " sg_tablesize=%d excl=%d\n",
2966 sdp->sg_tablesize, sdp->exclude); 2966 sdp->sg_tablesize, sdp->exclude);
2967 } 2967 }
2968 sg_proc_debug_helper(s, sdp); 2968 sg_proc_debug_helper(s, sdp);
2969 } 2969 }
2970 return 0; 2970 return 0;
2971 } 2971 }
2972 2972
2973 #endif /* CONFIG_SCSI_PROC_FS */ 2973 #endif /* CONFIG_SCSI_PROC_FS */
2974 2974
2975 module_init(init_sg); 2975 module_init(init_sg);
2976 module_exit(exit_sg); 2976 module_exit(exit_sg);
2977 MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR); 2977 MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR);
2978 2978
1 /* 1 /*
2 SCSI Tape Driver for Linux version 1.1 and newer. See the accompanying 2 SCSI Tape Driver for Linux version 1.1 and newer. See the accompanying
3 file Documentation/scsi/st.txt for more information. 3 file Documentation/scsi/st.txt for more information.
4 4
5 History: 5 History:
6 Rewritten from Dwayne Forsyth's SCSI tape driver by Kai Makisara. 6 Rewritten from Dwayne Forsyth's SCSI tape driver by Kai Makisara.
7 Contribution and ideas from several people including (in alphabetical 7 Contribution and ideas from several people including (in alphabetical
8 order) Klaus Ehrenfried, Eugene Exarevsky, Eric Lee Green, Wolfgang Denk, 8 order) Klaus Ehrenfried, Eugene Exarevsky, Eric Lee Green, Wolfgang Denk,
9 Steve Hirsch, Andreas Koppenh"ofer, Michael Leodolter, Eyal Lebedinsky, 9 Steve Hirsch, Andreas Koppenh"ofer, Michael Leodolter, Eyal Lebedinsky,
10 Michael Schaefer, J"org Weule, and Eric Youngdale. 10 Michael Schaefer, J"org Weule, and Eric Youngdale.
11 11
12 Copyright 1992 - 2005 Kai Makisara 12 Copyright 1992 - 2005 Kai Makisara
13 email Kai.Makisara@kolumbus.fi 13 email Kai.Makisara@kolumbus.fi
14 14
15 Some small formal changes - aeb, 950809 15 Some small formal changes - aeb, 950809
16 16
17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support 17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
18 */ 18 */
19 19
20 static const char *verstr = "20050830"; 20 static const char *verstr = "20050830";
21 21
22 #include <linux/module.h> 22 #include <linux/module.h>
23 23
24 #include <linux/fs.h> 24 #include <linux/fs.h>
25 #include <linux/kernel.h> 25 #include <linux/kernel.h>
26 #include <linux/sched.h> 26 #include <linux/sched.h>
27 #include <linux/mm.h> 27 #include <linux/mm.h>
28 #include <linux/init.h> 28 #include <linux/init.h>
29 #include <linux/string.h> 29 #include <linux/string.h>
30 #include <linux/errno.h> 30 #include <linux/errno.h>
31 #include <linux/mtio.h> 31 #include <linux/mtio.h>
32 #include <linux/cdrom.h> 32 #include <linux/cdrom.h>
33 #include <linux/ioctl.h> 33 #include <linux/ioctl.h>
34 #include <linux/fcntl.h> 34 #include <linux/fcntl.h>
35 #include <linux/spinlock.h> 35 #include <linux/spinlock.h>
36 #include <linux/blkdev.h> 36 #include <linux/blkdev.h>
37 #include <linux/moduleparam.h> 37 #include <linux/moduleparam.h>
38 #include <linux/devfs_fs_kernel.h> 38 #include <linux/devfs_fs_kernel.h>
39 #include <linux/cdev.h> 39 #include <linux/cdev.h>
40 #include <linux/delay.h> 40 #include <linux/delay.h>
41 #include <linux/mutex.h> 41 #include <linux/mutex.h>
42 42
43 #include <asm/uaccess.h> 43 #include <asm/uaccess.h>
44 #include <asm/dma.h> 44 #include <asm/dma.h>
45 #include <asm/system.h> 45 #include <asm/system.h>
46 46
47 #include <scsi/scsi.h> 47 #include <scsi/scsi.h>
48 #include <scsi/scsi_dbg.h> 48 #include <scsi/scsi_dbg.h>
49 #include <scsi/scsi_device.h> 49 #include <scsi/scsi_device.h>
50 #include <scsi/scsi_driver.h> 50 #include <scsi/scsi_driver.h>
51 #include <scsi/scsi_eh.h> 51 #include <scsi/scsi_eh.h>
52 #include <scsi/scsi_host.h> 52 #include <scsi/scsi_host.h>
53 #include <scsi/scsi_ioctl.h> 53 #include <scsi/scsi_ioctl.h>
54 #include <scsi/sg.h> 54 #include <scsi/sg.h>
55 55
56 56
57 /* The driver prints some debugging information on the console if DEBUG 57 /* The driver prints some debugging information on the console if DEBUG
58 is defined and non-zero. */ 58 is defined and non-zero. */
59 #define DEBUG 0 59 #define DEBUG 0
60 60
61 #if DEBUG 61 #if DEBUG
62 /* The message level for the debug messages is currently set to KERN_NOTICE 62 /* The message level for the debug messages is currently set to KERN_NOTICE
63 so that people can easily see the messages. Later when the debugging messages 63 so that people can easily see the messages. Later when the debugging messages
64 in the drivers are more widely classified, this may be changed to KERN_DEBUG. */ 64 in the drivers are more widely classified, this may be changed to KERN_DEBUG. */
65 #define ST_DEB_MSG KERN_NOTICE 65 #define ST_DEB_MSG KERN_NOTICE
66 #define DEB(a) a 66 #define DEB(a) a
67 #define DEBC(a) if (debugging) { a ; } 67 #define DEBC(a) if (debugging) { a ; }
68 #else 68 #else
69 #define DEB(a) 69 #define DEB(a)
70 #define DEBC(a) 70 #define DEBC(a)
71 #endif 71 #endif
72 72
73 #define ST_KILOBYTE 1024 73 #define ST_KILOBYTE 1024
74 74
75 #include "st_options.h" 75 #include "st_options.h"
76 #include "st.h" 76 #include "st.h"
77 77
78 static int buffer_kbs; 78 static int buffer_kbs;
79 static int max_sg_segs; 79 static int max_sg_segs;
80 static int try_direct_io = TRY_DIRECT_IO; 80 static int try_direct_io = TRY_DIRECT_IO;
81 static int try_rdio = 1; 81 static int try_rdio = 1;
82 static int try_wdio = 1; 82 static int try_wdio = 1;
83 83
84 static int st_dev_max; 84 static int st_dev_max;
85 static int st_nr_dev; 85 static int st_nr_dev;
86 86
87 static struct class *st_sysfs_class; 87 static struct class *st_sysfs_class;
88 88
89 MODULE_AUTHOR("Kai Makisara"); 89 MODULE_AUTHOR("Kai Makisara");
90 MODULE_DESCRIPTION("SCSI Tape Driver"); 90 MODULE_DESCRIPTION("SCSI Tape Driver");
91 MODULE_LICENSE("GPL"); 91 MODULE_LICENSE("GPL");
92 92
93 /* Set 'perm' (4th argument) to 0 to disable module_param's definition 93 /* Set 'perm' (4th argument) to 0 to disable module_param's definition
94 * of sysfs parameters (which module_param doesn't yet support). 94 * of sysfs parameters (which module_param doesn't yet support).
95 * Sysfs parameters defined explicitly later. 95 * Sysfs parameters defined explicitly later.
96 */ 96 */
97 module_param_named(buffer_kbs, buffer_kbs, int, 0); 97 module_param_named(buffer_kbs, buffer_kbs, int, 0);
98 MODULE_PARM_DESC(buffer_kbs, "Default driver buffer size for fixed block mode (KB; 32)"); 98 MODULE_PARM_DESC(buffer_kbs, "Default driver buffer size for fixed block mode (KB; 32)");
99 module_param_named(max_sg_segs, max_sg_segs, int, 0); 99 module_param_named(max_sg_segs, max_sg_segs, int, 0);
100 MODULE_PARM_DESC(max_sg_segs, "Maximum number of scatter/gather segments to use (256)"); 100 MODULE_PARM_DESC(max_sg_segs, "Maximum number of scatter/gather segments to use (256)");
101 module_param_named(try_direct_io, try_direct_io, int, 0); 101 module_param_named(try_direct_io, try_direct_io, int, 0);
102 MODULE_PARM_DESC(try_direct_io, "Try direct I/O between user buffer and tape drive (1)"); 102 MODULE_PARM_DESC(try_direct_io, "Try direct I/O between user buffer and tape drive (1)");
103 103
104 /* Extra parameters for testing */ 104 /* Extra parameters for testing */
105 module_param_named(try_rdio, try_rdio, int, 0); 105 module_param_named(try_rdio, try_rdio, int, 0);
106 MODULE_PARM_DESC(try_rdio, "Try direct read i/o when possible"); 106 MODULE_PARM_DESC(try_rdio, "Try direct read i/o when possible");
107 module_param_named(try_wdio, try_wdio, int, 0); 107 module_param_named(try_wdio, try_wdio, int, 0);
108 MODULE_PARM_DESC(try_wdio, "Try direct write i/o when possible"); 108 MODULE_PARM_DESC(try_wdio, "Try direct write i/o when possible");
109 109
110 #ifndef MODULE 110 #ifndef MODULE
111 static int write_threshold_kbs; /* retained for compatibility */ 111 static int write_threshold_kbs; /* retained for compatibility */
112 static struct st_dev_parm { 112 static struct st_dev_parm {
113 char *name; 113 char *name;
114 int *val; 114 int *val;
115 } parms[] __initdata = { 115 } parms[] __initdata = {
116 { 116 {
117 "buffer_kbs", &buffer_kbs 117 "buffer_kbs", &buffer_kbs
118 }, 118 },
119 { /* Retained for compatibility with 2.4 */ 119 { /* Retained for compatibility with 2.4 */
120 "write_threshold_kbs", &write_threshold_kbs 120 "write_threshold_kbs", &write_threshold_kbs
121 }, 121 },
122 { 122 {
123 "max_sg_segs", NULL 123 "max_sg_segs", NULL
124 }, 124 },
125 { 125 {
126 "try_direct_io", &try_direct_io 126 "try_direct_io", &try_direct_io
127 } 127 }
128 }; 128 };
129 #endif 129 #endif
130 130
131 /* Restrict the number of modes so that names for all are assigned */ 131 /* Restrict the number of modes so that names for all are assigned */
132 #if ST_NBR_MODES > 16 132 #if ST_NBR_MODES > 16
133 #error "Maximum number of modes is 16" 133 #error "Maximum number of modes is 16"
134 #endif 134 #endif
135 /* Bit reversed order to get same names for same minors with all 135 /* Bit reversed order to get same names for same minors with all
136 mode counts */ 136 mode counts */
137 static const char *st_formats[] = { 137 static const char *st_formats[] = {
138 "", "r", "k", "s", "l", "t", "o", "u", 138 "", "r", "k", "s", "l", "t", "o", "u",
139 "m", "v", "p", "x", "a", "y", "q", "z"}; 139 "m", "v", "p", "x", "a", "y", "q", "z"};
140 140
141 /* The default definitions have been moved to st_options.h */ 141 /* The default definitions have been moved to st_options.h */
142 142
143 #define ST_FIXED_BUFFER_SIZE (ST_FIXED_BUFFER_BLOCKS * ST_KILOBYTE) 143 #define ST_FIXED_BUFFER_SIZE (ST_FIXED_BUFFER_BLOCKS * ST_KILOBYTE)
144 144
145 /* The buffer size should fit into the 24 bits for length in the 145 /* The buffer size should fit into the 24 bits for length in the
146 6-byte SCSI read and write commands. */ 146 6-byte SCSI read and write commands. */
147 #if ST_FIXED_BUFFER_SIZE >= (2 << 24 - 1) 147 #if ST_FIXED_BUFFER_SIZE >= (2 << 24 - 1)
148 #error "Buffer size should not exceed (2 << 24 - 1) bytes!" 148 #error "Buffer size should not exceed (2 << 24 - 1) bytes!"
149 #endif 149 #endif
150 150
151 static int debugging = DEBUG; 151 static int debugging = DEBUG;
152 152
153 #define MAX_RETRIES 0 153 #define MAX_RETRIES 0
154 #define MAX_WRITE_RETRIES 0 154 #define MAX_WRITE_RETRIES 0
155 #define MAX_READY_RETRIES 0 155 #define MAX_READY_RETRIES 0
156 #define NO_TAPE NOT_READY 156 #define NO_TAPE NOT_READY
157 157
158 #define ST_TIMEOUT (900 * HZ) 158 #define ST_TIMEOUT (900 * HZ)
159 #define ST_LONG_TIMEOUT (14000 * HZ) 159 #define ST_LONG_TIMEOUT (14000 * HZ)
160 160
161 /* Remove mode bits and auto-rewind bit (7) */ 161 /* Remove mode bits and auto-rewind bit (7) */
162 #define TAPE_NR(x) ( ((iminor(x) & ~255) >> (ST_NBR_MODE_BITS + 1)) | \ 162 #define TAPE_NR(x) ( ((iminor(x) & ~255) >> (ST_NBR_MODE_BITS + 1)) | \
163 (iminor(x) & ~(-1 << ST_MODE_SHIFT)) ) 163 (iminor(x) & ~(-1 << ST_MODE_SHIFT)) )
164 #define TAPE_MODE(x) ((iminor(x) & ST_MODE_MASK) >> ST_MODE_SHIFT) 164 #define TAPE_MODE(x) ((iminor(x) & ST_MODE_MASK) >> ST_MODE_SHIFT)
165 165
166 /* Construct the minor number from the device (d), mode (m), and non-rewind (n) data */ 166 /* Construct the minor number from the device (d), mode (m), and non-rewind (n) data */
167 #define TAPE_MINOR(d, m, n) (((d & ~(255 >> (ST_NBR_MODE_BITS + 1))) << (ST_NBR_MODE_BITS + 1)) | \ 167 #define TAPE_MINOR(d, m, n) (((d & ~(255 >> (ST_NBR_MODE_BITS + 1))) << (ST_NBR_MODE_BITS + 1)) | \
168 (d & (255 >> (ST_NBR_MODE_BITS + 1))) | (m << ST_MODE_SHIFT) | ((n != 0) << 7) ) 168 (d & (255 >> (ST_NBR_MODE_BITS + 1))) | (m << ST_MODE_SHIFT) | ((n != 0) << 7) )
169 169
170 /* Internal ioctl to set both density (uppermost 8 bits) and blocksize (lower 170 /* Internal ioctl to set both density (uppermost 8 bits) and blocksize (lower
171 24 bits) */ 171 24 bits) */
172 #define SET_DENS_AND_BLK 0x10001 172 #define SET_DENS_AND_BLK 0x10001
173 173
174 static DEFINE_RWLOCK(st_dev_arr_lock); 174 static DEFINE_RWLOCK(st_dev_arr_lock);
175 175
176 static int st_fixed_buffer_size = ST_FIXED_BUFFER_SIZE; 176 static int st_fixed_buffer_size = ST_FIXED_BUFFER_SIZE;
177 static int st_max_sg_segs = ST_MAX_SG; 177 static int st_max_sg_segs = ST_MAX_SG;
178 178
179 static struct scsi_tape **scsi_tapes = NULL; 179 static struct scsi_tape **scsi_tapes = NULL;
180 180
181 static int modes_defined; 181 static int modes_defined;
182 182
183 static struct st_buffer *new_tape_buffer(int, int, int); 183 static struct st_buffer *new_tape_buffer(int, int, int);
184 static int enlarge_buffer(struct st_buffer *, int, int); 184 static int enlarge_buffer(struct st_buffer *, int, int);
185 static void normalize_buffer(struct st_buffer *); 185 static void normalize_buffer(struct st_buffer *);
186 static int append_to_buffer(const char __user *, struct st_buffer *, int); 186 static int append_to_buffer(const char __user *, struct st_buffer *, int);
187 static int from_buffer(struct st_buffer *, char __user *, int); 187 static int from_buffer(struct st_buffer *, char __user *, int);
188 static void move_buffer_data(struct st_buffer *, int); 188 static void move_buffer_data(struct st_buffer *, int);
189 static void buf_to_sg(struct st_buffer *, unsigned int); 189 static void buf_to_sg(struct st_buffer *, unsigned int);
190 190
191 static int sgl_map_user_pages(struct scatterlist *, const unsigned int, 191 static int sgl_map_user_pages(struct scatterlist *, const unsigned int,
192 unsigned long, size_t, int); 192 unsigned long, size_t, int);
193 static int sgl_unmap_user_pages(struct scatterlist *, const unsigned int, int); 193 static int sgl_unmap_user_pages(struct scatterlist *, const unsigned int, int);
194 194
195 static int st_probe(struct device *); 195 static int st_probe(struct device *);
196 static int st_remove(struct device *); 196 static int st_remove(struct device *);
197 197
198 static void do_create_driverfs_files(void); 198 static void do_create_driverfs_files(void);
199 static void do_remove_driverfs_files(void); 199 static void do_remove_driverfs_files(void);
200 static void do_create_class_files(struct scsi_tape *, int, int); 200 static void do_create_class_files(struct scsi_tape *, int, int);
201 201
202 static struct scsi_driver st_template = { 202 static struct scsi_driver st_template = {
203 .owner = THIS_MODULE, 203 .owner = THIS_MODULE,
204 .gendrv = { 204 .gendrv = {
205 .name = "st", 205 .name = "st",
206 .probe = st_probe, 206 .probe = st_probe,
207 .remove = st_remove, 207 .remove = st_remove,
208 }, 208 },
209 }; 209 };
210 210
211 static int st_compression(struct scsi_tape *, int); 211 static int st_compression(struct scsi_tape *, int);
212 212
213 static int find_partition(struct scsi_tape *); 213 static int find_partition(struct scsi_tape *);
214 static int switch_partition(struct scsi_tape *); 214 static int switch_partition(struct scsi_tape *);
215 215
216 static int st_int_ioctl(struct scsi_tape *, unsigned int, unsigned long); 216 static int st_int_ioctl(struct scsi_tape *, unsigned int, unsigned long);
217 217
218 static void scsi_tape_release(struct kref *); 218 static void scsi_tape_release(struct kref *);
219 219
220 #define to_scsi_tape(obj) container_of(obj, struct scsi_tape, kref) 220 #define to_scsi_tape(obj) container_of(obj, struct scsi_tape, kref)
221 221
222 static DEFINE_MUTEX(st_ref_mutex); 222 static DEFINE_MUTEX(st_ref_mutex);
223 223
224 224
225 #include "osst_detect.h" 225 #include "osst_detect.h"
226 #ifndef SIGS_FROM_OSST 226 #ifndef SIGS_FROM_OSST
227 #define SIGS_FROM_OSST \ 227 #define SIGS_FROM_OSST \
228 {"OnStream", "SC-", "", "osst"}, \ 228 {"OnStream", "SC-", "", "osst"}, \
229 {"OnStream", "DI-", "", "osst"}, \ 229 {"OnStream", "DI-", "", "osst"}, \
230 {"OnStream", "DP-", "", "osst"}, \ 230 {"OnStream", "DP-", "", "osst"}, \
231 {"OnStream", "USB", "", "osst"}, \ 231 {"OnStream", "USB", "", "osst"}, \
232 {"OnStream", "FW-", "", "osst"} 232 {"OnStream", "FW-", "", "osst"}
233 #endif 233 #endif
234 234
235 static struct scsi_tape *scsi_tape_get(int dev) 235 static struct scsi_tape *scsi_tape_get(int dev)
236 { 236 {
237 struct scsi_tape *STp = NULL; 237 struct scsi_tape *STp = NULL;
238 238
239 mutex_lock(&st_ref_mutex); 239 mutex_lock(&st_ref_mutex);
240 write_lock(&st_dev_arr_lock); 240 write_lock(&st_dev_arr_lock);
241 241
242 if (dev < st_dev_max && scsi_tapes != NULL) 242 if (dev < st_dev_max && scsi_tapes != NULL)
243 STp = scsi_tapes[dev]; 243 STp = scsi_tapes[dev];
244 if (!STp) goto out; 244 if (!STp) goto out;
245 245
246 kref_get(&STp->kref); 246 kref_get(&STp->kref);
247 247
248 if (!STp->device) 248 if (!STp->device)
249 goto out_put; 249 goto out_put;
250 250
251 if (scsi_device_get(STp->device)) 251 if (scsi_device_get(STp->device))
252 goto out_put; 252 goto out_put;
253 253
254 goto out; 254 goto out;
255 255
256 out_put: 256 out_put:
257 kref_put(&STp->kref, scsi_tape_release); 257 kref_put(&STp->kref, scsi_tape_release);
258 STp = NULL; 258 STp = NULL;
259 out: 259 out:
260 write_unlock(&st_dev_arr_lock); 260 write_unlock(&st_dev_arr_lock);
261 mutex_unlock(&st_ref_mutex); 261 mutex_unlock(&st_ref_mutex);
262 return STp; 262 return STp;
263 } 263 }
264 264
265 static void scsi_tape_put(struct scsi_tape *STp) 265 static void scsi_tape_put(struct scsi_tape *STp)
266 { 266 {
267 struct scsi_device *sdev = STp->device; 267 struct scsi_device *sdev = STp->device;
268 268
269 mutex_lock(&st_ref_mutex); 269 mutex_lock(&st_ref_mutex);
270 kref_put(&STp->kref, scsi_tape_release); 270 kref_put(&STp->kref, scsi_tape_release);
271 scsi_device_put(sdev); 271 scsi_device_put(sdev);
272 mutex_unlock(&st_ref_mutex); 272 mutex_unlock(&st_ref_mutex);
273 } 273 }
274 274
275 struct st_reject_data { 275 struct st_reject_data {
276 char *vendor; 276 char *vendor;
277 char *model; 277 char *model;
278 char *rev; 278 char *rev;
279 char *driver_hint; /* Name of the correct driver, NULL if unknown */ 279 char *driver_hint; /* Name of the correct driver, NULL if unknown */
280 }; 280 };
281 281
282 static struct st_reject_data reject_list[] = { 282 static struct st_reject_data reject_list[] = {
283 /* {"XXX", "Yy-", "", NULL}, example */ 283 /* {"XXX", "Yy-", "", NULL}, example */
284 SIGS_FROM_OSST, 284 SIGS_FROM_OSST,
285 {NULL, }}; 285 {NULL, }};
286 286
287 /* If the device signature is on the list of incompatible drives, the 287 /* If the device signature is on the list of incompatible drives, the
288 function returns a pointer to the name of the correct driver (if known) */ 288 function returns a pointer to the name of the correct driver (if known) */
289 static char * st_incompatible(struct scsi_device* SDp) 289 static char * st_incompatible(struct scsi_device* SDp)
290 { 290 {
291 struct st_reject_data *rp; 291 struct st_reject_data *rp;
292 292
293 for (rp=&(reject_list[0]); rp->vendor != NULL; rp++) 293 for (rp=&(reject_list[0]); rp->vendor != NULL; rp++)
294 if (!strncmp(rp->vendor, SDp->vendor, strlen(rp->vendor)) && 294 if (!strncmp(rp->vendor, SDp->vendor, strlen(rp->vendor)) &&
295 !strncmp(rp->model, SDp->model, strlen(rp->model)) && 295 !strncmp(rp->model, SDp->model, strlen(rp->model)) &&
296 !strncmp(rp->rev, SDp->rev, strlen(rp->rev))) { 296 !strncmp(rp->rev, SDp->rev, strlen(rp->rev))) {
297 if (rp->driver_hint) 297 if (rp->driver_hint)
298 return rp->driver_hint; 298 return rp->driver_hint;
299 else 299 else
300 return "unknown"; 300 return "unknown";
301 } 301 }
302 return NULL; 302 return NULL;
303 } 303 }
304 304
305 305
306 static inline char *tape_name(struct scsi_tape *tape) 306 static inline char *tape_name(struct scsi_tape *tape)
307 { 307 {
308 return tape->disk->disk_name; 308 return tape->disk->disk_name;
309 } 309 }
310 310
311 311
312 static void st_analyze_sense(struct st_request *SRpnt, struct st_cmdstatus *s) 312 static void st_analyze_sense(struct st_request *SRpnt, struct st_cmdstatus *s)
313 { 313 {
314 const u8 *ucp; 314 const u8 *ucp;
315 const u8 *sense = SRpnt->sense; 315 const u8 *sense = SRpnt->sense;
316 316
317 s->have_sense = scsi_normalize_sense(SRpnt->sense, 317 s->have_sense = scsi_normalize_sense(SRpnt->sense,
318 SCSI_SENSE_BUFFERSIZE, &s->sense_hdr); 318 SCSI_SENSE_BUFFERSIZE, &s->sense_hdr);
319 s->flags = 0; 319 s->flags = 0;
320 320
321 if (s->have_sense) { 321 if (s->have_sense) {
322 s->deferred = 0; 322 s->deferred = 0;
323 s->remainder_valid = 323 s->remainder_valid =
324 scsi_get_sense_info_fld(sense, SCSI_SENSE_BUFFERSIZE, &s->uremainder64); 324 scsi_get_sense_info_fld(sense, SCSI_SENSE_BUFFERSIZE, &s->uremainder64);
325 switch (sense[0] & 0x7f) { 325 switch (sense[0] & 0x7f) {
326 case 0x71: 326 case 0x71:
327 s->deferred = 1; 327 s->deferred = 1;
328 case 0x70: 328 case 0x70:
329 s->fixed_format = 1; 329 s->fixed_format = 1;
330 s->flags = sense[2] & 0xe0; 330 s->flags = sense[2] & 0xe0;
331 break; 331 break;
332 case 0x73: 332 case 0x73:
333 s->deferred = 1; 333 s->deferred = 1;
334 case 0x72: 334 case 0x72:
335 s->fixed_format = 0; 335 s->fixed_format = 0;
336 ucp = scsi_sense_desc_find(sense, SCSI_SENSE_BUFFERSIZE, 4); 336 ucp = scsi_sense_desc_find(sense, SCSI_SENSE_BUFFERSIZE, 4);
337 s->flags = ucp ? (ucp[3] & 0xe0) : 0; 337 s->flags = ucp ? (ucp[3] & 0xe0) : 0;
338 break; 338 break;
339 } 339 }
340 } 340 }
341 } 341 }
342 342
343 343
344 /* Convert the result to success code */ 344 /* Convert the result to success code */
345 static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt) 345 static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
346 { 346 {
347 int result = SRpnt->result; 347 int result = SRpnt->result;
348 u8 scode; 348 u8 scode;
349 DEB(const char *stp;) 349 DEB(const char *stp;)
350 char *name = tape_name(STp); 350 char *name = tape_name(STp);
351 struct st_cmdstatus *cmdstatp; 351 struct st_cmdstatus *cmdstatp;
352 352
353 if (!result) 353 if (!result)
354 return 0; 354 return 0;
355 355
356 cmdstatp = &STp->buffer->cmdstat; 356 cmdstatp = &STp->buffer->cmdstat;
357 st_analyze_sense(SRpnt, cmdstatp); 357 st_analyze_sense(SRpnt, cmdstatp);
358 358
359 if (cmdstatp->have_sense) 359 if (cmdstatp->have_sense)
360 scode = STp->buffer->cmdstat.sense_hdr.sense_key; 360 scode = STp->buffer->cmdstat.sense_hdr.sense_key;
361 else 361 else
362 scode = 0; 362 scode = 0;
363 363
364 DEB( 364 DEB(
365 if (debugging) { 365 if (debugging) {
366 printk(ST_DEB_MSG "%s: Error: %x, cmd: %x %x %x %x %x %x\n", 366 printk(ST_DEB_MSG "%s: Error: %x, cmd: %x %x %x %x %x %x\n",
367 name, result, 367 name, result,
368 SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2], 368 SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2],
369 SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]); 369 SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]);
370 if (cmdstatp->have_sense) 370 if (cmdstatp->have_sense)
371 __scsi_print_sense("st", SRpnt->sense, SCSI_SENSE_BUFFERSIZE); 371 __scsi_print_sense("st", SRpnt->sense, SCSI_SENSE_BUFFERSIZE);
372 } ) /* end DEB */ 372 } ) /* end DEB */
373 if (!debugging) { /* Abnormal conditions for tape */ 373 if (!debugging) { /* Abnormal conditions for tape */
374 if (!cmdstatp->have_sense) 374 if (!cmdstatp->have_sense)
375 printk(KERN_WARNING 375 printk(KERN_WARNING
376 "%s: Error %x (sugg. bt 0x%x, driver bt 0x%x, host bt 0x%x).\n", 376 "%s: Error %x (sugg. bt 0x%x, driver bt 0x%x, host bt 0x%x).\n",
377 name, result, suggestion(result), 377 name, result, suggestion(result),
378 driver_byte(result) & DRIVER_MASK, host_byte(result)); 378 driver_byte(result) & DRIVER_MASK, host_byte(result));
379 else if (cmdstatp->have_sense && 379 else if (cmdstatp->have_sense &&
380 scode != NO_SENSE && 380 scode != NO_SENSE &&
381 scode != RECOVERED_ERROR && 381 scode != RECOVERED_ERROR &&
382 /* scode != UNIT_ATTENTION && */ 382 /* scode != UNIT_ATTENTION && */
383 scode != BLANK_CHECK && 383 scode != BLANK_CHECK &&
384 scode != VOLUME_OVERFLOW && 384 scode != VOLUME_OVERFLOW &&
385 SRpnt->cmd[0] != MODE_SENSE && 385 SRpnt->cmd[0] != MODE_SENSE &&
386 SRpnt->cmd[0] != TEST_UNIT_READY) { 386 SRpnt->cmd[0] != TEST_UNIT_READY) {
387 printk(KERN_WARNING "%s: Error with sense data: ", name); 387 printk(KERN_WARNING "%s: Error with sense data: ", name);
388 __scsi_print_sense("st", SRpnt->sense, 388 __scsi_print_sense("st", SRpnt->sense,
389 SCSI_SENSE_BUFFERSIZE); 389 SCSI_SENSE_BUFFERSIZE);
390 } 390 }
391 } 391 }
392 392
393 if (cmdstatp->fixed_format && 393 if (cmdstatp->fixed_format &&
394 STp->cln_mode >= EXTENDED_SENSE_START) { /* Only fixed format sense */ 394 STp->cln_mode >= EXTENDED_SENSE_START) { /* Only fixed format sense */
395 if (STp->cln_sense_value) 395 if (STp->cln_sense_value)
396 STp->cleaning_req |= ((SRpnt->sense[STp->cln_mode] & 396 STp->cleaning_req |= ((SRpnt->sense[STp->cln_mode] &
397 STp->cln_sense_mask) == STp->cln_sense_value); 397 STp->cln_sense_mask) == STp->cln_sense_value);
398 else 398 else
399 STp->cleaning_req |= ((SRpnt->sense[STp->cln_mode] & 399 STp->cleaning_req |= ((SRpnt->sense[STp->cln_mode] &
400 STp->cln_sense_mask) != 0); 400 STp->cln_sense_mask) != 0);
401 } 401 }
402 if (cmdstatp->have_sense && 402 if (cmdstatp->have_sense &&
403 cmdstatp->sense_hdr.asc == 0 && cmdstatp->sense_hdr.ascq == 0x17) 403 cmdstatp->sense_hdr.asc == 0 && cmdstatp->sense_hdr.ascq == 0x17)
404 STp->cleaning_req = 1; /* ASC and ASCQ => cleaning requested */ 404 STp->cleaning_req = 1; /* ASC and ASCQ => cleaning requested */
405 405
406 STp->pos_unknown |= STp->device->was_reset; 406 STp->pos_unknown |= STp->device->was_reset;
407 407
408 if (cmdstatp->have_sense && 408 if (cmdstatp->have_sense &&
409 scode == RECOVERED_ERROR 409 scode == RECOVERED_ERROR
410 #if ST_RECOVERED_WRITE_FATAL 410 #if ST_RECOVERED_WRITE_FATAL
411 && SRpnt->cmd[0] != WRITE_6 411 && SRpnt->cmd[0] != WRITE_6
412 && SRpnt->cmd[0] != WRITE_FILEMARKS 412 && SRpnt->cmd[0] != WRITE_FILEMARKS
413 #endif 413 #endif
414 ) { 414 ) {
415 STp->recover_count++; 415 STp->recover_count++;
416 STp->recover_reg++; 416 STp->recover_reg++;
417 417
418 DEB( 418 DEB(
419 if (debugging) { 419 if (debugging) {
420 if (SRpnt->cmd[0] == READ_6) 420 if (SRpnt->cmd[0] == READ_6)
421 stp = "read"; 421 stp = "read";
422 else if (SRpnt->cmd[0] == WRITE_6) 422 else if (SRpnt->cmd[0] == WRITE_6)
423 stp = "write"; 423 stp = "write";
424 else 424 else
425 stp = "ioctl"; 425 stp = "ioctl";
426 printk(ST_DEB_MSG "%s: Recovered %s error (%d).\n", name, stp, 426 printk(ST_DEB_MSG "%s: Recovered %s error (%d).\n", name, stp,
427 STp->recover_count); 427 STp->recover_count);
428 } ) /* end DEB */ 428 } ) /* end DEB */
429 429
430 if (cmdstatp->flags == 0) 430 if (cmdstatp->flags == 0)
431 return 0; 431 return 0;
432 } 432 }
433 return (-EIO); 433 return (-EIO);
434 } 434 }
435 435
436 436
437 /* Wakeup from interrupt */ 437 /* Wakeup from interrupt */
438 static void st_sleep_done(void *data, char *sense, int result, int resid) 438 static void st_sleep_done(void *data, char *sense, int result, int resid)
439 { 439 {
440 struct st_request *SRpnt = data; 440 struct st_request *SRpnt = data;
441 struct scsi_tape *STp = SRpnt->stp; 441 struct scsi_tape *STp = SRpnt->stp;
442 442
443 memcpy(SRpnt->sense, sense, SCSI_SENSE_BUFFERSIZE); 443 memcpy(SRpnt->sense, sense, SCSI_SENSE_BUFFERSIZE);
444 (STp->buffer)->cmdstat.midlevel_result = SRpnt->result = result; 444 (STp->buffer)->cmdstat.midlevel_result = SRpnt->result = result;
445 DEB( STp->write_pending = 0; ) 445 DEB( STp->write_pending = 0; )
446 446
447 if (SRpnt->waiting) 447 if (SRpnt->waiting)
448 complete(SRpnt->waiting); 448 complete(SRpnt->waiting);
449 } 449 }
450 450
451 static struct st_request *st_allocate_request(void) 451 static struct st_request *st_allocate_request(void)
452 { 452 {
453 return kzalloc(sizeof(struct st_request), GFP_KERNEL); 453 return kzalloc(sizeof(struct st_request), GFP_KERNEL);
454 } 454 }
455 455
/* Free an st_request obtained from st_allocate_request(). */
static void st_release_request(struct st_request *streq)
{
	kfree(streq);
}
460 460
/* Do the scsi command. Waits until command performed if do_wait is true.
   Otherwise write_behind_check() is used to check that the command
   has finished.

   SRpnt may be NULL, in which case a new request is allocated here; on
   success the (possibly new) request is returned and the caller owns it.
   On failure NULL is returned and the error is left in
   STp->buffer->syscall_result. */
static struct st_request *
st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd,
	   int bytes, int direction, int timeout, int retries, int do_wait)
{
	struct completion *waiting;

	/* if async, make sure there's no command outstanding */
	if (!do_wait && ((STp->buffer)->last_SRpnt)) {
		printk(KERN_ERR "%s: Async command already active.\n",
		       tape_name(STp));
		if (signal_pending(current))
			(STp->buffer)->syscall_result = (-EINTR);
		else
			(STp->buffer)->syscall_result = (-EBUSY);
		return NULL;
	}

	if (SRpnt == NULL) {
		SRpnt = st_allocate_request();
		if (SRpnt == NULL) {
			DEBC( printk(KERN_ERR "%s: Can't get SCSI request.\n",
				     tape_name(STp)); );
			if (signal_pending(current))
				(STp->buffer)->syscall_result = (-EINTR);
			else
				(STp->buffer)->syscall_result = (-EBUSY);
			return NULL;
		}
		SRpnt->stp = STp;
	}

	/* If async IO, set last_SRpnt. This ptr tells write_behind_check
	   which IO is outstanding. It's nulled out when the IO completes. */
	if (!do_wait)
		(STp->buffer)->last_SRpnt = SRpnt;

	/* Completion is signalled from st_sleep_done when the request
	   finishes; for async callers write_behind_check() waits on it. */
	waiting = &STp->wait;
	init_completion(waiting);
	SRpnt->waiting = waiting;

	/* When direct I/O is not in use, map the driver buffer into the
	   scatter-gather list for this transfer length. */
	if (!STp->buffer->do_dio)
		buf_to_sg(STp->buffer, bytes);

	memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd));
	STp->buffer->cmdstat.have_sense = 0;
	STp->buffer->syscall_result = 0;

	/* CDB length is derived from the opcode's standard group size;
	   st builds only standard commands so COMMAND_SIZE() is correct
	   here (arbitrary CDBs go through SG_IO, not this path). */
	if (scsi_execute_async(STp->device, cmd, COMMAND_SIZE(cmd[0]), direction,
			&((STp->buffer)->sg[0]), bytes, (STp->buffer)->sg_segs,
			       timeout, retries, SRpnt, st_sleep_done, GFP_KERNEL)) {
		/* could not allocate the buffer or request was too large */
		(STp->buffer)->syscall_result = (-EBUSY);
		(STp->buffer)->last_SRpnt = NULL;
	}
	else if (do_wait) {
		wait_for_completion(waiting);
		SRpnt->waiting = NULL;
		(STp->buffer)->syscall_result = st_chk_result(STp, SRpnt);
	}

	return SRpnt;
}
526 526
527 527
/* Handle the write-behind checking (waits for completion). Returns -ENOSPC if
   write has been correct but EOM early warning reached, -EIO if write ended in
   error or zero if write successful. Asynchronous writes are used only in
   variable block mode. */
static int write_behind_check(struct scsi_tape * STp)
{
	int retval = 0;
	struct st_buffer *STbuffer;
	struct st_partstat *STps;
	struct st_cmdstatus *cmdstatp;
	struct st_request *SRpnt;

	STbuffer = STp->buffer;
	if (!STbuffer->writing)		/* no async write outstanding */
		return 0;

	DEB(
	if (STp->write_pending)
		STp->nbr_waits++;
	else
		STp->nbr_finished++;
	) /* end DEB */

	/* st_do_scsi() stored the async request in last_SRpnt and arranged
	   for st_sleep_done to complete STp->wait when it finishes. */
	wait_for_completion(&(STp->wait));
	SRpnt = STbuffer->last_SRpnt;
	STbuffer->last_SRpnt = NULL;	/* no async command outstanding any more */
	SRpnt->waiting = NULL;

	(STp->buffer)->syscall_result = st_chk_result(STp, SRpnt);
	st_release_request(SRpnt);

	/* Account for the data that was written out of the buffer and
	   advance the logical block position accordingly. */
	STbuffer->buffer_bytes -= STbuffer->writing;
	STps = &(STp->ps[STp->partition]);
	if (STps->drv_block >= 0) {
		if (STp->block_size == 0)
			STps->drv_block++;	/* variable block mode: one block per write */
		else
			STps->drv_block += STbuffer->writing / STp->block_size;
	}

	cmdstatp = &STbuffer->cmdstat;
	if (STbuffer->syscall_result) {
		retval = -EIO;
		if (cmdstatp->have_sense && !cmdstatp->deferred &&
		    (cmdstatp->flags & SENSE_EOM) &&
		    (cmdstatp->sense_hdr.sense_key == NO_SENSE ||
		     cmdstatp->sense_hdr.sense_key == RECOVERED_ERROR)) {
			/* EOM at write-behind, has all data been written? */
			if (!cmdstatp->remainder_valid ||
			    cmdstatp->uremainder64 == 0)
				retval = -ENOSPC;
		}
		if (retval == -EIO)
			STps->drv_block = -1;	/* tape position no longer known */
	}
	STbuffer->writing = 0;

	DEB(if (debugging && retval)
	    printk(ST_DEB_MSG "%s: Async write error %x, return value %d.\n",
		   tape_name(STp), STbuffer->cmdstat.midlevel_result, retval);) /* end DEB */

	return retval;
}
591 591
592 592
593 /* Step over EOF if it has been inadvertently crossed (ioctl not used because 593 /* Step over EOF if it has been inadvertently crossed (ioctl not used because
594 it messes up the block number). */ 594 it messes up the block number). */
595 static int cross_eof(struct scsi_tape * STp, int forward) 595 static int cross_eof(struct scsi_tape * STp, int forward)
596 { 596 {
597 struct st_request *SRpnt; 597 struct st_request *SRpnt;
598 unsigned char cmd[MAX_COMMAND_SIZE]; 598 unsigned char cmd[MAX_COMMAND_SIZE];
599 599
600 cmd[0] = SPACE; 600 cmd[0] = SPACE;
601 cmd[1] = 0x01; /* Space FileMarks */ 601 cmd[1] = 0x01; /* Space FileMarks */
602 if (forward) { 602 if (forward) {
603 cmd[2] = cmd[3] = 0; 603 cmd[2] = cmd[3] = 0;
604 cmd[4] = 1; 604 cmd[4] = 1;
605 } else 605 } else
606 cmd[2] = cmd[3] = cmd[4] = 0xff; /* -1 filemarks */ 606 cmd[2] = cmd[3] = cmd[4] = 0xff; /* -1 filemarks */
607 cmd[5] = 0; 607 cmd[5] = 0;
608 608
609 DEBC(printk(ST_DEB_MSG "%s: Stepping over filemark %s.\n", 609 DEBC(printk(ST_DEB_MSG "%s: Stepping over filemark %s.\n",
610 tape_name(STp), forward ? "forward" : "backward")); 610 tape_name(STp), forward ? "forward" : "backward"));
611 611
612 SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE, 612 SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
613 STp->device->timeout, MAX_RETRIES, 1); 613 STp->device->timeout, MAX_RETRIES, 1);
614 if (!SRpnt) 614 if (!SRpnt)
615 return (STp->buffer)->syscall_result; 615 return (STp->buffer)->syscall_result;
616 616
617 st_release_request(SRpnt); 617 st_release_request(SRpnt);
618 SRpnt = NULL; 618 SRpnt = NULL;
619 619
620 if ((STp->buffer)->cmdstat.midlevel_result != 0) 620 if ((STp->buffer)->cmdstat.midlevel_result != 0)
621 printk(KERN_ERR "%s: Stepping over filemark %s failed.\n", 621 printk(KERN_ERR "%s: Stepping over filemark %s failed.\n",
622 tape_name(STp), forward ? "forward" : "backward"); 622 tape_name(STp), forward ? "forward" : "backward");
623 623
624 return (STp->buffer)->syscall_result; 624 return (STp->buffer)->syscall_result;
625 } 625 }
626 626
627 627
/* Flush the write buffer (never need to write if variable blocksize).
   Pads the last partial block with zeros, writes it with WRITE(6) in
   fixed-block mode, and interprets an EOM early-warning with all data
   written as -ENOSPC rather than -EIO. Returns 0 on success. */
static int flush_write_buffer(struct scsi_tape * STp)
{
	int offset, transfer, blks;
	int result;
	unsigned char cmd[MAX_COMMAND_SIZE];
	struct st_request *SRpnt;
	struct st_partstat *STps;

	/* First reap any outstanding write-behind command. */
	result = write_behind_check(STp);
	if (result)
		return result;

	result = 0;
	if (STp->dirty == 1) {

		/* Round the buffered byte count up to a whole number of
		   device blocks and zero-fill the padding. */
		offset = (STp->buffer)->buffer_bytes;
		transfer = ((offset + STp->block_size - 1) /
			    STp->block_size) * STp->block_size;
		DEBC(printk(ST_DEB_MSG "%s: Flushing %d bytes.\n",
			       tape_name(STp), transfer));

		memset((STp->buffer)->b_data + offset, 0, transfer - offset);

		/* WRITE(6), fixed-block mode (bit 0 of byte 1 set);
		   bytes 2..4 carry the 24-bit block count. */
		memset(cmd, 0, MAX_COMMAND_SIZE);
		cmd[0] = WRITE_6;
		cmd[1] = 1;
		blks = transfer / STp->block_size;
		cmd[2] = blks >> 16;
		cmd[3] = blks >> 8;
		cmd[4] = blks;

		SRpnt = st_do_scsi(NULL, STp, cmd, transfer, DMA_TO_DEVICE,
				   STp->device->timeout, MAX_WRITE_RETRIES, 1);
		if (!SRpnt)
			return (STp->buffer)->syscall_result;

		STps = &(STp->ps[STp->partition]);
		if ((STp->buffer)->syscall_result != 0) {
			struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;

			if (cmdstatp->have_sense && !cmdstatp->deferred &&
			    (cmdstatp->flags & SENSE_EOM) &&
			    (cmdstatp->sense_hdr.sense_key == NO_SENSE ||
			     cmdstatp->sense_hdr.sense_key == RECOVERED_ERROR) &&
			    (!cmdstatp->remainder_valid ||
			     cmdstatp->uremainder64 == 0)) { /* All written at EOM early warning */
				STp->dirty = 0;
				(STp->buffer)->buffer_bytes = 0;
				if (STps->drv_block >= 0)
					STps->drv_block += blks;
				result = (-ENOSPC);
			} else {
				printk(KERN_ERR "%s: Error on flush.\n",
				       tape_name(STp));
				STps->drv_block = (-1);	/* position lost */
				result = (-EIO);
			}
		} else {
			if (STps->drv_block >= 0)
				STps->drv_block += blks;
			STp->dirty = 0;
			(STp->buffer)->buffer_bytes = 0;
		}
		st_release_request(SRpnt);
		SRpnt = NULL;
	}
	return result;
}
697 697
698 698
699 /* Flush the tape buffer. The tape will be positioned correctly unless 699 /* Flush the tape buffer. The tape will be positioned correctly unless
700 seek_next is true. */ 700 seek_next is true. */
701 static int flush_buffer(struct scsi_tape *STp, int seek_next) 701 static int flush_buffer(struct scsi_tape *STp, int seek_next)
702 { 702 {
703 int backspace, result; 703 int backspace, result;
704 struct st_buffer *STbuffer; 704 struct st_buffer *STbuffer;
705 struct st_partstat *STps; 705 struct st_partstat *STps;
706 706
707 STbuffer = STp->buffer; 707 STbuffer = STp->buffer;
708 708
709 /* 709 /*
710 * If there was a bus reset, block further access 710 * If there was a bus reset, block further access
711 * to this device. 711 * to this device.
712 */ 712 */
713 if (STp->pos_unknown) 713 if (STp->pos_unknown)
714 return (-EIO); 714 return (-EIO);
715 715
716 if (STp->ready != ST_READY) 716 if (STp->ready != ST_READY)
717 return 0; 717 return 0;
718 STps = &(STp->ps[STp->partition]); 718 STps = &(STp->ps[STp->partition]);
719 if (STps->rw == ST_WRITING) /* Writing */ 719 if (STps->rw == ST_WRITING) /* Writing */
720 return flush_write_buffer(STp); 720 return flush_write_buffer(STp);
721 721
722 if (STp->block_size == 0) 722 if (STp->block_size == 0)
723 return 0; 723 return 0;
724 724
725 backspace = ((STp->buffer)->buffer_bytes + 725 backspace = ((STp->buffer)->buffer_bytes +
726 (STp->buffer)->read_pointer) / STp->block_size - 726 (STp->buffer)->read_pointer) / STp->block_size -
727 ((STp->buffer)->read_pointer + STp->block_size - 1) / 727 ((STp->buffer)->read_pointer + STp->block_size - 1) /
728 STp->block_size; 728 STp->block_size;
729 (STp->buffer)->buffer_bytes = 0; 729 (STp->buffer)->buffer_bytes = 0;
730 (STp->buffer)->read_pointer = 0; 730 (STp->buffer)->read_pointer = 0;
731 result = 0; 731 result = 0;
732 if (!seek_next) { 732 if (!seek_next) {
733 if (STps->eof == ST_FM_HIT) { 733 if (STps->eof == ST_FM_HIT) {
734 result = cross_eof(STp, 0); /* Back over the EOF hit */ 734 result = cross_eof(STp, 0); /* Back over the EOF hit */
735 if (!result) 735 if (!result)
736 STps->eof = ST_NOEOF; 736 STps->eof = ST_NOEOF;
737 else { 737 else {
738 if (STps->drv_file >= 0) 738 if (STps->drv_file >= 0)
739 STps->drv_file++; 739 STps->drv_file++;
740 STps->drv_block = 0; 740 STps->drv_block = 0;
741 } 741 }
742 } 742 }
743 if (!result && backspace > 0) 743 if (!result && backspace > 0)
744 result = st_int_ioctl(STp, MTBSR, backspace); 744 result = st_int_ioctl(STp, MTBSR, backspace);
745 } else if (STps->eof == ST_FM_HIT) { 745 } else if (STps->eof == ST_FM_HIT) {
746 if (STps->drv_file >= 0) 746 if (STps->drv_file >= 0)
747 STps->drv_file++; 747 STps->drv_file++;
748 STps->drv_block = 0; 748 STps->drv_block = 0;
749 STps->eof = ST_NOEOF; 749 STps->eof = ST_NOEOF;
750 } 750 }
751 return result; 751 return result;
752 752
753 } 753 }
754 754
755 /* Set the mode parameters */ 755 /* Set the mode parameters */
756 static int set_mode_densblk(struct scsi_tape * STp, struct st_modedef * STm) 756 static int set_mode_densblk(struct scsi_tape * STp, struct st_modedef * STm)
757 { 757 {
758 int set_it = 0; 758 int set_it = 0;
759 unsigned long arg; 759 unsigned long arg;
760 char *name = tape_name(STp); 760 char *name = tape_name(STp);
761 761
762 if (!STp->density_changed && 762 if (!STp->density_changed &&
763 STm->default_density >= 0 && 763 STm->default_density >= 0 &&
764 STm->default_density != STp->density) { 764 STm->default_density != STp->density) {
765 arg = STm->default_density; 765 arg = STm->default_density;
766 set_it = 1; 766 set_it = 1;
767 } else 767 } else
768 arg = STp->density; 768 arg = STp->density;
769 arg <<= MT_ST_DENSITY_SHIFT; 769 arg <<= MT_ST_DENSITY_SHIFT;
770 if (!STp->blksize_changed && 770 if (!STp->blksize_changed &&
771 STm->default_blksize >= 0 && 771 STm->default_blksize >= 0 &&
772 STm->default_blksize != STp->block_size) { 772 STm->default_blksize != STp->block_size) {
773 arg |= STm->default_blksize; 773 arg |= STm->default_blksize;
774 set_it = 1; 774 set_it = 1;
775 } else 775 } else
776 arg |= STp->block_size; 776 arg |= STp->block_size;
777 if (set_it && 777 if (set_it &&
778 st_int_ioctl(STp, SET_DENS_AND_BLK, arg)) { 778 st_int_ioctl(STp, SET_DENS_AND_BLK, arg)) {
779 printk(KERN_WARNING 779 printk(KERN_WARNING
780 "%s: Can't set default block size to %d bytes and density %x.\n", 780 "%s: Can't set default block size to %d bytes and density %x.\n",
781 name, STm->default_blksize, STm->default_density); 781 name, STm->default_blksize, STm->default_density);
782 if (modes_defined) 782 if (modes_defined)
783 return (-EINVAL); 783 return (-EINVAL);
784 } 784 }
785 return 0; 785 return 0;
786 } 786 }
787 787
788 788
789 /* Lock or unlock the drive door. Don't use when st_request allocated. */ 789 /* Lock or unlock the drive door. Don't use when st_request allocated. */
790 static int do_door_lock(struct scsi_tape * STp, int do_lock) 790 static int do_door_lock(struct scsi_tape * STp, int do_lock)
791 { 791 {
792 int retval, cmd; 792 int retval, cmd;
793 DEB(char *name = tape_name(STp);) 793 DEB(char *name = tape_name(STp);)
794 794
795 795
796 cmd = do_lock ? SCSI_IOCTL_DOORLOCK : SCSI_IOCTL_DOORUNLOCK; 796 cmd = do_lock ? SCSI_IOCTL_DOORLOCK : SCSI_IOCTL_DOORUNLOCK;
797 DEBC(printk(ST_DEB_MSG "%s: %socking drive door.\n", name, 797 DEBC(printk(ST_DEB_MSG "%s: %socking drive door.\n", name,
798 do_lock ? "L" : "Unl")); 798 do_lock ? "L" : "Unl"));
799 retval = scsi_ioctl(STp->device, cmd, NULL); 799 retval = scsi_ioctl(STp->device, cmd, NULL);
800 if (!retval) { 800 if (!retval) {
801 STp->door_locked = do_lock ? ST_LOCKED_EXPLICIT : ST_UNLOCKED; 801 STp->door_locked = do_lock ? ST_LOCKED_EXPLICIT : ST_UNLOCKED;
802 } 802 }
803 else { 803 else {
804 STp->door_locked = ST_LOCK_FAILS; 804 STp->door_locked = ST_LOCK_FAILS;
805 } 805 }
806 return retval; 806 return retval;
807 } 807 }
808 808
809 809
810 /* Set the internal state after reset */ 810 /* Set the internal state after reset */
811 static void reset_state(struct scsi_tape *STp) 811 static void reset_state(struct scsi_tape *STp)
812 { 812 {
813 int i; 813 int i;
814 struct st_partstat *STps; 814 struct st_partstat *STps;
815 815
816 STp->pos_unknown = 0; 816 STp->pos_unknown = 0;
817 for (i = 0; i < ST_NBR_PARTITIONS; i++) { 817 for (i = 0; i < ST_NBR_PARTITIONS; i++) {
818 STps = &(STp->ps[i]); 818 STps = &(STp->ps[i]);
819 STps->rw = ST_IDLE; 819 STps->rw = ST_IDLE;
820 STps->eof = ST_NOEOF; 820 STps->eof = ST_NOEOF;
821 STps->at_sm = 0; 821 STps->at_sm = 0;
822 STps->last_block_valid = 0; 822 STps->last_block_valid = 0;
823 STps->drv_block = -1; 823 STps->drv_block = -1;
824 STps->drv_file = -1; 824 STps->drv_file = -1;
825 } 825 }
826 if (STp->can_partitions) { 826 if (STp->can_partitions) {
827 STp->partition = find_partition(STp); 827 STp->partition = find_partition(STp);
828 if (STp->partition < 0) 828 if (STp->partition < 0)
829 STp->partition = 0; 829 STp->partition = 0;
830 STp->new_partition = STp->partition; 830 STp->new_partition = STp->partition;
831 } 831 }
832 } 832 }
833 833
/* Test if the drive is ready. Returns either one of the codes below or a negative system
   error code. */
#define CHKRES_READY       0
#define CHKRES_NEW_SESSION 1
#define CHKRES_NOT_READY   2
#define CHKRES_NO_TAPE     3

#define MAX_ATTENTIONS    10

/* Issue TEST UNIT READY, absorbing up to MAX_ATTENTIONS unit attentions
   (media change etc.) and, when do_wait is set, retrying NOT READY once
   per second for up to ST_BLOCK_SECONDS. */
static int test_ready(struct scsi_tape *STp, int do_wait)
{
	int attentions, waits, max_wait, scode;
	int retval = CHKRES_READY, new_session = 0;
	unsigned char cmd[MAX_COMMAND_SIZE];
	struct st_request *SRpnt = NULL;
	struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;

	max_wait = do_wait ? ST_BLOCK_SECONDS : 0;

	for (attentions=waits=0; ; ) {
		memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE);
		cmd[0] = TEST_UNIT_READY;
		/* SRpnt is reused across iterations; st_do_scsi allocates
		   it on the first pass. */
		SRpnt = st_do_scsi(SRpnt, STp, cmd, 0, DMA_NONE,
				   STp->long_timeout, MAX_READY_RETRIES, 1);

		if (!SRpnt) {
			retval = (STp->buffer)->syscall_result;
			break;
		}

		if (cmdstatp->have_sense) {

			scode = cmdstatp->sense_hdr.sense_key;

			if (scode == UNIT_ATTENTION) { /* New media? */
				new_session = 1;
				if (attentions < MAX_ATTENTIONS) {
					attentions++;
					continue;	/* retry to clear the attention */
				}
				else {
					retval = (-EIO);
					break;
				}
			}

			if (scode == NOT_READY) {
				if (waits < max_wait) {
					/* sleep a second between retries;
					   a signal aborts the wait */
					if (msleep_interruptible(1000)) {
						retval = (-EINTR);
						break;
					}
					waits++;
					continue;
				}
				else {
					if ((STp->device)->scsi_level >= SCSI_2 &&
					    cmdstatp->sense_hdr.asc == 0x3a)	/* Check ASC */
						retval = CHKRES_NO_TAPE;
					else
						retval = CHKRES_NOT_READY;
					break;
				}
			}
		}

		/* No (handled) sense data: the command result decides. */
		retval = (STp->buffer)->syscall_result;
		if (!retval)
			retval = new_session ? CHKRES_NEW_SESSION : CHKRES_READY;
		break;
	}

	if (SRpnt != NULL)
		st_release_request(SRpnt);
	return retval;
}
910 910
911 911
912 /* See if the drive is ready and gather information about the tape. Return values: 912 /* See if the drive is ready and gather information about the tape. Return values:
913 < 0 negative error code from errno.h 913 < 0 negative error code from errno.h
914 0 drive ready 914 0 drive ready
915 1 drive not ready (possibly no tape) 915 1 drive not ready (possibly no tape)
916 */ 916 */
917 static int check_tape(struct scsi_tape *STp, struct file *filp) 917 static int check_tape(struct scsi_tape *STp, struct file *filp)
918 { 918 {
919 int i, retval, new_session = 0, do_wait; 919 int i, retval, new_session = 0, do_wait;
920 unsigned char cmd[MAX_COMMAND_SIZE], saved_cleaning; 920 unsigned char cmd[MAX_COMMAND_SIZE], saved_cleaning;
921 unsigned short st_flags = filp->f_flags; 921 unsigned short st_flags = filp->f_flags;
922 struct st_request *SRpnt = NULL; 922 struct st_request *SRpnt = NULL;
923 struct st_modedef *STm; 923 struct st_modedef *STm;
924 struct st_partstat *STps; 924 struct st_partstat *STps;
925 char *name = tape_name(STp); 925 char *name = tape_name(STp);
926 struct inode *inode = filp->f_dentry->d_inode; 926 struct inode *inode = filp->f_dentry->d_inode;
927 int mode = TAPE_MODE(inode); 927 int mode = TAPE_MODE(inode);
928 928
929 STp->ready = ST_READY; 929 STp->ready = ST_READY;
930 930
931 if (mode != STp->current_mode) { 931 if (mode != STp->current_mode) {
932 DEBC(printk(ST_DEB_MSG "%s: Mode change from %d to %d.\n", 932 DEBC(printk(ST_DEB_MSG "%s: Mode change from %d to %d.\n",
933 name, STp->current_mode, mode)); 933 name, STp->current_mode, mode));
934 new_session = 1; 934 new_session = 1;
935 STp->current_mode = mode; 935 STp->current_mode = mode;
936 } 936 }
937 STm = &(STp->modes[STp->current_mode]); 937 STm = &(STp->modes[STp->current_mode]);
938 938
939 saved_cleaning = STp->cleaning_req; 939 saved_cleaning = STp->cleaning_req;
940 STp->cleaning_req = 0; 940 STp->cleaning_req = 0;
941 941
942 do_wait = ((filp->f_flags & O_NONBLOCK) == 0); 942 do_wait = ((filp->f_flags & O_NONBLOCK) == 0);
943 retval = test_ready(STp, do_wait); 943 retval = test_ready(STp, do_wait);
944 944
945 if (retval < 0) 945 if (retval < 0)
946 goto err_out; 946 goto err_out;
947 947
948 if (retval == CHKRES_NEW_SESSION) { 948 if (retval == CHKRES_NEW_SESSION) {
949 STp->pos_unknown = 0; 949 STp->pos_unknown = 0;
950 STp->partition = STp->new_partition = 0; 950 STp->partition = STp->new_partition = 0;
951 if (STp->can_partitions) 951 if (STp->can_partitions)
952 STp->nbr_partitions = 1; /* This guess will be updated later 952 STp->nbr_partitions = 1; /* This guess will be updated later
953 if necessary */ 953 if necessary */
954 for (i = 0; i < ST_NBR_PARTITIONS; i++) { 954 for (i = 0; i < ST_NBR_PARTITIONS; i++) {
955 STps = &(STp->ps[i]); 955 STps = &(STp->ps[i]);
956 STps->rw = ST_IDLE; 956 STps->rw = ST_IDLE;
957 STps->eof = ST_NOEOF; 957 STps->eof = ST_NOEOF;
958 STps->at_sm = 0; 958 STps->at_sm = 0;
959 STps->last_block_valid = 0; 959 STps->last_block_valid = 0;
960 STps->drv_block = 0; 960 STps->drv_block = 0;
961 STps->drv_file = 0; 961 STps->drv_file = 0;
962 } 962 }
963 new_session = 1; 963 new_session = 1;
964 } 964 }
965 else { 965 else {
966 STp->cleaning_req |= saved_cleaning; 966 STp->cleaning_req |= saved_cleaning;
967 967
968 if (retval == CHKRES_NOT_READY || retval == CHKRES_NO_TAPE) { 968 if (retval == CHKRES_NOT_READY || retval == CHKRES_NO_TAPE) {
969 if (retval == CHKRES_NO_TAPE) 969 if (retval == CHKRES_NO_TAPE)
970 STp->ready = ST_NO_TAPE; 970 STp->ready = ST_NO_TAPE;
971 else 971 else
972 STp->ready = ST_NOT_READY; 972 STp->ready = ST_NOT_READY;
973 973
974 STp->density = 0; /* Clear the erroneous "residue" */ 974 STp->density = 0; /* Clear the erroneous "residue" */
975 STp->write_prot = 0; 975 STp->write_prot = 0;
976 STp->block_size = 0; 976 STp->block_size = 0;
977 STp->ps[0].drv_file = STp->ps[0].drv_block = (-1); 977 STp->ps[0].drv_file = STp->ps[0].drv_block = (-1);
978 STp->partition = STp->new_partition = 0; 978 STp->partition = STp->new_partition = 0;
979 STp->door_locked = ST_UNLOCKED; 979 STp->door_locked = ST_UNLOCKED;
980 return CHKRES_NOT_READY; 980 return CHKRES_NOT_READY;
981 } 981 }
982 } 982 }
983 983
984 if (STp->omit_blklims) 984 if (STp->omit_blklims)
985 STp->min_block = STp->max_block = (-1); 985 STp->min_block = STp->max_block = (-1);
986 else { 986 else {
987 memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE); 987 memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE);
988 cmd[0] = READ_BLOCK_LIMITS; 988 cmd[0] = READ_BLOCK_LIMITS;
989 989
990 SRpnt = st_do_scsi(SRpnt, STp, cmd, 6, DMA_FROM_DEVICE, 990 SRpnt = st_do_scsi(SRpnt, STp, cmd, 6, DMA_FROM_DEVICE,
991 STp->device->timeout, MAX_READY_RETRIES, 1); 991 STp->device->timeout, MAX_READY_RETRIES, 1);
992 if (!SRpnt) { 992 if (!SRpnt) {
993 retval = (STp->buffer)->syscall_result; 993 retval = (STp->buffer)->syscall_result;
994 goto err_out; 994 goto err_out;
995 } 995 }
996 996
997 if (!SRpnt->result && !STp->buffer->cmdstat.have_sense) { 997 if (!SRpnt->result && !STp->buffer->cmdstat.have_sense) {
998 STp->max_block = ((STp->buffer)->b_data[1] << 16) | 998 STp->max_block = ((STp->buffer)->b_data[1] << 16) |
999 ((STp->buffer)->b_data[2] << 8) | (STp->buffer)->b_data[3]; 999 ((STp->buffer)->b_data[2] << 8) | (STp->buffer)->b_data[3];
1000 STp->min_block = ((STp->buffer)->b_data[4] << 8) | 1000 STp->min_block = ((STp->buffer)->b_data[4] << 8) |
1001 (STp->buffer)->b_data[5]; 1001 (STp->buffer)->b_data[5];
1002 if ( DEB( debugging || ) !STp->inited) 1002 if ( DEB( debugging || ) !STp->inited)
1003 printk(KERN_WARNING 1003 printk(KERN_WARNING
1004 "%s: Block limits %d - %d bytes.\n", name, 1004 "%s: Block limits %d - %d bytes.\n", name,
1005 STp->min_block, STp->max_block); 1005 STp->min_block, STp->max_block);
1006 } else { 1006 } else {
1007 STp->min_block = STp->max_block = (-1); 1007 STp->min_block = STp->max_block = (-1);
1008 DEBC(printk(ST_DEB_MSG "%s: Can't read block limits.\n", 1008 DEBC(printk(ST_DEB_MSG "%s: Can't read block limits.\n",
1009 name)); 1009 name));
1010 } 1010 }
1011 } 1011 }
1012 1012
1013 memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE); 1013 memset((void *) &cmd[0], 0, MAX_COMMAND_SIZE);
1014 cmd[0] = MODE_SENSE; 1014 cmd[0] = MODE_SENSE;
1015 cmd[4] = 12; 1015 cmd[4] = 12;
1016 1016
1017 SRpnt = st_do_scsi(SRpnt, STp, cmd, 12, DMA_FROM_DEVICE, 1017 SRpnt = st_do_scsi(SRpnt, STp, cmd, 12, DMA_FROM_DEVICE,
1018 STp->device->timeout, MAX_READY_RETRIES, 1); 1018 STp->device->timeout, MAX_READY_RETRIES, 1);
1019 if (!SRpnt) { 1019 if (!SRpnt) {
1020 retval = (STp->buffer)->syscall_result; 1020 retval = (STp->buffer)->syscall_result;
1021 goto err_out; 1021 goto err_out;
1022 } 1022 }
1023 1023
1024 if ((STp->buffer)->syscall_result != 0) { 1024 if ((STp->buffer)->syscall_result != 0) {
1025 DEBC(printk(ST_DEB_MSG "%s: No Mode Sense.\n", name)); 1025 DEBC(printk(ST_DEB_MSG "%s: No Mode Sense.\n", name));
1026 STp->block_size = ST_DEFAULT_BLOCK; /* Educated guess (?) */ 1026 STp->block_size = ST_DEFAULT_BLOCK; /* Educated guess (?) */
1027 (STp->buffer)->syscall_result = 0; /* Prevent error propagation */ 1027 (STp->buffer)->syscall_result = 0; /* Prevent error propagation */
1028 STp->drv_write_prot = 0; 1028 STp->drv_write_prot = 0;
1029 } else { 1029 } else {
1030 DEBC(printk(ST_DEB_MSG 1030 DEBC(printk(ST_DEB_MSG
1031 "%s: Mode sense. Length %d, medium %x, WBS %x, BLL %d\n", 1031 "%s: Mode sense. Length %d, medium %x, WBS %x, BLL %d\n",
1032 name, 1032 name,
1033 (STp->buffer)->b_data[0], (STp->buffer)->b_data[1], 1033 (STp->buffer)->b_data[0], (STp->buffer)->b_data[1],
1034 (STp->buffer)->b_data[2], (STp->buffer)->b_data[3])); 1034 (STp->buffer)->b_data[2], (STp->buffer)->b_data[3]));
1035 1035
1036 if ((STp->buffer)->b_data[3] >= 8) { 1036 if ((STp->buffer)->b_data[3] >= 8) {
1037 STp->drv_buffer = ((STp->buffer)->b_data[2] >> 4) & 7; 1037 STp->drv_buffer = ((STp->buffer)->b_data[2] >> 4) & 7;
1038 STp->density = (STp->buffer)->b_data[4]; 1038 STp->density = (STp->buffer)->b_data[4];
1039 STp->block_size = (STp->buffer)->b_data[9] * 65536 + 1039 STp->block_size = (STp->buffer)->b_data[9] * 65536 +
1040 (STp->buffer)->b_data[10] * 256 + (STp->buffer)->b_data[11]; 1040 (STp->buffer)->b_data[10] * 256 + (STp->buffer)->b_data[11];
1041 DEBC(printk(ST_DEB_MSG 1041 DEBC(printk(ST_DEB_MSG
1042 "%s: Density %x, tape length: %x, drv buffer: %d\n", 1042 "%s: Density %x, tape length: %x, drv buffer: %d\n",
1043 name, STp->density, (STp->buffer)->b_data[5] * 65536 + 1043 name, STp->density, (STp->buffer)->b_data[5] * 65536 +
1044 (STp->buffer)->b_data[6] * 256 + (STp->buffer)->b_data[7], 1044 (STp->buffer)->b_data[6] * 256 + (STp->buffer)->b_data[7],
1045 STp->drv_buffer)); 1045 STp->drv_buffer));
1046 } 1046 }
1047 STp->drv_write_prot = ((STp->buffer)->b_data[2] & 0x80) != 0; 1047 STp->drv_write_prot = ((STp->buffer)->b_data[2] & 0x80) != 0;
1048 } 1048 }
1049 st_release_request(SRpnt); 1049 st_release_request(SRpnt);
1050 SRpnt = NULL; 1050 SRpnt = NULL;
1051 STp->inited = 1; 1051 STp->inited = 1;
1052 1052
1053 if (STp->block_size > 0) 1053 if (STp->block_size > 0)
1054 (STp->buffer)->buffer_blocks = 1054 (STp->buffer)->buffer_blocks =
1055 (STp->buffer)->buffer_size / STp->block_size; 1055 (STp->buffer)->buffer_size / STp->block_size;
1056 else 1056 else
1057 (STp->buffer)->buffer_blocks = 1; 1057 (STp->buffer)->buffer_blocks = 1;
1058 (STp->buffer)->buffer_bytes = (STp->buffer)->read_pointer = 0; 1058 (STp->buffer)->buffer_bytes = (STp->buffer)->read_pointer = 0;
1059 1059
1060 DEBC(printk(ST_DEB_MSG 1060 DEBC(printk(ST_DEB_MSG
1061 "%s: Block size: %d, buffer size: %d (%d blocks).\n", name, 1061 "%s: Block size: %d, buffer size: %d (%d blocks).\n", name,
1062 STp->block_size, (STp->buffer)->buffer_size, 1062 STp->block_size, (STp->buffer)->buffer_size,
1063 (STp->buffer)->buffer_blocks)); 1063 (STp->buffer)->buffer_blocks));
1064 1064
1065 if (STp->drv_write_prot) { 1065 if (STp->drv_write_prot) {
1066 STp->write_prot = 1; 1066 STp->write_prot = 1;
1067 1067
1068 DEBC(printk(ST_DEB_MSG "%s: Write protected\n", name)); 1068 DEBC(printk(ST_DEB_MSG "%s: Write protected\n", name));
1069 1069
1070 if (do_wait && 1070 if (do_wait &&
1071 ((st_flags & O_ACCMODE) == O_WRONLY || 1071 ((st_flags & O_ACCMODE) == O_WRONLY ||
1072 (st_flags & O_ACCMODE) == O_RDWR)) { 1072 (st_flags & O_ACCMODE) == O_RDWR)) {
1073 retval = (-EROFS); 1073 retval = (-EROFS);
1074 goto err_out; 1074 goto err_out;
1075 } 1075 }
1076 } 1076 }
1077 1077
1078 if (STp->can_partitions && STp->nbr_partitions < 1) { 1078 if (STp->can_partitions && STp->nbr_partitions < 1) {
1079 /* This code is reached when the device is opened for the first time 1079 /* This code is reached when the device is opened for the first time
1080 after the driver has been initialized with tape in the drive and the 1080 after the driver has been initialized with tape in the drive and the
1081 partition support has been enabled. */ 1081 partition support has been enabled. */
1082 DEBC(printk(ST_DEB_MSG 1082 DEBC(printk(ST_DEB_MSG
1083 "%s: Updating partition number in status.\n", name)); 1083 "%s: Updating partition number in status.\n", name));
1084 if ((STp->partition = find_partition(STp)) < 0) { 1084 if ((STp->partition = find_partition(STp)) < 0) {
1085 retval = STp->partition; 1085 retval = STp->partition;
1086 goto err_out; 1086 goto err_out;
1087 } 1087 }
1088 STp->new_partition = STp->partition; 1088 STp->new_partition = STp->partition;
1089 STp->nbr_partitions = 1; /* This guess will be updated when necessary */ 1089 STp->nbr_partitions = 1; /* This guess will be updated when necessary */
1090 } 1090 }
1091 1091
1092 if (new_session) { /* Change the drive parameters for the new mode */ 1092 if (new_session) { /* Change the drive parameters for the new mode */
1093 STp->density_changed = STp->blksize_changed = 0; 1093 STp->density_changed = STp->blksize_changed = 0;
1094 STp->compression_changed = 0; 1094 STp->compression_changed = 0;
1095 if (!(STm->defaults_for_writes) && 1095 if (!(STm->defaults_for_writes) &&
1096 (retval = set_mode_densblk(STp, STm)) < 0) 1096 (retval = set_mode_densblk(STp, STm)) < 0)
1097 goto err_out; 1097 goto err_out;
1098 1098
1099 if (STp->default_drvbuffer != 0xff) { 1099 if (STp->default_drvbuffer != 0xff) {
1100 if (st_int_ioctl(STp, MTSETDRVBUFFER, STp->default_drvbuffer)) 1100 if (st_int_ioctl(STp, MTSETDRVBUFFER, STp->default_drvbuffer))
1101 printk(KERN_WARNING 1101 printk(KERN_WARNING
1102 "%s: Can't set default drive buffering to %d.\n", 1102 "%s: Can't set default drive buffering to %d.\n",
1103 name, STp->default_drvbuffer); 1103 name, STp->default_drvbuffer);
1104 } 1104 }
1105 } 1105 }
1106 1106
1107 return CHKRES_READY; 1107 return CHKRES_READY;
1108 1108
1109 err_out: 1109 err_out:
1110 return retval; 1110 return retval;
1111 } 1111 }
1112 1112
1113 1113
1114 /* Open the device. Needs to be called with BKL only because of incrementing the SCSI host 1114 /* Open the device. Needs to be called with BKL only because of incrementing the SCSI host
1115 module count. */ 1115 module count. */
1116 static int st_open(struct inode *inode, struct file *filp) 1116 static int st_open(struct inode *inode, struct file *filp)
1117 { 1117 {
1118 int i, retval = (-EIO); 1118 int i, retval = (-EIO);
1119 struct scsi_tape *STp; 1119 struct scsi_tape *STp;
1120 struct st_partstat *STps; 1120 struct st_partstat *STps;
1121 int dev = TAPE_NR(inode); 1121 int dev = TAPE_NR(inode);
1122 char *name; 1122 char *name;
1123 1123
1124 /* 1124 /*
1125 * We really want to do nonseekable_open(inode, filp); here, but some 1125 * We really want to do nonseekable_open(inode, filp); here, but some
1126 * versions of tar incorrectly call lseek on tapes and bail out if that 1126 * versions of tar incorrectly call lseek on tapes and bail out if that
1127 * fails. So we disallow pread() and pwrite(), but permit lseeks. 1127 * fails. So we disallow pread() and pwrite(), but permit lseeks.
1128 */ 1128 */
1129 filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE); 1129 filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
1130 1130
1131 if (!(STp = scsi_tape_get(dev))) 1131 if (!(STp = scsi_tape_get(dev)))
1132 return -ENXIO; 1132 return -ENXIO;
1133 1133
1134 write_lock(&st_dev_arr_lock); 1134 write_lock(&st_dev_arr_lock);
1135 filp->private_data = STp; 1135 filp->private_data = STp;
1136 name = tape_name(STp); 1136 name = tape_name(STp);
1137 1137
1138 if (STp->in_use) { 1138 if (STp->in_use) {
1139 write_unlock(&st_dev_arr_lock); 1139 write_unlock(&st_dev_arr_lock);
1140 scsi_tape_put(STp); 1140 scsi_tape_put(STp);
1141 DEB( printk(ST_DEB_MSG "%s: Device already in use.\n", name); ) 1141 DEB( printk(ST_DEB_MSG "%s: Device already in use.\n", name); )
1142 return (-EBUSY); 1142 return (-EBUSY);
1143 } 1143 }
1144 1144
1145 STp->in_use = 1; 1145 STp->in_use = 1;
1146 write_unlock(&st_dev_arr_lock); 1146 write_unlock(&st_dev_arr_lock);
1147 STp->rew_at_close = STp->autorew_dev = (iminor(inode) & 0x80) == 0; 1147 STp->rew_at_close = STp->autorew_dev = (iminor(inode) & 0x80) == 0;
1148 1148
1149 if (!scsi_block_when_processing_errors(STp->device)) { 1149 if (!scsi_block_when_processing_errors(STp->device)) {
1150 retval = (-ENXIO); 1150 retval = (-ENXIO);
1151 goto err_out; 1151 goto err_out;
1152 } 1152 }
1153 1153
1154 /* See that we have at least a one page buffer available */ 1154 /* See that we have at least a one page buffer available */
1155 if (!enlarge_buffer(STp->buffer, PAGE_SIZE, STp->restr_dma)) { 1155 if (!enlarge_buffer(STp->buffer, PAGE_SIZE, STp->restr_dma)) {
1156 printk(KERN_WARNING "%s: Can't allocate one page tape buffer.\n", 1156 printk(KERN_WARNING "%s: Can't allocate one page tape buffer.\n",
1157 name); 1157 name);
1158 retval = (-EOVERFLOW); 1158 retval = (-EOVERFLOW);
1159 goto err_out; 1159 goto err_out;
1160 } 1160 }
1161 1161
1162 (STp->buffer)->writing = 0; 1162 (STp->buffer)->writing = 0;
1163 (STp->buffer)->syscall_result = 0; 1163 (STp->buffer)->syscall_result = 0;
1164 1164
1165 STp->write_prot = ((filp->f_flags & O_ACCMODE) == O_RDONLY); 1165 STp->write_prot = ((filp->f_flags & O_ACCMODE) == O_RDONLY);
1166 1166
1167 STp->dirty = 0; 1167 STp->dirty = 0;
1168 for (i = 0; i < ST_NBR_PARTITIONS; i++) { 1168 for (i = 0; i < ST_NBR_PARTITIONS; i++) {
1169 STps = &(STp->ps[i]); 1169 STps = &(STp->ps[i]);
1170 STps->rw = ST_IDLE; 1170 STps->rw = ST_IDLE;
1171 } 1171 }
1172 STp->recover_count = 0; 1172 STp->recover_count = 0;
1173 DEB( STp->nbr_waits = STp->nbr_finished = 0; 1173 DEB( STp->nbr_waits = STp->nbr_finished = 0;
1174 STp->nbr_requests = STp->nbr_dio = STp->nbr_pages = STp->nbr_combinable = 0; ) 1174 STp->nbr_requests = STp->nbr_dio = STp->nbr_pages = STp->nbr_combinable = 0; )
1175 1175
1176 retval = check_tape(STp, filp); 1176 retval = check_tape(STp, filp);
1177 if (retval < 0) 1177 if (retval < 0)
1178 goto err_out; 1178 goto err_out;
1179 if ((filp->f_flags & O_NONBLOCK) == 0 && 1179 if ((filp->f_flags & O_NONBLOCK) == 0 &&
1180 retval != CHKRES_READY) { 1180 retval != CHKRES_READY) {
1181 retval = (-EIO); 1181 retval = (-EIO);
1182 goto err_out; 1182 goto err_out;
1183 } 1183 }
1184 return 0; 1184 return 0;
1185 1185
1186 err_out: 1186 err_out:
1187 normalize_buffer(STp->buffer); 1187 normalize_buffer(STp->buffer);
1188 STp->in_use = 0; 1188 STp->in_use = 0;
1189 scsi_tape_put(STp); 1189 scsi_tape_put(STp);
1190 return retval; 1190 return retval;
1191 1191
1192 } 1192 }
1193 1193
1194 1194
1195 /* Flush the tape buffer before close */ 1195 /* Flush the tape buffer before close */
1196 static int st_flush(struct file *filp) 1196 static int st_flush(struct file *filp)
1197 { 1197 {
1198 int result = 0, result2; 1198 int result = 0, result2;
1199 unsigned char cmd[MAX_COMMAND_SIZE]; 1199 unsigned char cmd[MAX_COMMAND_SIZE];
1200 struct st_request *SRpnt; 1200 struct st_request *SRpnt;
1201 struct scsi_tape *STp = filp->private_data; 1201 struct scsi_tape *STp = filp->private_data;
1202 struct st_modedef *STm = &(STp->modes[STp->current_mode]); 1202 struct st_modedef *STm = &(STp->modes[STp->current_mode]);
1203 struct st_partstat *STps = &(STp->ps[STp->partition]); 1203 struct st_partstat *STps = &(STp->ps[STp->partition]);
1204 char *name = tape_name(STp); 1204 char *name = tape_name(STp);
1205 1205
1206 if (file_count(filp) > 1) 1206 if (file_count(filp) > 1)
1207 return 0; 1207 return 0;
1208 1208
1209 if (STps->rw == ST_WRITING && !STp->pos_unknown) { 1209 if (STps->rw == ST_WRITING && !STp->pos_unknown) {
1210 result = flush_write_buffer(STp); 1210 result = flush_write_buffer(STp);
1211 if (result != 0 && result != (-ENOSPC)) 1211 if (result != 0 && result != (-ENOSPC))
1212 goto out; 1212 goto out;
1213 } 1213 }
1214 1214
1215 if (STp->can_partitions && 1215 if (STp->can_partitions &&
1216 (result2 = switch_partition(STp)) < 0) { 1216 (result2 = switch_partition(STp)) < 0) {
1217 DEBC(printk(ST_DEB_MSG 1217 DEBC(printk(ST_DEB_MSG
1218 "%s: switch_partition at close failed.\n", name)); 1218 "%s: switch_partition at close failed.\n", name));
1219 if (result == 0) 1219 if (result == 0)
1220 result = result2; 1220 result = result2;
1221 goto out; 1221 goto out;
1222 } 1222 }
1223 1223
1224 DEBC( if (STp->nbr_requests) 1224 DEBC( if (STp->nbr_requests)
1225 printk(KERN_WARNING "%s: Number of r/w requests %d, dio used in %d, pages %d (%d).\n", 1225 printk(KERN_WARNING "%s: Number of r/w requests %d, dio used in %d, pages %d (%d).\n",
1226 name, STp->nbr_requests, STp->nbr_dio, STp->nbr_pages, STp->nbr_combinable)); 1226 name, STp->nbr_requests, STp->nbr_dio, STp->nbr_pages, STp->nbr_combinable));
1227 1227
1228 if (STps->rw == ST_WRITING && !STp->pos_unknown) { 1228 if (STps->rw == ST_WRITING && !STp->pos_unknown) {
1229 struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; 1229 struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
1230 1230
1231 DEBC(printk(ST_DEB_MSG "%s: Async write waits %d, finished %d.\n", 1231 DEBC(printk(ST_DEB_MSG "%s: Async write waits %d, finished %d.\n",
1232 name, STp->nbr_waits, STp->nbr_finished); 1232 name, STp->nbr_waits, STp->nbr_finished);
1233 ) 1233 )
1234 1234
1235 memset(cmd, 0, MAX_COMMAND_SIZE); 1235 memset(cmd, 0, MAX_COMMAND_SIZE);
1236 cmd[0] = WRITE_FILEMARKS; 1236 cmd[0] = WRITE_FILEMARKS;
1237 cmd[4] = 1 + STp->two_fm; 1237 cmd[4] = 1 + STp->two_fm;
1238 1238
1239 SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE, 1239 SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
1240 STp->device->timeout, MAX_WRITE_RETRIES, 1); 1240 STp->device->timeout, MAX_WRITE_RETRIES, 1);
1241 if (!SRpnt) { 1241 if (!SRpnt) {
1242 result = (STp->buffer)->syscall_result; 1242 result = (STp->buffer)->syscall_result;
1243 goto out; 1243 goto out;
1244 } 1244 }
1245 1245
1246 if (STp->buffer->syscall_result == 0 || 1246 if (STp->buffer->syscall_result == 0 ||
1247 (cmdstatp->have_sense && !cmdstatp->deferred && 1247 (cmdstatp->have_sense && !cmdstatp->deferred &&
1248 (cmdstatp->flags & SENSE_EOM) && 1248 (cmdstatp->flags & SENSE_EOM) &&
1249 (cmdstatp->sense_hdr.sense_key == NO_SENSE || 1249 (cmdstatp->sense_hdr.sense_key == NO_SENSE ||
1250 cmdstatp->sense_hdr.sense_key == RECOVERED_ERROR) && 1250 cmdstatp->sense_hdr.sense_key == RECOVERED_ERROR) &&
1251 (!cmdstatp->remainder_valid || cmdstatp->uremainder64 == 0))) { 1251 (!cmdstatp->remainder_valid || cmdstatp->uremainder64 == 0))) {
1252 /* Write successful at EOM */ 1252 /* Write successful at EOM */
1253 st_release_request(SRpnt); 1253 st_release_request(SRpnt);
1254 SRpnt = NULL; 1254 SRpnt = NULL;
1255 if (STps->drv_file >= 0) 1255 if (STps->drv_file >= 0)
1256 STps->drv_file++; 1256 STps->drv_file++;
1257 STps->drv_block = 0; 1257 STps->drv_block = 0;
1258 if (STp->two_fm) 1258 if (STp->two_fm)
1259 cross_eof(STp, 0); 1259 cross_eof(STp, 0);
1260 STps->eof = ST_FM; 1260 STps->eof = ST_FM;
1261 } 1261 }
1262 else { /* Write error */ 1262 else { /* Write error */
1263 st_release_request(SRpnt); 1263 st_release_request(SRpnt);
1264 SRpnt = NULL; 1264 SRpnt = NULL;
1265 printk(KERN_ERR "%s: Error on write filemark.\n", name); 1265 printk(KERN_ERR "%s: Error on write filemark.\n", name);
1266 if (result == 0) 1266 if (result == 0)
1267 result = (-EIO); 1267 result = (-EIO);
1268 } 1268 }
1269 1269
1270 DEBC(printk(ST_DEB_MSG "%s: Buffer flushed, %d EOF(s) written\n", 1270 DEBC(printk(ST_DEB_MSG "%s: Buffer flushed, %d EOF(s) written\n",
1271 name, cmd[4])); 1271 name, cmd[4]));
1272 } else if (!STp->rew_at_close) { 1272 } else if (!STp->rew_at_close) {
1273 STps = &(STp->ps[STp->partition]); 1273 STps = &(STp->ps[STp->partition]);
1274 if (!STm->sysv || STps->rw != ST_READING) { 1274 if (!STm->sysv || STps->rw != ST_READING) {
1275 if (STp->can_bsr) 1275 if (STp->can_bsr)
1276 result = flush_buffer(STp, 0); 1276 result = flush_buffer(STp, 0);
1277 else if (STps->eof == ST_FM_HIT) { 1277 else if (STps->eof == ST_FM_HIT) {
1278 result = cross_eof(STp, 0); 1278 result = cross_eof(STp, 0);
1279 if (result) { 1279 if (result) {
1280 if (STps->drv_file >= 0) 1280 if (STps->drv_file >= 0)
1281 STps->drv_file++; 1281 STps->drv_file++;
1282 STps->drv_block = 0; 1282 STps->drv_block = 0;
1283 STps->eof = ST_FM; 1283 STps->eof = ST_FM;
1284 } else 1284 } else
1285 STps->eof = ST_NOEOF; 1285 STps->eof = ST_NOEOF;
1286 } 1286 }
1287 } else if ((STps->eof == ST_NOEOF && 1287 } else if ((STps->eof == ST_NOEOF &&
1288 !(result = cross_eof(STp, 1))) || 1288 !(result = cross_eof(STp, 1))) ||
1289 STps->eof == ST_FM_HIT) { 1289 STps->eof == ST_FM_HIT) {
1290 if (STps->drv_file >= 0) 1290 if (STps->drv_file >= 0)
1291 STps->drv_file++; 1291 STps->drv_file++;
1292 STps->drv_block = 0; 1292 STps->drv_block = 0;
1293 STps->eof = ST_FM; 1293 STps->eof = ST_FM;
1294 } 1294 }
1295 } 1295 }
1296 1296
1297 out: 1297 out:
1298 if (STp->rew_at_close) { 1298 if (STp->rew_at_close) {
1299 result2 = st_int_ioctl(STp, MTREW, 1); 1299 result2 = st_int_ioctl(STp, MTREW, 1);
1300 if (result == 0) 1300 if (result == 0)
1301 result = result2; 1301 result = result2;
1302 } 1302 }
1303 return result; 1303 return result;
1304 } 1304 }
1305 1305
1306 1306
1307 /* Close the device and release it. BKL is not needed: this is the only thread 1307 /* Close the device and release it. BKL is not needed: this is the only thread
1308 accessing this tape. */ 1308 accessing this tape. */
1309 static int st_release(struct inode *inode, struct file *filp) 1309 static int st_release(struct inode *inode, struct file *filp)
1310 { 1310 {
1311 int result = 0; 1311 int result = 0;
1312 struct scsi_tape *STp = filp->private_data; 1312 struct scsi_tape *STp = filp->private_data;
1313 1313
1314 if (STp->door_locked == ST_LOCKED_AUTO) 1314 if (STp->door_locked == ST_LOCKED_AUTO)
1315 do_door_lock(STp, 0); 1315 do_door_lock(STp, 0);
1316 1316
1317 normalize_buffer(STp->buffer); 1317 normalize_buffer(STp->buffer);
1318 write_lock(&st_dev_arr_lock); 1318 write_lock(&st_dev_arr_lock);
1319 STp->in_use = 0; 1319 STp->in_use = 0;
1320 write_unlock(&st_dev_arr_lock); 1320 write_unlock(&st_dev_arr_lock);
1321 scsi_tape_put(STp); 1321 scsi_tape_put(STp);
1322 1322
1323 return result; 1323 return result;
1324 } 1324 }
1325 1325
1326 /* The checks common to both reading and writing */ 1326 /* The checks common to both reading and writing */
1327 static ssize_t rw_checks(struct scsi_tape *STp, struct file *filp, size_t count) 1327 static ssize_t rw_checks(struct scsi_tape *STp, struct file *filp, size_t count)
1328 { 1328 {
1329 ssize_t retval = 0; 1329 ssize_t retval = 0;
1330 1330
1331 /* 1331 /*
1332 * If we are in the middle of error recovery, don't let anyone 1332 * If we are in the middle of error recovery, don't let anyone
1333 * else try and use this device. Also, if error recovery fails, it 1333 * else try and use this device. Also, if error recovery fails, it
1334 * may try and take the device offline, in which case all further 1334 * may try and take the device offline, in which case all further
1335 * access to the device is prohibited. 1335 * access to the device is prohibited.
1336 */ 1336 */
1337 if (!scsi_block_when_processing_errors(STp->device)) { 1337 if (!scsi_block_when_processing_errors(STp->device)) {
1338 retval = (-ENXIO); 1338 retval = (-ENXIO);
1339 goto out; 1339 goto out;
1340 } 1340 }
1341 1341
1342 if (STp->ready != ST_READY) { 1342 if (STp->ready != ST_READY) {
1343 if (STp->ready == ST_NO_TAPE) 1343 if (STp->ready == ST_NO_TAPE)
1344 retval = (-ENOMEDIUM); 1344 retval = (-ENOMEDIUM);
1345 else 1345 else
1346 retval = (-EIO); 1346 retval = (-EIO);
1347 goto out; 1347 goto out;
1348 } 1348 }
1349 1349
1350 if (! STp->modes[STp->current_mode].defined) { 1350 if (! STp->modes[STp->current_mode].defined) {
1351 retval = (-ENXIO); 1351 retval = (-ENXIO);
1352 goto out; 1352 goto out;
1353 } 1353 }
1354 1354
1355 1355
1356 /* 1356 /*
1357 * If there was a bus reset, block further access 1357 * If there was a bus reset, block further access
1358 * to this device. 1358 * to this device.
1359 */ 1359 */
1360 if (STp->pos_unknown) { 1360 if (STp->pos_unknown) {
1361 retval = (-EIO); 1361 retval = (-EIO);
1362 goto out; 1362 goto out;
1363 } 1363 }
1364 1364
1365 if (count == 0) 1365 if (count == 0)
1366 goto out; 1366 goto out;
1367 1367
1368 DEB( 1368 DEB(
1369 if (!STp->in_use) { 1369 if (!STp->in_use) {
1370 printk(ST_DEB_MSG "%s: Incorrect device.\n", tape_name(STp)); 1370 printk(ST_DEB_MSG "%s: Incorrect device.\n", tape_name(STp));
1371 retval = (-EIO); 1371 retval = (-EIO);
1372 goto out; 1372 goto out;
1373 } ) /* end DEB */ 1373 } ) /* end DEB */
1374 1374
1375 if (STp->can_partitions && 1375 if (STp->can_partitions &&
1376 (retval = switch_partition(STp)) < 0) 1376 (retval = switch_partition(STp)) < 0)
1377 goto out; 1377 goto out;
1378 1378
1379 if (STp->block_size == 0 && STp->max_block > 0 && 1379 if (STp->block_size == 0 && STp->max_block > 0 &&
1380 (count < STp->min_block || count > STp->max_block)) { 1380 (count < STp->min_block || count > STp->max_block)) {
1381 retval = (-EINVAL); 1381 retval = (-EINVAL);
1382 goto out; 1382 goto out;
1383 } 1383 }
1384 1384
1385 if (STp->do_auto_lock && STp->door_locked == ST_UNLOCKED && 1385 if (STp->do_auto_lock && STp->door_locked == ST_UNLOCKED &&
1386 !do_door_lock(STp, 1)) 1386 !do_door_lock(STp, 1))
1387 STp->door_locked = ST_LOCKED_AUTO; 1387 STp->door_locked = ST_LOCKED_AUTO;
1388 1388
1389 out: 1389 out:
1390 return retval; 1390 return retval;
1391 } 1391 }
1392 1392
1393 1393
1394 static int setup_buffering(struct scsi_tape *STp, const char __user *buf, 1394 static int setup_buffering(struct scsi_tape *STp, const char __user *buf,
1395 size_t count, int is_read) 1395 size_t count, int is_read)
1396 { 1396 {
1397 int i, bufsize, retval = 0; 1397 int i, bufsize, retval = 0;
1398 struct st_buffer *STbp = STp->buffer; 1398 struct st_buffer *STbp = STp->buffer;
1399 1399
1400 if (is_read) 1400 if (is_read)
1401 i = STp->try_dio && try_rdio; 1401 i = STp->try_dio && try_rdio;
1402 else 1402 else
1403 i = STp->try_dio && try_wdio; 1403 i = STp->try_dio && try_wdio;
1404 1404
1405 if (i && ((unsigned long)buf & queue_dma_alignment( 1405 if (i && ((unsigned long)buf & queue_dma_alignment(
1406 STp->device->request_queue)) == 0) { 1406 STp->device->request_queue)) == 0) {
1407 i = sgl_map_user_pages(&(STbp->sg[0]), STbp->use_sg, 1407 i = sgl_map_user_pages(&(STbp->sg[0]), STbp->use_sg,
1408 (unsigned long)buf, count, (is_read ? READ : WRITE)); 1408 (unsigned long)buf, count, (is_read ? READ : WRITE));
1409 if (i > 0) { 1409 if (i > 0) {
1410 STbp->do_dio = i; 1410 STbp->do_dio = i;
1411 STbp->buffer_bytes = 0; /* can be used as transfer counter */ 1411 STbp->buffer_bytes = 0; /* can be used as transfer counter */
1412 } 1412 }
1413 else 1413 else
1414 STbp->do_dio = 0; /* fall back to buffering with any error */ 1414 STbp->do_dio = 0; /* fall back to buffering with any error */
1415 STbp->sg_segs = STbp->do_dio; 1415 STbp->sg_segs = STbp->do_dio;
1416 STbp->frp_sg_current = 0; 1416 STbp->frp_sg_current = 0;
1417 DEB( 1417 DEB(
1418 if (STbp->do_dio) { 1418 if (STbp->do_dio) {
1419 STp->nbr_dio++; 1419 STp->nbr_dio++;
1420 STp->nbr_pages += STbp->do_dio; 1420 STp->nbr_pages += STbp->do_dio;
1421 for (i=1; i < STbp->do_dio; i++) 1421 for (i=1; i < STbp->do_dio; i++)
1422 if (page_to_pfn(STbp->sg[i].page) == page_to_pfn(STbp->sg[i-1].page) + 1) 1422 if (page_to_pfn(STbp->sg[i].page) == page_to_pfn(STbp->sg[i-1].page) + 1)
1423 STp->nbr_combinable++; 1423 STp->nbr_combinable++;
1424 } 1424 }
1425 ) 1425 )
1426 } else 1426 } else
1427 STbp->do_dio = 0; 1427 STbp->do_dio = 0;
1428 DEB( STp->nbr_requests++; ) 1428 DEB( STp->nbr_requests++; )
1429 1429
1430 if (!STbp->do_dio) { 1430 if (!STbp->do_dio) {
1431 if (STp->block_size) 1431 if (STp->block_size)
1432 bufsize = STp->block_size > st_fixed_buffer_size ? 1432 bufsize = STp->block_size > st_fixed_buffer_size ?
1433 STp->block_size : st_fixed_buffer_size; 1433 STp->block_size : st_fixed_buffer_size;
1434 else 1434 else
1435 bufsize = count; 1435 bufsize = count;
1436 if (bufsize > STbp->buffer_size && 1436 if (bufsize > STbp->buffer_size &&
1437 !enlarge_buffer(STbp, bufsize, STp->restr_dma)) { 1437 !enlarge_buffer(STbp, bufsize, STp->restr_dma)) {
1438 printk(KERN_WARNING "%s: Can't allocate %d byte tape buffer.\n", 1438 printk(KERN_WARNING "%s: Can't allocate %d byte tape buffer.\n",
1439 tape_name(STp), bufsize); 1439 tape_name(STp), bufsize);
1440 retval = (-EOVERFLOW); 1440 retval = (-EOVERFLOW);
1441 goto out; 1441 goto out;
1442 } 1442 }
1443 if (STp->block_size) 1443 if (STp->block_size)
1444 STbp->buffer_blocks = bufsize / STp->block_size; 1444 STbp->buffer_blocks = bufsize / STp->block_size;
1445 } 1445 }
1446 1446
1447 out: 1447 out:
1448 return retval; 1448 return retval;
1449 } 1449 }
1450 1450
1451 1451
1452 /* Can be called more than once after each setup_buffer() */ 1452 /* Can be called more than once after each setup_buffer() */
1453 static void release_buffering(struct scsi_tape *STp, int is_read) 1453 static void release_buffering(struct scsi_tape *STp, int is_read)
1454 { 1454 {
1455 struct st_buffer *STbp; 1455 struct st_buffer *STbp;
1456 1456
1457 STbp = STp->buffer; 1457 STbp = STp->buffer;
1458 if (STbp->do_dio) { 1458 if (STbp->do_dio) {
1459 sgl_unmap_user_pages(&(STbp->sg[0]), STbp->do_dio, is_read); 1459 sgl_unmap_user_pages(&(STbp->sg[0]), STbp->do_dio, is_read);
1460 STbp->do_dio = 0; 1460 STbp->do_dio = 0;
1461 STbp->sg_segs = 0; 1461 STbp->sg_segs = 0;
1462 } 1462 }
1463 } 1463 }
1464 1464
1465 1465
1466 /* Write command */ 1466 /* Write command */
1467 static ssize_t 1467 static ssize_t
1468 st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) 1468 st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
1469 { 1469 {
1470 ssize_t total; 1470 ssize_t total;
1471 ssize_t i, do_count, blks, transfer; 1471 ssize_t i, do_count, blks, transfer;
1472 ssize_t retval; 1472 ssize_t retval;
1473 int undone, retry_eot = 0, scode; 1473 int undone, retry_eot = 0, scode;
1474 int async_write; 1474 int async_write;
1475 unsigned char cmd[MAX_COMMAND_SIZE]; 1475 unsigned char cmd[MAX_COMMAND_SIZE];
1476 const char __user *b_point; 1476 const char __user *b_point;
1477 struct st_request *SRpnt = NULL; 1477 struct st_request *SRpnt = NULL;
1478 struct scsi_tape *STp = filp->private_data; 1478 struct scsi_tape *STp = filp->private_data;
1479 struct st_modedef *STm; 1479 struct st_modedef *STm;
1480 struct st_partstat *STps; 1480 struct st_partstat *STps;
1481 struct st_buffer *STbp; 1481 struct st_buffer *STbp;
1482 char *name = tape_name(STp); 1482 char *name = tape_name(STp);
1483 1483
1484 if (down_interruptible(&STp->lock)) 1484 if (down_interruptible(&STp->lock))
1485 return -ERESTARTSYS; 1485 return -ERESTARTSYS;
1486 1486
1487 retval = rw_checks(STp, filp, count); 1487 retval = rw_checks(STp, filp, count);
1488 if (retval || count == 0) 1488 if (retval || count == 0)
1489 goto out; 1489 goto out;
1490 1490
1491 /* Write must be integral number of blocks */ 1491 /* Write must be integral number of blocks */
1492 if (STp->block_size != 0 && (count % STp->block_size) != 0) { 1492 if (STp->block_size != 0 && (count % STp->block_size) != 0) {
1493 printk(KERN_WARNING "%s: Write not multiple of tape block size.\n", 1493 printk(KERN_WARNING "%s: Write not multiple of tape block size.\n",
1494 name); 1494 name);
1495 retval = (-EINVAL); 1495 retval = (-EINVAL);
1496 goto out; 1496 goto out;
1497 } 1497 }
1498 1498
1499 STm = &(STp->modes[STp->current_mode]); 1499 STm = &(STp->modes[STp->current_mode]);
1500 STps = &(STp->ps[STp->partition]); 1500 STps = &(STp->ps[STp->partition]);
1501 1501
1502 if (STp->write_prot) { 1502 if (STp->write_prot) {
1503 retval = (-EACCES); 1503 retval = (-EACCES);
1504 goto out; 1504 goto out;
1505 } 1505 }
1506 1506
1507 1507
1508 if (STps->rw == ST_READING) { 1508 if (STps->rw == ST_READING) {
1509 retval = flush_buffer(STp, 0); 1509 retval = flush_buffer(STp, 0);
1510 if (retval) 1510 if (retval)
1511 goto out; 1511 goto out;
1512 STps->rw = ST_WRITING; 1512 STps->rw = ST_WRITING;
1513 } else if (STps->rw != ST_WRITING && 1513 } else if (STps->rw != ST_WRITING &&
1514 STps->drv_file == 0 && STps->drv_block == 0) { 1514 STps->drv_file == 0 && STps->drv_block == 0) {
1515 if ((retval = set_mode_densblk(STp, STm)) < 0) 1515 if ((retval = set_mode_densblk(STp, STm)) < 0)
1516 goto out; 1516 goto out;
1517 if (STm->default_compression != ST_DONT_TOUCH && 1517 if (STm->default_compression != ST_DONT_TOUCH &&
1518 !(STp->compression_changed)) { 1518 !(STp->compression_changed)) {
1519 if (st_compression(STp, (STm->default_compression == ST_YES))) { 1519 if (st_compression(STp, (STm->default_compression == ST_YES))) {
1520 printk(KERN_WARNING "%s: Can't set default compression.\n", 1520 printk(KERN_WARNING "%s: Can't set default compression.\n",
1521 name); 1521 name);
1522 if (modes_defined) { 1522 if (modes_defined) {
1523 retval = (-EINVAL); 1523 retval = (-EINVAL);
1524 goto out; 1524 goto out;
1525 } 1525 }
1526 } 1526 }
1527 } 1527 }
1528 } 1528 }
1529 1529
1530 STbp = STp->buffer; 1530 STbp = STp->buffer;
1531 i = write_behind_check(STp); 1531 i = write_behind_check(STp);
1532 if (i) { 1532 if (i) {
1533 if (i == -ENOSPC) 1533 if (i == -ENOSPC)
1534 STps->eof = ST_EOM_OK; 1534 STps->eof = ST_EOM_OK;
1535 else 1535 else
1536 STps->eof = ST_EOM_ERROR; 1536 STps->eof = ST_EOM_ERROR;
1537 } 1537 }
1538 1538
1539 if (STps->eof == ST_EOM_OK) { 1539 if (STps->eof == ST_EOM_OK) {
1540 STps->eof = ST_EOD_1; /* allow next write */ 1540 STps->eof = ST_EOD_1; /* allow next write */
1541 retval = (-ENOSPC); 1541 retval = (-ENOSPC);
1542 goto out; 1542 goto out;
1543 } 1543 }
1544 else if (STps->eof == ST_EOM_ERROR) { 1544 else if (STps->eof == ST_EOM_ERROR) {
1545 retval = (-EIO); 1545 retval = (-EIO);
1546 goto out; 1546 goto out;
1547 } 1547 }
1548 1548
1549 /* Check the buffer readability in cases where copy_user might catch 1549 /* Check the buffer readability in cases where copy_user might catch
1550 the problems after some tape movement. */ 1550 the problems after some tape movement. */
1551 if (STp->block_size != 0 && 1551 if (STp->block_size != 0 &&
1552 !STbp->do_dio && 1552 !STbp->do_dio &&
1553 (copy_from_user(&i, buf, 1) != 0 || 1553 (copy_from_user(&i, buf, 1) != 0 ||
1554 copy_from_user(&i, buf + count - 1, 1) != 0)) { 1554 copy_from_user(&i, buf + count - 1, 1) != 0)) {
1555 retval = (-EFAULT); 1555 retval = (-EFAULT);
1556 goto out; 1556 goto out;
1557 } 1557 }
1558 1558
1559 retval = setup_buffering(STp, buf, count, 0); 1559 retval = setup_buffering(STp, buf, count, 0);
1560 if (retval) 1560 if (retval)
1561 goto out; 1561 goto out;
1562 1562
1563 total = count; 1563 total = count;
1564 1564
1565 memset(cmd, 0, MAX_COMMAND_SIZE); 1565 memset(cmd, 0, MAX_COMMAND_SIZE);
1566 cmd[0] = WRITE_6; 1566 cmd[0] = WRITE_6;
1567 cmd[1] = (STp->block_size != 0); 1567 cmd[1] = (STp->block_size != 0);
1568 1568
1569 STps->rw = ST_WRITING; 1569 STps->rw = ST_WRITING;
1570 1570
1571 b_point = buf; 1571 b_point = buf;
1572 while (count > 0 && !retry_eot) { 1572 while (count > 0 && !retry_eot) {
1573 1573
1574 if (STbp->do_dio) { 1574 if (STbp->do_dio) {
1575 do_count = count; 1575 do_count = count;
1576 } 1576 }
1577 else { 1577 else {
1578 if (STp->block_size == 0) 1578 if (STp->block_size == 0)
1579 do_count = count; 1579 do_count = count;
1580 else { 1580 else {
1581 do_count = STbp->buffer_blocks * STp->block_size - 1581 do_count = STbp->buffer_blocks * STp->block_size -
1582 STbp->buffer_bytes; 1582 STbp->buffer_bytes;
1583 if (do_count > count) 1583 if (do_count > count)
1584 do_count = count; 1584 do_count = count;
1585 } 1585 }
1586 1586
1587 i = append_to_buffer(b_point, STbp, do_count); 1587 i = append_to_buffer(b_point, STbp, do_count);
1588 if (i) { 1588 if (i) {
1589 retval = i; 1589 retval = i;
1590 goto out; 1590 goto out;
1591 } 1591 }
1592 } 1592 }
1593 count -= do_count; 1593 count -= do_count;
1594 b_point += do_count; 1594 b_point += do_count;
1595 1595
1596 async_write = STp->block_size == 0 && !STbp->do_dio && 1596 async_write = STp->block_size == 0 && !STbp->do_dio &&
1597 STm->do_async_writes && STps->eof < ST_EOM_OK; 1597 STm->do_async_writes && STps->eof < ST_EOM_OK;
1598 1598
1599 if (STp->block_size != 0 && STm->do_buffer_writes && 1599 if (STp->block_size != 0 && STm->do_buffer_writes &&
1600 !(STp->try_dio && try_wdio) && STps->eof < ST_EOM_OK && 1600 !(STp->try_dio && try_wdio) && STps->eof < ST_EOM_OK &&
1601 STbp->buffer_bytes < STbp->buffer_size) { 1601 STbp->buffer_bytes < STbp->buffer_size) {
1602 STp->dirty = 1; 1602 STp->dirty = 1;
1603 /* Don't write a buffer that is not full enough. */ 1603 /* Don't write a buffer that is not full enough. */
1604 if (!async_write && count == 0) 1604 if (!async_write && count == 0)
1605 break; 1605 break;
1606 } 1606 }
1607 1607
1608 retry_write: 1608 retry_write:
1609 if (STp->block_size == 0) 1609 if (STp->block_size == 0)
1610 blks = transfer = do_count; 1610 blks = transfer = do_count;
1611 else { 1611 else {
1612 if (!STbp->do_dio) 1612 if (!STbp->do_dio)
1613 blks = STbp->buffer_bytes; 1613 blks = STbp->buffer_bytes;
1614 else 1614 else
1615 blks = do_count; 1615 blks = do_count;
1616 blks /= STp->block_size; 1616 blks /= STp->block_size;
1617 transfer = blks * STp->block_size; 1617 transfer = blks * STp->block_size;
1618 } 1618 }
1619 cmd[2] = blks >> 16; 1619 cmd[2] = blks >> 16;
1620 cmd[3] = blks >> 8; 1620 cmd[3] = blks >> 8;
1621 cmd[4] = blks; 1621 cmd[4] = blks;
1622 1622
1623 SRpnt = st_do_scsi(SRpnt, STp, cmd, transfer, DMA_TO_DEVICE, 1623 SRpnt = st_do_scsi(SRpnt, STp, cmd, transfer, DMA_TO_DEVICE,
1624 STp->device->timeout, MAX_WRITE_RETRIES, !async_write); 1624 STp->device->timeout, MAX_WRITE_RETRIES, !async_write);
1625 if (!SRpnt) { 1625 if (!SRpnt) {
1626 retval = STbp->syscall_result; 1626 retval = STbp->syscall_result;
1627 goto out; 1627 goto out;
1628 } 1628 }
1629 if (async_write && !STbp->syscall_result) { 1629 if (async_write && !STbp->syscall_result) {
1630 STbp->writing = transfer; 1630 STbp->writing = transfer;
1631 STp->dirty = !(STbp->writing == 1631 STp->dirty = !(STbp->writing ==
1632 STbp->buffer_bytes); 1632 STbp->buffer_bytes);
1633 SRpnt = NULL; /* Prevent releasing this request! */ 1633 SRpnt = NULL; /* Prevent releasing this request! */
1634 DEB( STp->write_pending = 1; ) 1634 DEB( STp->write_pending = 1; )
1635 break; 1635 break;
1636 } 1636 }
1637 1637
1638 if (STbp->syscall_result != 0) { 1638 if (STbp->syscall_result != 0) {
1639 struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; 1639 struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
1640 1640
1641 DEBC(printk(ST_DEB_MSG "%s: Error on write:\n", name)); 1641 DEBC(printk(ST_DEB_MSG "%s: Error on write:\n", name));
1642 if (cmdstatp->have_sense && (cmdstatp->flags & SENSE_EOM)) { 1642 if (cmdstatp->have_sense && (cmdstatp->flags & SENSE_EOM)) {
1643 scode = cmdstatp->sense_hdr.sense_key; 1643 scode = cmdstatp->sense_hdr.sense_key;
1644 if (cmdstatp->remainder_valid) 1644 if (cmdstatp->remainder_valid)
1645 undone = (int)cmdstatp->uremainder64; 1645 undone = (int)cmdstatp->uremainder64;
1646 else if (STp->block_size == 0 && 1646 else if (STp->block_size == 0 &&
1647 scode == VOLUME_OVERFLOW) 1647 scode == VOLUME_OVERFLOW)
1648 undone = transfer; 1648 undone = transfer;
1649 else 1649 else
1650 undone = 0; 1650 undone = 0;
1651 if (STp->block_size != 0) 1651 if (STp->block_size != 0)
1652 undone *= STp->block_size; 1652 undone *= STp->block_size;
1653 if (undone <= do_count) { 1653 if (undone <= do_count) {
1654 /* Only data from this write is not written */ 1654 /* Only data from this write is not written */
1655 count += undone; 1655 count += undone;
1656 do_count -= undone; 1656 do_count -= undone;
1657 if (STp->block_size) 1657 if (STp->block_size)
1658 blks = (transfer - undone) / STp->block_size; 1658 blks = (transfer - undone) / STp->block_size;
1659 STps->eof = ST_EOM_OK; 1659 STps->eof = ST_EOM_OK;
1660 /* Continue in fixed block mode if all written 1660 /* Continue in fixed block mode if all written
1661 in this request but still something left to write 1661 in this request but still something left to write
1662 (retval left to zero) 1662 (retval left to zero)
1663 */ 1663 */
1664 if (STp->block_size == 0 || 1664 if (STp->block_size == 0 ||
1665 undone > 0 || count == 0) 1665 undone > 0 || count == 0)
1666 retval = (-ENOSPC); /* EOM within current request */ 1666 retval = (-ENOSPC); /* EOM within current request */
1667 DEBC(printk(ST_DEB_MSG 1667 DEBC(printk(ST_DEB_MSG
1668 "%s: EOM with %d bytes unwritten.\n", 1668 "%s: EOM with %d bytes unwritten.\n",
1669 name, (int)count)); 1669 name, (int)count));
1670 } else { 1670 } else {
1671 /* EOT within data buffered earlier (possible only 1671 /* EOT within data buffered earlier (possible only
1672 in fixed block mode without direct i/o) */ 1672 in fixed block mode without direct i/o) */
1673 if (!retry_eot && !cmdstatp->deferred && 1673 if (!retry_eot && !cmdstatp->deferred &&
1674 (scode == NO_SENSE || scode == RECOVERED_ERROR)) { 1674 (scode == NO_SENSE || scode == RECOVERED_ERROR)) {
1675 move_buffer_data(STp->buffer, transfer - undone); 1675 move_buffer_data(STp->buffer, transfer - undone);
1676 retry_eot = 1; 1676 retry_eot = 1;
1677 if (STps->drv_block >= 0) { 1677 if (STps->drv_block >= 0) {
1678 STps->drv_block += (transfer - undone) / 1678 STps->drv_block += (transfer - undone) /
1679 STp->block_size; 1679 STp->block_size;
1680 } 1680 }
1681 STps->eof = ST_EOM_OK; 1681 STps->eof = ST_EOM_OK;
1682 DEBC(printk(ST_DEB_MSG 1682 DEBC(printk(ST_DEB_MSG
1683 "%s: Retry write of %d bytes at EOM.\n", 1683 "%s: Retry write of %d bytes at EOM.\n",
1684 name, STp->buffer->buffer_bytes)); 1684 name, STp->buffer->buffer_bytes));
1685 goto retry_write; 1685 goto retry_write;
1686 } 1686 }
1687 else { 1687 else {
1688 /* Either error within data buffered by driver or 1688 /* Either error within data buffered by driver or
1689 failed retry */ 1689 failed retry */
1690 count -= do_count; 1690 count -= do_count;
1691 blks = do_count = 0; 1691 blks = do_count = 0;
1692 STps->eof = ST_EOM_ERROR; 1692 STps->eof = ST_EOM_ERROR;
1693 STps->drv_block = (-1); /* Too cautious? */ 1693 STps->drv_block = (-1); /* Too cautious? */
1694 retval = (-EIO); /* EOM for old data */ 1694 retval = (-EIO); /* EOM for old data */
1695 DEBC(printk(ST_DEB_MSG 1695 DEBC(printk(ST_DEB_MSG
1696 "%s: EOM with lost data.\n", 1696 "%s: EOM with lost data.\n",
1697 name)); 1697 name));
1698 } 1698 }
1699 } 1699 }
1700 } else { 1700 } else {
1701 count += do_count; 1701 count += do_count;
1702 STps->drv_block = (-1); /* Too cautious? */ 1702 STps->drv_block = (-1); /* Too cautious? */
1703 retval = STbp->syscall_result; 1703 retval = STbp->syscall_result;
1704 } 1704 }
1705 1705
1706 } 1706 }
1707 1707
1708 if (STps->drv_block >= 0) { 1708 if (STps->drv_block >= 0) {
1709 if (STp->block_size == 0) 1709 if (STp->block_size == 0)
1710 STps->drv_block += (do_count > 0); 1710 STps->drv_block += (do_count > 0);
1711 else 1711 else
1712 STps->drv_block += blks; 1712 STps->drv_block += blks;
1713 } 1713 }
1714 1714
1715 STbp->buffer_bytes = 0; 1715 STbp->buffer_bytes = 0;
1716 STp->dirty = 0; 1716 STp->dirty = 0;
1717 1717
1718 if (retval || retry_eot) { 1718 if (retval || retry_eot) {
1719 if (count < total) 1719 if (count < total)
1720 retval = total - count; 1720 retval = total - count;
1721 goto out; 1721 goto out;
1722 } 1722 }
1723 } 1723 }
1724 1724
1725 if (STps->eof == ST_EOD_1) 1725 if (STps->eof == ST_EOD_1)
1726 STps->eof = ST_EOM_OK; 1726 STps->eof = ST_EOM_OK;
1727 else if (STps->eof != ST_EOM_OK) 1727 else if (STps->eof != ST_EOM_OK)
1728 STps->eof = ST_NOEOF; 1728 STps->eof = ST_NOEOF;
1729 retval = total - count; 1729 retval = total - count;
1730 1730
1731 out: 1731 out:
1732 if (SRpnt != NULL) 1732 if (SRpnt != NULL)
1733 st_release_request(SRpnt); 1733 st_release_request(SRpnt);
1734 release_buffering(STp, 0); 1734 release_buffering(STp, 0);
1735 up(&STp->lock); 1735 up(&STp->lock);
1736 1736
1737 return retval; 1737 return retval;
1738 } 1738 }
1739 1739
/* Read data from the tape. Returns zero in the normal case, one if the
   eof status has changed, and the negative error code in case of a
   fatal error. Otherwise updates the buffer and the eof state.

   Does release user buffer mapping if it is set.
 */
static long read_tape(struct scsi_tape *STp, long count,
		      struct st_request ** aSRpnt)
{
	int transfer, blks, bytes;
	unsigned char cmd[MAX_COMMAND_SIZE];
	struct st_request *SRpnt;
	struct st_modedef *STm;
	struct st_partstat *STps;
	struct st_buffer *STbp;
	int retval = 0;
	char *name = tape_name(STp);

	if (count == 0)
		return 0;

	STm = &(STp->modes[STp->current_mode]);
	STps = &(STp->ps[STp->partition]);
	/* A filemark was already hit earlier; report the eof change
	   without issuing another READ. */
	if (STps->eof == ST_FM_HIT)
		return 1;
	STbp = STp->buffer;

	/* Decide the transfer size.  In variable-block mode (block_size == 0)
	   the byte count is used directly as the transfer length. */
	if (STp->block_size == 0)
		blks = bytes = count;
	else {
		if (!(STp->try_dio && try_rdio) && STm->do_read_ahead) {
			/* Buffered read-ahead: fill the whole driver buffer. */
			blks = (STp->buffer)->buffer_blocks;
			bytes = blks * STp->block_size;
		} else {
			/* Read only what was asked for, clipped to the driver
			   buffer when not doing direct i/o, rounded down to a
			   whole number of blocks. */
			bytes = count;
			if (!STbp->do_dio && bytes > (STp->buffer)->buffer_size)
				bytes = (STp->buffer)->buffer_size;
			blks = bytes / STp->block_size;
			bytes = blks * STp->block_size;
		}
	}

	/* Build the READ(6) CDB; byte 1 bit 0 selects fixed-block mode,
	   bytes 2-4 carry the transfer length (blocks or bytes). */
	memset(cmd, 0, MAX_COMMAND_SIZE);
	cmd[0] = READ_6;
	cmd[1] = (STp->block_size != 0);
	cmd[2] = blks >> 16;
	cmd[3] = blks >> 8;
	cmd[4] = blks;

	SRpnt = *aSRpnt;
	SRpnt = st_do_scsi(SRpnt, STp, cmd, bytes, DMA_FROM_DEVICE,
			   STp->device->timeout, MAX_RETRIES, 1);
	/* Drop the user-page mapping set up for direct i/o (if any). */
	release_buffering(STp, 1);
	*aSRpnt = SRpnt;
	if (!SRpnt)
		return STbp->syscall_result;

	STbp->read_pointer = 0;
	STps->at_sm = 0;

	/* Something to check */
	if (STbp->syscall_result) {
		struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;

		retval = 1;
		DEBC(printk(ST_DEB_MSG "%s: Sense: %2x %2x %2x %2x %2x %2x %2x %2x\n",
			    name,
			    SRpnt->sense[0], SRpnt->sense[1],
			    SRpnt->sense[2], SRpnt->sense[3],
			    SRpnt->sense[4], SRpnt->sense[5],
			    SRpnt->sense[6], SRpnt->sense[7]));
		if (cmdstatp->have_sense) {

			if (cmdstatp->sense_hdr.sense_key == BLANK_CHECK)
				cmdstatp->flags &= 0xcf;	/* No need for EOM in this case */

			if (cmdstatp->flags != 0) { /* EOF, EOM, or ILI */
				/* Compute the residual count */
				if (cmdstatp->remainder_valid)
					transfer = (int)cmdstatp->uremainder64;
				else
					transfer = 0;
				if (STp->block_size == 0 &&
				    cmdstatp->sense_hdr.sense_key == MEDIUM_ERROR)
					transfer = bytes;

				if (cmdstatp->flags & SENSE_ILI) {	/* ILI */
					if (STp->block_size == 0) {
						/* Variable-block mode: a non-positive
						   residual means the block did not fit
						   into the requested transfer. */
						if (transfer <= 0) {
							if (transfer < 0)
								printk(KERN_NOTICE
								       "%s: Failed to read %d byte block with %d byte transfer.\n",
								       name, bytes - transfer, bytes);
							if (STps->drv_block >= 0)
								STps->drv_block += 1;
							STbp->buffer_bytes = 0;
							return (-ENOMEM);
						}
						STbp->buffer_bytes = bytes - transfer;
					} else {
						st_release_request(SRpnt);
						SRpnt = *aSRpnt = NULL;
						if (transfer == blks) {	/* We did not get anything, error */
							printk(KERN_NOTICE "%s: Incorrect block size.\n", name);
							if (STps->drv_block >= 0)
								STps->drv_block += blks - transfer + 1;
							/* Back up over the mismatched block. */
							st_int_ioctl(STp, MTBSR, 1);
							return (-EIO);
						}
						/* We have some data, deliver it */
						STbp->buffer_bytes = (blks - transfer) *
						    STp->block_size;
						DEBC(printk(ST_DEB_MSG
							    "%s: ILI but enough data received %ld %d.\n",
							    name, count, STbp->buffer_bytes));
						if (STps->drv_block >= 0)
							STps->drv_block += 1;
						if (st_int_ioctl(STp, MTBSR, 1))
							return (-EIO);
					}
				} else if (cmdstatp->flags & SENSE_FMK) {	/* FM overrides EOM */
					/* First filemark -> FM_HIT; a second in a row
					   means end of data. */
					if (STps->eof != ST_FM_HIT)
						STps->eof = ST_FM_HIT;
					else
						STps->eof = ST_EOD_2;
					if (STp->block_size == 0)
						STbp->buffer_bytes = 0;
					else
						STbp->buffer_bytes =
						    bytes - transfer * STp->block_size;
					DEBC(printk(ST_DEB_MSG
						    "%s: EOF detected (%d bytes read).\n",
						    name, STbp->buffer_bytes));
				} else if (cmdstatp->flags & SENSE_EOM) {
					if (STps->eof == ST_FM)
						STps->eof = ST_EOD_1;
					else
						STps->eof = ST_EOM_OK;
					if (STp->block_size == 0)
						STbp->buffer_bytes = bytes - transfer;
					else
						STbp->buffer_bytes =
						    bytes - transfer * STp->block_size;

					DEBC(printk(ST_DEB_MSG "%s: EOM detected (%d bytes read).\n",
						    name, STbp->buffer_bytes));
				}
			}
			/* end of EOF, EOM, ILI test */
			else {	/* nonzero sense key */
				DEBC(printk(ST_DEB_MSG
					    "%s: Tape error while reading.\n", name));
				STps->drv_block = (-1);
				if (STps->eof == ST_FM &&
				    cmdstatp->sense_hdr.sense_key == BLANK_CHECK) {
					DEBC(printk(ST_DEB_MSG
						    "%s: Zero returned for first BLANK CHECK after EOF.\n",
						    name));
					STps->eof = ST_EOD_2;	/* First BLANK_CHECK after FM */
				} else	/* Some other extended sense code */
					retval = (-EIO);
			}

			if (STbp->buffer_bytes < 0)  /* Caused by bogus sense data */
				STbp->buffer_bytes = 0;
		}
		/* End of extended sense test */
		else {		/* Non-extended sense */
			retval = STbp->syscall_result;
		}

	}
	/* End of error handling */
	else			/* Read successful */
		STbp->buffer_bytes = bytes;

	/* Keep the logical block position up to date when it is known. */
	if (STps->drv_block >= 0) {
		if (STp->block_size == 0)
			STps->drv_block++;
		else
			STps->drv_block += STbp->buffer_bytes / STp->block_size;
	}
	return retval;
}
1924 1924
1925 1925
/* Read command */
/* file_operations read entry point for the tape character device.
   Fills the user buffer from the driver buffer, refilling it via
   read_tape() as needed.  Returns the number of bytes read or a
   negative errno. */
static ssize_t
st_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
{
	ssize_t total;
	ssize_t retval = 0;
	ssize_t i, transfer;
	int special, do_dio = 0;
	struct st_request *SRpnt = NULL;
	struct scsi_tape *STp = filp->private_data;
	struct st_modedef *STm;
	struct st_partstat *STps;
	struct st_buffer *STbp = STp->buffer;
	DEB( char *name = tape_name(STp); )

	/* Serialize all access to this tape; interruptible so that a
	   signal can abort the wait. */
	if (down_interruptible(&STp->lock))
		return -ERESTARTSYS;

	retval = rw_checks(STp, filp, count);
	if (retval || count == 0)
		goto out;

	STm = &(STp->modes[STp->current_mode]);
	if (!(STm->do_read_ahead) && STp->block_size != 0 &&
	    (count % STp->block_size) != 0) {
		retval = (-EINVAL);	/* Read must be integral number of blocks */
		goto out;
	}

	STps = &(STp->ps[STp->partition]);
	/* Switching from writing to reading: flush any buffered write
	   data first. */
	if (STps->rw == ST_WRITING) {
		retval = flush_buffer(STp, 0);
		if (retval)
			goto out;
		STps->rw = ST_READING;
	}
	DEB(
	if (debugging && STps->eof != ST_NOEOF)
		printk(ST_DEB_MSG "%s: EOF/EOM flag up (%d). Bytes %d\n", name,
		       STps->eof, STbp->buffer_bytes);
	) /* end DEB */

	retval = setup_buffering(STp, buf, count, 1);
	if (retval)
		goto out;
	do_dio = STbp->do_dio;

	/* Nothing buffered and already past EOD: advance the eof state
	   once per call, then report an error. */
	if (STbp->buffer_bytes == 0 &&
	    STps->eof >= ST_EOD_1) {
		if (STps->eof < ST_EOD) {
			STps->eof += 1;
			retval = 0;
			goto out;
		}
		retval = (-EIO);	/* EOM or Blank Check */
		goto out;
	}

	if (do_dio) {
		/* Check the buffer writability before any tape movement. Don't alter
		   buffer data. */
		if (copy_from_user(&i, buf, 1) != 0 ||
		    copy_to_user(buf, &i, 1) != 0 ||
		    copy_from_user(&i, buf + count - 1, 1) != 0 ||
		    copy_to_user(buf + count - 1, &i, 1) != 0) {
			retval = (-EFAULT);
			goto out;
		}
	}

	STps->rw = ST_READING;


	/* Loop until enough data in buffer or a special condition found */
	for (total = 0, special = 0; total < count && !special;) {

		/* Get new data if the buffer is empty */
		if (STbp->buffer_bytes == 0) {
			/* special > 0 signals an eof-state change; the loop
			   condition then ends the read after delivering data. */
			special = read_tape(STp, count - total, &SRpnt);
			if (special < 0) {	/* No need to continue read */
				retval = special;
				goto out;
			}
		}

		/* Move the data from driver buffer to user buffer */
		if (STbp->buffer_bytes > 0) {
			DEB(
			if (debugging && STps->eof != ST_NOEOF)
				printk(ST_DEB_MSG
				       "%s: EOF up (%d). Left %d, needed %d.\n", name,
				       STps->eof, STbp->buffer_bytes,
				       (int)(count - total));
			) /* end DEB */
			transfer = STbp->buffer_bytes < count - total ?
			    STbp->buffer_bytes : count - total;
			/* With direct i/o the data already went straight to the
			   user pages; only copy when going through the driver
			   buffer. */
			if (!do_dio) {
				i = from_buffer(STbp, buf, transfer);
				if (i) {
					retval = i;
					goto out;
				}
			}
			buf += transfer;
			total += transfer;
		}

		if (STp->block_size == 0)
			break;	/* Read only one variable length block */

	}			/* for (total = 0, special = 0;
				   total < count && !special; ) */

	/* Change the eof state if no data from tape or buffer */
	if (total == 0) {
		if (STps->eof == ST_FM_HIT) {
			STps->eof = ST_FM;
			STps->drv_block = 0;
			if (STps->drv_file >= 0)
				STps->drv_file++;
		} else if (STps->eof == ST_EOD_1) {
			STps->eof = ST_EOD_2;
			STps->drv_block = 0;
			if (STps->drv_file >= 0)
				STps->drv_file++;
		} else if (STps->eof == ST_EOD_2)
			STps->eof = ST_EOD;
	} else if (STps->eof == ST_FM)
		STps->eof = ST_NOEOF;
	retval = total;

 out:
	if (SRpnt != NULL) {
		st_release_request(SRpnt);
		SRpnt = NULL;
	}
	if (do_dio) {
		/* Direct i/o: unmap the user pages and discard any residual
		   buffer count. */
		release_buffering(STp, 1);
		STbp->buffer_bytes = 0;
	}
	up(&STp->lock);

	return retval;
}
2070 2070
2071 2071
2072 2072
DEB(
/* Set the driver options */
/* Debug-build helper: dump the current per-mode and per-device option
   flags to the kernel log.  Compiled only when DEB() expands to its
   argument (debug builds); prints nothing unless the runtime
   'debugging' flag is set. */
static void st_log_options(struct scsi_tape * STp, struct st_modedef * STm, char *name)
{
	if (debugging) {
		printk(KERN_INFO
		       "%s: Mode %d options: buffer writes: %d, async writes: %d, read ahead: %d\n",
		       name, STp->current_mode, STm->do_buffer_writes, STm->do_async_writes,
		       STm->do_read_ahead);
		printk(KERN_INFO
		       "%s: can bsr: %d, two FMs: %d, fast mteom: %d, auto lock: %d,\n",
		       name, STp->can_bsr, STp->two_fm, STp->fast_mteom, STp->do_auto_lock);
		printk(KERN_INFO
		       "%s: defs for wr: %d, no block limits: %d, partitions: %d, s2 log: %d\n",
		       name, STm->defaults_for_writes, STp->omit_blklims, STp->can_partitions,
		       STp->scsi2_logical);
		printk(KERN_INFO
		       "%s: sysv: %d nowait: %d\n", name, STm->sysv, STp->immediate);
		printk(KERN_INFO "%s: debugging: %d\n",
		       name, debugging);
	}
}
	)
2096 2096
2097 2097
2098 static int st_set_options(struct scsi_tape *STp, long options) 2098 static int st_set_options(struct scsi_tape *STp, long options)
2099 { 2099 {
2100 int value; 2100 int value;
2101 long code; 2101 long code;
2102 struct st_modedef *STm; 2102 struct st_modedef *STm;
2103 char *name = tape_name(STp); 2103 char *name = tape_name(STp);
2104 struct cdev *cd0, *cd1; 2104 struct cdev *cd0, *cd1;
2105 2105
2106 STm = &(STp->modes[STp->current_mode]); 2106 STm = &(STp->modes[STp->current_mode]);
2107 if (!STm->defined) { 2107 if (!STm->defined) {
2108 cd0 = STm->cdevs[0]; cd1 = STm->cdevs[1]; 2108 cd0 = STm->cdevs[0]; cd1 = STm->cdevs[1];
2109 memcpy(STm, &(STp->modes[0]), sizeof(struct st_modedef)); 2109 memcpy(STm, &(STp->modes[0]), sizeof(struct st_modedef));
2110 STm->cdevs[0] = cd0; STm->cdevs[1] = cd1; 2110 STm->cdevs[0] = cd0; STm->cdevs[1] = cd1;
2111 modes_defined = 1; 2111 modes_defined = 1;
2112 DEBC(printk(ST_DEB_MSG 2112 DEBC(printk(ST_DEB_MSG
2113 "%s: Initialized mode %d definition from mode 0\n", 2113 "%s: Initialized mode %d definition from mode 0\n",
2114 name, STp->current_mode)); 2114 name, STp->current_mode));
2115 } 2115 }
2116 2116
2117 code = options & MT_ST_OPTIONS; 2117 code = options & MT_ST_OPTIONS;
2118 if (code == MT_ST_BOOLEANS) { 2118 if (code == MT_ST_BOOLEANS) {
2119 STm->do_buffer_writes = (options & MT_ST_BUFFER_WRITES) != 0; 2119 STm->do_buffer_writes = (options & MT_ST_BUFFER_WRITES) != 0;
2120 STm->do_async_writes = (options & MT_ST_ASYNC_WRITES) != 0; 2120 STm->do_async_writes = (options & MT_ST_ASYNC_WRITES) != 0;
2121 STm->defaults_for_writes = (options & MT_ST_DEF_WRITES) != 0; 2121 STm->defaults_for_writes = (options & MT_ST_DEF_WRITES) != 0;
2122 STm->do_read_ahead = (options & MT_ST_READ_AHEAD) != 0; 2122 STm->do_read_ahead = (options & MT_ST_READ_AHEAD) != 0;
2123 STp->two_fm = (options & MT_ST_TWO_FM) != 0; 2123 STp->two_fm = (options & MT_ST_TWO_FM) != 0;
2124 STp->fast_mteom = (options & MT_ST_FAST_MTEOM) != 0; 2124 STp->fast_mteom = (options & MT_ST_FAST_MTEOM) != 0;
2125 STp->do_auto_lock = (options & MT_ST_AUTO_LOCK) != 0; 2125 STp->do_auto_lock = (options & MT_ST_AUTO_LOCK) != 0;
2126 STp->can_bsr = (options & MT_ST_CAN_BSR) != 0; 2126 STp->can_bsr = (options & MT_ST_CAN_BSR) != 0;
2127 STp->omit_blklims = (options & MT_ST_NO_BLKLIMS) != 0; 2127 STp->omit_blklims = (options & MT_ST_NO_BLKLIMS) != 0;
2128 if ((STp->device)->scsi_level >= SCSI_2) 2128 if ((STp->device)->scsi_level >= SCSI_2)
2129 STp->can_partitions = (options & MT_ST_CAN_PARTITIONS) != 0; 2129 STp->can_partitions = (options & MT_ST_CAN_PARTITIONS) != 0;
2130 STp->scsi2_logical = (options & MT_ST_SCSI2LOGICAL) != 0; 2130 STp->scsi2_logical = (options & MT_ST_SCSI2LOGICAL) != 0;
2131 STp->immediate = (options & MT_ST_NOWAIT) != 0; 2131 STp->immediate = (options & MT_ST_NOWAIT) != 0;
2132 STm->sysv = (options & MT_ST_SYSV) != 0; 2132 STm->sysv = (options & MT_ST_SYSV) != 0;
2133 DEB( debugging = (options & MT_ST_DEBUGGING) != 0; 2133 DEB( debugging = (options & MT_ST_DEBUGGING) != 0;
2134 st_log_options(STp, STm, name); ) 2134 st_log_options(STp, STm, name); )
2135 } else if (code == MT_ST_SETBOOLEANS || code == MT_ST_CLEARBOOLEANS) { 2135 } else if (code == MT_ST_SETBOOLEANS || code == MT_ST_CLEARBOOLEANS) {
2136 value = (code == MT_ST_SETBOOLEANS); 2136 value = (code == MT_ST_SETBOOLEANS);
2137 if ((options & MT_ST_BUFFER_WRITES) != 0) 2137 if ((options & MT_ST_BUFFER_WRITES) != 0)
2138 STm->do_buffer_writes = value; 2138 STm->do_buffer_writes = value;
2139 if ((options & MT_ST_ASYNC_WRITES) != 0) 2139 if ((options & MT_ST_ASYNC_WRITES) != 0)
2140 STm->do_async_writes = value; 2140 STm->do_async_writes = value;
2141 if ((options & MT_ST_DEF_WRITES) != 0) 2141 if ((options & MT_ST_DEF_WRITES) != 0)
2142 STm->defaults_for_writes = value; 2142 STm->defaults_for_writes = value;
2143 if ((options & MT_ST_READ_AHEAD) != 0) 2143 if ((options & MT_ST_READ_AHEAD) != 0)
2144 STm->do_read_ahead = value; 2144 STm->do_read_ahead = value;
2145 if ((options & MT_ST_TWO_FM) != 0) 2145 if ((options & MT_ST_TWO_FM) != 0)
2146 STp->two_fm = value; 2146 STp->two_fm = value;
2147 if ((options & MT_ST_FAST_MTEOM) != 0) 2147 if ((options & MT_ST_FAST_MTEOM) != 0)
2148 STp->fast_mteom = value; 2148 STp->fast_mteom = value;
2149 if ((options & MT_ST_AUTO_LOCK) != 0) 2149 if ((options & MT_ST_AUTO_LOCK) != 0)
2150 STp->do_auto_lock = value; 2150 STp->do_auto_lock = value;
2151 if ((options & MT_ST_CAN_BSR) != 0) 2151 if ((options & MT_ST_CAN_BSR) != 0)
2152 STp->can_bsr = value; 2152 STp->can_bsr = value;
2153 if ((options & MT_ST_NO_BLKLIMS) != 0) 2153 if ((options & MT_ST_NO_BLKLIMS) != 0)
2154 STp->omit_blklims = value; 2154 STp->omit_blklims = value;
2155 if ((STp->device)->scsi_level >= SCSI_2 && 2155 if ((STp->device)->scsi_level >= SCSI_2 &&
2156 (options & MT_ST_CAN_PARTITIONS) != 0) 2156 (options & MT_ST_CAN_PARTITIONS) != 0)
2157 STp->can_partitions = value; 2157 STp->can_partitions = value;
2158 if ((options & MT_ST_SCSI2LOGICAL) != 0) 2158 if ((options & MT_ST_SCSI2LOGICAL) != 0)
2159 STp->scsi2_logical = value; 2159 STp->scsi2_logical = value;
2160 if ((options & MT_ST_NOWAIT) != 0) 2160 if ((options & MT_ST_NOWAIT) != 0)
2161 STp->immediate = value; 2161 STp->immediate = value;
2162 if ((options & MT_ST_SYSV) != 0) 2162 if ((options & MT_ST_SYSV) != 0)
2163 STm->sysv = value; 2163 STm->sysv = value;
2164 DEB( 2164 DEB(
2165 if ((options & MT_ST_DEBUGGING) != 0) 2165 if ((options & MT_ST_DEBUGGING) != 0)
2166 debugging = value; 2166 debugging = value;
2167 st_log_options(STp, STm, name); ) 2167 st_log_options(STp, STm, name); )
2168 } else if (code == MT_ST_WRITE_THRESHOLD) { 2168 } else if (code == MT_ST_WRITE_THRESHOLD) {
2169 /* Retained for compatibility */ 2169 /* Retained for compatibility */
2170 } else if (code == MT_ST_DEF_BLKSIZE) { 2170 } else if (code == MT_ST_DEF_BLKSIZE) {
2171 value = (options & ~MT_ST_OPTIONS); 2171 value = (options & ~MT_ST_OPTIONS);
2172 if (value == ~MT_ST_OPTIONS) { 2172 if (value == ~MT_ST_OPTIONS) {
2173 STm->default_blksize = (-1); 2173 STm->default_blksize = (-1);
2174 DEBC( printk(KERN_INFO "%s: Default block size disabled.\n", name)); 2174 DEBC( printk(KERN_INFO "%s: Default block size disabled.\n", name));
2175 } else { 2175 } else {
2176 STm->default_blksize = value; 2176 STm->default_blksize = value;
2177 DEBC( printk(KERN_INFO "%s: Default block size set to %d bytes.\n", 2177 DEBC( printk(KERN_INFO "%s: Default block size set to %d bytes.\n",
2178 name, STm->default_blksize)); 2178 name, STm->default_blksize));
2179 if (STp->ready == ST_READY) { 2179 if (STp->ready == ST_READY) {
2180 STp->blksize_changed = 0; 2180 STp->blksize_changed = 0;
2181 set_mode_densblk(STp, STm); 2181 set_mode_densblk(STp, STm);
2182 } 2182 }
2183 } 2183 }
2184 } else if (code == MT_ST_TIMEOUTS) { 2184 } else if (code == MT_ST_TIMEOUTS) {
2185 value = (options & ~MT_ST_OPTIONS); 2185 value = (options & ~MT_ST_OPTIONS);
2186 if ((value & MT_ST_SET_LONG_TIMEOUT) != 0) { 2186 if ((value & MT_ST_SET_LONG_TIMEOUT) != 0) {
2187 STp->long_timeout = (value & ~MT_ST_SET_LONG_TIMEOUT) * HZ; 2187 STp->long_timeout = (value & ~MT_ST_SET_LONG_TIMEOUT) * HZ;
2188 DEBC( printk(KERN_INFO "%s: Long timeout set to %d seconds.\n", name, 2188 DEBC( printk(KERN_INFO "%s: Long timeout set to %d seconds.\n", name,
2189 (value & ~MT_ST_SET_LONG_TIMEOUT))); 2189 (value & ~MT_ST_SET_LONG_TIMEOUT)));
2190 } else { 2190 } else {
2191 STp->device->timeout = value * HZ; 2191 STp->device->timeout = value * HZ;
2192 DEBC( printk(KERN_INFO "%s: Normal timeout set to %d seconds.\n", 2192 DEBC( printk(KERN_INFO "%s: Normal timeout set to %d seconds.\n",
2193 name, value) ); 2193 name, value) );
2194 } 2194 }
2195 } else if (code == MT_ST_SET_CLN) { 2195 } else if (code == MT_ST_SET_CLN) {
2196 value = (options & ~MT_ST_OPTIONS) & 0xff; 2196 value = (options & ~MT_ST_OPTIONS) & 0xff;
2197 if (value != 0 && 2197 if (value != 0 &&
2198 value < EXTENDED_SENSE_START && value >= SCSI_SENSE_BUFFERSIZE) 2198 value < EXTENDED_SENSE_START && value >= SCSI_SENSE_BUFFERSIZE)
2199 return (-EINVAL); 2199 return (-EINVAL);
2200 STp->cln_mode = value; 2200 STp->cln_mode = value;
2201 STp->cln_sense_mask = (options >> 8) & 0xff; 2201 STp->cln_sense_mask = (options >> 8) & 0xff;
2202 STp->cln_sense_value = (options >> 16) & 0xff; 2202 STp->cln_sense_value = (options >> 16) & 0xff;
2203 printk(KERN_INFO 2203 printk(KERN_INFO
2204 "%s: Cleaning request mode %d, mask %02x, value %02x\n", 2204 "%s: Cleaning request mode %d, mask %02x, value %02x\n",
2205 name, value, STp->cln_sense_mask, STp->cln_sense_value); 2205 name, value, STp->cln_sense_mask, STp->cln_sense_value);
2206 } else if (code == MT_ST_DEF_OPTIONS) { 2206 } else if (code == MT_ST_DEF_OPTIONS) {
2207 code = (options & ~MT_ST_CLEAR_DEFAULT); 2207 code = (options & ~MT_ST_CLEAR_DEFAULT);
2208 value = (options & MT_ST_CLEAR_DEFAULT); 2208 value = (options & MT_ST_CLEAR_DEFAULT);
2209 if (code == MT_ST_DEF_DENSITY) { 2209 if (code == MT_ST_DEF_DENSITY) {
2210 if (value == MT_ST_CLEAR_DEFAULT) { 2210 if (value == MT_ST_CLEAR_DEFAULT) {
2211 STm->default_density = (-1); 2211 STm->default_density = (-1);
2212 DEBC( printk(KERN_INFO "%s: Density default disabled.\n", 2212 DEBC( printk(KERN_INFO "%s: Density default disabled.\n",
2213 name)); 2213 name));
2214 } else { 2214 } else {
2215 STm->default_density = value & 0xff; 2215 STm->default_density = value & 0xff;
2216 DEBC( printk(KERN_INFO "%s: Density default set to %x\n", 2216 DEBC( printk(KERN_INFO "%s: Density default set to %x\n",
2217 name, STm->default_density)); 2217 name, STm->default_density));
2218 if (STp->ready == ST_READY) { 2218 if (STp->ready == ST_READY) {
2219 STp->density_changed = 0; 2219 STp->density_changed = 0;
2220 set_mode_densblk(STp, STm); 2220 set_mode_densblk(STp, STm);
2221 } 2221 }
2222 } 2222 }
2223 } else if (code == MT_ST_DEF_DRVBUFFER) { 2223 } else if (code == MT_ST_DEF_DRVBUFFER) {
2224 if (value == MT_ST_CLEAR_DEFAULT) { 2224 if (value == MT_ST_CLEAR_DEFAULT) {
2225 STp->default_drvbuffer = 0xff; 2225 STp->default_drvbuffer = 0xff;
2226 DEBC( printk(KERN_INFO 2226 DEBC( printk(KERN_INFO
2227 "%s: Drive buffer default disabled.\n", name)); 2227 "%s: Drive buffer default disabled.\n", name));
2228 } else { 2228 } else {
2229 STp->default_drvbuffer = value & 7; 2229 STp->default_drvbuffer = value & 7;
2230 DEBC( printk(KERN_INFO 2230 DEBC( printk(KERN_INFO
2231 "%s: Drive buffer default set to %x\n", 2231 "%s: Drive buffer default set to %x\n",
2232 name, STp->default_drvbuffer)); 2232 name, STp->default_drvbuffer));
2233 if (STp->ready == ST_READY) 2233 if (STp->ready == ST_READY)
2234 st_int_ioctl(STp, MTSETDRVBUFFER, STp->default_drvbuffer); 2234 st_int_ioctl(STp, MTSETDRVBUFFER, STp->default_drvbuffer);
2235 } 2235 }
2236 } else if (code == MT_ST_DEF_COMPRESSION) { 2236 } else if (code == MT_ST_DEF_COMPRESSION) {
2237 if (value == MT_ST_CLEAR_DEFAULT) { 2237 if (value == MT_ST_CLEAR_DEFAULT) {
2238 STm->default_compression = ST_DONT_TOUCH; 2238 STm->default_compression = ST_DONT_TOUCH;
2239 DEBC( printk(KERN_INFO 2239 DEBC( printk(KERN_INFO
2240 "%s: Compression default disabled.\n", name)); 2240 "%s: Compression default disabled.\n", name));
2241 } else { 2241 } else {
2242 if ((value & 0xff00) != 0) { 2242 if ((value & 0xff00) != 0) {
2243 STp->c_algo = (value & 0xff00) >> 8; 2243 STp->c_algo = (value & 0xff00) >> 8;
2244 DEBC( printk(KERN_INFO "%s: Compression algorithm set to 0x%x.\n", 2244 DEBC( printk(KERN_INFO "%s: Compression algorithm set to 0x%x.\n",
2245 name, STp->c_algo)); 2245 name, STp->c_algo));
2246 } 2246 }
2247 if ((value & 0xff) != 0xff) { 2247 if ((value & 0xff) != 0xff) {
2248 STm->default_compression = (value & 1 ? ST_YES : ST_NO); 2248 STm->default_compression = (value & 1 ? ST_YES : ST_NO);
2249 DEBC( printk(KERN_INFO "%s: Compression default set to %x\n", 2249 DEBC( printk(KERN_INFO "%s: Compression default set to %x\n",
2250 name, (value & 1))); 2250 name, (value & 1)));
2251 if (STp->ready == ST_READY) { 2251 if (STp->ready == ST_READY) {
2252 STp->compression_changed = 0; 2252 STp->compression_changed = 0;
2253 st_compression(STp, (STm->default_compression == ST_YES)); 2253 st_compression(STp, (STm->default_compression == ST_YES));
2254 } 2254 }
2255 } 2255 }
2256 } 2256 }
2257 } 2257 }
2258 } else 2258 } else
2259 return (-EIO); 2259 return (-EIO);
2260 2260
2261 return 0; 2261 return 0;
2262 } 2262 }
2263 2263
#define MODE_HEADER_LENGTH  4

/* Byte offsets into the mode sense/select header and into a mode page */
#define MH_OFF_DATA_LENGTH     0
#define MH_OFF_MEDIUM_TYPE     1
#define MH_OFF_DEV_SPECIFIC    2
#define MH_OFF_BDESCS_LENGTH   3
#define MP_OFF_PAGE_NBR        0
#define MP_OFF_PAGE_LENGTH     1

/* Mode header and page bit masks */
#define MH_BIT_WP              0x80	/* write protect, device-specific byte */
#define MP_MSK_PAGE_NBR        0x3f

/* MODE SENSE cmd[1] flag: don't return block descriptors */
#define MODE_SENSE_OMIT_BDESCS 0x08

/* MODE SELECT cmd[1] flag: parameter data uses the page format */
#define MODE_SELECT_PAGE_FORMAT 0x10
2282 2282
2283 /* Read a mode page into the tape buffer. The block descriptors are included 2283 /* Read a mode page into the tape buffer. The block descriptors are included
2284 if incl_block_descs is true. The page control is ored to the page number 2284 if incl_block_descs is true. The page control is ored to the page number
2285 parameter, if necessary. */ 2285 parameter, if necessary. */
2286 static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs) 2286 static int read_mode_page(struct scsi_tape *STp, int page, int omit_block_descs)
2287 { 2287 {
2288 unsigned char cmd[MAX_COMMAND_SIZE]; 2288 unsigned char cmd[MAX_COMMAND_SIZE];
2289 struct st_request *SRpnt = NULL; 2289 struct st_request *SRpnt = NULL;
2290 2290
2291 memset(cmd, 0, MAX_COMMAND_SIZE); 2291 memset(cmd, 0, MAX_COMMAND_SIZE);
2292 cmd[0] = MODE_SENSE; 2292 cmd[0] = MODE_SENSE;
2293 if (omit_block_descs) 2293 if (omit_block_descs)
2294 cmd[1] = MODE_SENSE_OMIT_BDESCS; 2294 cmd[1] = MODE_SENSE_OMIT_BDESCS;
2295 cmd[2] = page; 2295 cmd[2] = page;
2296 cmd[4] = 255; 2296 cmd[4] = 255;
2297 2297
2298 SRpnt = st_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_FROM_DEVICE, 2298 SRpnt = st_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_FROM_DEVICE,
2299 STp->device->timeout, 0, 1); 2299 STp->device->timeout, 0, 1);
2300 if (SRpnt == NULL) 2300 if (SRpnt == NULL)
2301 return (STp->buffer)->syscall_result; 2301 return (STp->buffer)->syscall_result;
2302 2302
2303 st_release_request(SRpnt); 2303 st_release_request(SRpnt);
2304 2304
2305 return (STp->buffer)->syscall_result; 2305 return (STp->buffer)->syscall_result;
2306 } 2306 }
2307 2307
2308 2308
2309 /* Send the mode page in the tape buffer to the drive. Assumes that the mode data 2309 /* Send the mode page in the tape buffer to the drive. Assumes that the mode data
2310 in the buffer is correctly formatted. The long timeout is used if slow is non-zero. */ 2310 in the buffer is correctly formatted. The long timeout is used if slow is non-zero. */
2311 static int write_mode_page(struct scsi_tape *STp, int page, int slow) 2311 static int write_mode_page(struct scsi_tape *STp, int page, int slow)
2312 { 2312 {
2313 int pgo; 2313 int pgo;
2314 unsigned char cmd[MAX_COMMAND_SIZE]; 2314 unsigned char cmd[MAX_COMMAND_SIZE];
2315 struct st_request *SRpnt = NULL; 2315 struct st_request *SRpnt = NULL;
2316 2316
2317 memset(cmd, 0, MAX_COMMAND_SIZE); 2317 memset(cmd, 0, MAX_COMMAND_SIZE);
2318 cmd[0] = MODE_SELECT; 2318 cmd[0] = MODE_SELECT;
2319 cmd[1] = MODE_SELECT_PAGE_FORMAT; 2319 cmd[1] = MODE_SELECT_PAGE_FORMAT;
2320 pgo = MODE_HEADER_LENGTH + (STp->buffer)->b_data[MH_OFF_BDESCS_LENGTH]; 2320 pgo = MODE_HEADER_LENGTH + (STp->buffer)->b_data[MH_OFF_BDESCS_LENGTH];
2321 cmd[4] = pgo + (STp->buffer)->b_data[pgo + MP_OFF_PAGE_LENGTH] + 2; 2321 cmd[4] = pgo + (STp->buffer)->b_data[pgo + MP_OFF_PAGE_LENGTH] + 2;
2322 2322
2323 /* Clear reserved fields */ 2323 /* Clear reserved fields */
2324 (STp->buffer)->b_data[MH_OFF_DATA_LENGTH] = 0; 2324 (STp->buffer)->b_data[MH_OFF_DATA_LENGTH] = 0;
2325 (STp->buffer)->b_data[MH_OFF_MEDIUM_TYPE] = 0; 2325 (STp->buffer)->b_data[MH_OFF_MEDIUM_TYPE] = 0;
2326 (STp->buffer)->b_data[MH_OFF_DEV_SPECIFIC] &= ~MH_BIT_WP; 2326 (STp->buffer)->b_data[MH_OFF_DEV_SPECIFIC] &= ~MH_BIT_WP;
2327 (STp->buffer)->b_data[pgo + MP_OFF_PAGE_NBR] &= MP_MSK_PAGE_NBR; 2327 (STp->buffer)->b_data[pgo + MP_OFF_PAGE_NBR] &= MP_MSK_PAGE_NBR;
2328 2328
2329 SRpnt = st_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_TO_DEVICE, 2329 SRpnt = st_do_scsi(SRpnt, STp, cmd, cmd[4], DMA_TO_DEVICE,
2330 (slow ? STp->long_timeout : STp->device->timeout), 0, 1); 2330 (slow ? STp->long_timeout : STp->device->timeout), 0, 1);
2331 if (SRpnt == NULL) 2331 if (SRpnt == NULL)
2332 return (STp->buffer)->syscall_result; 2332 return (STp->buffer)->syscall_result;
2333 2333
2334 st_release_request(SRpnt); 2334 st_release_request(SRpnt);
2335 2335
2336 return (STp->buffer)->syscall_result; 2336 return (STp->buffer)->syscall_result;
2337 } 2337 }
2338 2338
2339 2339
/* Data compression mode page */
#define COMPRESSION_PAGE        0x0f
#define COMPRESSION_PAGE_LENGTH 16

/* Byte offsets within the compression page */
#define CP_OFF_DCE_DCC          2	/* enable + capability bits */
#define CP_OFF_C_ALGO           7	/* compression algorithm */

/* Bits within the CP_OFF_DCE_DCC byte */
#define DCE_MASK  0x80
#define DCC_MASK  0x40
#define RED_MASK  0x60
2349 2349
2350 2350
2351 /* Control the compression with mode page 15. Algorithm not changed if zero. 2351 /* Control the compression with mode page 15. Algorithm not changed if zero.
2352 2352
2353 The block descriptors are read and written because Sony SDT-7000 does not 2353 The block descriptors are read and written because Sony SDT-7000 does not
2354 work without this (suggestion from Michael Schaefer <Michael.Schaefer@dlr.de>). 2354 work without this (suggestion from Michael Schaefer <Michael.Schaefer@dlr.de>).
2355 Including block descriptors should not cause any harm to other drives. */ 2355 Including block descriptors should not cause any harm to other drives. */
2356 2356
2357 static int st_compression(struct scsi_tape * STp, int state) 2357 static int st_compression(struct scsi_tape * STp, int state)
2358 { 2358 {
2359 int retval; 2359 int retval;
2360 int mpoffs; /* Offset to mode page start */ 2360 int mpoffs; /* Offset to mode page start */
2361 unsigned char *b_data = (STp->buffer)->b_data; 2361 unsigned char *b_data = (STp->buffer)->b_data;
2362 DEB( char *name = tape_name(STp); ) 2362 DEB( char *name = tape_name(STp); )
2363 2363
2364 if (STp->ready != ST_READY) 2364 if (STp->ready != ST_READY)
2365 return (-EIO); 2365 return (-EIO);
2366 2366
2367 /* Read the current page contents */ 2367 /* Read the current page contents */
2368 retval = read_mode_page(STp, COMPRESSION_PAGE, 0); 2368 retval = read_mode_page(STp, COMPRESSION_PAGE, 0);
2369 if (retval) { 2369 if (retval) {
2370 DEBC(printk(ST_DEB_MSG "%s: Compression mode page not supported.\n", 2370 DEBC(printk(ST_DEB_MSG "%s: Compression mode page not supported.\n",
2371 name)); 2371 name));
2372 return (-EIO); 2372 return (-EIO);
2373 } 2373 }
2374 2374
2375 mpoffs = MODE_HEADER_LENGTH + b_data[MH_OFF_BDESCS_LENGTH]; 2375 mpoffs = MODE_HEADER_LENGTH + b_data[MH_OFF_BDESCS_LENGTH];
2376 DEBC(printk(ST_DEB_MSG "%s: Compression state is %d.\n", name, 2376 DEBC(printk(ST_DEB_MSG "%s: Compression state is %d.\n", name,
2377 (b_data[mpoffs + CP_OFF_DCE_DCC] & DCE_MASK ? 1 : 0))); 2377 (b_data[mpoffs + CP_OFF_DCE_DCC] & DCE_MASK ? 1 : 0)));
2378 2378
2379 /* Check if compression can be changed */ 2379 /* Check if compression can be changed */
2380 if ((b_data[mpoffs + CP_OFF_DCE_DCC] & DCC_MASK) == 0) { 2380 if ((b_data[mpoffs + CP_OFF_DCE_DCC] & DCC_MASK) == 0) {
2381 DEBC(printk(ST_DEB_MSG "%s: Compression not supported.\n", name)); 2381 DEBC(printk(ST_DEB_MSG "%s: Compression not supported.\n", name));
2382 return (-EIO); 2382 return (-EIO);
2383 } 2383 }
2384 2384
2385 /* Do the change */ 2385 /* Do the change */
2386 if (state) { 2386 if (state) {
2387 b_data[mpoffs + CP_OFF_DCE_DCC] |= DCE_MASK; 2387 b_data[mpoffs + CP_OFF_DCE_DCC] |= DCE_MASK;
2388 if (STp->c_algo != 0) 2388 if (STp->c_algo != 0)
2389 b_data[mpoffs + CP_OFF_C_ALGO] = STp->c_algo; 2389 b_data[mpoffs + CP_OFF_C_ALGO] = STp->c_algo;
2390 } 2390 }
2391 else { 2391 else {
2392 b_data[mpoffs + CP_OFF_DCE_DCC] &= ~DCE_MASK; 2392 b_data[mpoffs + CP_OFF_DCE_DCC] &= ~DCE_MASK;
2393 if (STp->c_algo != 0) 2393 if (STp->c_algo != 0)
2394 b_data[mpoffs + CP_OFF_C_ALGO] = 0; /* no compression */ 2394 b_data[mpoffs + CP_OFF_C_ALGO] = 0; /* no compression */
2395 } 2395 }
2396 2396
2397 retval = write_mode_page(STp, COMPRESSION_PAGE, 0); 2397 retval = write_mode_page(STp, COMPRESSION_PAGE, 0);
2398 if (retval) { 2398 if (retval) {
2399 DEBC(printk(ST_DEB_MSG "%s: Compression change failed.\n", name)); 2399 DEBC(printk(ST_DEB_MSG "%s: Compression change failed.\n", name));
2400 return (-EIO); 2400 return (-EIO);
2401 } 2401 }
2402 DEBC(printk(ST_DEB_MSG "%s: Compression state changed to %d.\n", 2402 DEBC(printk(ST_DEB_MSG "%s: Compression state changed to %d.\n",
2403 name, state)); 2403 name, state));
2404 2404
2405 STp->compression_changed = 1; 2405 STp->compression_changed = 1;
2406 return 0; 2406 return 0;
2407 } 2407 }
2408 2408
2409 2409
2410 /* Process the load and unload commands (does unload if the load code is zero) */ 2410 /* Process the load and unload commands (does unload if the load code is zero) */
2411 static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_code) 2411 static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_code)
2412 { 2412 {
2413 int retval = (-EIO), timeout; 2413 int retval = (-EIO), timeout;
2414 DEB( char *name = tape_name(STp); ) 2414 DEB( char *name = tape_name(STp); )
2415 unsigned char cmd[MAX_COMMAND_SIZE]; 2415 unsigned char cmd[MAX_COMMAND_SIZE];
2416 struct st_partstat *STps; 2416 struct st_partstat *STps;
2417 struct st_request *SRpnt; 2417 struct st_request *SRpnt;
2418 2418
2419 if (STp->ready != ST_READY && !load_code) { 2419 if (STp->ready != ST_READY && !load_code) {
2420 if (STp->ready == ST_NO_TAPE) 2420 if (STp->ready == ST_NO_TAPE)
2421 return (-ENOMEDIUM); 2421 return (-ENOMEDIUM);
2422 else 2422 else
2423 return (-EIO); 2423 return (-EIO);
2424 } 2424 }
2425 2425
2426 memset(cmd, 0, MAX_COMMAND_SIZE); 2426 memset(cmd, 0, MAX_COMMAND_SIZE);
2427 cmd[0] = START_STOP; 2427 cmd[0] = START_STOP;
2428 if (load_code) 2428 if (load_code)
2429 cmd[4] |= 1; 2429 cmd[4] |= 1;
2430 /* 2430 /*
2431 * If arg >= 1 && arg <= 6 Enhanced load/unload in HP C1553A 2431 * If arg >= 1 && arg <= 6 Enhanced load/unload in HP C1553A
2432 */ 2432 */
2433 if (load_code >= 1 + MT_ST_HPLOADER_OFFSET 2433 if (load_code >= 1 + MT_ST_HPLOADER_OFFSET
2434 && load_code <= 6 + MT_ST_HPLOADER_OFFSET) { 2434 && load_code <= 6 + MT_ST_HPLOADER_OFFSET) {
2435 DEBC(printk(ST_DEB_MSG "%s: Enhanced %sload slot %2d.\n", 2435 DEBC(printk(ST_DEB_MSG "%s: Enhanced %sload slot %2d.\n",
2436 name, (cmd[4]) ? "" : "un", 2436 name, (cmd[4]) ? "" : "un",
2437 load_code - MT_ST_HPLOADER_OFFSET)); 2437 load_code - MT_ST_HPLOADER_OFFSET));
2438 cmd[3] = load_code - MT_ST_HPLOADER_OFFSET; /* MediaID field of C1553A */ 2438 cmd[3] = load_code - MT_ST_HPLOADER_OFFSET; /* MediaID field of C1553A */
2439 } 2439 }
2440 if (STp->immediate) { 2440 if (STp->immediate) {
2441 cmd[1] = 1; /* Don't wait for completion */ 2441 cmd[1] = 1; /* Don't wait for completion */
2442 timeout = STp->device->timeout; 2442 timeout = STp->device->timeout;
2443 } 2443 }
2444 else 2444 else
2445 timeout = STp->long_timeout; 2445 timeout = STp->long_timeout;
2446 2446
2447 DEBC( 2447 DEBC(
2448 if (!load_code) 2448 if (!load_code)
2449 printk(ST_DEB_MSG "%s: Unloading tape.\n", name); 2449 printk(ST_DEB_MSG "%s: Unloading tape.\n", name);
2450 else 2450 else
2451 printk(ST_DEB_MSG "%s: Loading tape.\n", name); 2451 printk(ST_DEB_MSG "%s: Loading tape.\n", name);
2452 ); 2452 );
2453 2453
2454 SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE, 2454 SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
2455 timeout, MAX_RETRIES, 1); 2455 timeout, MAX_RETRIES, 1);
2456 if (!SRpnt) 2456 if (!SRpnt)
2457 return (STp->buffer)->syscall_result; 2457 return (STp->buffer)->syscall_result;
2458 2458
2459 retval = (STp->buffer)->syscall_result; 2459 retval = (STp->buffer)->syscall_result;
2460 st_release_request(SRpnt); 2460 st_release_request(SRpnt);
2461 2461
2462 if (!retval) { /* SCSI command successful */ 2462 if (!retval) { /* SCSI command successful */
2463 2463
2464 if (!load_code) { 2464 if (!load_code) {
2465 STp->rew_at_close = 0; 2465 STp->rew_at_close = 0;
2466 STp->ready = ST_NO_TAPE; 2466 STp->ready = ST_NO_TAPE;
2467 } 2467 }
2468 else { 2468 else {
2469 STp->rew_at_close = STp->autorew_dev; 2469 STp->rew_at_close = STp->autorew_dev;
2470 retval = check_tape(STp, filp); 2470 retval = check_tape(STp, filp);
2471 if (retval > 0) 2471 if (retval > 0)
2472 retval = 0; 2472 retval = 0;
2473 } 2473 }
2474 } 2474 }
2475 else { 2475 else {
2476 STps = &(STp->ps[STp->partition]); 2476 STps = &(STp->ps[STp->partition]);
2477 STps->drv_file = STps->drv_block = (-1); 2477 STps->drv_file = STps->drv_block = (-1);
2478 } 2478 }
2479 2479
2480 return retval; 2480 return retval;
2481 } 2481 }
2482 2482
#if DEBUG
#define ST_DEB_FORWARD  0
#define ST_DEB_BACKWARD 1
/*
 * Log a SPACE command for debugging. The 24-bit big-endian count in
 * cmd[2..4] is sign-extended to 32 bits; for the backward direction the
 * count is negated before printing.
 */
static void deb_space_print(char *name, int direction, char *units, unsigned char *cmd)
{
	s32 count;

	count = (cmd[2] << 16) | (cmd[3] << 8) | cmd[4];
	if (cmd[2] & 0x80)
		count |= 0xff000000;	/* sign-extend the 24-bit value */
	if (direction)
		count = -count;
	printk(ST_DEB_MSG "%s: Spacing tape %s over %d %s.\n", name,
	       direction ? "backward" : "forward", count, units);
}
#endif
2498 2498
2499 2499
2500 /* Internal ioctl function */ 2500 /* Internal ioctl function */
2501 static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned long arg) 2501 static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned long arg)
2502 { 2502 {
2503 int timeout; 2503 int timeout;
2504 long ltmp; 2504 long ltmp;
2505 int ioctl_result; 2505 int ioctl_result;
2506 int chg_eof = 1; 2506 int chg_eof = 1;
2507 unsigned char cmd[MAX_COMMAND_SIZE]; 2507 unsigned char cmd[MAX_COMMAND_SIZE];
2508 struct st_request *SRpnt; 2508 struct st_request *SRpnt;
2509 struct st_partstat *STps; 2509 struct st_partstat *STps;
2510 int fileno, blkno, at_sm, undone; 2510 int fileno, blkno, at_sm, undone;
2511 int datalen = 0, direction = DMA_NONE; 2511 int datalen = 0, direction = DMA_NONE;
2512 char *name = tape_name(STp); 2512 char *name = tape_name(STp);
2513 2513
2514 WARN_ON(STp->buffer->do_dio != 0); 2514 WARN_ON(STp->buffer->do_dio != 0);
2515 if (STp->ready != ST_READY) { 2515 if (STp->ready != ST_READY) {
2516 if (STp->ready == ST_NO_TAPE) 2516 if (STp->ready == ST_NO_TAPE)
2517 return (-ENOMEDIUM); 2517 return (-ENOMEDIUM);
2518 else 2518 else
2519 return (-EIO); 2519 return (-EIO);
2520 } 2520 }
2521 timeout = STp->long_timeout; 2521 timeout = STp->long_timeout;
2522 STps = &(STp->ps[STp->partition]); 2522 STps = &(STp->ps[STp->partition]);
2523 fileno = STps->drv_file; 2523 fileno = STps->drv_file;
2524 blkno = STps->drv_block; 2524 blkno = STps->drv_block;
2525 at_sm = STps->at_sm; 2525 at_sm = STps->at_sm;
2526 2526
2527 memset(cmd, 0, MAX_COMMAND_SIZE); 2527 memset(cmd, 0, MAX_COMMAND_SIZE);
2528 switch (cmd_in) { 2528 switch (cmd_in) {
2529 case MTFSFM: 2529 case MTFSFM:
2530 chg_eof = 0; /* Changed from the FSF after this */ 2530 chg_eof = 0; /* Changed from the FSF after this */
2531 case MTFSF: 2531 case MTFSF:
2532 cmd[0] = SPACE; 2532 cmd[0] = SPACE;
2533 cmd[1] = 0x01; /* Space FileMarks */ 2533 cmd[1] = 0x01; /* Space FileMarks */
2534 cmd[2] = (arg >> 16); 2534 cmd[2] = (arg >> 16);
2535 cmd[3] = (arg >> 8); 2535 cmd[3] = (arg >> 8);
2536 cmd[4] = arg; 2536 cmd[4] = arg;
2537 DEBC(deb_space_print(name, ST_DEB_FORWARD, "filemarks", cmd);) 2537 DEBC(deb_space_print(name, ST_DEB_FORWARD, "filemarks", cmd);)
2538 if (fileno >= 0) 2538 if (fileno >= 0)
2539 fileno += arg; 2539 fileno += arg;
2540 blkno = 0; 2540 blkno = 0;
2541 at_sm &= (arg == 0); 2541 at_sm &= (arg == 0);
2542 break; 2542 break;
2543 case MTBSFM: 2543 case MTBSFM:
2544 chg_eof = 0; /* Changed from the FSF after this */ 2544 chg_eof = 0; /* Changed from the FSF after this */
2545 case MTBSF: 2545 case MTBSF:
2546 cmd[0] = SPACE; 2546 cmd[0] = SPACE;
2547 cmd[1] = 0x01; /* Space FileMarks */ 2547 cmd[1] = 0x01; /* Space FileMarks */
2548 ltmp = (-arg); 2548 ltmp = (-arg);
2549 cmd[2] = (ltmp >> 16); 2549 cmd[2] = (ltmp >> 16);
2550 cmd[3] = (ltmp >> 8); 2550 cmd[3] = (ltmp >> 8);
2551 cmd[4] = ltmp; 2551 cmd[4] = ltmp;
2552 DEBC(deb_space_print(name, ST_DEB_BACKWARD, "filemarks", cmd);) 2552 DEBC(deb_space_print(name, ST_DEB_BACKWARD, "filemarks", cmd);)
2553 if (fileno >= 0) 2553 if (fileno >= 0)
2554 fileno -= arg; 2554 fileno -= arg;
2555 blkno = (-1); /* We can't know the block number */ 2555 blkno = (-1); /* We can't know the block number */
2556 at_sm &= (arg == 0); 2556 at_sm &= (arg == 0);
2557 break; 2557 break;
2558 case MTFSR: 2558 case MTFSR:
2559 cmd[0] = SPACE; 2559 cmd[0] = SPACE;
2560 cmd[1] = 0x00; /* Space Blocks */ 2560 cmd[1] = 0x00; /* Space Blocks */
2561 cmd[2] = (arg >> 16); 2561 cmd[2] = (arg >> 16);
2562 cmd[3] = (arg >> 8); 2562 cmd[3] = (arg >> 8);
2563 cmd[4] = arg; 2563 cmd[4] = arg;
2564 DEBC(deb_space_print(name, ST_DEB_FORWARD, "blocks", cmd);) 2564 DEBC(deb_space_print(name, ST_DEB_FORWARD, "blocks", cmd);)
2565 if (blkno >= 0) 2565 if (blkno >= 0)
2566 blkno += arg; 2566 blkno += arg;
2567 at_sm &= (arg == 0); 2567 at_sm &= (arg == 0);
2568 break; 2568 break;
2569 case MTBSR: 2569 case MTBSR:
2570 cmd[0] = SPACE; 2570 cmd[0] = SPACE;
2571 cmd[1] = 0x00; /* Space Blocks */ 2571 cmd[1] = 0x00; /* Space Blocks */
2572 ltmp = (-arg); 2572 ltmp = (-arg);
2573 cmd[2] = (ltmp >> 16); 2573 cmd[2] = (ltmp >> 16);
2574 cmd[3] = (ltmp >> 8); 2574 cmd[3] = (ltmp >> 8);
2575 cmd[4] = ltmp; 2575 cmd[4] = ltmp;
2576 DEBC(deb_space_print(name, ST_DEB_BACKWARD, "blocks", cmd);) 2576 DEBC(deb_space_print(name, ST_DEB_BACKWARD, "blocks", cmd);)
2577 if (blkno >= 0) 2577 if (blkno >= 0)
2578 blkno -= arg; 2578 blkno -= arg;
2579 at_sm &= (arg == 0); 2579 at_sm &= (arg == 0);
2580 break; 2580 break;
2581 case MTFSS: 2581 case MTFSS:
2582 cmd[0] = SPACE; 2582 cmd[0] = SPACE;
2583 cmd[1] = 0x04; /* Space Setmarks */ 2583 cmd[1] = 0x04; /* Space Setmarks */
2584 cmd[2] = (arg >> 16); 2584 cmd[2] = (arg >> 16);
2585 cmd[3] = (arg >> 8); 2585 cmd[3] = (arg >> 8);
2586 cmd[4] = arg; 2586 cmd[4] = arg;
2587 DEBC(deb_space_print(name, ST_DEB_FORWARD, "setmarks", cmd);) 2587 DEBC(deb_space_print(name, ST_DEB_FORWARD, "setmarks", cmd);)
2588 if (arg != 0) { 2588 if (arg != 0) {
2589 blkno = fileno = (-1); 2589 blkno = fileno = (-1);
2590 at_sm = 1; 2590 at_sm = 1;
2591 } 2591 }
2592 break; 2592 break;
2593 case MTBSS: 2593 case MTBSS:
2594 cmd[0] = SPACE; 2594 cmd[0] = SPACE;
2595 cmd[1] = 0x04; /* Space Setmarks */ 2595 cmd[1] = 0x04; /* Space Setmarks */
2596 ltmp = (-arg); 2596 ltmp = (-arg);
2597 cmd[2] = (ltmp >> 16); 2597 cmd[2] = (ltmp >> 16);
2598 cmd[3] = (ltmp >> 8); 2598 cmd[3] = (ltmp >> 8);
2599 cmd[4] = ltmp; 2599 cmd[4] = ltmp;
2600 DEBC(deb_space_print(name, ST_DEB_BACKWARD, "setmarks", cmd);) 2600 DEBC(deb_space_print(name, ST_DEB_BACKWARD, "setmarks", cmd);)
2601 if (arg != 0) { 2601 if (arg != 0) {
2602 blkno = fileno = (-1); 2602 blkno = fileno = (-1);
2603 at_sm = 1; 2603 at_sm = 1;
2604 } 2604 }
2605 break; 2605 break;
2606 case MTWEOF: 2606 case MTWEOF:
2607 case MTWSM: 2607 case MTWSM:
2608 if (STp->write_prot) 2608 if (STp->write_prot)
2609 return (-EACCES); 2609 return (-EACCES);
2610 cmd[0] = WRITE_FILEMARKS; 2610 cmd[0] = WRITE_FILEMARKS;
2611 if (cmd_in == MTWSM) 2611 if (cmd_in == MTWSM)
2612 cmd[1] = 2; 2612 cmd[1] = 2;
2613 cmd[2] = (arg >> 16); 2613 cmd[2] = (arg >> 16);
2614 cmd[3] = (arg >> 8); 2614 cmd[3] = (arg >> 8);
2615 cmd[4] = arg; 2615 cmd[4] = arg;
2616 timeout = STp->device->timeout; 2616 timeout = STp->device->timeout;
2617 DEBC( 2617 DEBC(
2618 if (cmd_in == MTWEOF) 2618 if (cmd_in == MTWEOF)
2619 printk(ST_DEB_MSG "%s: Writing %d filemarks.\n", name, 2619 printk(ST_DEB_MSG "%s: Writing %d filemarks.\n", name,
2620 cmd[2] * 65536 + cmd[3] * 256 + cmd[4]); 2620 cmd[2] * 65536 + cmd[3] * 256 + cmd[4]);
2621 else 2621 else
2622 printk(ST_DEB_MSG "%s: Writing %d setmarks.\n", name, 2622 printk(ST_DEB_MSG "%s: Writing %d setmarks.\n", name,
2623 cmd[2] * 65536 + cmd[3] * 256 + cmd[4]); 2623 cmd[2] * 65536 + cmd[3] * 256 + cmd[4]);
2624 ) 2624 )
2625 if (fileno >= 0) 2625 if (fileno >= 0)
2626 fileno += arg; 2626 fileno += arg;
2627 blkno = 0; 2627 blkno = 0;
2628 at_sm = (cmd_in == MTWSM); 2628 at_sm = (cmd_in == MTWSM);
2629 break; 2629 break;
2630 case MTREW: 2630 case MTREW:
2631 cmd[0] = REZERO_UNIT; 2631 cmd[0] = REZERO_UNIT;
2632 if (STp->immediate) { 2632 if (STp->immediate) {
2633 cmd[1] = 1; /* Don't wait for completion */ 2633 cmd[1] = 1; /* Don't wait for completion */
2634 timeout = STp->device->timeout; 2634 timeout = STp->device->timeout;
2635 } 2635 }
2636 DEBC(printk(ST_DEB_MSG "%s: Rewinding tape.\n", name)); 2636 DEBC(printk(ST_DEB_MSG "%s: Rewinding tape.\n", name));
2637 fileno = blkno = at_sm = 0; 2637 fileno = blkno = at_sm = 0;
2638 break; 2638 break;
2639 case MTNOP: 2639 case MTNOP:
2640 DEBC(printk(ST_DEB_MSG "%s: No op on tape.\n", name)); 2640 DEBC(printk(ST_DEB_MSG "%s: No op on tape.\n", name));
2641 return 0; /* Should do something ? */ 2641 return 0; /* Should do something ? */
2642 break; 2642 break;
2643 case MTRETEN: 2643 case MTRETEN:
2644 cmd[0] = START_STOP; 2644 cmd[0] = START_STOP;
2645 if (STp->immediate) { 2645 if (STp->immediate) {
2646 cmd[1] = 1; /* Don't wait for completion */ 2646 cmd[1] = 1; /* Don't wait for completion */
2647 timeout = STp->device->timeout; 2647 timeout = STp->device->timeout;
2648 } 2648 }
2649 cmd[4] = 3; 2649 cmd[4] = 3;
2650 DEBC(printk(ST_DEB_MSG "%s: Retensioning tape.\n", name)); 2650 DEBC(printk(ST_DEB_MSG "%s: Retensioning tape.\n", name));
2651 fileno = blkno = at_sm = 0; 2651 fileno = blkno = at_sm = 0;
2652 break; 2652 break;
2653 case MTEOM: 2653 case MTEOM:
2654 if (!STp->fast_mteom) { 2654 if (!STp->fast_mteom) {
2655 /* space to the end of tape */ 2655 /* space to the end of tape */
2656 ioctl_result = st_int_ioctl(STp, MTFSF, 0x7fffff); 2656 ioctl_result = st_int_ioctl(STp, MTFSF, 0x7fffff);
2657 fileno = STps->drv_file; 2657 fileno = STps->drv_file;
2658 if (STps->eof >= ST_EOD_1) 2658 if (STps->eof >= ST_EOD_1)
2659 return 0; 2659 return 0;
2660 /* The next lines would hide the number of spaced FileMarks 2660 /* The next lines would hide the number of spaced FileMarks
2661 That's why I inserted the previous lines. I had no luck 2661 That's why I inserted the previous lines. I had no luck
2662 with detecting EOM with FSF, so we go now to EOM. 2662 with detecting EOM with FSF, so we go now to EOM.
2663 Joerg Weule */ 2663 Joerg Weule */
2664 } else 2664 } else
2665 fileno = (-1); 2665 fileno = (-1);
2666 cmd[0] = SPACE; 2666 cmd[0] = SPACE;
2667 cmd[1] = 3; 2667 cmd[1] = 3;
2668 DEBC(printk(ST_DEB_MSG "%s: Spacing to end of recorded medium.\n", 2668 DEBC(printk(ST_DEB_MSG "%s: Spacing to end of recorded medium.\n",
2669 name)); 2669 name));
2670 blkno = -1; 2670 blkno = -1;
2671 at_sm = 0; 2671 at_sm = 0;
2672 break; 2672 break;
2673 case MTERASE: 2673 case MTERASE:
2674 if (STp->write_prot) 2674 if (STp->write_prot)
2675 return (-EACCES); 2675 return (-EACCES);
2676 cmd[0] = ERASE; 2676 cmd[0] = ERASE;
2677 cmd[1] = (arg ? 1 : 0); /* Long erase with non-zero argument */ 2677 cmd[1] = (arg ? 1 : 0); /* Long erase with non-zero argument */
2678 if (STp->immediate) { 2678 if (STp->immediate) {
2679 cmd[1] |= 2; /* Don't wait for completion */ 2679 cmd[1] |= 2; /* Don't wait for completion */
2680 timeout = STp->device->timeout; 2680 timeout = STp->device->timeout;
2681 } 2681 }
2682 else 2682 else
2683 timeout = STp->long_timeout * 8; 2683 timeout = STp->long_timeout * 8;
2684 2684
2685 DEBC(printk(ST_DEB_MSG "%s: Erasing tape.\n", name)); 2685 DEBC(printk(ST_DEB_MSG "%s: Erasing tape.\n", name));
2686 fileno = blkno = at_sm = 0; 2686 fileno = blkno = at_sm = 0;
2687 break; 2687 break;
2688 case MTSETBLK: /* Set block length */ 2688 case MTSETBLK: /* Set block length */
2689 case MTSETDENSITY: /* Set tape density */ 2689 case MTSETDENSITY: /* Set tape density */
2690 case MTSETDRVBUFFER: /* Set drive buffering */ 2690 case MTSETDRVBUFFER: /* Set drive buffering */
2691 case SET_DENS_AND_BLK: /* Set density and block size */ 2691 case SET_DENS_AND_BLK: /* Set density and block size */
2692 chg_eof = 0; 2692 chg_eof = 0;
2693 if (STp->dirty || (STp->buffer)->buffer_bytes != 0) 2693 if (STp->dirty || (STp->buffer)->buffer_bytes != 0)
2694 return (-EIO); /* Not allowed if data in buffer */ 2694 return (-EIO); /* Not allowed if data in buffer */
2695 if ((cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) && 2695 if ((cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) &&
2696 (arg & MT_ST_BLKSIZE_MASK) != 0 && 2696 (arg & MT_ST_BLKSIZE_MASK) != 0 &&
2697 STp->max_block > 0 && 2697 STp->max_block > 0 &&
2698 ((arg & MT_ST_BLKSIZE_MASK) < STp->min_block || 2698 ((arg & MT_ST_BLKSIZE_MASK) < STp->min_block ||
2699 (arg & MT_ST_BLKSIZE_MASK) > STp->max_block)) { 2699 (arg & MT_ST_BLKSIZE_MASK) > STp->max_block)) {
2700 printk(KERN_WARNING "%s: Illegal block size.\n", name); 2700 printk(KERN_WARNING "%s: Illegal block size.\n", name);
2701 return (-EINVAL); 2701 return (-EINVAL);
2702 } 2702 }
2703 cmd[0] = MODE_SELECT; 2703 cmd[0] = MODE_SELECT;
2704 if ((STp->use_pf & USE_PF)) 2704 if ((STp->use_pf & USE_PF))
2705 cmd[1] = MODE_SELECT_PAGE_FORMAT; 2705 cmd[1] = MODE_SELECT_PAGE_FORMAT;
2706 cmd[4] = datalen = 12; 2706 cmd[4] = datalen = 12;
2707 direction = DMA_TO_DEVICE; 2707 direction = DMA_TO_DEVICE;
2708 2708
2709 memset((STp->buffer)->b_data, 0, 12); 2709 memset((STp->buffer)->b_data, 0, 12);
2710 if (cmd_in == MTSETDRVBUFFER) 2710 if (cmd_in == MTSETDRVBUFFER)
2711 (STp->buffer)->b_data[2] = (arg & 7) << 4; 2711 (STp->buffer)->b_data[2] = (arg & 7) << 4;
2712 else 2712 else
2713 (STp->buffer)->b_data[2] = 2713 (STp->buffer)->b_data[2] =
2714 STp->drv_buffer << 4; 2714 STp->drv_buffer << 4;
2715 (STp->buffer)->b_data[3] = 8; /* block descriptor length */ 2715 (STp->buffer)->b_data[3] = 8; /* block descriptor length */
2716 if (cmd_in == MTSETDENSITY) { 2716 if (cmd_in == MTSETDENSITY) {
2717 (STp->buffer)->b_data[4] = arg; 2717 (STp->buffer)->b_data[4] = arg;
2718 STp->density_changed = 1; /* At least we tried ;-) */ 2718 STp->density_changed = 1; /* At least we tried ;-) */
2719 } else if (cmd_in == SET_DENS_AND_BLK) 2719 } else if (cmd_in == SET_DENS_AND_BLK)
2720 (STp->buffer)->b_data[4] = arg >> 24; 2720 (STp->buffer)->b_data[4] = arg >> 24;
2721 else 2721 else
2722 (STp->buffer)->b_data[4] = STp->density; 2722 (STp->buffer)->b_data[4] = STp->density;
2723 if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) { 2723 if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) {
2724 ltmp = arg & MT_ST_BLKSIZE_MASK; 2724 ltmp = arg & MT_ST_BLKSIZE_MASK;
2725 if (cmd_in == MTSETBLK) 2725 if (cmd_in == MTSETBLK)
2726 STp->blksize_changed = 1; /* At least we tried ;-) */ 2726 STp->blksize_changed = 1; /* At least we tried ;-) */
2727 } else 2727 } else
2728 ltmp = STp->block_size; 2728 ltmp = STp->block_size;
2729 (STp->buffer)->b_data[9] = (ltmp >> 16); 2729 (STp->buffer)->b_data[9] = (ltmp >> 16);
2730 (STp->buffer)->b_data[10] = (ltmp >> 8); 2730 (STp->buffer)->b_data[10] = (ltmp >> 8);
2731 (STp->buffer)->b_data[11] = ltmp; 2731 (STp->buffer)->b_data[11] = ltmp;
2732 timeout = STp->device->timeout; 2732 timeout = STp->device->timeout;
2733 DEBC( 2733 DEBC(
2734 if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) 2734 if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK)
2735 printk(ST_DEB_MSG 2735 printk(ST_DEB_MSG
2736 "%s: Setting block size to %d bytes.\n", name, 2736 "%s: Setting block size to %d bytes.\n", name,
2737 (STp->buffer)->b_data[9] * 65536 + 2737 (STp->buffer)->b_data[9] * 65536 +
2738 (STp->buffer)->b_data[10] * 256 + 2738 (STp->buffer)->b_data[10] * 256 +
2739 (STp->buffer)->b_data[11]); 2739 (STp->buffer)->b_data[11]);
2740 if (cmd_in == MTSETDENSITY || cmd_in == SET_DENS_AND_BLK) 2740 if (cmd_in == MTSETDENSITY || cmd_in == SET_DENS_AND_BLK)
2741 printk(ST_DEB_MSG 2741 printk(ST_DEB_MSG
2742 "%s: Setting density code to %x.\n", name, 2742 "%s: Setting density code to %x.\n", name,
2743 (STp->buffer)->b_data[4]); 2743 (STp->buffer)->b_data[4]);
2744 if (cmd_in == MTSETDRVBUFFER) 2744 if (cmd_in == MTSETDRVBUFFER)
2745 printk(ST_DEB_MSG 2745 printk(ST_DEB_MSG
2746 "%s: Setting drive buffer code to %d.\n", name, 2746 "%s: Setting drive buffer code to %d.\n", name,
2747 ((STp->buffer)->b_data[2] >> 4) & 7); 2747 ((STp->buffer)->b_data[2] >> 4) & 7);
2748 ) 2748 )
2749 break; 2749 break;
2750 default: 2750 default:
2751 return (-ENOSYS); 2751 return (-ENOSYS);
2752 } 2752 }
2753 2753
2754 SRpnt = st_do_scsi(NULL, STp, cmd, datalen, direction, 2754 SRpnt = st_do_scsi(NULL, STp, cmd, datalen, direction,
2755 timeout, MAX_RETRIES, 1); 2755 timeout, MAX_RETRIES, 1);
2756 if (!SRpnt) 2756 if (!SRpnt)
2757 return (STp->buffer)->syscall_result; 2757 return (STp->buffer)->syscall_result;
2758 2758
2759 ioctl_result = (STp->buffer)->syscall_result; 2759 ioctl_result = (STp->buffer)->syscall_result;
2760 2760
2761 if (!ioctl_result) { /* SCSI command successful */ 2761 if (!ioctl_result) { /* SCSI command successful */
2762 st_release_request(SRpnt); 2762 st_release_request(SRpnt);
2763 SRpnt = NULL; 2763 SRpnt = NULL;
2764 STps->drv_block = blkno; 2764 STps->drv_block = blkno;
2765 STps->drv_file = fileno; 2765 STps->drv_file = fileno;
2766 STps->at_sm = at_sm; 2766 STps->at_sm = at_sm;
2767 2767
2768 if (cmd_in == MTBSFM) 2768 if (cmd_in == MTBSFM)
2769 ioctl_result = st_int_ioctl(STp, MTFSF, 1); 2769 ioctl_result = st_int_ioctl(STp, MTFSF, 1);
2770 else if (cmd_in == MTFSFM) 2770 else if (cmd_in == MTFSFM)
2771 ioctl_result = st_int_ioctl(STp, MTBSF, 1); 2771 ioctl_result = st_int_ioctl(STp, MTBSF, 1);
2772 2772
2773 if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) { 2773 if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) {
2774 int old_block_size = STp->block_size; 2774 int old_block_size = STp->block_size;
2775 STp->block_size = arg & MT_ST_BLKSIZE_MASK; 2775 STp->block_size = arg & MT_ST_BLKSIZE_MASK;
2776 if (STp->block_size != 0) { 2776 if (STp->block_size != 0) {
2777 if (old_block_size == 0) 2777 if (old_block_size == 0)
2778 normalize_buffer(STp->buffer); 2778 normalize_buffer(STp->buffer);
2779 (STp->buffer)->buffer_blocks = 2779 (STp->buffer)->buffer_blocks =
2780 (STp->buffer)->buffer_size / STp->block_size; 2780 (STp->buffer)->buffer_size / STp->block_size;
2781 } 2781 }
2782 (STp->buffer)->buffer_bytes = (STp->buffer)->read_pointer = 0; 2782 (STp->buffer)->buffer_bytes = (STp->buffer)->read_pointer = 0;
2783 if (cmd_in == SET_DENS_AND_BLK) 2783 if (cmd_in == SET_DENS_AND_BLK)
2784 STp->density = arg >> MT_ST_DENSITY_SHIFT; 2784 STp->density = arg >> MT_ST_DENSITY_SHIFT;
2785 } else if (cmd_in == MTSETDRVBUFFER) 2785 } else if (cmd_in == MTSETDRVBUFFER)
2786 STp->drv_buffer = (arg & 7); 2786 STp->drv_buffer = (arg & 7);
2787 else if (cmd_in == MTSETDENSITY) 2787 else if (cmd_in == MTSETDENSITY)
2788 STp->density = arg; 2788 STp->density = arg;
2789 2789
2790 if (cmd_in == MTEOM) 2790 if (cmd_in == MTEOM)
2791 STps->eof = ST_EOD; 2791 STps->eof = ST_EOD;
2792 else if (cmd_in == MTFSF) 2792 else if (cmd_in == MTFSF)
2793 STps->eof = ST_FM; 2793 STps->eof = ST_FM;
2794 else if (chg_eof) 2794 else if (chg_eof)
2795 STps->eof = ST_NOEOF; 2795 STps->eof = ST_NOEOF;
2796 2796
2797 if (cmd_in == MTWEOF) 2797 if (cmd_in == MTWEOF)
2798 STps->rw = ST_IDLE; 2798 STps->rw = ST_IDLE;
2799 } else { /* SCSI command was not completely successful. Don't return 2799 } else { /* SCSI command was not completely successful. Don't return
2800 from this block without releasing the SCSI command block! */ 2800 from this block without releasing the SCSI command block! */
2801 struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; 2801 struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
2802 2802
2803 if (cmdstatp->flags & SENSE_EOM) { 2803 if (cmdstatp->flags & SENSE_EOM) {
2804 if (cmd_in != MTBSF && cmd_in != MTBSFM && 2804 if (cmd_in != MTBSF && cmd_in != MTBSFM &&
2805 cmd_in != MTBSR && cmd_in != MTBSS) 2805 cmd_in != MTBSR && cmd_in != MTBSS)
2806 STps->eof = ST_EOM_OK; 2806 STps->eof = ST_EOM_OK;
2807 STps->drv_block = 0; 2807 STps->drv_block = 0;
2808 } 2808 }
2809 2809
2810 if (cmdstatp->remainder_valid) 2810 if (cmdstatp->remainder_valid)
2811 undone = (int)cmdstatp->uremainder64; 2811 undone = (int)cmdstatp->uremainder64;
2812 else 2812 else
2813 undone = 0; 2813 undone = 0;
2814 2814
2815 if (cmd_in == MTWEOF && 2815 if (cmd_in == MTWEOF &&
2816 cmdstatp->have_sense && 2816 cmdstatp->have_sense &&
2817 (cmdstatp->flags & SENSE_EOM) && 2817 (cmdstatp->flags & SENSE_EOM) &&
2818 (cmdstatp->sense_hdr.sense_key == NO_SENSE || 2818 (cmdstatp->sense_hdr.sense_key == NO_SENSE ||
2819 cmdstatp->sense_hdr.sense_key == RECOVERED_ERROR) && 2819 cmdstatp->sense_hdr.sense_key == RECOVERED_ERROR) &&
2820 undone == 0) { 2820 undone == 0) {
2821 ioctl_result = 0; /* EOF written succesfully at EOM */ 2821 ioctl_result = 0; /* EOF written succesfully at EOM */
2822 if (fileno >= 0) 2822 if (fileno >= 0)
2823 fileno++; 2823 fileno++;
2824 STps->drv_file = fileno; 2824 STps->drv_file = fileno;
2825 STps->eof = ST_NOEOF; 2825 STps->eof = ST_NOEOF;
2826 } else if ((cmd_in == MTFSF) || (cmd_in == MTFSFM)) { 2826 } else if ((cmd_in == MTFSF) || (cmd_in == MTFSFM)) {
2827 if (fileno >= 0) 2827 if (fileno >= 0)
2828 STps->drv_file = fileno - undone; 2828 STps->drv_file = fileno - undone;
2829 else 2829 else
2830 STps->drv_file = fileno; 2830 STps->drv_file = fileno;
2831 STps->drv_block = -1; 2831 STps->drv_block = -1;
2832 STps->eof = ST_NOEOF; 2832 STps->eof = ST_NOEOF;
2833 } else if ((cmd_in == MTBSF) || (cmd_in == MTBSFM)) { 2833 } else if ((cmd_in == MTBSF) || (cmd_in == MTBSFM)) {
2834 if (arg > 0 && undone < 0) /* Some drives get this wrong */ 2834 if (arg > 0 && undone < 0) /* Some drives get this wrong */
2835 undone = (-undone); 2835 undone = (-undone);
2836 if (STps->drv_file >= 0) 2836 if (STps->drv_file >= 0)
2837 STps->drv_file = fileno + undone; 2837 STps->drv_file = fileno + undone;
2838 STps->drv_block = 0; 2838 STps->drv_block = 0;
2839 STps->eof = ST_NOEOF; 2839 STps->eof = ST_NOEOF;
2840 } else if (cmd_in == MTFSR) { 2840 } else if (cmd_in == MTFSR) {
2841 if (cmdstatp->flags & SENSE_FMK) { /* Hit filemark */ 2841 if (cmdstatp->flags & SENSE_FMK) { /* Hit filemark */
2842 if (STps->drv_file >= 0) 2842 if (STps->drv_file >= 0)
2843 STps->drv_file++; 2843 STps->drv_file++;
2844 STps->drv_block = 0; 2844 STps->drv_block = 0;
2845 STps->eof = ST_FM; 2845 STps->eof = ST_FM;
2846 } else { 2846 } else {
2847 if (blkno >= undone) 2847 if (blkno >= undone)
2848 STps->drv_block = blkno - undone; 2848 STps->drv_block = blkno - undone;
2849 else 2849 else
2850 STps->drv_block = (-1); 2850 STps->drv_block = (-1);
2851 STps->eof = ST_NOEOF; 2851 STps->eof = ST_NOEOF;
2852 } 2852 }
2853 } else if (cmd_in == MTBSR) { 2853 } else if (cmd_in == MTBSR) {
2854 if (cmdstatp->flags & SENSE_FMK) { /* Hit filemark */ 2854 if (cmdstatp->flags & SENSE_FMK) { /* Hit filemark */
2855 STps->drv_file--; 2855 STps->drv_file--;
2856 STps->drv_block = (-1); 2856 STps->drv_block = (-1);
2857 } else { 2857 } else {
2858 if (arg > 0 && undone < 0) /* Some drives get this wrong */ 2858 if (arg > 0 && undone < 0) /* Some drives get this wrong */
2859 undone = (-undone); 2859 undone = (-undone);
2860 if (STps->drv_block >= 0) 2860 if (STps->drv_block >= 0)
2861 STps->drv_block = blkno + undone; 2861 STps->drv_block = blkno + undone;
2862 } 2862 }
2863 STps->eof = ST_NOEOF; 2863 STps->eof = ST_NOEOF;
2864 } else if (cmd_in == MTEOM) { 2864 } else if (cmd_in == MTEOM) {
2865 STps->drv_file = (-1); 2865 STps->drv_file = (-1);
2866 STps->drv_block = (-1); 2866 STps->drv_block = (-1);
2867 STps->eof = ST_EOD; 2867 STps->eof = ST_EOD;
2868 } else if (cmd_in == MTSETBLK || 2868 } else if (cmd_in == MTSETBLK ||
2869 cmd_in == MTSETDENSITY || 2869 cmd_in == MTSETDENSITY ||
2870 cmd_in == MTSETDRVBUFFER || 2870 cmd_in == MTSETDRVBUFFER ||
2871 cmd_in == SET_DENS_AND_BLK) { 2871 cmd_in == SET_DENS_AND_BLK) {
2872 if (cmdstatp->sense_hdr.sense_key == ILLEGAL_REQUEST && 2872 if (cmdstatp->sense_hdr.sense_key == ILLEGAL_REQUEST &&
2873 !(STp->use_pf & PF_TESTED)) { 2873 !(STp->use_pf & PF_TESTED)) {
2874 /* Try the other possible state of Page Format if not 2874 /* Try the other possible state of Page Format if not
2875 already tried */ 2875 already tried */
2876 STp->use_pf = !STp->use_pf | PF_TESTED; 2876 STp->use_pf = !STp->use_pf | PF_TESTED;
2877 st_release_request(SRpnt); 2877 st_release_request(SRpnt);
2878 SRpnt = NULL; 2878 SRpnt = NULL;
2879 return st_int_ioctl(STp, cmd_in, arg); 2879 return st_int_ioctl(STp, cmd_in, arg);
2880 } 2880 }
2881 } else if (chg_eof) 2881 } else if (chg_eof)
2882 STps->eof = ST_NOEOF; 2882 STps->eof = ST_NOEOF;
2883 2883
2884 if (cmdstatp->sense_hdr.sense_key == BLANK_CHECK) 2884 if (cmdstatp->sense_hdr.sense_key == BLANK_CHECK)
2885 STps->eof = ST_EOD; 2885 STps->eof = ST_EOD;
2886 2886
2887 st_release_request(SRpnt); 2887 st_release_request(SRpnt);
2888 SRpnt = NULL; 2888 SRpnt = NULL;
2889 } 2889 }
2890 2890
2891 return ioctl_result; 2891 return ioctl_result;
2892 } 2892 }
2893 2893
2894 2894
2895 /* Get the tape position. If bt == 2, arg points into a kernel space mt_loc 2895 /* Get the tape position. If bt == 2, arg points into a kernel space mt_loc
2896 structure. */ 2896 structure. */
2897 2897
static int get_location(struct scsi_tape *STp, unsigned int *block, int *partition,
			int logical)
{
	int result;
	unsigned char scmd[MAX_COMMAND_SIZE];
	struct st_request *SRpnt;
	DEB( char *name = tape_name(STp); )

	/* Refuse if no medium is loaded / drive not ready */
	if (STp->ready != ST_READY)
		return (-EIO);

	memset(scmd, 0, MAX_COMMAND_SIZE);
	if ((STp->device)->scsi_level < SCSI_2) {
		/* Pre-SCSI-2 drives: vendor-specific QFA request block command */
		scmd[0] = QFA_REQUEST_BLOCK;
		scmd[4] = 3;
	} else {
		scmd[0] = READ_POSITION;
		/* Request a device-specific (physical) position unless a
		   logical position was asked for or forced by scsi2_logical */
		if (!logical && !STp->scsi2_logical)
			scmd[1] = 1;
	}
	/* 20 bytes covers the READ POSITION short-form data returned here */
	SRpnt = st_do_scsi(NULL, STp, scmd, 20, DMA_FROM_DEVICE,
			   STp->device->timeout, MAX_READY_RETRIES, 1);
	if (!SRpnt)
		return (STp->buffer)->syscall_result;

	/* Bit 2 of byte 0 in SCSI-2 READ POSITION data flags the position
	   as unknown (BPU), so treat that like a command failure */
	if ((STp->buffer)->syscall_result != 0 ||
	    (STp->device->scsi_level >= SCSI_2 &&
	     ((STp->buffer)->b_data[0] & 4) != 0)) {
		*block = *partition = 0;
		DEBC(printk(ST_DEB_MSG "%s: Can't read tape position.\n", name));
		result = (-EIO);
	} else {
		result = 0;
		if ((STp->device)->scsi_level < SCSI_2) {
			/* QFA reply: 3-byte big-endian block number, no partitions */
			*block = ((STp->buffer)->b_data[0] << 16)
			    + ((STp->buffer)->b_data[1] << 8)
			    + (STp->buffer)->b_data[2];
			*partition = 0;
		} else {
			/* READ POSITION reply: 4-byte big-endian block at offset 4,
			   partition number in byte 1 */
			*block = ((STp->buffer)->b_data[4] << 24)
			    + ((STp->buffer)->b_data[5] << 16)
			    + ((STp->buffer)->b_data[6] << 8)
			    + (STp->buffer)->b_data[7];
			*partition = (STp->buffer)->b_data[1];
			if (((STp->buffer)->b_data[0] & 0x80) &&
			    (STp->buffer)->b_data[1] == 0)	/* BOP of partition 0 */
				STp->ps[0].drv_block = STp->ps[0].drv_file = 0;
		}
		DEBC(printk(ST_DEB_MSG "%s: Got tape pos. blk %d part %d.\n", name,
			    *block, *partition));
	}
	st_release_request(SRpnt);
	SRpnt = NULL;

	return result;
}
2954 2954
2955 2955
2956 /* Set the tape block and partition. Negative partition means that only the 2956 /* Set the tape block and partition. Negative partition means that only the
2957 block should be set in vendor specific way. */ 2957 block should be set in vendor specific way. */
static int set_location(struct scsi_tape *STp, unsigned int block, int partition,
			int logical)
{
	struct st_partstat *STps;
	int result, p;
	unsigned int blk;
	int timeout;
	unsigned char scmd[MAX_COMMAND_SIZE];
	struct st_request *SRpnt;
	DEB( char *name = tape_name(STp); )

	if (STp->ready != ST_READY)
		return (-EIO);
	/* Locate may legitimately take very long (e.g. spanning the tape) */
	timeout = STp->long_timeout;
	STps = &(STp->ps[STp->partition]);

	DEBC(printk(ST_DEB_MSG "%s: Setting block to %d and partition to %d.\n",
		    name, block, partition));
	DEB(if (partition < 0)
	    return (-EIO); )

	/* Update the location at the partition we are leaving */
	if ((!STp->can_partitions && partition != 0) ||
	    partition >= ST_NBR_PARTITIONS)
		return (-EINVAL);
	if (partition != STp->partition) {
		/* Remember where we were in the old partition so that a later
		   switch back can reposition without a full locate */
		if (get_location(STp, &blk, &p, 1))
			STps->last_block_valid = 0;
		else {
			STps->last_block_valid = 1;
			STps->last_block_visited = blk;
			DEBC(printk(ST_DEB_MSG
				    "%s: Visited block %d for partition %d saved.\n",
				    name, blk, STp->partition));
		}
	}

	memset(scmd, 0, MAX_COMMAND_SIZE);
	if ((STp->device)->scsi_level < SCSI_2) {
		/* Vendor-specific QFA seek: 3-byte big-endian block address */
		scmd[0] = QFA_SEEK_BLOCK;
		scmd[2] = (block >> 16);
		scmd[3] = (block >> 8);
		scmd[4] = block;
		scmd[5] = 0;
	} else {
		/* SCSI-2 LOCATE (SEEK_10): 4-byte big-endian block address */
		scmd[0] = SEEK_10;
		scmd[3] = (block >> 24);
		scmd[4] = (block >> 16);
		scmd[5] = (block >> 8);
		scmd[6] = block;
		/* Bit 2 (BT) selects device-specific addressing */
		if (!logical && !STp->scsi2_logical)
			scmd[1] = 4;
		if (STp->partition != partition) {
			/* Bit 1 (CP) requests a partition change; the target
			   partition goes into byte 8 */
			scmd[1] |= 2;
			scmd[8] = partition;
			DEBC(printk(ST_DEB_MSG
				    "%s: Trying to change partition from %d to %d\n",
				    name, STp->partition, partition));
		}
	}
	if (STp->immediate) {
		scmd[1] |= 1;		/* Don't wait for completion */
		timeout = STp->device->timeout;
	}

	SRpnt = st_do_scsi(NULL, STp, scmd, 0, DMA_NONE,
			   timeout, MAX_READY_RETRIES, 1);
	if (!SRpnt)
		return (STp->buffer)->syscall_result;

	/* After a locate the driver no longer knows the drive position */
	STps->drv_block = STps->drv_file = (-1);
	STps->eof = ST_NOEOF;
	if ((STp->buffer)->syscall_result != 0) {
		result = (-EIO);
		/* The drive may have ended up in a different partition; try to
		   find out which one so the bookkeeping stays consistent */
		if (STp->can_partitions &&
		    (STp->device)->scsi_level >= SCSI_2 &&
		    (p = find_partition(STp)) >= 0)
			STp->partition = p;
	} else {
		if (STp->can_partitions) {
			STp->partition = partition;
			STps = &(STp->ps[partition]);
			/* Reset read/write state unless we returned to the
			   exact block we last visited in this partition */
			if (!STps->last_block_valid ||
			    STps->last_block_visited != block) {
				STps->at_sm = 0;
				STps->rw = ST_IDLE;
			}
		} else
			STps->at_sm = 0;
		if (block == 0)
			STps->drv_block = STps->drv_file = 0;
		result = 0;
	}

	st_release_request(SRpnt);
	SRpnt = NULL;

	return result;
}
3057 3057
3058 3058
/* Find the current partition number for the drive status. Called from open and
   returns either the partition number or a negative error code. */
3061 static int find_partition(struct scsi_tape *STp) 3061 static int find_partition(struct scsi_tape *STp)
3062 { 3062 {
3063 int i, partition; 3063 int i, partition;
3064 unsigned int block; 3064 unsigned int block;
3065 3065
3066 if ((i = get_location(STp, &block, &partition, 1)) < 0) 3066 if ((i = get_location(STp, &block, &partition, 1)) < 0)
3067 return i; 3067 return i;
3068 if (partition >= ST_NBR_PARTITIONS) 3068 if (partition >= ST_NBR_PARTITIONS)
3069 return (-EIO); 3069 return (-EIO);
3070 return partition; 3070 return partition;
3071 } 3071 }
3072 3072
3073 3073
3074 /* Change the partition if necessary */ 3074 /* Change the partition if necessary */
3075 static int switch_partition(struct scsi_tape *STp) 3075 static int switch_partition(struct scsi_tape *STp)
3076 { 3076 {
3077 struct st_partstat *STps; 3077 struct st_partstat *STps;
3078 3078
3079 if (STp->partition == STp->new_partition) 3079 if (STp->partition == STp->new_partition)
3080 return 0; 3080 return 0;
3081 STps = &(STp->ps[STp->new_partition]); 3081 STps = &(STp->ps[STp->new_partition]);
3082 if (!STps->last_block_valid) 3082 if (!STps->last_block_valid)
3083 STps->last_block_visited = 0; 3083 STps->last_block_visited = 0;
3084 return set_location(STp, STps->last_block_visited, STp->new_partition, 1); 3084 return set_location(STp, STps->last_block_visited, STp->new_partition, 1);
3085 } 3085 }
3086 3086
3087 /* Functions for reading and writing the medium partition mode page. */ 3087 /* Functions for reading and writing the medium partition mode page. */
3088 3088
#define PART_PAGE 0x11			/* Medium Partition mode page code */
#define PART_PAGE_FIXED_LENGTH 8	/* page length before the partition size fields */

#define PP_OFF_MAX_ADD_PARTS 2		/* offset: max. additional partitions supported */
#define PP_OFF_NBR_ADD_PARTS 3		/* offset: current number of additional partitions */
#define PP_OFF_FLAGS 4			/* offset: flag byte (IDP, PSUM, ...) */
#define PP_OFF_PART_UNITS 6		/* offset: partition size units */
#define PP_OFF_RESERVED 7		/* offset: reserved byte */

#define PP_BIT_IDP 0x20			/* IDP: initiator defines the partitions */
#define PP_MSK_PSUM_MB 0x10		/* PSUM: partition sizes given in megabytes */
3101 /* Get the number of partitions on the tape. As a side effect reads the 3101 /* Get the number of partitions on the tape. As a side effect reads the
3102 mode page into the tape buffer. */ 3102 mode page into the tape buffer. */
3103 static int nbr_partitions(struct scsi_tape *STp) 3103 static int nbr_partitions(struct scsi_tape *STp)
3104 { 3104 {
3105 int result; 3105 int result;
3106 DEB( char *name = tape_name(STp); ) 3106 DEB( char *name = tape_name(STp); )
3107 3107
3108 if (STp->ready != ST_READY) 3108 if (STp->ready != ST_READY)
3109 return (-EIO); 3109 return (-EIO);
3110 3110
3111 result = read_mode_page(STp, PART_PAGE, 1); 3111 result = read_mode_page(STp, PART_PAGE, 1);
3112 3112
3113 if (result) { 3113 if (result) {
3114 DEBC(printk(ST_DEB_MSG "%s: Can't read medium partition page.\n", 3114 DEBC(printk(ST_DEB_MSG "%s: Can't read medium partition page.\n",
3115 name)); 3115 name));
3116 result = (-EIO); 3116 result = (-EIO);
3117 } else { 3117 } else {
3118 result = (STp->buffer)->b_data[MODE_HEADER_LENGTH + 3118 result = (STp->buffer)->b_data[MODE_HEADER_LENGTH +
3119 PP_OFF_NBR_ADD_PARTS] + 1; 3119 PP_OFF_NBR_ADD_PARTS] + 1;
3120 DEBC(printk(ST_DEB_MSG "%s: Number of partitions %d.\n", name, result)); 3120 DEBC(printk(ST_DEB_MSG "%s: Number of partitions %d.\n", name, result));
3121 } 3121 }
3122 3122
3123 return result; 3123 return result;
3124 } 3124 }
3125 3125
3126 3126
3127 /* Partition the tape into two partitions if size > 0 or one partition if 3127 /* Partition the tape into two partitions if size > 0 or one partition if
3128 size == 0. 3128 size == 0.
3129 3129
3130 The block descriptors are read and written because Sony SDT-7000 does not 3130 The block descriptors are read and written because Sony SDT-7000 does not
3131 work without this (suggestion from Michael Schaefer <Michael.Schaefer@dlr.de>). 3131 work without this (suggestion from Michael Schaefer <Michael.Schaefer@dlr.de>).
3132 3132
3133 My HP C1533A drive returns only one partition size field. This is used to 3133 My HP C1533A drive returns only one partition size field. This is used to
3134 set the size of partition 1. There is no size field for the default partition. 3134 set the size of partition 1. There is no size field for the default partition.
3135 Michael Schaefer's Sony SDT-7000 returns two descriptors and the second is 3135 Michael Schaefer's Sony SDT-7000 returns two descriptors and the second is
3136 used to set the size of partition 1 (this is what the SCSI-3 standard specifies). 3136 used to set the size of partition 1 (this is what the SCSI-3 standard specifies).
3137 The following algorithm is used to accommodate both drives: if the number of 3137 The following algorithm is used to accommodate both drives: if the number of
3138 partition size fields is greater than the maximum number of additional partitions 3138 partition size fields is greater than the maximum number of additional partitions
3139 in the mode page, the second field is used. Otherwise the first field is used. 3139 in the mode page, the second field is used. Otherwise the first field is used.
3140 3140
   For Seagate DDS drives the page length must be 8 when no partitions are defined
   and 10 when 1 partition is defined (information from Eric Lee Green). This
   is acceptable also to some other old drives and enforced if the first partition
   size field is used for the first additional partition size.
3145 */ 3145 */
static int partition_tape(struct scsi_tape *STp, int size)
{
	char *name = tape_name(STp);
	int result;
	int pgo, psd_cnt, psdo;
	unsigned char *bp;

	result = read_mode_page(STp, PART_PAGE, 0);
	if (result) {
		DEBC(printk(ST_DEB_MSG "%s: Can't read partition mode page.\n", name));
		return result;
	}
	/* The mode page is in the buffer. Let's modify it and write it. */
	bp = (STp->buffer)->b_data;
	/* Page starts after the mode header plus any block descriptors */
	pgo = MODE_HEADER_LENGTH + bp[MH_OFF_BDESCS_LENGTH];
	DEBC(printk(ST_DEB_MSG "%s: Partition page length is %d bytes.\n",
		    name, bp[pgo + MP_OFF_PAGE_LENGTH] + 2));

	/* Number of partition size descriptors (2 bytes each) following the
	   fixed part of the page */
	psd_cnt = (bp[pgo + MP_OFF_PAGE_LENGTH] + 2 - PART_PAGE_FIXED_LENGTH) / 2;
	psdo = pgo + PART_PAGE_FIXED_LENGTH;
	/* More size fields than additional partitions (e.g. Sony SDT-7000):
	   the first field is the default partition -> fill it with "rest of
	   the tape" and use the second field for partition 1 */
	if (psd_cnt > bp[pgo + PP_OFF_MAX_ADD_PARTS]) {
		bp[psdo] = bp[psdo + 1] = 0xff;  /* Rest of the tape */
		psdo += 2;
	}
	memset(bp + psdo, 0, bp[pgo + PP_OFF_NBR_ADD_PARTS] * 2);

	DEBC(printk("%s: psd_cnt %d, max.parts %d, nbr_parts %d\n", name,
		    psd_cnt, bp[pgo + PP_OFF_MAX_ADD_PARTS],
		    bp[pgo + PP_OFF_NBR_ADD_PARTS]));

	if (size <= 0) {
		/* Single-partition format: no additional partitions; shrink
		   the page to length 8 (value 6 + 2) for old DDS drives when
		   the first size field belongs to the default partition */
		bp[pgo + PP_OFF_NBR_ADD_PARTS] = 0;
		if (psd_cnt <= bp[pgo + PP_OFF_MAX_ADD_PARTS])
		    bp[pgo + MP_OFF_PAGE_LENGTH] = 6;
		DEBC(printk(ST_DEB_MSG "%s: Formatting tape with one partition.\n",
			    name));
	} else {
		/* Two-partition format: size (in MB, see PSUM below) of
		   partition 1 as a 16-bit big-endian value */
		bp[psdo] = (size >> 8) & 0xff;
		bp[psdo + 1] = size & 0xff;
		bp[pgo + 3] = 1;	/* one additional partition (PP_OFF_NBR_ADD_PARTS) */
		if (bp[pgo + MP_OFF_PAGE_LENGTH] < 8)
		    bp[pgo + MP_OFF_PAGE_LENGTH] = 8;
		DEBC(printk(ST_DEB_MSG
			    "%s: Formatting tape with two partitions (1 = %d MB).\n",
			    name, size));
	}
	bp[pgo + PP_OFF_PART_UNITS] = 0;
	bp[pgo + PP_OFF_RESERVED] = 0;
	/* Initiator-defined partitions, sizes in megabytes */
	bp[pgo + PP_OFF_FLAGS] = PP_BIT_IDP | PP_MSK_PSUM_MB;

	result = write_mode_page(STp, PART_PAGE, 1);
	if (result) {
		printk(KERN_INFO "%s: Partitioning of tape failed.\n", name);
		result = (-EIO);
	}

	return result;
}
3204 3204
3205 3205
3206 3206
/* The ioctl command.  Dispatches the magnetic tape operations (MTIOCTOP),
   the status queries (MTIOCGET, MTIOCPOS), and falls through to the
   generic SCSI ioctls for everything else.  Access is serialized with
   STp->lock; note that the generic-ioctl path drops the lock before
   calling out and returns without the 'out:' unlock. */
static int st_ioctl(struct inode *inode, struct file *file,
		    unsigned int cmd_in, unsigned long arg)
{
	int i, cmd_nr, cmd_type, bt;
	int retval = 0;
	unsigned int blk;
	struct scsi_tape *STp = file->private_data;
	struct st_modedef *STm;
	struct st_partstat *STps;
	char *name = tape_name(STp);
	void __user *p = (void __user *)arg;

	if (down_interruptible(&STp->lock))
		return -ERESTARTSYS;

	DEB(
	if (debugging && !STp->in_use) {
		printk(ST_DEB_MSG "%s: Incorrect device.\n", name);
		retval = (-EIO);
		goto out;
	} ) /* end DEB */

	STm = &(STp->modes[STp->current_mode]);
	STps = &(STp->ps[STp->partition]);

	/*
	 * If we are in the middle of error recovery, don't let anyone
	 * else try and use this device. Also, if error recovery fails, it
	 * may try and take the device offline, in which case all further
	 * access to the device is prohibited.
	 */
	retval = scsi_nonblockable_ioctl(STp->device, cmd_in, p, file);
	if (!scsi_block_when_processing_errors(STp->device) || retval != -ENODEV)
		goto out;
	retval = 0;

	cmd_type = _IOC_TYPE(cmd_in);
	cmd_nr = _IOC_NR(cmd_in);

	/* --- MTIOCTOP: perform a tape operation --- */
	if (cmd_type == _IOC_TYPE(MTIOCTOP) && cmd_nr == _IOC_NR(MTIOCTOP)) {
		struct mtop mtc;

		if (_IOC_SIZE(cmd_in) != sizeof(mtc)) {
			retval = (-EINVAL);
			goto out;
		}

		i = copy_from_user(&mtc, p, sizeof(struct mtop));
		if (i) {
			retval = (-EFAULT);
			goto out;
		}

		if (mtc.mt_op == MTSETDRVBUFFER && !capable(CAP_SYS_ADMIN)) {
			printk(KERN_WARNING
			       "%s: MTSETDRVBUFFER only allowed for root.\n", name);
			retval = (-EPERM);
			goto out;
		}
		if (!STm->defined &&
		    (mtc.mt_op != MTSETDRVBUFFER &&
		     (mtc.mt_count & MT_ST_OPTIONS) == 0)) {
			retval = (-ENXIO);
			goto out;
		}

		if (!STp->pos_unknown) {

			/* Adjust counts/positions when a filemark was just hit
			   so the operation counts from the right side of it */
			if (STps->eof == ST_FM_HIT) {
				if (mtc.mt_op == MTFSF || mtc.mt_op == MTFSFM ||
				    mtc.mt_op == MTEOM) {
					mtc.mt_count -= 1;
					if (STps->drv_file >= 0)
						STps->drv_file += 1;
				} else if (mtc.mt_op == MTBSF || mtc.mt_op == MTBSFM) {
					mtc.mt_count += 1;
					if (STps->drv_file >= 0)
						STps->drv_file += 1;
				}
			}

			if (mtc.mt_op == MTSEEK) {
				/* Old position must be restored if partition will be
				   changed */
				i = !STp->can_partitions ||
				    (STp->new_partition != STp->partition);
			} else {
				/* Operations that invalidate buffered data need a
				   full flush with position restore */
				i = mtc.mt_op == MTREW || mtc.mt_op == MTOFFL ||
				    mtc.mt_op == MTRETEN || mtc.mt_op == MTEOM ||
				    mtc.mt_op == MTLOCK || mtc.mt_op == MTLOAD ||
				    mtc.mt_op == MTFSF || mtc.mt_op == MTFSFM ||
				    mtc.mt_op == MTBSF || mtc.mt_op == MTBSFM ||
				    mtc.mt_op == MTCOMPRESSION;
			}
			i = flush_buffer(STp, i);
			if (i < 0) {
				retval = i;
				goto out;
			}
			/* A write in progress must be terminated with a filemark
			   before repositioning */
			if (STps->rw == ST_WRITING &&
			    (mtc.mt_op == MTREW || mtc.mt_op == MTOFFL ||
			     mtc.mt_op == MTSEEK ||
			     mtc.mt_op == MTBSF || mtc.mt_op == MTBSFM)) {
				i = st_int_ioctl(STp, MTWEOF, 1);
				if (i < 0) {
					retval = i;
					goto out;
				}
				if (mtc.mt_op == MTBSF || mtc.mt_op == MTBSFM)
					mtc.mt_count++;
				STps->rw = ST_IDLE;
			}

		} else {
			/*
			 * If there was a bus reset, block further access
			 * to this device.  If the user wants to rewind the tape,
			 * then reset the flag and allow access again.
			 */
			if (mtc.mt_op != MTREW &&
			    mtc.mt_op != MTOFFL &&
			    mtc.mt_op != MTRETEN &&
			    mtc.mt_op != MTERASE &&
			    mtc.mt_op != MTSEEK &&
			    mtc.mt_op != MTEOM) {
				retval = (-EIO);
				goto out;
			}
			reset_state(STp);
			/* remove this when the midlevel properly clears was_reset */
			STp->device->was_reset = 0;
		}

		if (mtc.mt_op != MTNOP && mtc.mt_op != MTSETBLK &&
		    mtc.mt_op != MTSETDENSITY && mtc.mt_op != MTWSM &&
		    mtc.mt_op != MTSETDRVBUFFER && mtc.mt_op != MTSETPART)
			STps->rw = ST_IDLE;	/* Prevent automatic WEOF and fsf */

		if (mtc.mt_op == MTOFFL && STp->door_locked != ST_UNLOCKED)
			do_door_lock(STp, 0);	/* Ignore result! */

		if (mtc.mt_op == MTSETDRVBUFFER &&
		    (mtc.mt_count & MT_ST_OPTIONS) != 0) {
			retval = st_set_options(STp, mtc.mt_count);
			goto out;
		}

		if (mtc.mt_op == MTSETPART) {
			if (!STp->can_partitions ||
			    mtc.mt_count < 0 || mtc.mt_count >= ST_NBR_PARTITIONS) {
				retval = (-EINVAL);
				goto out;
			}
			/* Re-read the partition count from the drive if the
			   requested partition is beyond what we know about */
			if (mtc.mt_count >= STp->nbr_partitions &&
			    (STp->nbr_partitions = nbr_partitions(STp)) < 0) {
				retval = (-EIO);
				goto out;
			}
			if (mtc.mt_count >= STp->nbr_partitions) {
				retval = (-EINVAL);
				goto out;
			}
			STp->new_partition = mtc.mt_count;
			retval = 0;
			goto out;
		}

		if (mtc.mt_op == MTMKPART) {
			if (!STp->can_partitions) {
				retval = (-EINVAL);
				goto out;
			}
			/* Rewind first, then (re)partition the tape */
			if ((i = st_int_ioctl(STp, MTREW, 0)) < 0 ||
			    (i = partition_tape(STp, mtc.mt_count)) < 0) {
				retval = i;
				goto out;
			}
			for (i = 0; i < ST_NBR_PARTITIONS; i++) {
				STp->ps[i].rw = ST_IDLE;
				STp->ps[i].at_sm = 0;
				STp->ps[i].last_block_valid = 0;
			}
			STp->partition = STp->new_partition = 0;
			STp->nbr_partitions = 1;	/* Bad guess ?-) */
			STps->drv_block = STps->drv_file = 0;
			retval = 0;
			goto out;
		}

		if (mtc.mt_op == MTSEEK) {
			i = set_location(STp, mtc.mt_count, STp->new_partition, 0);
			if (!STp->can_partitions)
				STp->ps[0].rw = ST_IDLE;
			retval = i;
			goto out;
		}

		if (mtc.mt_op == MTUNLOAD || mtc.mt_op == MTOFFL) {
			retval = do_load_unload(STp, file, 0);
			goto out;
		}

		if (mtc.mt_op == MTLOAD) {
			retval = do_load_unload(STp, file, max(1, mtc.mt_count));
			goto out;
		}

		if (mtc.mt_op == MTLOCK || mtc.mt_op == MTUNLOCK) {
			retval = do_door_lock(STp, (mtc.mt_op == MTLOCK));
			goto out;
		}

		if (STp->can_partitions && STp->ready == ST_READY &&
		    (i = switch_partition(STp)) < 0) {
			retval = i;
			goto out;
		}

		if (mtc.mt_op == MTCOMPRESSION)
			retval = st_compression(STp, (mtc.mt_count & 1));
		else
			retval = st_int_ioctl(STp, mtc.mt_op, mtc.mt_count);
		goto out;
	}
	if (!STm->defined) {
		retval = (-ENXIO);
		goto out;
	}

	if ((i = flush_buffer(STp, 0)) < 0) {
		retval = i;
		goto out;
	}
	if (STp->can_partitions &&
	    (i = switch_partition(STp)) < 0) {
		retval = i;
		goto out;
	}

	/* --- MTIOCGET: return drive/position status --- */
	if (cmd_type == _IOC_TYPE(MTIOCGET) && cmd_nr == _IOC_NR(MTIOCGET)) {
		struct mtget mt_status;

		if (_IOC_SIZE(cmd_in) != sizeof(struct mtget)) {
			retval = (-EINVAL);
			goto out;
		}

		mt_status.mt_type = STp->tape_type;
		mt_status.mt_dsreg =
		    ((STp->block_size << MT_ST_BLKSIZE_SHIFT) & MT_ST_BLKSIZE_MASK) |
		    ((STp->density << MT_ST_DENSITY_SHIFT) & MT_ST_DENSITY_MASK);
		mt_status.mt_blkno = STps->drv_block;
		mt_status.mt_fileno = STps->drv_file;
		/* Account for data still sitting in the driver buffer */
		if (STp->block_size != 0) {
			if (STps->rw == ST_WRITING)
				mt_status.mt_blkno +=
				    (STp->buffer)->buffer_bytes / STp->block_size;
			else if (STps->rw == ST_READING)
				mt_status.mt_blkno -=
				    ((STp->buffer)->buffer_bytes +
				     STp->block_size - 1) / STp->block_size;
		}

		mt_status.mt_gstat = 0;
		if (STp->drv_write_prot)
			mt_status.mt_gstat |= GMT_WR_PROT(0xffffffff);
		if (mt_status.mt_blkno == 0) {
			if (mt_status.mt_fileno == 0)
				mt_status.mt_gstat |= GMT_BOT(0xffffffff);
			else
				mt_status.mt_gstat |= GMT_EOF(0xffffffff);
		}
		mt_status.mt_erreg = (STp->recover_reg << MT_ST_SOFTERR_SHIFT);
		mt_status.mt_resid = STp->partition;
		if (STps->eof == ST_EOM_OK || STps->eof == ST_EOM_ERROR)
			mt_status.mt_gstat |= GMT_EOT(0xffffffff);
		else if (STps->eof >= ST_EOM_OK)
			mt_status.mt_gstat |= GMT_EOD(0xffffffff);
		if (STp->density == 1)
			mt_status.mt_gstat |= GMT_D_800(0xffffffff);
		else if (STp->density == 2)
			mt_status.mt_gstat |= GMT_D_1600(0xffffffff);
		else if (STp->density == 3)
			mt_status.mt_gstat |= GMT_D_6250(0xffffffff);
		if (STp->ready == ST_READY)
			mt_status.mt_gstat |= GMT_ONLINE(0xffffffff);
		if (STp->ready == ST_NO_TAPE)
			mt_status.mt_gstat |= GMT_DR_OPEN(0xffffffff);
		if (STps->at_sm)
			mt_status.mt_gstat |= GMT_SM(0xffffffff);
		if (STm->do_async_writes ||
		    (STm->do_buffer_writes && STp->block_size != 0) ||
		    STp->drv_buffer != 0)
			mt_status.mt_gstat |= GMT_IM_REP_EN(0xffffffff);
		if (STp->cleaning_req)
			mt_status.mt_gstat |= GMT_CLN(0xffffffff);

		i = copy_to_user(p, &mt_status, sizeof(struct mtget));
		if (i) {
			retval = (-EFAULT);
			goto out;
		}

		STp->recover_reg = 0;	/* Clear after read */
		retval = 0;
		goto out;
	}			/* End of MTIOCGET */

	/* --- MTIOCPOS: return the current block address --- */
	if (cmd_type == _IOC_TYPE(MTIOCPOS) && cmd_nr == _IOC_NR(MTIOCPOS)) {
		struct mtpos mt_pos;
		if (_IOC_SIZE(cmd_in) != sizeof(struct mtpos)) {
			retval = (-EINVAL);
			goto out;
		}
		if ((i = get_location(STp, &blk, &bt, 0)) < 0) {
			retval = i;
			goto out;
		}
		mt_pos.mt_blkno = blk;
		i = copy_to_user(p, &mt_pos, sizeof(struct mtpos));
		if (i)
			retval = (-EFAULT);
		goto out;
	}

	/* --- Everything else: hand off to the generic SCSI ioctl paths.
	   The lock is released here; these paths return directly. --- */
	up(&STp->lock);
	switch (cmd_in) {
		case SCSI_IOCTL_GET_IDLUN:
		case SCSI_IOCTL_GET_BUS_NUMBER:
			break;
		default:
			/* Raw command pass-through requires CAP_SYS_RAWIO */
			if ((cmd_in == SG_IO ||
			     cmd_in == SCSI_IOCTL_SEND_COMMAND ||
			     cmd_in == CDROM_SEND_PACKET) &&
			    !capable(CAP_SYS_RAWIO))
				i = -EPERM;
			else
				i = scsi_cmd_ioctl(file, STp->disk, cmd_in, p);
			if (i != -ENOTTY)
				return i;
			break;
	}
	retval = scsi_ioctl(STp->device, cmd_in, p);
	if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) { /* unload */
		STp->rew_at_close = 0;
		STp->ready = ST_NO_TAPE;
	}
	return retval;

 out:
	up(&STp->lock);
	return retval;
}
3559 3559
#ifdef CONFIG_COMPAT
/* 32-bit compat ioctl: delegate to the host template's compat_ioctl
   handler when one is provided; otherwise report the command as
   unhandled so the kernel falls back to its generic translation. */
static long st_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct scsi_tape *STp = file->private_data;
	struct scsi_device *sdev = STp->device;

	if (!sdev->host->hostt->compat_ioctl)
		return -ENOIOCTLCMD;

	return sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg);
}
#endif
3574 3574
3575 3575
3576 3576
3577 /* Try to allocate a new tape buffer. Calling function must not hold 3577 /* Try to allocate a new tape buffer. Calling function must not hold
3578 dev_arr_lock. */ 3578 dev_arr_lock. */
3579 static struct st_buffer * 3579 static struct st_buffer *
3580 new_tape_buffer(int from_initialization, int need_dma, int max_sg) 3580 new_tape_buffer(int from_initialization, int need_dma, int max_sg)
3581 { 3581 {
3582 int i, got = 0; 3582 int i, got = 0;
3583 gfp_t priority; 3583 gfp_t priority;
3584 struct st_buffer *tb; 3584 struct st_buffer *tb;
3585 3585
3586 if (from_initialization) 3586 if (from_initialization)
3587 priority = GFP_ATOMIC; 3587 priority = GFP_ATOMIC;
3588 else 3588 else
3589 priority = GFP_KERNEL; 3589 priority = GFP_KERNEL;
3590 3590
3591 i = sizeof(struct st_buffer) + (max_sg - 1) * sizeof(struct scatterlist) + 3591 i = sizeof(struct st_buffer) + (max_sg - 1) * sizeof(struct scatterlist) +
3592 max_sg * sizeof(struct st_buf_fragment); 3592 max_sg * sizeof(struct st_buf_fragment);
3593 tb = kmalloc(i, priority); 3593 tb = kmalloc(i, priority);
3594 if (!tb) { 3594 if (!tb) {
3595 printk(KERN_NOTICE "st: Can't allocate new tape buffer.\n"); 3595 printk(KERN_NOTICE "st: Can't allocate new tape buffer.\n");
3596 return NULL; 3596 return NULL;
3597 } 3597 }
3598 memset(tb, 0, i); 3598 memset(tb, 0, i);
3599 tb->frp_segs = tb->orig_frp_segs = 0; 3599 tb->frp_segs = tb->orig_frp_segs = 0;
3600 tb->use_sg = max_sg; 3600 tb->use_sg = max_sg;
3601 tb->frp = (struct st_buf_fragment *)(&(tb->sg[0]) + max_sg); 3601 tb->frp = (struct st_buf_fragment *)(&(tb->sg[0]) + max_sg);
3602 3602
3603 tb->in_use = 1; 3603 tb->in_use = 1;
3604 tb->dma = need_dma; 3604 tb->dma = need_dma;
3605 tb->buffer_size = got; 3605 tb->buffer_size = got;
3606 3606
3607 return tb; 3607 return tb;
3608 } 3608 }
3609 3609
3610 3610
/* Try to allocate enough space in the tape buffer.  Grows the buffer to at
   least new_size bytes by adding page fragments, using high-order page
   allocations (up to order 6) and falling back to smaller orders when
   allocation fails but the remaining segment slots can still cover the
   shortfall.  Returns 1 on success, 0 on failure (buffer normalized). */
static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dma)
{
	int segs, nbr, max_segs, b_size, order, got;
	gfp_t priority;

	if (new_size <= STbuffer->buffer_size)
		return 1;

	if (STbuffer->buffer_size <= PAGE_SIZE)
		normalize_buffer(STbuffer);  /* Avoid extra segment */

	max_segs = STbuffer->use_sg;
	nbr = max_segs - STbuffer->frp_segs;	/* free fragment slots left */
	if (nbr <= 0)
		return 0;

	priority = GFP_KERNEL | __GFP_NOWARN;
	if (need_dma)
		priority |= GFP_DMA;
	/* Pick the largest allocation order (<= 6) whose block size does not
	   exceed the amount of buffer space still missing */
	for (b_size = PAGE_SIZE, order=0; order <= 6 &&
		     b_size < new_size - STbuffer->buffer_size;
	     order++, b_size *= 2)
		;  /* empty */

	for (segs = STbuffer->frp_segs, got = STbuffer->buffer_size;
	     segs < max_segs && got < new_size;) {
		STbuffer->frp[segs].page = alloc_pages(priority, order);
		if (STbuffer->frp[segs].page == NULL) {
			/* Retry at half the block size if the remaining slots
			   can still satisfy the rest of the request */
			if (new_size - got <= (max_segs - segs) * b_size / 2) {
				b_size /= 2; /* Large enough for the rest of the buffers */
				order--;
				continue;
			}
			DEB(STbuffer->buffer_size = got);
			normalize_buffer(STbuffer);
			return 0;
		}
		STbuffer->frp[segs].length = b_size;
		STbuffer->frp_segs += 1;
		got += b_size;
		STbuffer->buffer_size = got;
		segs++;
	}
	/* b_data points at the first fragment for single-segment access */
	STbuffer->b_data = page_address(STbuffer->frp[0].page);

	return 1;
}
3659 3659
3660 3660
3661 /* Release the extra buffer */ 3661 /* Release the extra buffer */
3662 static void normalize_buffer(struct st_buffer * STbuffer) 3662 static void normalize_buffer(struct st_buffer * STbuffer)
3663 { 3663 {
3664 int i, order; 3664 int i, order;
3665 3665
3666 for (i = STbuffer->orig_frp_segs; i < STbuffer->frp_segs; i++) { 3666 for (i = STbuffer->orig_frp_segs; i < STbuffer->frp_segs; i++) {
3667 order = get_order(STbuffer->frp[i].length); 3667 order = get_order(STbuffer->frp[i].length);
3668 __free_pages(STbuffer->frp[i].page, order); 3668 __free_pages(STbuffer->frp[i].page, order);
3669 STbuffer->buffer_size -= STbuffer->frp[i].length; 3669 STbuffer->buffer_size -= STbuffer->frp[i].length;
3670 } 3670 }
3671 STbuffer->frp_segs = STbuffer->orig_frp_segs; 3671 STbuffer->frp_segs = STbuffer->orig_frp_segs;
3672 STbuffer->frp_sg_current = 0; 3672 STbuffer->frp_sg_current = 0;
3673 STbuffer->sg_segs = 0; 3673 STbuffer->sg_segs = 0;
3674 } 3674 }
3675 3675
3676 3676
3677 /* Move data from the user buffer to the tape buffer. Returns zero (success) or 3677 /* Move data from the user buffer to the tape buffer. Returns zero (success) or
3678 negative error code. */ 3678 negative error code. */
3679 static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count) 3679 static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count)
3680 { 3680 {
3681 int i, cnt, res, offset; 3681 int i, cnt, res, offset;
3682 3682
3683 for (i = 0, offset = st_bp->buffer_bytes; 3683 for (i = 0, offset = st_bp->buffer_bytes;
3684 i < st_bp->frp_segs && offset >= st_bp->frp[i].length; i++) 3684 i < st_bp->frp_segs && offset >= st_bp->frp[i].length; i++)
3685 offset -= st_bp->frp[i].length; 3685 offset -= st_bp->frp[i].length;
3686 if (i == st_bp->frp_segs) { /* Should never happen */ 3686 if (i == st_bp->frp_segs) { /* Should never happen */
3687 printk(KERN_WARNING "st: append_to_buffer offset overflow.\n"); 3687 printk(KERN_WARNING "st: append_to_buffer offset overflow.\n");
3688 return (-EIO); 3688 return (-EIO);
3689 } 3689 }
3690 for (; i < st_bp->frp_segs && do_count > 0; i++) { 3690 for (; i < st_bp->frp_segs && do_count > 0; i++) {
3691 cnt = st_bp->frp[i].length - offset < do_count ? 3691 cnt = st_bp->frp[i].length - offset < do_count ?
3692 st_bp->frp[i].length - offset : do_count; 3692 st_bp->frp[i].length - offset : do_count;
3693 res = copy_from_user(page_address(st_bp->frp[i].page) + offset, ubp, cnt); 3693 res = copy_from_user(page_address(st_bp->frp[i].page) + offset, ubp, cnt);
3694 if (res) 3694 if (res)
3695 return (-EFAULT); 3695 return (-EFAULT);
3696 do_count -= cnt; 3696 do_count -= cnt;
3697 st_bp->buffer_bytes += cnt; 3697 st_bp->buffer_bytes += cnt;
3698 ubp += cnt; 3698 ubp += cnt;
3699 offset = 0; 3699 offset = 0;
3700 } 3700 }
3701 if (do_count) /* Should never happen */ 3701 if (do_count) /* Should never happen */
3702 return (-EIO); 3702 return (-EIO);
3703 3703
3704 return 0; 3704 return 0;
3705 } 3705 }
3706 3706
3707 3707
3708 /* Move data from the tape buffer to the user buffer. Returns zero (success) or 3708 /* Move data from the tape buffer to the user buffer. Returns zero (success) or
3709 negative error code. */ 3709 negative error code. */
3710 static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count) 3710 static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count)
3711 { 3711 {
3712 int i, cnt, res, offset; 3712 int i, cnt, res, offset;
3713 3713
3714 for (i = 0, offset = st_bp->read_pointer; 3714 for (i = 0, offset = st_bp->read_pointer;
3715 i < st_bp->frp_segs && offset >= st_bp->frp[i].length; i++) 3715 i < st_bp->frp_segs && offset >= st_bp->frp[i].length; i++)
3716 offset -= st_bp->frp[i].length; 3716 offset -= st_bp->frp[i].length;
3717 if (i == st_bp->frp_segs) { /* Should never happen */ 3717 if (i == st_bp->frp_segs) { /* Should never happen */
3718 printk(KERN_WARNING "st: from_buffer offset overflow.\n"); 3718 printk(KERN_WARNING "st: from_buffer offset overflow.\n");
3719 return (-EIO); 3719 return (-EIO);
3720 } 3720 }
3721 for (; i < st_bp->frp_segs && do_count > 0; i++) { 3721 for (; i < st_bp->frp_segs && do_count > 0; i++) {
3722 cnt = st_bp->frp[i].length - offset < do_count ? 3722 cnt = st_bp->frp[i].length - offset < do_count ?
3723 st_bp->frp[i].length - offset : do_count; 3723 st_bp->frp[i].length - offset : do_count;
3724 res = copy_to_user(ubp, page_address(st_bp->frp[i].page) + offset, cnt); 3724 res = copy_to_user(ubp, page_address(st_bp->frp[i].page) + offset, cnt);
3725 if (res) 3725 if (res)
3726 return (-EFAULT); 3726 return (-EFAULT);
3727 do_count -= cnt; 3727 do_count -= cnt;
3728 st_bp->buffer_bytes -= cnt; 3728 st_bp->buffer_bytes -= cnt;
3729 st_bp->read_pointer += cnt; 3729 st_bp->read_pointer += cnt;
3730 ubp += cnt; 3730 ubp += cnt;
3731 offset = 0; 3731 offset = 0;
3732 } 3732 }
3733 if (do_count) /* Should never happen */ 3733 if (do_count) /* Should never happen */
3734 return (-EIO); 3734 return (-EIO);
3735 3735
3736 return 0; 3736 return 0;
3737 } 3737 }
3738 3738
3739 3739
3740 /* Move data towards start of buffer */ 3740 /* Move data towards start of buffer */
3741 static void move_buffer_data(struct st_buffer * st_bp, int offset) 3741 static void move_buffer_data(struct st_buffer * st_bp, int offset)
3742 { 3742 {
3743 int src_seg, dst_seg, src_offset = 0, dst_offset; 3743 int src_seg, dst_seg, src_offset = 0, dst_offset;
3744 int count, total; 3744 int count, total;
3745 3745
3746 if (offset == 0) 3746 if (offset == 0)
3747 return; 3747 return;
3748 3748
3749 total=st_bp->buffer_bytes - offset; 3749 total=st_bp->buffer_bytes - offset;
3750 for (src_seg=0; src_seg < st_bp->frp_segs; src_seg++) { 3750 for (src_seg=0; src_seg < st_bp->frp_segs; src_seg++) {
3751 src_offset = offset; 3751 src_offset = offset;
3752 if (src_offset < st_bp->frp[src_seg].length) 3752 if (src_offset < st_bp->frp[src_seg].length)
3753 break; 3753 break;
3754 offset -= st_bp->frp[src_seg].length; 3754 offset -= st_bp->frp[src_seg].length;
3755 } 3755 }
3756 3756
3757 st_bp->buffer_bytes = st_bp->read_pointer = total; 3757 st_bp->buffer_bytes = st_bp->read_pointer = total;
3758 for (dst_seg=dst_offset=0; total > 0; ) { 3758 for (dst_seg=dst_offset=0; total > 0; ) {
3759 count = min(st_bp->frp[dst_seg].length - dst_offset, 3759 count = min(st_bp->frp[dst_seg].length - dst_offset,
3760 st_bp->frp[src_seg].length - src_offset); 3760 st_bp->frp[src_seg].length - src_offset);
3761 memmove(page_address(st_bp->frp[dst_seg].page) + dst_offset, 3761 memmove(page_address(st_bp->frp[dst_seg].page) + dst_offset,
3762 page_address(st_bp->frp[src_seg].page) + src_offset, count); 3762 page_address(st_bp->frp[src_seg].page) + src_offset, count);
3763 src_offset += count; 3763 src_offset += count;
3764 if (src_offset >= st_bp->frp[src_seg].length) { 3764 if (src_offset >= st_bp->frp[src_seg].length) {
3765 src_seg++; 3765 src_seg++;
3766 src_offset = 0; 3766 src_offset = 0;
3767 } 3767 }
3768 dst_offset += count; 3768 dst_offset += count;
3769 if (dst_offset >= st_bp->frp[dst_seg].length) { 3769 if (dst_offset >= st_bp->frp[dst_seg].length) {
3770 dst_seg++; 3770 dst_seg++;
3771 dst_offset = 0; 3771 dst_offset = 0;
3772 } 3772 }
3773 total -= count; 3773 total -= count;
3774 } 3774 }
3775 } 3775 }
3776 3776
3777 3777
3778 /* Fill the s/g list up to the length required for this transfer */ 3778 /* Fill the s/g list up to the length required for this transfer */
3779 static void buf_to_sg(struct st_buffer *STbp, unsigned int length) 3779 static void buf_to_sg(struct st_buffer *STbp, unsigned int length)
3780 { 3780 {
3781 int i; 3781 int i;
3782 unsigned int count; 3782 unsigned int count;
3783 struct scatterlist *sg; 3783 struct scatterlist *sg;
3784 struct st_buf_fragment *frp; 3784 struct st_buf_fragment *frp;
3785 3785
3786 if (length == STbp->frp_sg_current) 3786 if (length == STbp->frp_sg_current)
3787 return; /* work already done */ 3787 return; /* work already done */
3788 3788
3789 sg = &(STbp->sg[0]); 3789 sg = &(STbp->sg[0]);
3790 frp = STbp->frp; 3790 frp = STbp->frp;
3791 for (i=count=0; count < length; i++) { 3791 for (i=count=0; count < length; i++) {
3792 sg[i].page = frp[i].page; 3792 sg[i].page = frp[i].page;
3793 if (length - count > frp[i].length) 3793 if (length - count > frp[i].length)
3794 sg[i].length = frp[i].length; 3794 sg[i].length = frp[i].length;
3795 else 3795 else
3796 sg[i].length = length - count; 3796 sg[i].length = length - count;
3797 count += sg[i].length; 3797 count += sg[i].length;
3798 sg[i].offset = 0; 3798 sg[i].offset = 0;
3799 } 3799 }
3800 STbp->sg_segs = i; 3800 STbp->sg_segs = i;
3801 STbp->frp_sg_current = length; 3801 STbp->frp_sg_current = length;
3802 } 3802 }
3803 3803
3804 3804
3805 /* Validate the options from command line or module parameters */ 3805 /* Validate the options from command line or module parameters */
3806 static void validate_options(void) 3806 static void validate_options(void)
3807 { 3807 {
3808 if (buffer_kbs > 0) 3808 if (buffer_kbs > 0)
3809 st_fixed_buffer_size = buffer_kbs * ST_KILOBYTE; 3809 st_fixed_buffer_size = buffer_kbs * ST_KILOBYTE;
3810 if (max_sg_segs >= ST_FIRST_SG) 3810 if (max_sg_segs >= ST_FIRST_SG)
3811 st_max_sg_segs = max_sg_segs; 3811 st_max_sg_segs = max_sg_segs;
3812 } 3812 }
3813 3813
3814 #ifndef MODULE 3814 #ifndef MODULE
3815 /* Set the boot options. Syntax is defined in Documenation/scsi/st.txt. 3815 /* Set the boot options. Syntax is defined in Documenation/scsi/st.txt.
3816 */ 3816 */
3817 static int __init st_setup(char *str) 3817 static int __init st_setup(char *str)
3818 { 3818 {
3819 int i, len, ints[5]; 3819 int i, len, ints[5];
3820 char *stp; 3820 char *stp;
3821 3821
3822 stp = get_options(str, ARRAY_SIZE(ints), ints); 3822 stp = get_options(str, ARRAY_SIZE(ints), ints);
3823 3823
3824 if (ints[0] > 0) { 3824 if (ints[0] > 0) {
3825 for (i = 0; i < ints[0] && i < ARRAY_SIZE(parms); i++) 3825 for (i = 0; i < ints[0] && i < ARRAY_SIZE(parms); i++)
3826 if (parms[i].val) 3826 if (parms[i].val)
3827 *parms[i].val = ints[i + 1]; 3827 *parms[i].val = ints[i + 1];
3828 } else { 3828 } else {
3829 while (stp != NULL) { 3829 while (stp != NULL) {
3830 for (i = 0; i < ARRAY_SIZE(parms); i++) { 3830 for (i = 0; i < ARRAY_SIZE(parms); i++) {
3831 len = strlen(parms[i].name); 3831 len = strlen(parms[i].name);
3832 if (!strncmp(stp, parms[i].name, len) && 3832 if (!strncmp(stp, parms[i].name, len) &&
3833 (*(stp + len) == ':' || *(stp + len) == '=')) { 3833 (*(stp + len) == ':' || *(stp + len) == '=')) {
3834 if (parms[i].val) 3834 if (parms[i].val)
3835 *parms[i].val = 3835 *parms[i].val =
3836 simple_strtoul(stp + len + 1, NULL, 0); 3836 simple_strtoul(stp + len + 1, NULL, 0);
3837 else 3837 else
3838 printk(KERN_WARNING "st: Obsolete parameter %s\n", 3838 printk(KERN_WARNING "st: Obsolete parameter %s\n",
3839 parms[i].name); 3839 parms[i].name);
3840 break; 3840 break;
3841 } 3841 }
3842 } 3842 }
3843 if (i >= sizeof(parms) / sizeof(struct st_dev_parm)) 3843 if (i >= sizeof(parms) / sizeof(struct st_dev_parm))
3844 printk(KERN_WARNING "st: invalid parameter in '%s'\n", 3844 printk(KERN_WARNING "st: invalid parameter in '%s'\n",
3845 stp); 3845 stp);
3846 stp = strchr(stp, ','); 3846 stp = strchr(stp, ',');
3847 if (stp) 3847 if (stp)
3848 stp++; 3848 stp++;
3849 } 3849 }
3850 } 3850 }
3851 3851
3852 validate_options(); 3852 validate_options();
3853 3853
3854 return 1; 3854 return 1;
3855 } 3855 }
3856 3856
3857 __setup("st=", st_setup); 3857 __setup("st=", st_setup);
3858 3858
3859 #endif 3859 #endif
3860 3860
3861 static struct file_operations st_fops = 3861 static struct file_operations st_fops =
3862 { 3862 {
3863 .owner = THIS_MODULE, 3863 .owner = THIS_MODULE,
3864 .read = st_read, 3864 .read = st_read,
3865 .write = st_write, 3865 .write = st_write,
3866 .ioctl = st_ioctl, 3866 .ioctl = st_ioctl,
3867 #ifdef CONFIG_COMPAT 3867 #ifdef CONFIG_COMPAT
3868 .compat_ioctl = st_compat_ioctl, 3868 .compat_ioctl = st_compat_ioctl,
3869 #endif 3869 #endif
3870 .open = st_open, 3870 .open = st_open,
3871 .flush = st_flush, 3871 .flush = st_flush,
3872 .release = st_release, 3872 .release = st_release,
3873 }; 3873 };
3874 3874
3875 static int st_probe(struct device *dev) 3875 static int st_probe(struct device *dev)
3876 { 3876 {
3877 struct scsi_device *SDp = to_scsi_device(dev); 3877 struct scsi_device *SDp = to_scsi_device(dev);
3878 struct gendisk *disk = NULL; 3878 struct gendisk *disk = NULL;
3879 struct cdev *cdev = NULL; 3879 struct cdev *cdev = NULL;
3880 struct scsi_tape *tpnt = NULL; 3880 struct scsi_tape *tpnt = NULL;
3881 struct st_modedef *STm; 3881 struct st_modedef *STm;
3882 struct st_partstat *STps; 3882 struct st_partstat *STps;
3883 struct st_buffer *buffer; 3883 struct st_buffer *buffer;
3884 int i, j, mode, dev_num, error; 3884 int i, j, mode, dev_num, error;
3885 char *stp; 3885 char *stp;
3886 3886
3887 if (SDp->type != TYPE_TAPE) 3887 if (SDp->type != TYPE_TAPE)
3888 return -ENODEV; 3888 return -ENODEV;
3889 if ((stp = st_incompatible(SDp))) { 3889 if ((stp = st_incompatible(SDp))) {
3890 sdev_printk(KERN_INFO, SDp, "Found incompatible tape\n"); 3890 sdev_printk(KERN_INFO, SDp, "Found incompatible tape\n");
3891 printk(KERN_INFO "st: The suggested driver is %s.\n", stp); 3891 printk(KERN_INFO "st: The suggested driver is %s.\n", stp);
3892 return -ENODEV; 3892 return -ENODEV;
3893 } 3893 }
3894 3894
3895 i = min(SDp->request_queue->max_hw_segments, 3895 i = min(SDp->request_queue->max_hw_segments,
3896 SDp->request_queue->max_phys_segments); 3896 SDp->request_queue->max_phys_segments);
3897 if (st_max_sg_segs < i) 3897 if (st_max_sg_segs < i)
3898 i = st_max_sg_segs; 3898 i = st_max_sg_segs;
3899 buffer = new_tape_buffer(1, (SDp->host)->unchecked_isa_dma, i); 3899 buffer = new_tape_buffer(1, (SDp->host)->unchecked_isa_dma, i);
3900 if (buffer == NULL) { 3900 if (buffer == NULL) {
3901 printk(KERN_ERR 3901 printk(KERN_ERR
3902 "st: Can't allocate new tape buffer. Device not attached.\n"); 3902 "st: Can't allocate new tape buffer. Device not attached.\n");
3903 goto out; 3903 goto out;
3904 } 3904 }
3905 3905
3906 disk = alloc_disk(1); 3906 disk = alloc_disk(1);
3907 if (!disk) { 3907 if (!disk) {
3908 printk(KERN_ERR "st: out of memory. Device not attached.\n"); 3908 printk(KERN_ERR "st: out of memory. Device not attached.\n");
3909 goto out_buffer_free; 3909 goto out_buffer_free;
3910 } 3910 }
3911 3911
3912 write_lock(&st_dev_arr_lock); 3912 write_lock(&st_dev_arr_lock);
3913 if (st_nr_dev >= st_dev_max) { 3913 if (st_nr_dev >= st_dev_max) {
3914 struct scsi_tape **tmp_da; 3914 struct scsi_tape **tmp_da;
3915 int tmp_dev_max; 3915 int tmp_dev_max;
3916 3916
3917 tmp_dev_max = max(st_nr_dev * 2, 8); 3917 tmp_dev_max = max(st_nr_dev * 2, 8);
3918 if (tmp_dev_max > ST_MAX_TAPES) 3918 if (tmp_dev_max > ST_MAX_TAPES)
3919 tmp_dev_max = ST_MAX_TAPES; 3919 tmp_dev_max = ST_MAX_TAPES;
3920 if (tmp_dev_max <= st_nr_dev) { 3920 if (tmp_dev_max <= st_nr_dev) {
3921 write_unlock(&st_dev_arr_lock); 3921 write_unlock(&st_dev_arr_lock);
3922 printk(KERN_ERR "st: Too many tape devices (max. %d).\n", 3922 printk(KERN_ERR "st: Too many tape devices (max. %d).\n",
3923 ST_MAX_TAPES); 3923 ST_MAX_TAPES);
3924 goto out_put_disk; 3924 goto out_put_disk;
3925 } 3925 }
3926 3926
3927 tmp_da = kmalloc(tmp_dev_max * sizeof(struct scsi_tape *), GFP_ATOMIC); 3927 tmp_da = kmalloc(tmp_dev_max * sizeof(struct scsi_tape *), GFP_ATOMIC);
3928 if (tmp_da == NULL) { 3928 if (tmp_da == NULL) {
3929 write_unlock(&st_dev_arr_lock); 3929 write_unlock(&st_dev_arr_lock);
3930 printk(KERN_ERR "st: Can't extend device array.\n"); 3930 printk(KERN_ERR "st: Can't extend device array.\n");
3931 goto out_put_disk; 3931 goto out_put_disk;
3932 } 3932 }
3933 3933
3934 memset(tmp_da, 0, tmp_dev_max * sizeof(struct scsi_tape *)); 3934 memset(tmp_da, 0, tmp_dev_max * sizeof(struct scsi_tape *));
3935 if (scsi_tapes != NULL) { 3935 if (scsi_tapes != NULL) {
3936 memcpy(tmp_da, scsi_tapes, 3936 memcpy(tmp_da, scsi_tapes,
3937 st_dev_max * sizeof(struct scsi_tape *)); 3937 st_dev_max * sizeof(struct scsi_tape *));
3938 kfree(scsi_tapes); 3938 kfree(scsi_tapes);
3939 } 3939 }
3940 scsi_tapes = tmp_da; 3940 scsi_tapes = tmp_da;
3941 3941
3942 st_dev_max = tmp_dev_max; 3942 st_dev_max = tmp_dev_max;
3943 } 3943 }
3944 3944
3945 for (i = 0; i < st_dev_max; i++) 3945 for (i = 0; i < st_dev_max; i++)
3946 if (scsi_tapes[i] == NULL) 3946 if (scsi_tapes[i] == NULL)
3947 break; 3947 break;
3948 if (i >= st_dev_max) 3948 if (i >= st_dev_max)
3949 panic("scsi_devices corrupt (st)"); 3949 panic("scsi_devices corrupt (st)");
3950 3950
3951 tpnt = kmalloc(sizeof(struct scsi_tape), GFP_ATOMIC); 3951 tpnt = kmalloc(sizeof(struct scsi_tape), GFP_ATOMIC);
3952 if (tpnt == NULL) { 3952 if (tpnt == NULL) {
3953 write_unlock(&st_dev_arr_lock); 3953 write_unlock(&st_dev_arr_lock);
3954 printk(KERN_ERR "st: Can't allocate device descriptor.\n"); 3954 printk(KERN_ERR "st: Can't allocate device descriptor.\n");
3955 goto out_put_disk; 3955 goto out_put_disk;
3956 } 3956 }
3957 memset(tpnt, 0, sizeof(struct scsi_tape)); 3957 memset(tpnt, 0, sizeof(struct scsi_tape));
3958 kref_init(&tpnt->kref); 3958 kref_init(&tpnt->kref);
3959 tpnt->disk = disk; 3959 tpnt->disk = disk;
3960 sprintf(disk->disk_name, "st%d", i); 3960 sprintf(disk->disk_name, "st%d", i);
3961 disk->private_data = &tpnt->driver; 3961 disk->private_data = &tpnt->driver;
3962 disk->queue = SDp->request_queue; 3962 disk->queue = SDp->request_queue;
3963 tpnt->driver = &st_template; 3963 tpnt->driver = &st_template;
3964 scsi_tapes[i] = tpnt; 3964 scsi_tapes[i] = tpnt;
3965 dev_num = i; 3965 dev_num = i;
3966 3966
3967 tpnt->device = SDp; 3967 tpnt->device = SDp;
3968 if (SDp->scsi_level <= 2) 3968 if (SDp->scsi_level <= 2)
3969 tpnt->tape_type = MT_ISSCSI1; 3969 tpnt->tape_type = MT_ISSCSI1;
3970 else 3970 else
3971 tpnt->tape_type = MT_ISSCSI2; 3971 tpnt->tape_type = MT_ISSCSI2;
3972 3972
3973 tpnt->buffer = buffer; 3973 tpnt->buffer = buffer;
3974 tpnt->buffer->last_SRpnt = NULL; 3974 tpnt->buffer->last_SRpnt = NULL;
3975 3975
3976 tpnt->inited = 0; 3976 tpnt->inited = 0;
3977 tpnt->dirty = 0; 3977 tpnt->dirty = 0;
3978 tpnt->in_use = 0; 3978 tpnt->in_use = 0;
3979 tpnt->drv_buffer = 1; /* Try buffering if no mode sense */ 3979 tpnt->drv_buffer = 1; /* Try buffering if no mode sense */
3980 tpnt->restr_dma = (SDp->host)->unchecked_isa_dma; 3980 tpnt->restr_dma = (SDp->host)->unchecked_isa_dma;
3981 tpnt->use_pf = (SDp->scsi_level >= SCSI_2); 3981 tpnt->use_pf = (SDp->scsi_level >= SCSI_2);
3982 tpnt->density = 0; 3982 tpnt->density = 0;
3983 tpnt->do_auto_lock = ST_AUTO_LOCK; 3983 tpnt->do_auto_lock = ST_AUTO_LOCK;
3984 tpnt->can_bsr = (SDp->scsi_level > 2 ? 1 : ST_IN_FILE_POS); /* BSR mandatory in SCSI3 */ 3984 tpnt->can_bsr = (SDp->scsi_level > 2 ? 1 : ST_IN_FILE_POS); /* BSR mandatory in SCSI3 */
3985 tpnt->can_partitions = 0; 3985 tpnt->can_partitions = 0;
3986 tpnt->two_fm = ST_TWO_FM; 3986 tpnt->two_fm = ST_TWO_FM;
3987 tpnt->fast_mteom = ST_FAST_MTEOM; 3987 tpnt->fast_mteom = ST_FAST_MTEOM;
3988 tpnt->scsi2_logical = ST_SCSI2LOGICAL; 3988 tpnt->scsi2_logical = ST_SCSI2LOGICAL;
3989 tpnt->immediate = ST_NOWAIT; 3989 tpnt->immediate = ST_NOWAIT;
3990 tpnt->default_drvbuffer = 0xff; /* No forced buffering */ 3990 tpnt->default_drvbuffer = 0xff; /* No forced buffering */
3991 tpnt->partition = 0; 3991 tpnt->partition = 0;
3992 tpnt->new_partition = 0; 3992 tpnt->new_partition = 0;
3993 tpnt->nbr_partitions = 0; 3993 tpnt->nbr_partitions = 0;
3994 tpnt->device->timeout = ST_TIMEOUT; 3994 tpnt->device->timeout = ST_TIMEOUT;
3995 tpnt->long_timeout = ST_LONG_TIMEOUT; 3995 tpnt->long_timeout = ST_LONG_TIMEOUT;
3996 tpnt->try_dio = try_direct_io && !SDp->host->unchecked_isa_dma; 3996 tpnt->try_dio = try_direct_io && !SDp->host->unchecked_isa_dma;
3997 3997
3998 for (i = 0; i < ST_NBR_MODES; i++) { 3998 for (i = 0; i < ST_NBR_MODES; i++) {
3999 STm = &(tpnt->modes[i]); 3999 STm = &(tpnt->modes[i]);
4000 STm->defined = 0; 4000 STm->defined = 0;
4001 STm->sysv = ST_SYSV; 4001 STm->sysv = ST_SYSV;
4002 STm->defaults_for_writes = 0; 4002 STm->defaults_for_writes = 0;
4003 STm->do_async_writes = ST_ASYNC_WRITES; 4003 STm->do_async_writes = ST_ASYNC_WRITES;
4004 STm->do_buffer_writes = ST_BUFFER_WRITES; 4004 STm->do_buffer_writes = ST_BUFFER_WRITES;
4005 STm->do_read_ahead = ST_READ_AHEAD; 4005 STm->do_read_ahead = ST_READ_AHEAD;
4006 STm->default_compression = ST_DONT_TOUCH; 4006 STm->default_compression = ST_DONT_TOUCH;
4007 STm->default_blksize = (-1); /* No forced size */ 4007 STm->default_blksize = (-1); /* No forced size */
4008 STm->default_density = (-1); /* No forced density */ 4008 STm->default_density = (-1); /* No forced density */
4009 } 4009 }
4010 4010
4011 for (i = 0; i < ST_NBR_PARTITIONS; i++) { 4011 for (i = 0; i < ST_NBR_PARTITIONS; i++) {
4012 STps = &(tpnt->ps[i]); 4012 STps = &(tpnt->ps[i]);
4013 STps->rw = ST_IDLE; 4013 STps->rw = ST_IDLE;
4014 STps->eof = ST_NOEOF; 4014 STps->eof = ST_NOEOF;
4015 STps->at_sm = 0; 4015 STps->at_sm = 0;
4016 STps->last_block_valid = 0; 4016 STps->last_block_valid = 0;
4017 STps->drv_block = (-1); 4017 STps->drv_block = (-1);
4018 STps->drv_file = (-1); 4018 STps->drv_file = (-1);
4019 } 4019 }
4020 4020
4021 tpnt->current_mode = 0; 4021 tpnt->current_mode = 0;
4022 tpnt->modes[0].defined = 1; 4022 tpnt->modes[0].defined = 1;
4023 4023
4024 tpnt->density_changed = tpnt->compression_changed = 4024 tpnt->density_changed = tpnt->compression_changed =
4025 tpnt->blksize_changed = 0; 4025 tpnt->blksize_changed = 0;
4026 init_MUTEX(&tpnt->lock); 4026 init_MUTEX(&tpnt->lock);
4027 4027
4028 st_nr_dev++; 4028 st_nr_dev++;
4029 write_unlock(&st_dev_arr_lock); 4029 write_unlock(&st_dev_arr_lock);
4030 4030
4031 for (mode = 0; mode < ST_NBR_MODES; ++mode) { 4031 for (mode = 0; mode < ST_NBR_MODES; ++mode) {
4032 STm = &(tpnt->modes[mode]); 4032 STm = &(tpnt->modes[mode]);
4033 for (j=0; j < 2; j++) { 4033 for (j=0; j < 2; j++) {
4034 cdev = cdev_alloc(); 4034 cdev = cdev_alloc();
4035 if (!cdev) { 4035 if (!cdev) {
4036 printk(KERN_ERR 4036 printk(KERN_ERR
4037 "st%d: out of memory. Device not attached.\n", 4037 "st%d: out of memory. Device not attached.\n",
4038 dev_num); 4038 dev_num);
4039 goto out_free_tape; 4039 goto out_free_tape;
4040 } 4040 }
4041 cdev->owner = THIS_MODULE; 4041 cdev->owner = THIS_MODULE;
4042 cdev->ops = &st_fops; 4042 cdev->ops = &st_fops;
4043 4043
4044 error = cdev_add(cdev, 4044 error = cdev_add(cdev,
4045 MKDEV(SCSI_TAPE_MAJOR, TAPE_MINOR(dev_num, mode, j)), 4045 MKDEV(SCSI_TAPE_MAJOR, TAPE_MINOR(dev_num, mode, j)),
4046 1); 4046 1);
4047 if (error) { 4047 if (error) {
4048 printk(KERN_ERR "st%d: Can't add %s-rewind mode %d\n", 4048 printk(KERN_ERR "st%d: Can't add %s-rewind mode %d\n",
4049 dev_num, j ? "non" : "auto", mode); 4049 dev_num, j ? "non" : "auto", mode);
4050 printk(KERN_ERR "st%d: Device not attached.\n", dev_num); 4050 printk(KERN_ERR "st%d: Device not attached.\n", dev_num);
4051 goto out_free_tape; 4051 goto out_free_tape;
4052 } 4052 }
4053 STm->cdevs[j] = cdev; 4053 STm->cdevs[j] = cdev;
4054 4054
4055 } 4055 }
4056 do_create_class_files(tpnt, dev_num, mode); 4056 do_create_class_files(tpnt, dev_num, mode);
4057 } 4057 }
4058 4058
4059 for (mode = 0; mode < ST_NBR_MODES; ++mode) { 4059 for (mode = 0; mode < ST_NBR_MODES; ++mode) {
4060 /* Make sure that the minor numbers corresponding to the four 4060 /* Make sure that the minor numbers corresponding to the four
4061 first modes always get the same names */ 4061 first modes always get the same names */
4062 i = mode << (4 - ST_NBR_MODE_BITS); 4062 i = mode << (4 - ST_NBR_MODE_BITS);
4063 /* Rewind entry */ 4063 /* Rewind entry */
4064 devfs_mk_cdev(MKDEV(SCSI_TAPE_MAJOR, TAPE_MINOR(dev_num, mode, 0)), 4064 devfs_mk_cdev(MKDEV(SCSI_TAPE_MAJOR, TAPE_MINOR(dev_num, mode, 0)),
4065 S_IFCHR | S_IRUGO | S_IWUGO, 4065 S_IFCHR | S_IRUGO | S_IWUGO,
4066 "%s/mt%s", SDp->devfs_name, st_formats[i]); 4066 "%s/mt%s", SDp->devfs_name, st_formats[i]);
4067 /* No-rewind entry */ 4067 /* No-rewind entry */
4068 devfs_mk_cdev(MKDEV(SCSI_TAPE_MAJOR, TAPE_MINOR(dev_num, mode, 1)), 4068 devfs_mk_cdev(MKDEV(SCSI_TAPE_MAJOR, TAPE_MINOR(dev_num, mode, 1)),
4069 S_IFCHR | S_IRUGO | S_IWUGO, 4069 S_IFCHR | S_IRUGO | S_IWUGO,
4070 "%s/mt%sn", SDp->devfs_name, st_formats[i]); 4070 "%s/mt%sn", SDp->devfs_name, st_formats[i]);
4071 } 4071 }
4072 disk->number = devfs_register_tape(SDp->devfs_name); 4072 disk->number = devfs_register_tape(SDp->devfs_name);
4073 4073
4074 sdev_printk(KERN_WARNING, SDp, 4074 sdev_printk(KERN_WARNING, SDp,
4075 "Attached scsi tape %s", tape_name(tpnt)); 4075 "Attached scsi tape %s", tape_name(tpnt));
4076 printk(KERN_WARNING "%s: try direct i/o: %s (alignment %d B)\n", 4076 printk(KERN_WARNING "%s: try direct i/o: %s (alignment %d B)\n",
4077 tape_name(tpnt), tpnt->try_dio ? "yes" : "no", 4077 tape_name(tpnt), tpnt->try_dio ? "yes" : "no",
4078 queue_dma_alignment(SDp->request_queue) + 1); 4078 queue_dma_alignment(SDp->request_queue) + 1);
4079 4079
4080 return 0; 4080 return 0;
4081 4081
4082 out_free_tape: 4082 out_free_tape:
4083 for (mode=0; mode < ST_NBR_MODES; mode++) { 4083 for (mode=0; mode < ST_NBR_MODES; mode++) {
4084 STm = &(tpnt->modes[mode]); 4084 STm = &(tpnt->modes[mode]);
4085 sysfs_remove_link(&tpnt->device->sdev_gendev.kobj, 4085 sysfs_remove_link(&tpnt->device->sdev_gendev.kobj,
4086 "tape"); 4086 "tape");
4087 for (j=0; j < 2; j++) { 4087 for (j=0; j < 2; j++) {
4088 if (STm->cdevs[j]) { 4088 if (STm->cdevs[j]) {
4089 if (cdev == STm->cdevs[j]) 4089 if (cdev == STm->cdevs[j])
4090 cdev = NULL; 4090 cdev = NULL;
4091 class_device_destroy(st_sysfs_class, 4091 class_device_destroy(st_sysfs_class,
4092 MKDEV(SCSI_TAPE_MAJOR, 4092 MKDEV(SCSI_TAPE_MAJOR,
4093 TAPE_MINOR(i, mode, j))); 4093 TAPE_MINOR(i, mode, j)));
4094 cdev_del(STm->cdevs[j]); 4094 cdev_del(STm->cdevs[j]);
4095 } 4095 }
4096 } 4096 }
4097 } 4097 }
4098 if (cdev) 4098 if (cdev)
4099 cdev_del(cdev); 4099 cdev_del(cdev);
4100 write_lock(&st_dev_arr_lock); 4100 write_lock(&st_dev_arr_lock);
4101 scsi_tapes[dev_num] = NULL; 4101 scsi_tapes[dev_num] = NULL;
4102 st_nr_dev--; 4102 st_nr_dev--;
4103 write_unlock(&st_dev_arr_lock); 4103 write_unlock(&st_dev_arr_lock);
4104 out_put_disk: 4104 out_put_disk:
4105 put_disk(disk); 4105 put_disk(disk);
4106 kfree(tpnt); 4106 kfree(tpnt);
4107 out_buffer_free: 4107 out_buffer_free:
4108 kfree(buffer); 4108 kfree(buffer);
4109 out: 4109 out:
4110 return -ENODEV; 4110 return -ENODEV;
4111 }; 4111 };
4112 4112
4113 4113
4114 static int st_remove(struct device *dev) 4114 static int st_remove(struct device *dev)
4115 { 4115 {
4116 struct scsi_device *SDp = to_scsi_device(dev); 4116 struct scsi_device *SDp = to_scsi_device(dev);
4117 struct scsi_tape *tpnt; 4117 struct scsi_tape *tpnt;
4118 int i, j, mode; 4118 int i, j, mode;
4119 4119
4120 write_lock(&st_dev_arr_lock); 4120 write_lock(&st_dev_arr_lock);
4121 for (i = 0; i < st_dev_max; i++) { 4121 for (i = 0; i < st_dev_max; i++) {
4122 tpnt = scsi_tapes[i]; 4122 tpnt = scsi_tapes[i];
4123 if (tpnt != NULL && tpnt->device == SDp) { 4123 if (tpnt != NULL && tpnt->device == SDp) {
4124 scsi_tapes[i] = NULL; 4124 scsi_tapes[i] = NULL;
4125 st_nr_dev--; 4125 st_nr_dev--;
4126 write_unlock(&st_dev_arr_lock); 4126 write_unlock(&st_dev_arr_lock);
4127 devfs_unregister_tape(tpnt->disk->number); 4127 devfs_unregister_tape(tpnt->disk->number);
4128 sysfs_remove_link(&tpnt->device->sdev_gendev.kobj, 4128 sysfs_remove_link(&tpnt->device->sdev_gendev.kobj,
4129 "tape"); 4129 "tape");
4130 for (mode = 0; mode < ST_NBR_MODES; ++mode) { 4130 for (mode = 0; mode < ST_NBR_MODES; ++mode) {
4131 j = mode << (4 - ST_NBR_MODE_BITS); 4131 j = mode << (4 - ST_NBR_MODE_BITS);
4132 devfs_remove("%s/mt%s", SDp->devfs_name, st_formats[j]); 4132 devfs_remove("%s/mt%s", SDp->devfs_name, st_formats[j]);
4133 devfs_remove("%s/mt%sn", SDp->devfs_name, st_formats[j]); 4133 devfs_remove("%s/mt%sn", SDp->devfs_name, st_formats[j]);
4134 for (j=0; j < 2; j++) { 4134 for (j=0; j < 2; j++) {
4135 class_device_destroy(st_sysfs_class, 4135 class_device_destroy(st_sysfs_class,
4136 MKDEV(SCSI_TAPE_MAJOR, 4136 MKDEV(SCSI_TAPE_MAJOR,
4137 TAPE_MINOR(i, mode, j))); 4137 TAPE_MINOR(i, mode, j)));
4138 cdev_del(tpnt->modes[mode].cdevs[j]); 4138 cdev_del(tpnt->modes[mode].cdevs[j]);
4139 tpnt->modes[mode].cdevs[j] = NULL; 4139 tpnt->modes[mode].cdevs[j] = NULL;
4140 } 4140 }
4141 } 4141 }
4142 4142
4143 mutex_lock(&st_ref_mutex); 4143 mutex_lock(&st_ref_mutex);
4144 kref_put(&tpnt->kref, scsi_tape_release); 4144 kref_put(&tpnt->kref, scsi_tape_release);
4145 mutex_unlock(&st_ref_mutex); 4145 mutex_unlock(&st_ref_mutex);
4146 return 0; 4146 return 0;
4147 } 4147 }
4148 } 4148 }
4149 4149
4150 write_unlock(&st_dev_arr_lock); 4150 write_unlock(&st_dev_arr_lock);
4151 return 0; 4151 return 0;
4152 } 4152 }
4153 4153
4154 /** 4154 /**
4155 * scsi_tape_release - Called to free the Scsi_Tape structure 4155 * scsi_tape_release - Called to free the Scsi_Tape structure
4156 * @kref: pointer to embedded kref 4156 * @kref: pointer to embedded kref
4157 * 4157 *
4158 * st_ref_mutex must be held entering this routine. Because it is 4158 * st_ref_mutex must be held entering this routine. Because it is
4159 * called on last put, you should always use the scsi_tape_get() 4159 * called on last put, you should always use the scsi_tape_get()
4160 * scsi_tape_put() helpers which manipulate the semaphore directly 4160 * scsi_tape_put() helpers which manipulate the semaphore directly
4161 * and never do a direct kref_put(). 4161 * and never do a direct kref_put().
4162 **/ 4162 **/
4163 static void scsi_tape_release(struct kref *kref) 4163 static void scsi_tape_release(struct kref *kref)
4164 { 4164 {
4165 struct scsi_tape *tpnt = to_scsi_tape(kref); 4165 struct scsi_tape *tpnt = to_scsi_tape(kref);
4166 struct gendisk *disk = tpnt->disk; 4166 struct gendisk *disk = tpnt->disk;
4167 4167
4168 tpnt->device = NULL; 4168 tpnt->device = NULL;
4169 4169
4170 if (tpnt->buffer) { 4170 if (tpnt->buffer) {
4171 tpnt->buffer->orig_frp_segs = 0; 4171 tpnt->buffer->orig_frp_segs = 0;
4172 normalize_buffer(tpnt->buffer); 4172 normalize_buffer(tpnt->buffer);
4173 kfree(tpnt->buffer); 4173 kfree(tpnt->buffer);
4174 } 4174 }
4175 4175
4176 disk->private_data = NULL; 4176 disk->private_data = NULL;
4177 put_disk(disk); 4177 put_disk(disk);
4178 kfree(tpnt); 4178 kfree(tpnt);
4179 return; 4179 return;
4180 } 4180 }
4181 4181
4182 static int __init init_st(void) 4182 static int __init init_st(void)
4183 { 4183 {
4184 validate_options(); 4184 validate_options();
4185 4185
4186 printk(KERN_INFO 4186 printk(KERN_INFO
4187 "st: Version %s, fixed bufsize %d, s/g segs %d\n", 4187 "st: Version %s, fixed bufsize %d, s/g segs %d\n",
4188 verstr, st_fixed_buffer_size, st_max_sg_segs); 4188 verstr, st_fixed_buffer_size, st_max_sg_segs);
4189 4189
4190 st_sysfs_class = class_create(THIS_MODULE, "scsi_tape"); 4190 st_sysfs_class = class_create(THIS_MODULE, "scsi_tape");
4191 if (IS_ERR(st_sysfs_class)) { 4191 if (IS_ERR(st_sysfs_class)) {
4192 st_sysfs_class = NULL; 4192 st_sysfs_class = NULL;
4193 printk(KERN_ERR "Unable create sysfs class for SCSI tapes\n"); 4193 printk(KERN_ERR "Unable create sysfs class for SCSI tapes\n");
4194 return 1; 4194 return 1;
4195 } 4195 }
4196 4196
4197 if (!register_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0), 4197 if (!register_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0),
4198 ST_MAX_TAPE_ENTRIES, "st")) { 4198 ST_MAX_TAPE_ENTRIES, "st")) {
4199 if (scsi_register_driver(&st_template.gendrv) == 0) { 4199 if (scsi_register_driver(&st_template.gendrv) == 0) {
4200 do_create_driverfs_files(); 4200 do_create_driverfs_files();
4201 return 0; 4201 return 0;
4202 } 4202 }
4203 unregister_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0), 4203 unregister_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0),
4204 ST_MAX_TAPE_ENTRIES); 4204 ST_MAX_TAPE_ENTRIES);
4205 } 4205 }
4206 class_destroy(st_sysfs_class); 4206 class_destroy(st_sysfs_class);
4207 4207
4208 printk(KERN_ERR "Unable to get major %d for SCSI tapes\n", SCSI_TAPE_MAJOR); 4208 printk(KERN_ERR "Unable to get major %d for SCSI tapes\n", SCSI_TAPE_MAJOR);
4209 return 1; 4209 return 1;
4210 } 4210 }
4211 4211
4212 static void __exit exit_st(void) 4212 static void __exit exit_st(void)
4213 { 4213 {
4214 do_remove_driverfs_files(); 4214 do_remove_driverfs_files();
4215 scsi_unregister_driver(&st_template.gendrv); 4215 scsi_unregister_driver(&st_template.gendrv);
4216 unregister_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0), 4216 unregister_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0),
4217 ST_MAX_TAPE_ENTRIES); 4217 ST_MAX_TAPE_ENTRIES);
4218 class_destroy(st_sysfs_class); 4218 class_destroy(st_sysfs_class);
4219 kfree(scsi_tapes); 4219 kfree(scsi_tapes);
4220 printk(KERN_INFO "st: Unloaded.\n"); 4220 printk(KERN_INFO "st: Unloaded.\n");
4221 } 4221 }
4222 4222
4223 module_init(init_st); 4223 module_init(init_st);
4224 module_exit(exit_st); 4224 module_exit(exit_st);
4225 4225
4226 4226
4227 /* The sysfs driver interface. Read-only at the moment */ 4227 /* The sysfs driver interface. Read-only at the moment */
4228 static ssize_t st_try_direct_io_show(struct device_driver *ddp, char *buf) 4228 static ssize_t st_try_direct_io_show(struct device_driver *ddp, char *buf)
4229 { 4229 {
4230 return snprintf(buf, PAGE_SIZE, "%d\n", try_direct_io); 4230 return snprintf(buf, PAGE_SIZE, "%d\n", try_direct_io);
4231 } 4231 }
4232 static DRIVER_ATTR(try_direct_io, S_IRUGO, st_try_direct_io_show, NULL); 4232 static DRIVER_ATTR(try_direct_io, S_IRUGO, st_try_direct_io_show, NULL);
4233 4233
4234 static ssize_t st_fixed_buffer_size_show(struct device_driver *ddp, char *buf) 4234 static ssize_t st_fixed_buffer_size_show(struct device_driver *ddp, char *buf)
4235 { 4235 {
4236 return snprintf(buf, PAGE_SIZE, "%d\n", st_fixed_buffer_size); 4236 return snprintf(buf, PAGE_SIZE, "%d\n", st_fixed_buffer_size);
4237 } 4237 }
4238 static DRIVER_ATTR(fixed_buffer_size, S_IRUGO, st_fixed_buffer_size_show, NULL); 4238 static DRIVER_ATTR(fixed_buffer_size, S_IRUGO, st_fixed_buffer_size_show, NULL);
4239 4239
4240 static ssize_t st_max_sg_segs_show(struct device_driver *ddp, char *buf) 4240 static ssize_t st_max_sg_segs_show(struct device_driver *ddp, char *buf)
4241 { 4241 {
4242 return snprintf(buf, PAGE_SIZE, "%d\n", st_max_sg_segs); 4242 return snprintf(buf, PAGE_SIZE, "%d\n", st_max_sg_segs);
4243 } 4243 }
4244 static DRIVER_ATTR(max_sg_segs, S_IRUGO, st_max_sg_segs_show, NULL); 4244 static DRIVER_ATTR(max_sg_segs, S_IRUGO, st_max_sg_segs_show, NULL);
4245 4245
4246 static ssize_t st_version_show(struct device_driver *ddd, char *buf) 4246 static ssize_t st_version_show(struct device_driver *ddd, char *buf)
4247 { 4247 {
4248 return snprintf(buf, PAGE_SIZE, "[%s]\n", verstr); 4248 return snprintf(buf, PAGE_SIZE, "[%s]\n", verstr);
4249 } 4249 }
4250 static DRIVER_ATTR(version, S_IRUGO, st_version_show, NULL); 4250 static DRIVER_ATTR(version, S_IRUGO, st_version_show, NULL);
4251 4251
4252 static void do_create_driverfs_files(void) 4252 static void do_create_driverfs_files(void)
4253 { 4253 {
4254 struct device_driver *driverfs = &st_template.gendrv; 4254 struct device_driver *driverfs = &st_template.gendrv;
4255 4255
4256 driver_create_file(driverfs, &driver_attr_try_direct_io); 4256 driver_create_file(driverfs, &driver_attr_try_direct_io);
4257 driver_create_file(driverfs, &driver_attr_fixed_buffer_size); 4257 driver_create_file(driverfs, &driver_attr_fixed_buffer_size);
4258 driver_create_file(driverfs, &driver_attr_max_sg_segs); 4258 driver_create_file(driverfs, &driver_attr_max_sg_segs);
4259 driver_create_file(driverfs, &driver_attr_version); 4259 driver_create_file(driverfs, &driver_attr_version);
4260 } 4260 }
4261 4261
4262 static void do_remove_driverfs_files(void) 4262 static void do_remove_driverfs_files(void)
4263 { 4263 {
4264 struct device_driver *driverfs = &st_template.gendrv; 4264 struct device_driver *driverfs = &st_template.gendrv;
4265 4265
4266 driver_remove_file(driverfs, &driver_attr_version); 4266 driver_remove_file(driverfs, &driver_attr_version);
4267 driver_remove_file(driverfs, &driver_attr_max_sg_segs); 4267 driver_remove_file(driverfs, &driver_attr_max_sg_segs);
4268 driver_remove_file(driverfs, &driver_attr_fixed_buffer_size); 4268 driver_remove_file(driverfs, &driver_attr_fixed_buffer_size);
4269 driver_remove_file(driverfs, &driver_attr_try_direct_io); 4269 driver_remove_file(driverfs, &driver_attr_try_direct_io);
4270 } 4270 }
4271 4271
4272 4272
4273 /* The sysfs simple class interface */ 4273 /* The sysfs simple class interface */
4274 static ssize_t st_defined_show(struct class_device *class_dev, char *buf) 4274 static ssize_t st_defined_show(struct class_device *class_dev, char *buf)
4275 { 4275 {
4276 struct st_modedef *STm = (struct st_modedef *)class_get_devdata(class_dev); 4276 struct st_modedef *STm = (struct st_modedef *)class_get_devdata(class_dev);
4277 ssize_t l = 0; 4277 ssize_t l = 0;
4278 4278
4279 l = snprintf(buf, PAGE_SIZE, "%d\n", STm->defined); 4279 l = snprintf(buf, PAGE_SIZE, "%d\n", STm->defined);
4280 return l; 4280 return l;
4281 } 4281 }
4282 4282
4283 CLASS_DEVICE_ATTR(defined, S_IRUGO, st_defined_show, NULL); 4283 CLASS_DEVICE_ATTR(defined, S_IRUGO, st_defined_show, NULL);
4284 4284
4285 static ssize_t st_defblk_show(struct class_device *class_dev, char *buf) 4285 static ssize_t st_defblk_show(struct class_device *class_dev, char *buf)
4286 { 4286 {
4287 struct st_modedef *STm = (struct st_modedef *)class_get_devdata(class_dev); 4287 struct st_modedef *STm = (struct st_modedef *)class_get_devdata(class_dev);
4288 ssize_t l = 0; 4288 ssize_t l = 0;
4289 4289
4290 l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_blksize); 4290 l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_blksize);
4291 return l; 4291 return l;
4292 } 4292 }
4293 4293
4294 CLASS_DEVICE_ATTR(default_blksize, S_IRUGO, st_defblk_show, NULL); 4294 CLASS_DEVICE_ATTR(default_blksize, S_IRUGO, st_defblk_show, NULL);
4295 4295
4296 static ssize_t st_defdensity_show(struct class_device *class_dev, char *buf) 4296 static ssize_t st_defdensity_show(struct class_device *class_dev, char *buf)
4297 { 4297 {
4298 struct st_modedef *STm = (struct st_modedef *)class_get_devdata(class_dev); 4298 struct st_modedef *STm = (struct st_modedef *)class_get_devdata(class_dev);
4299 ssize_t l = 0; 4299 ssize_t l = 0;
4300 char *fmt; 4300 char *fmt;
4301 4301
4302 fmt = STm->default_density >= 0 ? "0x%02x\n" : "%d\n"; 4302 fmt = STm->default_density >= 0 ? "0x%02x\n" : "%d\n";
4303 l = snprintf(buf, PAGE_SIZE, fmt, STm->default_density); 4303 l = snprintf(buf, PAGE_SIZE, fmt, STm->default_density);
4304 return l; 4304 return l;
4305 } 4305 }
4306 4306
4307 CLASS_DEVICE_ATTR(default_density, S_IRUGO, st_defdensity_show, NULL); 4307 CLASS_DEVICE_ATTR(default_density, S_IRUGO, st_defdensity_show, NULL);
4308 4308
4309 static ssize_t st_defcompression_show(struct class_device *class_dev, char *buf) 4309 static ssize_t st_defcompression_show(struct class_device *class_dev, char *buf)
4310 { 4310 {
4311 struct st_modedef *STm = (struct st_modedef *)class_get_devdata(class_dev); 4311 struct st_modedef *STm = (struct st_modedef *)class_get_devdata(class_dev);
4312 ssize_t l = 0; 4312 ssize_t l = 0;
4313 4313
4314 l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_compression - 1); 4314 l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_compression - 1);
4315 return l; 4315 return l;
4316 } 4316 }
4317 4317
4318 CLASS_DEVICE_ATTR(default_compression, S_IRUGO, st_defcompression_show, NULL); 4318 CLASS_DEVICE_ATTR(default_compression, S_IRUGO, st_defcompression_show, NULL);
4319 4319
/*
 * Create the sysfs class devices and attribute files for one mode of a
 * tape drive: one class device each for the rewinding and non-rewinding
 * minor of @mode, plus the per-mode attribute files.  For mode 0 of the
 * rewinding device a "tape" symlink is also created from the SCSI
 * device's sysfs directory.  Errors after the first class device are
 * reported but the already-created entries are kept (goto out).
 */
static void do_create_class_files(struct scsi_tape *STp, int dev_num, int mode)
{
	int i, rew, error;
	char name[10];
	struct class_device *st_class_member;

	/* Nothing to do if class registration failed at module init. */
	if (!st_sysfs_class)
		return;

	/* rew == 0: rewinding device; rew == 1: "n"-prefixed non-rewinding. */
	for (rew=0; rew < 2; rew++) {
		/* Make sure that the minor numbers corresponding to the four
		   first modes always get the same names */
		i = mode << (4 - ST_NBR_MODE_BITS);
		snprintf(name, 10, "%s%s%s", rew ? "n" : "",
			 STp->disk->disk_name, st_formats[i]);
		st_class_member =
			class_device_create(st_sysfs_class, NULL,
					    MKDEV(SCSI_TAPE_MAJOR,
						  TAPE_MINOR(dev_num, mode, rew)),
					    &STp->device->sdev_gendev, "%s", name);
		if (IS_ERR(st_class_member)) {
			printk(KERN_WARNING "st%d: class_device_create failed\n",
			       dev_num);
			goto out;
		}
		/* Attribute show functions retrieve the mode via devdata. */
		class_set_devdata(st_class_member, &STp->modes[mode]);

		class_device_create_file(st_class_member,
					 &class_device_attr_defined);
		class_device_create_file(st_class_member,
					 &class_device_attr_default_blksize);
		class_device_create_file(st_class_member,
					 &class_device_attr_default_density);
		class_device_create_file(st_class_member,
					 &class_device_attr_default_compression);
		if (mode == 0 && rew == 0) {
			error = sysfs_create_link(&STp->device->sdev_gendev.kobj,
						  &st_class_member->kobj,
						  "tape");
			if (error) {
				printk(KERN_ERR
				       "st%d: Can't create sysfs link from SCSI device.\n",
				       dev_num);
			}
		}
	}
 out:
	return;
}
4369 4369
4370 /* The following functions may be useful for a larger audience. */ 4370 /* The following functions may be useful for a larger audience. */
/*
 * Pin the user-space buffer [uaddr, uaddr + count) in memory and build a
 * scatter/gather list over its pages.
 *
 * @sgl:       scatterlist array to fill
 * @max_pages: capacity of @sgl (also used to size the temporary page array)
 * @uaddr:     user-space start address of the buffer
 * @count:     buffer length in bytes
 * @rw:        READ means data flows from the drive into the user buffer,
 *             so the pages are pinned writable
 *
 * Returns the number of pages mapped (> 0), 0 for a zero-length request
 * or a partial mapping (already-pinned pages are released first), or a
 * negative errno on bad parameters / allocation failure.
 */
static int sgl_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
			      unsigned long uaddr, size_t count, int rw)
{
	unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = uaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	int res, i, j;
	struct page **pages;

	/* User attempted Overflow! */
	if ((uaddr + count) < uaddr)
		return -EINVAL;

	/* Too big */
        if (nr_pages > max_pages)
		return -ENOMEM;

	/* Hmm? */
	if (count == 0)
		return 0;

	if ((pages = kmalloc(max_pages * sizeof(*pages), GFP_KERNEL)) == NULL)
		return -ENOMEM;

	/* Try to fault in all of the necessary pages */
	down_read(&current->mm->mmap_sem);
        /* rw==READ means read from drive, write into memory area */
	res = get_user_pages(
		current,
		current->mm,
		uaddr,
		nr_pages,
		rw == READ,
		0, /* don't force */
		pages,
		NULL);
	up_read(&current->mm->mmap_sem);

	/* Errors and no page mapped should return here */
	if (res < nr_pages)
		goto out_unmap;

        for (i=0; i < nr_pages; i++) {
                /* FIXME: flush superflous for rw==READ,
                 * probably wrong function for rw==WRITE
                 */
		flush_dcache_page(pages[i]);
        }

	/* Populate the scatter/gather list */
	sgl[0].page = pages[0];
	sgl[0].offset = uaddr & ~PAGE_MASK;
	if (nr_pages > 1) {
		/* First entry covers up to the end of its page; the last
		   entry may be short, everything in between is page-sized. */
		sgl[0].length = PAGE_SIZE - sgl[0].offset;
		count -= sgl[0].length;
		for (i=1; i < nr_pages ; i++) {
			sgl[i].offset = 0;
			sgl[i].page = pages[i];
			sgl[i].length = count < PAGE_SIZE ? count : PAGE_SIZE;
			count -= PAGE_SIZE;
		}
	}
	else {
		sgl[0].length = count;
	}

	kfree(pages);
	return nr_pages;

 out_unmap:
	/* Release whatever get_user_pages did manage to pin. */
	if (res > 0) {
		for (j=0; j < res; j++)
			page_cache_release(pages[j]);
		res = 0;
	}
	kfree(pages);
	return res;
}
4449 4449
4450 4450
4451 /* And unmap them... */ 4451 /* And unmap them... */
4452 static int sgl_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages, 4452 static int sgl_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages,
4453 int dirtied) 4453 int dirtied)
4454 { 4454 {
4455 int i; 4455 int i;
4456 4456
4457 for (i=0; i < nr_pages; i++) { 4457 for (i=0; i < nr_pages; i++) {
4458 struct page *page = sgl[i].page; 4458 struct page *page = sgl[i].page;
4459 4459
4460 if (dirtied) 4460 if (dirtied)
4461 SetPageDirty(page); 4461 SetPageDirty(page);
4462 /* FIXME: cache flush missing for rw==READ 4462 /* FIXME: cache flush missing for rw==READ
4463 * FIXME: call the correct reference counting function 4463 * FIXME: call the correct reference counting function
4464 */ 4464 */
4465 page_cache_release(page); 4465 page_cache_release(page);
4466 } 4466 }
4467 4467
4468 return 0; 4468 return 0;
4469 } 4469 }
4470 4470
include/scsi/scsi_device.h
1 #ifndef _SCSI_SCSI_DEVICE_H 1 #ifndef _SCSI_SCSI_DEVICE_H
2 #define _SCSI_SCSI_DEVICE_H 2 #define _SCSI_SCSI_DEVICE_H
3 3
4 #include <linux/device.h> 4 #include <linux/device.h>
5 #include <linux/list.h> 5 #include <linux/list.h>
6 #include <linux/spinlock.h> 6 #include <linux/spinlock.h>
7 #include <asm/atomic.h> 7 #include <asm/atomic.h>
8 8
9 struct request_queue; 9 struct request_queue;
10 struct scsi_cmnd; 10 struct scsi_cmnd;
11 struct scsi_lun; 11 struct scsi_lun;
12 struct scsi_sense_hdr; 12 struct scsi_sense_hdr;
13 13
/*
 * Parsed mode-parameter header, filled in by scsi_mode_sense()
 * (declared below in this header).
 */
struct scsi_mode_data {
	__u32	length;
	__u16	block_descriptor_length;
	__u8	medium_type;
	__u8	device_specific;
	__u8	header_length;
	__u8	longlba:1;
};
22 22
/*
 * sdev state: If you alter this, you also need to alter scsi_sysfs.c
 * (for the ascii descriptions) and the state model enforcer:
 * scsi_lib:scsi_device_set_state().
 */
enum scsi_device_state {
	SDEV_CREATED = 1,	/* device created but not added to sysfs
				 * Only internal commands allowed (for inq) */
	SDEV_RUNNING,		/* device properly configured
				 * All commands allowed */
	SDEV_CANCEL,		/* beginning to delete device
				 * Only error handler commands allowed */
	SDEV_DEL,		/* device deleted 
				 * no commands allowed */
	SDEV_QUIESCE,		/* Device quiescent.  No block commands
				 * will be accepted, only specials (which
				 * originate in the mid-layer) */
	SDEV_OFFLINE,		/* Device offlined (by error handling or
				 * user request */
	SDEV_BLOCK,		/* Device blocked by scsi lld.  No scsi 
				 * commands from user or midlayer should be issued
				 * to the scsi lld. */
};
46 46
/*
 * Mid-layer representation of a single SCSI logical unit.  Locking of the
 * individual fields is documented inline; the bitfield flags are mostly
 * set once during scan/slave configuration.
 */
struct scsi_device {
	struct Scsi_Host *host;
	struct request_queue *request_queue;

	/* the next two are protected by the host->host_lock */
	struct list_head    siblings;   /* list of all devices on this host */
	struct list_head    same_target_siblings; /* just the devices sharing same target id */

	/* this is now protected by the request_queue->queue_lock */
	unsigned int device_busy;	/* commands actually active on
					 * low-level. protected by queue_lock. */
	spinlock_t list_lock;
	struct list_head cmd_list;	/* queue of in use SCSI Command structures */
	struct list_head starved_entry;
	struct scsi_cmnd *current_cmnd;	/* currently active command */
	unsigned short queue_depth;	/* How deep of a queue we want */
	unsigned short last_queue_full_depth; /* These two are used by */
	unsigned short last_queue_full_count; /* scsi_track_queue_full() */
	unsigned long last_queue_full_time;/* don't let QUEUE_FULLs on the same
					   jiffie count on our counter, they
					   could all be from the same event. */

	unsigned int id, lun, channel;	/* SCSI address of this unit */

	unsigned int manufacturer;	/* Manufacturer of device, for using 
					 * vendor-specific cmd's */
	unsigned sector_size;	/* size in bytes */

	void *hostdata;		/* available to low-level driver */
	char devfs_name[256];	/* devfs junk */
	char type;
	char scsi_level;
	char inq_periph_qual;	/* PQ from INQUIRY data */	
	unsigned char inquiry_len;	/* valid bytes in 'inquiry' */
	unsigned char * inquiry;	/* INQUIRY response data */
	const char * vendor;		/* [back_compat] point into 'inquiry' ... */
	const char * model;		/* ... after scan; point to static string */
	const char * rev;		/* ... "nullnullnullnull" before scan */
	unsigned char current_tag;	/* current tag */
	struct scsi_target      *sdev_target;   /* used only for single_lun */

	unsigned int	sdev_bflags; /* black/white flags as also found in
				 * scsi_devinfo.[hc]. For now used only to
				 * pass settings from slave_alloc to scsi
				 * core. */
	unsigned writeable:1;
	unsigned removable:1;
	unsigned changed:1;	/* Data invalid due to media change */
	unsigned busy:1;	/* Used to prevent races */
	unsigned lockable:1;	/* Able to prevent media removal */
	unsigned locked:1;      /* Media removal disabled */
	unsigned borken:1;	/* Tell the Seagate driver to be 
				 * painfully slow on this device */
	unsigned disconnect:1;	/* can disconnect */
	unsigned soft_reset:1;	/* Uses soft reset option */
	unsigned sdtr:1;	/* Device supports SDTR messages */
	unsigned wdtr:1;	/* Device supports WDTR messages */
	unsigned ppr:1;		/* Device supports PPR messages */
	unsigned tagged_supported:1;	/* Supports SCSI-II tagged queuing */
	unsigned simple_tags:1;	/* simple queue tag messages are enabled */
	unsigned ordered_tags:1;/* ordered queue tag messages are enabled */
	unsigned single_lun:1;	/* Indicates we should only allow I/O to
				 * one of the luns for the device at a 
				 * time. */
	unsigned was_reset:1;	/* There was a bus reset on the bus for 
				 * this device */
	unsigned expecting_cc_ua:1; /* Expecting a CHECK_CONDITION/UNIT_ATTN
				     * because we did a bus reset. */
	unsigned use_10_for_rw:1; /* first try 10-byte read / write */
	unsigned use_10_for_ms:1; /* first try 10-byte mode sense/select */
	unsigned skip_ms_page_8:1;	/* do not use MODE SENSE page 0x08 */
	unsigned skip_ms_page_3f:1;	/* do not use MODE SENSE page 0x3f */
	unsigned use_192_bytes_for_3f:1; /* ask for 192 bytes from page 0x3f */
	unsigned no_start_on_add:1;	/* do not issue start on add */
	unsigned allow_restart:1; /* issue START_UNIT in error handler */
	unsigned no_uld_attach:1; /* disable connecting to upper level drivers */
	unsigned select_no_atn:1;
	unsigned fix_capacity:1;	/* READ_CAPACITY is too high by 1 */
	unsigned retry_hwerror:1;	/* Retry HARDWARE_ERROR */

	unsigned int device_blocked;	/* Device returned QUEUE_FULL. */

	unsigned int max_device_blocked; /* what device_blocked counts down from  */
#define SCSI_DEFAULT_DEVICE_BLOCKED	3

	/* I/O statistics counters */
	atomic_t iorequest_cnt;
	atomic_t iodone_cnt;
	atomic_t ioerr_cnt;

	int timeout;	/* NOTE(review): presumably the default command
			 * timeout in jiffies -- confirm against users */

	struct device		sdev_gendev;
	struct class_device	sdev_classdev;

	enum scsi_device_state sdev_state;
	unsigned long		sdev_data[0];	/* must stay last: transport data */
} __attribute__((aligned(sizeof(unsigned long))));
/* Convert an embedded struct device / class_device back to its scsi_device. */
#define	to_scsi_device(d)	\
	container_of(d, struct scsi_device, sdev_gendev)
#define	class_to_sdev(d)	\
	container_of(d, struct scsi_device, sdev_classdev)
#define transport_class_to_sdev(class_dev) \
	to_scsi_device(class_dev->dev)

/* printk helpers that prefix messages with the device identity. */
#define sdev_printk(prefix, sdev, fmt, a...)	\
	dev_printk(prefix, &(sdev)->sdev_gendev, fmt, ##a)

#define scmd_printk(prefix, scmd, fmt, a...)	\
	dev_printk(prefix, &(scmd)->device->sdev_gendev, fmt, ##a)
156 156
/*
 * scsi_target: representation of a scsi target, for now, this is only
 * used for single_lun devices. If no one has active IO to the target,
 * starget_sdev_user is NULL, else it points to the active sdev.
 */
struct scsi_target {
	struct scsi_device	*starget_sdev_user;
	struct list_head	siblings;
	struct list_head	devices;
	struct device		dev;
	unsigned int		reap_ref; /* protected by the host lock */
	unsigned int		channel;
	unsigned int		id; /* target id ... replace
				     * scsi_device.id eventually */
	unsigned long		create:1; /* signal that it needs to be added */
	char			scsi_level;
	void 			*hostdata; /* available to low-level driver */
	unsigned long		starget_data[0]; /* for the transport */
	/* starget_data must be the last element!!!! */
} __attribute__((aligned(sizeof(unsigned long))));

#define to_scsi_target(d)	container_of(d, struct scsi_target, dev)
/* Return the target a device hangs off (the device's gendev parent). */
static inline struct scsi_target *scsi_target(struct scsi_device *sdev)
{
	return to_scsi_target(sdev->sdev_gendev.parent);
}
#define transport_class_to_starget(class_dev) \
	to_scsi_target(class_dev->dev)

/* printk helper that prefixes messages with the target identity. */
#define starget_printk(prefix, starget, fmt, a...)	\
	dev_printk(prefix, &(starget)->dev, fmt, ##a)
188 188
189 extern struct scsi_device *__scsi_add_device(struct Scsi_Host *, 189 extern struct scsi_device *__scsi_add_device(struct Scsi_Host *,
190 uint, uint, uint, void *hostdata); 190 uint, uint, uint, void *hostdata);
191 extern int scsi_add_device(struct Scsi_Host *host, uint channel, 191 extern int scsi_add_device(struct Scsi_Host *host, uint channel,
192 uint target, uint lun); 192 uint target, uint lun);
193 extern void scsi_remove_device(struct scsi_device *); 193 extern void scsi_remove_device(struct scsi_device *);
194 extern int scsi_device_cancel(struct scsi_device *, int); 194 extern int scsi_device_cancel(struct scsi_device *, int);
195 195
196 extern int scsi_device_get(struct scsi_device *); 196 extern int scsi_device_get(struct scsi_device *);
197 extern void scsi_device_put(struct scsi_device *); 197 extern void scsi_device_put(struct scsi_device *);
198 extern struct scsi_device *scsi_device_lookup(struct Scsi_Host *, 198 extern struct scsi_device *scsi_device_lookup(struct Scsi_Host *,
199 uint, uint, uint); 199 uint, uint, uint);
200 extern struct scsi_device *__scsi_device_lookup(struct Scsi_Host *, 200 extern struct scsi_device *__scsi_device_lookup(struct Scsi_Host *,
201 uint, uint, uint); 201 uint, uint, uint);
202 extern struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *, 202 extern struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *,
203 uint); 203 uint);
204 extern struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *, 204 extern struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *,
205 uint); 205 uint);
206 extern void starget_for_each_device(struct scsi_target *, void *, 206 extern void starget_for_each_device(struct scsi_target *, void *,
207 void (*fn)(struct scsi_device *, void *)); 207 void (*fn)(struct scsi_device *, void *));
208 208
209 /* only exposed to implement shost_for_each_device */ 209 /* only exposed to implement shost_for_each_device */
210 extern struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *, 210 extern struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *,
211 struct scsi_device *); 211 struct scsi_device *);
212 212
213 /** 213 /**
214 * shost_for_each_device - iterate over all devices of a host 214 * shost_for_each_device - iterate over all devices of a host
215 * @sdev: iterator 215 * @sdev: iterator
216 * @host: host whiches devices we want to iterate over 216 * @host: host whiches devices we want to iterate over
217 * 217 *
218 * This traverses over each devices of @shost. The devices have 218 * This traverses over each devices of @shost. The devices have
219 * a reference that must be released by scsi_host_put when breaking 219 * a reference that must be released by scsi_host_put when breaking
220 * out of the loop. 220 * out of the loop.
221 */ 221 */
222 #define shost_for_each_device(sdev, shost) \ 222 #define shost_for_each_device(sdev, shost) \
223 for ((sdev) = __scsi_iterate_devices((shost), NULL); \ 223 for ((sdev) = __scsi_iterate_devices((shost), NULL); \
224 (sdev); \ 224 (sdev); \
225 (sdev) = __scsi_iterate_devices((shost), (sdev))) 225 (sdev) = __scsi_iterate_devices((shost), (sdev)))
226 226
227 /** 227 /**
228 * __shost_for_each_device - iterate over all devices of a host (UNLOCKED) 228 * __shost_for_each_device - iterate over all devices of a host (UNLOCKED)
229 * @sdev: iterator 229 * @sdev: iterator
230 * @host: host whiches devices we want to iterate over 230 * @host: host whiches devices we want to iterate over
231 * 231 *
232 * This traverses over each devices of @shost. It does _not_ take a 232 * This traverses over each devices of @shost. It does _not_ take a
233 * reference on the scsi_device, thus it the whole loop must be protected 233 * reference on the scsi_device, thus it the whole loop must be protected
234 * by shost->host_lock. 234 * by shost->host_lock.
235 * 235 *
236 * Note: The only reason why drivers would want to use this is because 236 * Note: The only reason why drivers would want to use this is because
237 * they're need to access the device list in irq context. Otherwise you 237 * they're need to access the device list in irq context. Otherwise you
238 * really want to use shost_for_each_device instead. 238 * really want to use shost_for_each_device instead.
239 */ 239 */
240 #define __shost_for_each_device(sdev, shost) \ 240 #define __shost_for_each_device(sdev, shost) \
241 list_for_each_entry((sdev), &((shost)->__devices), siblings) 241 list_for_each_entry((sdev), &((shost)->__devices), siblings)
242 242
/* Queue-depth tuning for a single device. */
extern void scsi_adjust_queue_depth(struct scsi_device *, int, int);
extern int scsi_track_queue_full(struct scsi_device *, int);

extern int scsi_set_medium_removal(struct scsi_device *, char);

extern int scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
			   unsigned char *buffer, int len, int timeout,
			   int retries, struct scsi_mode_data *data,
			   struct scsi_sense_hdr *);
extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout,
				int retries);
/* Device/target state transitions and power management. */
extern int scsi_device_set_state(struct scsi_device *sdev,
				 enum scsi_device_state state);
extern int scsi_device_quiesce(struct scsi_device *sdev);
extern void scsi_device_resume(struct scsi_device *sdev);
extern void scsi_target_quiesce(struct scsi_target *);
extern void scsi_target_resume(struct scsi_target *);
extern void scsi_scan_target(struct device *parent, unsigned int channel,
			     unsigned int id, unsigned int lun, int rescan);
extern void scsi_target_reap(struct scsi_target *);
extern void scsi_target_block(struct device *);
extern void scsi_target_unblock(struct device *);
extern void scsi_remove_target(struct device *);
extern void int_to_scsilun(unsigned int, struct scsi_lun *);
extern const char *scsi_device_state_name(enum scsi_device_state);
extern int scsi_is_sdev_device(const struct device *);
extern int scsi_is_target_device(const struct device *);
/* Synchronous command submission helpers. */
extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
			int data_direction, void *buffer, unsigned bufflen,
			unsigned char *sense, int timeout, int retries,
			int flag);
extern int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
			    int data_direction, void *buffer, unsigned bufflen,
			    struct scsi_sense_hdr *, int timeout, int retries);
/*
 * Asynchronous command submission.  @cmd_len is the CDB length supplied
 * by the caller (e.g. from SG_IO) rather than guessed via COMMAND_SIZE,
 * which is not always correct for vendor-specific commands.
 */
extern int scsi_execute_async(struct scsi_device *sdev,
			      const unsigned char *cmd, int cmd_len, int data_direction,
			      void *buffer, unsigned bufflen, int use_sg,
			      int timeout, int retries, void *privdata,
			      void (*done)(void *, char *, int, int),
			      gfp_t gfp);
283 283
/* Channel (bus) number the device is attached to. */
static inline unsigned int sdev_channel(struct scsi_device *sdev)
{
	return sdev->channel;
}
288 288
/* Target id of the device. */
static inline unsigned int sdev_id(struct scsi_device *sdev)
{
	return sdev->id;
}
293 293
/* Target id / channel of the device a scsi_cmnd is addressed to. */
#define scmd_id(scmd) sdev_id((scmd)->device)
#define scmd_channel(scmd) sdev_channel((scmd)->device)
296 296
/* Nonzero iff the device has not been set offline. */
static inline int scsi_device_online(struct scsi_device *sdev)
{
	return sdev->sdev_state != SDEV_OFFLINE;
}
301 301
/* accessor functions for the SCSI parameters */

/* Nonzero if the sdtr flag is set (synchronous transfers negotiated). */
static inline int scsi_device_sync(struct scsi_device *sdev)
{
	return sdev->sdtr;
}
/* Nonzero if the wdtr flag is set (wide transfers negotiated). */
static inline int scsi_device_wide(struct scsi_device *sdev)
{
	return sdev->wdtr;
}
/* Nonzero if the ppr flag is set (parallel protocol request supported). */
static inline int scsi_device_dt(struct scsi_device *sdev)
{
	return sdev->ppr;
}
315 static inline int scsi_device_dt_only(struct scsi_device *sdev) 315 static inline int scsi_device_dt_only(struct scsi_device *sdev)
316 { 316 {
317 if (sdev->inquiry_len < 57) 317 if (sdev->inquiry_len < 57)
318 return 0; 318 return 0;
319 return (sdev->inquiry[56] & 0x0c) == 0x04; 319 return (sdev->inquiry[56] & 0x0c) == 0x04;
320 } 320 }
321 static inline int scsi_device_ius(struct scsi_device *sdev) 321 static inline int scsi_device_ius(struct scsi_device *sdev)
322 { 322 {
323 if (sdev->inquiry_len < 57) 323 if (sdev->inquiry_len < 57)
324 return 0; 324 return 0;
325 return sdev->inquiry[56] & 0x01; 325 return sdev->inquiry[56] & 0x01;
326 } 326 }
327 static inline int scsi_device_qas(struct scsi_device *sdev) 327 static inline int scsi_device_qas(struct scsi_device *sdev)
328 { 328 {
329 if (sdev->inquiry_len < 57) 329 if (sdev->inquiry_len < 57)
330 return 0; 330 return 0;
331 return sdev->inquiry[56] & 0x02; 331 return sdev->inquiry[56] & 0x02;
332 } 332 }
333 #endif /* _SCSI_SCSI_DEVICE_H */ 333 #endif /* _SCSI_SCSI_DEVICE_H */
334 334