Commit bb509912481214cf6ad1181c968295c62ff1ad9e

Authored by Michael Holzheu
Committed by Martin Schwidefsky
1 parent 622e99bf0d

[S390] tape: Add pr_fmt() macro to all tape source files

Without defining the pr_fmt() macro, the "tape: " prefix will not be
printed when using the pr_xxx printk macros. This patch adds the
missing definitions.

Signed-off-by: Michael Holzheu <holzheu@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

Showing 8 changed files with 18 additions and 0 deletions Inline Diff

drivers/s390/char/tape_34xx.c
1 /* 1 /*
2 * drivers/s390/char/tape_34xx.c 2 * drivers/s390/char/tape_34xx.c
3 * tape device discipline for 3480/3490 tapes. 3 * tape device discipline for 3480/3490 tapes.
4 * 4 *
5 * Copyright IBM Corp. 2001, 2009 5 * Copyright IBM Corp. 2001, 2009
6 * Author(s): Carsten Otte <cotte@de.ibm.com> 6 * Author(s): Carsten Otte <cotte@de.ibm.com>
7 * Tuan Ngo-Anh <ngoanh@de.ibm.com> 7 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
8 * Martin Schwidefsky <schwidefsky@de.ibm.com> 8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
9 */ 9 */
10 10
11 #define KMSG_COMPONENT "tape_34xx" 11 #define KMSG_COMPONENT "tape_34xx"
12 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12 13
13 #include <linux/module.h> 14 #include <linux/module.h>
14 #include <linux/init.h> 15 #include <linux/init.h>
15 #include <linux/bio.h> 16 #include <linux/bio.h>
16 #include <linux/workqueue.h> 17 #include <linux/workqueue.h>
17 18
18 #define TAPE_DBF_AREA tape_34xx_dbf 19 #define TAPE_DBF_AREA tape_34xx_dbf
19 20
20 #include "tape.h" 21 #include "tape.h"
21 #include "tape_std.h" 22 #include "tape_std.h"
22 23
23 /* 24 /*
24 * Pointer to debug area. 25 * Pointer to debug area.
25 */ 26 */
26 debug_info_t *TAPE_DBF_AREA = NULL; 27 debug_info_t *TAPE_DBF_AREA = NULL;
27 EXPORT_SYMBOL(TAPE_DBF_AREA); 28 EXPORT_SYMBOL(TAPE_DBF_AREA);
28 29
29 #define TAPE34XX_FMT_3480 0 30 #define TAPE34XX_FMT_3480 0
30 #define TAPE34XX_FMT_3480_2_XF 1 31 #define TAPE34XX_FMT_3480_2_XF 1
31 #define TAPE34XX_FMT_3480_XF 2 32 #define TAPE34XX_FMT_3480_XF 2
32 33
33 struct tape_34xx_block_id { 34 struct tape_34xx_block_id {
34 unsigned int wrap : 1; 35 unsigned int wrap : 1;
35 unsigned int segment : 7; 36 unsigned int segment : 7;
36 unsigned int format : 2; 37 unsigned int format : 2;
37 unsigned int block : 22; 38 unsigned int block : 22;
38 }; 39 };
39 40
40 /* 41 /*
41 * A list of block ID's is used to seek blocks faster. 42 * A list of block ID's is used to seek blocks faster.
42 */ 43 */
43 struct tape_34xx_sbid { 44 struct tape_34xx_sbid {
44 struct list_head list; 45 struct list_head list;
45 struct tape_34xx_block_id bid; 46 struct tape_34xx_block_id bid;
46 }; 47 };
47 48
48 static void tape_34xx_delete_sbid_from(struct tape_device *, int); 49 static void tape_34xx_delete_sbid_from(struct tape_device *, int);
49 50
50 /* 51 /*
51 * Medium sense for 34xx tapes. There is no 'real' medium sense call. 52 * Medium sense for 34xx tapes. There is no 'real' medium sense call.
52 * So we just do a normal sense. 53 * So we just do a normal sense.
53 */ 54 */
54 static int 55 static int
55 tape_34xx_medium_sense(struct tape_device *device) 56 tape_34xx_medium_sense(struct tape_device *device)
56 { 57 {
57 struct tape_request *request; 58 struct tape_request *request;
58 unsigned char *sense; 59 unsigned char *sense;
59 int rc; 60 int rc;
60 61
61 request = tape_alloc_request(1, 32); 62 request = tape_alloc_request(1, 32);
62 if (IS_ERR(request)) { 63 if (IS_ERR(request)) {
63 DBF_EXCEPTION(6, "MSEN fail\n"); 64 DBF_EXCEPTION(6, "MSEN fail\n");
64 return PTR_ERR(request); 65 return PTR_ERR(request);
65 } 66 }
66 67
67 request->op = TO_MSEN; 68 request->op = TO_MSEN;
68 tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata); 69 tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
69 70
70 rc = tape_do_io_interruptible(device, request); 71 rc = tape_do_io_interruptible(device, request);
71 if (request->rc == 0) { 72 if (request->rc == 0) {
72 sense = request->cpdata; 73 sense = request->cpdata;
73 74
74 /* 75 /*
75 * This isn't quite correct. But since INTERVENTION_REQUIRED 76 * This isn't quite correct. But since INTERVENTION_REQUIRED
76 * means that the drive is 'neither ready nor on-line' it is 77 * means that the drive is 'neither ready nor on-line' it is
77 * only slightly inaccurate to say there is no tape loaded if 78 * only slightly inaccurate to say there is no tape loaded if
78 * the drive isn't online... 79 * the drive isn't online...
79 */ 80 */
80 if (sense[0] & SENSE_INTERVENTION_REQUIRED) 81 if (sense[0] & SENSE_INTERVENTION_REQUIRED)
81 tape_med_state_set(device, MS_UNLOADED); 82 tape_med_state_set(device, MS_UNLOADED);
82 else 83 else
83 tape_med_state_set(device, MS_LOADED); 84 tape_med_state_set(device, MS_LOADED);
84 85
85 if (sense[1] & SENSE_WRITE_PROTECT) 86 if (sense[1] & SENSE_WRITE_PROTECT)
86 device->tape_generic_status |= GMT_WR_PROT(~0); 87 device->tape_generic_status |= GMT_WR_PROT(~0);
87 else 88 else
88 device->tape_generic_status &= ~GMT_WR_PROT(~0); 89 device->tape_generic_status &= ~GMT_WR_PROT(~0);
89 } else { 90 } else {
90 DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n", 91 DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n",
91 request->rc); 92 request->rc);
92 } 93 }
93 tape_free_request(request); 94 tape_free_request(request);
94 95
95 return rc; 96 return rc;
96 } 97 }
97 98
98 struct tape_34xx_work { 99 struct tape_34xx_work {
99 struct tape_device *device; 100 struct tape_device *device;
100 enum tape_op op; 101 enum tape_op op;
101 struct work_struct work; 102 struct work_struct work;
102 }; 103 };
103 104
104 /* 105 /*
105 * These functions are currently used only to schedule a medium_sense for 106 * These functions are currently used only to schedule a medium_sense for
106 * later execution. This is because we get an interrupt whenever a medium 107 * later execution. This is because we get an interrupt whenever a medium
107 * is inserted but cannot call tape_do_io* from an interrupt context. 108 * is inserted but cannot call tape_do_io* from an interrupt context.
108 * Maybe that's useful for other actions we want to start from the 109 * Maybe that's useful for other actions we want to start from the
109 * interrupt handler. 110 * interrupt handler.
110 */ 111 */
111 static void 112 static void
112 tape_34xx_work_handler(struct work_struct *work) 113 tape_34xx_work_handler(struct work_struct *work)
113 { 114 {
114 struct tape_34xx_work *p = 115 struct tape_34xx_work *p =
115 container_of(work, struct tape_34xx_work, work); 116 container_of(work, struct tape_34xx_work, work);
116 struct tape_device *device = p->device; 117 struct tape_device *device = p->device;
117 118
118 switch(p->op) { 119 switch(p->op) {
119 case TO_MSEN: 120 case TO_MSEN:
120 tape_34xx_medium_sense(device); 121 tape_34xx_medium_sense(device);
121 break; 122 break;
122 default: 123 default:
123 DBF_EVENT(3, "T34XX: internal error: unknown work\n"); 124 DBF_EVENT(3, "T34XX: internal error: unknown work\n");
124 } 125 }
125 tape_put_device(device); 126 tape_put_device(device);
126 kfree(p); 127 kfree(p);
127 } 128 }
128 129
129 static int 130 static int
130 tape_34xx_schedule_work(struct tape_device *device, enum tape_op op) 131 tape_34xx_schedule_work(struct tape_device *device, enum tape_op op)
131 { 132 {
132 struct tape_34xx_work *p; 133 struct tape_34xx_work *p;
133 134
134 if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL) 135 if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
135 return -ENOMEM; 136 return -ENOMEM;
136 137
137 INIT_WORK(&p->work, tape_34xx_work_handler); 138 INIT_WORK(&p->work, tape_34xx_work_handler);
138 139
139 p->device = tape_get_device(device); 140 p->device = tape_get_device(device);
140 p->op = op; 141 p->op = op;
141 142
142 schedule_work(&p->work); 143 schedule_work(&p->work);
143 return 0; 144 return 0;
144 } 145 }
145 146
146 /* 147 /*
147 * Done Handler is called when dev stat = DEVICE-END (successful operation) 148 * Done Handler is called when dev stat = DEVICE-END (successful operation)
148 */ 149 */
149 static inline int 150 static inline int
150 tape_34xx_done(struct tape_request *request) 151 tape_34xx_done(struct tape_request *request)
151 { 152 {
152 DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]); 153 DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]);
153 154
154 switch (request->op) { 155 switch (request->op) {
155 case TO_DSE: 156 case TO_DSE:
156 case TO_RUN: 157 case TO_RUN:
157 case TO_WRI: 158 case TO_WRI:
158 case TO_WTM: 159 case TO_WTM:
159 case TO_ASSIGN: 160 case TO_ASSIGN:
160 case TO_UNASSIGN: 161 case TO_UNASSIGN:
161 tape_34xx_delete_sbid_from(request->device, 0); 162 tape_34xx_delete_sbid_from(request->device, 0);
162 break; 163 break;
163 default: 164 default:
164 ; 165 ;
165 } 166 }
166 return TAPE_IO_SUCCESS; 167 return TAPE_IO_SUCCESS;
167 } 168 }
168 169
169 static inline int 170 static inline int
170 tape_34xx_erp_failed(struct tape_request *request, int rc) 171 tape_34xx_erp_failed(struct tape_request *request, int rc)
171 { 172 {
172 DBF_EVENT(3, "Error recovery failed for %s (RC=%d)\n", 173 DBF_EVENT(3, "Error recovery failed for %s (RC=%d)\n",
173 tape_op_verbose[request->op], rc); 174 tape_op_verbose[request->op], rc);
174 return rc; 175 return rc;
175 } 176 }
176 177
177 static inline int 178 static inline int
178 tape_34xx_erp_succeeded(struct tape_request *request) 179 tape_34xx_erp_succeeded(struct tape_request *request)
179 { 180 {
180 DBF_EVENT(3, "Error Recovery successful for %s\n", 181 DBF_EVENT(3, "Error Recovery successful for %s\n",
181 tape_op_verbose[request->op]); 182 tape_op_verbose[request->op]);
182 return tape_34xx_done(request); 183 return tape_34xx_done(request);
183 } 184 }
184 185
185 static inline int 186 static inline int
186 tape_34xx_erp_retry(struct tape_request *request) 187 tape_34xx_erp_retry(struct tape_request *request)
187 { 188 {
188 DBF_EVENT(3, "xerp retr %s\n", tape_op_verbose[request->op]); 189 DBF_EVENT(3, "xerp retr %s\n", tape_op_verbose[request->op]);
189 return TAPE_IO_RETRY; 190 return TAPE_IO_RETRY;
190 } 191 }
191 192
192 /* 193 /*
193 * This function is called, when no request is outstanding and we get an 194 * This function is called, when no request is outstanding and we get an
194 * interrupt 195 * interrupt
195 */ 196 */
196 static int 197 static int
197 tape_34xx_unsolicited_irq(struct tape_device *device, struct irb *irb) 198 tape_34xx_unsolicited_irq(struct tape_device *device, struct irb *irb)
198 { 199 {
199 if (irb->scsw.cmd.dstat == 0x85) { /* READY */ 200 if (irb->scsw.cmd.dstat == 0x85) { /* READY */
200 /* A medium was inserted in the drive. */ 201 /* A medium was inserted in the drive. */
201 DBF_EVENT(6, "xuud med\n"); 202 DBF_EVENT(6, "xuud med\n");
202 tape_34xx_delete_sbid_from(device, 0); 203 tape_34xx_delete_sbid_from(device, 0);
203 tape_34xx_schedule_work(device, TO_MSEN); 204 tape_34xx_schedule_work(device, TO_MSEN);
204 } else { 205 } else {
205 DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id); 206 DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id);
206 tape_dump_sense_dbf(device, NULL, irb); 207 tape_dump_sense_dbf(device, NULL, irb);
207 } 208 }
208 return TAPE_IO_SUCCESS; 209 return TAPE_IO_SUCCESS;
209 } 210 }
210 211
211 /* 212 /*
212 * Read Opposite Error Recovery Function: 213 * Read Opposite Error Recovery Function:
213 * Used, when Read Forward does not work 214 * Used, when Read Forward does not work
214 */ 215 */
215 static int 216 static int
216 tape_34xx_erp_read_opposite(struct tape_device *device, 217 tape_34xx_erp_read_opposite(struct tape_device *device,
217 struct tape_request *request) 218 struct tape_request *request)
218 { 219 {
219 if (request->op == TO_RFO) { 220 if (request->op == TO_RFO) {
220 /* 221 /*
221 * We did read forward, but the data could not be read 222 * We did read forward, but the data could not be read
222 * *correctly*. We transform the request to a read backward 223 * *correctly*. We transform the request to a read backward
223 * and try again. 224 * and try again.
224 */ 225 */
225 tape_std_read_backward(device, request); 226 tape_std_read_backward(device, request);
226 return tape_34xx_erp_retry(request); 227 return tape_34xx_erp_retry(request);
227 } 228 }
228 229
229 /* 230 /*
230 * We tried to read forward and backward, but had no 231 * We tried to read forward and backward, but had no
231 * success -> failed. 232 * success -> failed.
232 */ 233 */
233 return tape_34xx_erp_failed(request, -EIO); 234 return tape_34xx_erp_failed(request, -EIO);
234 } 235 }
235 236
236 static int 237 static int
237 tape_34xx_erp_bug(struct tape_device *device, struct tape_request *request, 238 tape_34xx_erp_bug(struct tape_device *device, struct tape_request *request,
238 struct irb *irb, int no) 239 struct irb *irb, int no)
239 { 240 {
240 if (request->op != TO_ASSIGN) { 241 if (request->op != TO_ASSIGN) {
241 dev_err(&device->cdev->dev, "An unexpected condition %d " 242 dev_err(&device->cdev->dev, "An unexpected condition %d "
242 "occurred in tape error recovery\n", no); 243 "occurred in tape error recovery\n", no);
243 tape_dump_sense_dbf(device, request, irb); 244 tape_dump_sense_dbf(device, request, irb);
244 } 245 }
245 return tape_34xx_erp_failed(request, -EIO); 246 return tape_34xx_erp_failed(request, -EIO);
246 } 247 }
247 248
248 /* 249 /*
249 * Handle data overrun between cu and drive. The channel speed might 250 * Handle data overrun between cu and drive. The channel speed might
250 * be too slow. 251 * be too slow.
251 */ 252 */
252 static int 253 static int
253 tape_34xx_erp_overrun(struct tape_device *device, struct tape_request *request, 254 tape_34xx_erp_overrun(struct tape_device *device, struct tape_request *request,
254 struct irb *irb) 255 struct irb *irb)
255 { 256 {
256 if (irb->ecw[3] == 0x40) { 257 if (irb->ecw[3] == 0x40) {
257 dev_warn (&device->cdev->dev, "A data overrun occurred between" 258 dev_warn (&device->cdev->dev, "A data overrun occurred between"
258 " the control unit and tape unit\n"); 259 " the control unit and tape unit\n");
259 return tape_34xx_erp_failed(request, -EIO); 260 return tape_34xx_erp_failed(request, -EIO);
260 } 261 }
261 return tape_34xx_erp_bug(device, request, irb, -1); 262 return tape_34xx_erp_bug(device, request, irb, -1);
262 } 263 }
263 264
264 /* 265 /*
265 * Handle record sequence error. 266 * Handle record sequence error.
266 */ 267 */
267 static int 268 static int
268 tape_34xx_erp_sequence(struct tape_device *device, 269 tape_34xx_erp_sequence(struct tape_device *device,
269 struct tape_request *request, struct irb *irb) 270 struct tape_request *request, struct irb *irb)
270 { 271 {
271 if (irb->ecw[3] == 0x41) { 272 if (irb->ecw[3] == 0x41) {
272 /* 273 /*
273 * cu detected incorrect block-id sequence on tape. 274 * cu detected incorrect block-id sequence on tape.
274 */ 275 */
275 dev_warn (&device->cdev->dev, "The block ID sequence on the " 276 dev_warn (&device->cdev->dev, "The block ID sequence on the "
276 "tape is incorrect\n"); 277 "tape is incorrect\n");
277 return tape_34xx_erp_failed(request, -EIO); 278 return tape_34xx_erp_failed(request, -EIO);
278 } 279 }
279 /* 280 /*
280 * Record sequence error bit is set, but erpa does not 281 * Record sequence error bit is set, but erpa does not
281 * show record sequence error. 282 * show record sequence error.
282 */ 283 */
283 return tape_34xx_erp_bug(device, request, irb, -2); 284 return tape_34xx_erp_bug(device, request, irb, -2);
284 } 285 }
285 286
286 /* 287 /*
287 * This function analyses the tape's sense-data in case of a unit-check. 288 * This function analyses the tape's sense-data in case of a unit-check.
288 * If possible, it tries to recover from the error. Else the user is 289 * If possible, it tries to recover from the error. Else the user is
289 * informed about the problem. 290 * informed about the problem.
290 */ 291 */
291 static int 292 static int
292 tape_34xx_unit_check(struct tape_device *device, struct tape_request *request, 293 tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
293 struct irb *irb) 294 struct irb *irb)
294 { 295 {
295 int inhibit_cu_recovery; 296 int inhibit_cu_recovery;
296 __u8* sense; 297 __u8* sense;
297 298
298 inhibit_cu_recovery = (*device->modeset_byte & 0x80) ? 1 : 0; 299 inhibit_cu_recovery = (*device->modeset_byte & 0x80) ? 1 : 0;
299 sense = irb->ecw; 300 sense = irb->ecw;
300 301
301 #ifdef CONFIG_S390_TAPE_BLOCK 302 #ifdef CONFIG_S390_TAPE_BLOCK
302 if (request->op == TO_BLOCK) { 303 if (request->op == TO_BLOCK) {
303 /* 304 /*
304 * Recovery for block device requests. Set the block_position 305 * Recovery for block device requests. Set the block_position
305 * to something invalid and retry. 306 * to something invalid and retry.
306 */ 307 */
307 device->blk_data.block_position = -1; 308 device->blk_data.block_position = -1;
308 if (request->retries-- <= 0) 309 if (request->retries-- <= 0)
309 return tape_34xx_erp_failed(request, -EIO); 310 return tape_34xx_erp_failed(request, -EIO);
310 else 311 else
311 return tape_34xx_erp_retry(request); 312 return tape_34xx_erp_retry(request);
312 } 313 }
313 #endif 314 #endif
314 315
315 if ( 316 if (
316 sense[0] & SENSE_COMMAND_REJECT && 317 sense[0] & SENSE_COMMAND_REJECT &&
317 sense[1] & SENSE_WRITE_PROTECT 318 sense[1] & SENSE_WRITE_PROTECT
318 ) { 319 ) {
319 if ( 320 if (
320 request->op == TO_DSE || 321 request->op == TO_DSE ||
321 request->op == TO_WRI || 322 request->op == TO_WRI ||
322 request->op == TO_WTM 323 request->op == TO_WTM
323 ) { 324 ) {
324 /* medium is write protected */ 325 /* medium is write protected */
325 return tape_34xx_erp_failed(request, -EACCES); 326 return tape_34xx_erp_failed(request, -EACCES);
326 } else { 327 } else {
327 return tape_34xx_erp_bug(device, request, irb, -3); 328 return tape_34xx_erp_bug(device, request, irb, -3);
328 } 329 }
329 } 330 }
330 331
331 /* 332 /*
332 * Special cases for various tape-states when reaching 333 * Special cases for various tape-states when reaching
333 * end of recorded area 334 * end of recorded area
334 * 335 *
335 * FIXME: Maybe a special case of the special case: 336 * FIXME: Maybe a special case of the special case:
336 * sense[0] == SENSE_EQUIPMENT_CHECK && 337 * sense[0] == SENSE_EQUIPMENT_CHECK &&
337 * sense[1] == SENSE_DRIVE_ONLINE && 338 * sense[1] == SENSE_DRIVE_ONLINE &&
338 * sense[3] == 0x47 (Volume Fenced) 339 * sense[3] == 0x47 (Volume Fenced)
339 * 340 *
340 * This was caused by continued FSF or FSR after an 341 * This was caused by continued FSF or FSR after an
341 * 'End Of Data'. 342 * 'End Of Data'.
342 */ 343 */
343 if (( 344 if ((
344 sense[0] == SENSE_DATA_CHECK || 345 sense[0] == SENSE_DATA_CHECK ||
345 sense[0] == SENSE_EQUIPMENT_CHECK || 346 sense[0] == SENSE_EQUIPMENT_CHECK ||
346 sense[0] == SENSE_EQUIPMENT_CHECK + SENSE_DEFERRED_UNIT_CHECK 347 sense[0] == SENSE_EQUIPMENT_CHECK + SENSE_DEFERRED_UNIT_CHECK
347 ) && ( 348 ) && (
348 sense[1] == SENSE_DRIVE_ONLINE || 349 sense[1] == SENSE_DRIVE_ONLINE ||
349 sense[1] == SENSE_BEGINNING_OF_TAPE + SENSE_WRITE_MODE 350 sense[1] == SENSE_BEGINNING_OF_TAPE + SENSE_WRITE_MODE
350 )) { 351 )) {
351 switch (request->op) { 352 switch (request->op) {
352 /* 353 /*
353 * sense[0] == SENSE_DATA_CHECK && 354 * sense[0] == SENSE_DATA_CHECK &&
354 * sense[1] == SENSE_DRIVE_ONLINE 355 * sense[1] == SENSE_DRIVE_ONLINE
355 * sense[3] == 0x36 (End Of Data) 356 * sense[3] == 0x36 (End Of Data)
356 * 357 *
357 * Further seeks might return a 'Volume Fenced'. 358 * Further seeks might return a 'Volume Fenced'.
358 */ 359 */
359 case TO_FSF: 360 case TO_FSF:
360 case TO_FSB: 361 case TO_FSB:
361 /* Trying to seek beyond end of recorded area */ 362 /* Trying to seek beyond end of recorded area */
362 return tape_34xx_erp_failed(request, -ENOSPC); 363 return tape_34xx_erp_failed(request, -ENOSPC);
363 case TO_BSB: 364 case TO_BSB:
364 return tape_34xx_erp_retry(request); 365 return tape_34xx_erp_retry(request);
365 366
366 /* 367 /*
367 * sense[0] == SENSE_DATA_CHECK && 368 * sense[0] == SENSE_DATA_CHECK &&
368 * sense[1] == SENSE_DRIVE_ONLINE && 369 * sense[1] == SENSE_DRIVE_ONLINE &&
369 * sense[3] == 0x36 (End Of Data) 370 * sense[3] == 0x36 (End Of Data)
370 */ 371 */
371 case TO_LBL: 372 case TO_LBL:
372 /* Block could not be located. */ 373 /* Block could not be located. */
373 tape_34xx_delete_sbid_from(device, 0); 374 tape_34xx_delete_sbid_from(device, 0);
374 return tape_34xx_erp_failed(request, -EIO); 375 return tape_34xx_erp_failed(request, -EIO);
375 376
376 case TO_RFO: 377 case TO_RFO:
377 /* Read beyond end of recorded area -> 0 bytes read */ 378 /* Read beyond end of recorded area -> 0 bytes read */
378 return tape_34xx_erp_failed(request, 0); 379 return tape_34xx_erp_failed(request, 0);
379 380
380 /* 381 /*
381 * sense[0] == SENSE_EQUIPMENT_CHECK && 382 * sense[0] == SENSE_EQUIPMENT_CHECK &&
382 * sense[1] == SENSE_DRIVE_ONLINE && 383 * sense[1] == SENSE_DRIVE_ONLINE &&
383 * sense[3] == 0x38 (Physical End Of Volume) 384 * sense[3] == 0x38 (Physical End Of Volume)
384 */ 385 */
385 case TO_WRI: 386 case TO_WRI:
386 /* Writing at physical end of volume */ 387 /* Writing at physical end of volume */
387 return tape_34xx_erp_failed(request, -ENOSPC); 388 return tape_34xx_erp_failed(request, -ENOSPC);
388 default: 389 default:
389 return tape_34xx_erp_failed(request, 0); 390 return tape_34xx_erp_failed(request, 0);
390 } 391 }
391 } 392 }
392 393
393 /* Sensing special bits */ 394 /* Sensing special bits */
394 if (sense[0] & SENSE_BUS_OUT_CHECK) 395 if (sense[0] & SENSE_BUS_OUT_CHECK)
395 return tape_34xx_erp_retry(request); 396 return tape_34xx_erp_retry(request);
396 397
397 if (sense[0] & SENSE_DATA_CHECK) { 398 if (sense[0] & SENSE_DATA_CHECK) {
398 /* 399 /*
399 * hardware failure, damaged tape or improper 400 * hardware failure, damaged tape or improper
400 * operating conditions 401 * operating conditions
401 */ 402 */
402 switch (sense[3]) { 403 switch (sense[3]) {
403 case 0x23: 404 case 0x23:
404 /* a read data check occurred */ 405 /* a read data check occurred */
405 if ((sense[2] & SENSE_TAPE_SYNC_MODE) || 406 if ((sense[2] & SENSE_TAPE_SYNC_MODE) ||
406 inhibit_cu_recovery) 407 inhibit_cu_recovery)
407 // data check is not permanent, may be 408 // data check is not permanent, may be
408 // recovered. We always use async-mode with 409 // recovered. We always use async-mode with
409 // cu-recovery, so this should *never* happen. 410 // cu-recovery, so this should *never* happen.
410 return tape_34xx_erp_bug(device, request, 411 return tape_34xx_erp_bug(device, request,
411 irb, -4); 412 irb, -4);
412 413
413 /* data check is permanent, CU recovery has failed */ 414 /* data check is permanent, CU recovery has failed */
414 dev_warn (&device->cdev->dev, "A read error occurred " 415 dev_warn (&device->cdev->dev, "A read error occurred "
415 "that cannot be recovered\n"); 416 "that cannot be recovered\n");
416 return tape_34xx_erp_failed(request, -EIO); 417 return tape_34xx_erp_failed(request, -EIO);
417 case 0x25: 418 case 0x25:
418 // a write data check occurred 419 // a write data check occurred
419 if ((sense[2] & SENSE_TAPE_SYNC_MODE) || 420 if ((sense[2] & SENSE_TAPE_SYNC_MODE) ||
420 inhibit_cu_recovery) 421 inhibit_cu_recovery)
421 // data check is not permanent, may be 422 // data check is not permanent, may be
422 // recovered. We always use async-mode with 423 // recovered. We always use async-mode with
423 // cu-recovery, so this should *never* happen. 424 // cu-recovery, so this should *never* happen.
424 return tape_34xx_erp_bug(device, request, 425 return tape_34xx_erp_bug(device, request,
425 irb, -5); 426 irb, -5);
426 427
427 // data check is permanent, cu-recovery has failed 428 // data check is permanent, cu-recovery has failed
428 dev_warn (&device->cdev->dev, "A write error on the " 429 dev_warn (&device->cdev->dev, "A write error on the "
429 "tape cannot be recovered\n"); 430 "tape cannot be recovered\n");
430 return tape_34xx_erp_failed(request, -EIO); 431 return tape_34xx_erp_failed(request, -EIO);
431 case 0x26: 432 case 0x26:
432 /* Data Check (read opposite) occurred. */ 433 /* Data Check (read opposite) occurred. */
433 return tape_34xx_erp_read_opposite(device, request); 434 return tape_34xx_erp_read_opposite(device, request);
434 case 0x28: 435 case 0x28:
435 /* ID-Mark at tape start couldn't be written */ 436 /* ID-Mark at tape start couldn't be written */
436 dev_warn (&device->cdev->dev, "Writing the ID-mark " 437 dev_warn (&device->cdev->dev, "Writing the ID-mark "
437 "failed\n"); 438 "failed\n");
438 return tape_34xx_erp_failed(request, -EIO); 439 return tape_34xx_erp_failed(request, -EIO);
439 case 0x31: 440 case 0x31:
440 /* Tape void. Tried to read beyond end of device. */ 441 /* Tape void. Tried to read beyond end of device. */
441 dev_warn (&device->cdev->dev, "Reading the tape beyond" 442 dev_warn (&device->cdev->dev, "Reading the tape beyond"
442 " the end of the recorded area failed\n"); 443 " the end of the recorded area failed\n");
443 return tape_34xx_erp_failed(request, -ENOSPC); 444 return tape_34xx_erp_failed(request, -ENOSPC);
444 case 0x41: 445 case 0x41:
445 /* Record sequence error. */ 446 /* Record sequence error. */
446 dev_warn (&device->cdev->dev, "The tape contains an " 447 dev_warn (&device->cdev->dev, "The tape contains an "
447 "incorrect block ID sequence\n"); 448 "incorrect block ID sequence\n");
448 return tape_34xx_erp_failed(request, -EIO); 449 return tape_34xx_erp_failed(request, -EIO);
449 default: 450 default:
450 /* all data checks for 3480 should result in one of 451 /* all data checks for 3480 should result in one of
451 * the above erpa-codes. For 3490, other data-check 452 * the above erpa-codes. For 3490, other data-check
452 * conditions do exist. */ 453 * conditions do exist. */
453 if (device->cdev->id.driver_info == tape_3480) 454 if (device->cdev->id.driver_info == tape_3480)
454 return tape_34xx_erp_bug(device, request, 455 return tape_34xx_erp_bug(device, request,
455 irb, -6); 456 irb, -6);
456 } 457 }
457 } 458 }
458 459
459 if (sense[0] & SENSE_OVERRUN) 460 if (sense[0] & SENSE_OVERRUN)
460 return tape_34xx_erp_overrun(device, request, irb); 461 return tape_34xx_erp_overrun(device, request, irb);
461 462
462 if (sense[1] & SENSE_RECORD_SEQUENCE_ERR) 463 if (sense[1] & SENSE_RECORD_SEQUENCE_ERR)
463 return tape_34xx_erp_sequence(device, request, irb); 464 return tape_34xx_erp_sequence(device, request, irb);
464 465
465 /* Sensing erpa codes */ 466 /* Sensing erpa codes */
466 switch (sense[3]) { 467 switch (sense[3]) {
467 case 0x00: 468 case 0x00:
468 /* Unit check with erpa code 0. Report and ignore. */ 469 /* Unit check with erpa code 0. Report and ignore. */
469 return TAPE_IO_SUCCESS; 470 return TAPE_IO_SUCCESS;
470 case 0x21: 471 case 0x21:
471 /* 472 /*
472 * Data streaming not operational. CU will switch to 473 * Data streaming not operational. CU will switch to
473 * interlock mode. Reissue the command. 474 * interlock mode. Reissue the command.
474 */ 475 */
475 return tape_34xx_erp_retry(request); 476 return tape_34xx_erp_retry(request);
476 case 0x22: 477 case 0x22:
477 /* 478 /*
478 * Path equipment check. Might be drive adapter error, buffer 479 * Path equipment check. Might be drive adapter error, buffer
479 * error on the lower interface, internal path not usable, 480 * error on the lower interface, internal path not usable,
480 * or error during cartridge load. 481 * or error during cartridge load.
481 */ 482 */
482 dev_warn (&device->cdev->dev, "A path equipment check occurred" 483 dev_warn (&device->cdev->dev, "A path equipment check occurred"
483 " for the tape device\n"); 484 " for the tape device\n");
484 return tape_34xx_erp_failed(request, -EIO); 485 return tape_34xx_erp_failed(request, -EIO);
485 case 0x24: 486 case 0x24:
486 /* 487 /*
487 * Load display check. A load display command was issued, 488 * Load display check. A load display command was issued,
488 * but the drive is displaying a drive check message. Can 489 * but the drive is displaying a drive check message. Can
489 * be treated as "device end". 490 * be treated as "device end".
490 */ 491 */
491 return tape_34xx_erp_succeeded(request); 492 return tape_34xx_erp_succeeded(request);
492 case 0x27: 493 case 0x27:
493 /* 494 /*
494 * Command reject. May indicate illegal channel program or 495 * Command reject. May indicate illegal channel program or
495 * buffer over/underrun. Since all channel programs are 496 * buffer over/underrun. Since all channel programs are
496 * issued by this driver and ought be correct, we assume a 497 * issued by this driver and ought be correct, we assume a
497 * over/underrun situation and retry the channel program. 498 * over/underrun situation and retry the channel program.
498 */ 499 */
499 return tape_34xx_erp_retry(request); 500 return tape_34xx_erp_retry(request);
500 case 0x29: 501 case 0x29:
501 /* 502 /*
502 * Function incompatible. Either the tape is idrc compressed 503 * Function incompatible. Either the tape is idrc compressed
503 * but the hardware isn't capable to do idrc, or a perform 504 * but the hardware isn't capable to do idrc, or a perform
504 * subsystem func is issued and the CU is not on-line. 505 * subsystem func is issued and the CU is not on-line.
505 */ 506 */
506 return tape_34xx_erp_failed(request, -EIO); 507 return tape_34xx_erp_failed(request, -EIO);
507 case 0x2a: 508 case 0x2a:
508 /* 509 /*
509 * Unsolicited environmental data. An internal counter 510 * Unsolicited environmental data. An internal counter
510 * overflows, we can ignore this and reissue the cmd. 511 * overflows, we can ignore this and reissue the cmd.
511 */ 512 */
512 return tape_34xx_erp_retry(request); 513 return tape_34xx_erp_retry(request);
513 case 0x2b: 514 case 0x2b:
514 /* 515 /*
515 * Environmental data present. Indicates either unload 516 * Environmental data present. Indicates either unload
516 * completed ok or read buffered log command completed ok. 517 * completed ok or read buffered log command completed ok.
517 */ 518 */
518 if (request->op == TO_RUN) { 519 if (request->op == TO_RUN) {
519 /* Rewind unload completed ok. */ 520 /* Rewind unload completed ok. */
520 tape_med_state_set(device, MS_UNLOADED); 521 tape_med_state_set(device, MS_UNLOADED);
521 return tape_34xx_erp_succeeded(request); 522 return tape_34xx_erp_succeeded(request);
522 } 523 }
523 /* tape_34xx doesn't use read buffered log commands. */ 524 /* tape_34xx doesn't use read buffered log commands. */
524 return tape_34xx_erp_bug(device, request, irb, sense[3]); 525 return tape_34xx_erp_bug(device, request, irb, sense[3]);
525 case 0x2c: 526 case 0x2c:
526 /* 527 /*
527 * Permanent equipment check. CU has tried recovery, but 528 * Permanent equipment check. CU has tried recovery, but
528 * did not succeed. 529 * did not succeed.
529 */ 530 */
530 return tape_34xx_erp_failed(request, -EIO); 531 return tape_34xx_erp_failed(request, -EIO);
531 case 0x2d: 532 case 0x2d:
532 /* Data security erase failure. */ 533 /* Data security erase failure. */
533 if (request->op == TO_DSE) 534 if (request->op == TO_DSE)
534 return tape_34xx_erp_failed(request, -EIO); 535 return tape_34xx_erp_failed(request, -EIO);
535 /* Data security erase failure, but no such command issued. */ 536 /* Data security erase failure, but no such command issued. */
536 return tape_34xx_erp_bug(device, request, irb, sense[3]); 537 return tape_34xx_erp_bug(device, request, irb, sense[3]);
537 case 0x2e: 538 case 0x2e:
538 /* 539 /*
539 * Not capable. This indicates either that the drive fails 540 * Not capable. This indicates either that the drive fails
540 * reading the format id mark or that the format specified 541 * reading the format id mark or that the format specified
541 * is not supported by the drive. 542 * is not supported by the drive.
542 */ 543 */
543 dev_warn (&device->cdev->dev, "The tape unit cannot process " 544 dev_warn (&device->cdev->dev, "The tape unit cannot process "
544 "the tape format\n"); 545 "the tape format\n");
545 return tape_34xx_erp_failed(request, -EMEDIUMTYPE); 546 return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
546 case 0x30: 547 case 0x30:
547 /* The medium is write protected. */ 548 /* The medium is write protected. */
548 dev_warn (&device->cdev->dev, "The tape medium is write-" 549 dev_warn (&device->cdev->dev, "The tape medium is write-"
549 "protected\n"); 550 "protected\n");
550 return tape_34xx_erp_failed(request, -EACCES); 551 return tape_34xx_erp_failed(request, -EACCES);
551 case 0x32: 552 case 0x32:
552 // Tension loss. We cannot recover this, it's an I/O error. 553 // Tension loss. We cannot recover this, it's an I/O error.
553 dev_warn (&device->cdev->dev, "The tape does not have the " 554 dev_warn (&device->cdev->dev, "The tape does not have the "
554 "required tape tension\n"); 555 "required tape tension\n");
555 return tape_34xx_erp_failed(request, -EIO); 556 return tape_34xx_erp_failed(request, -EIO);
556 case 0x33: 557 case 0x33:
557 /* 558 /*
558 * Load Failure. The cartridge was not inserted correctly or 559 * Load Failure. The cartridge was not inserted correctly or
559 * the tape is not threaded correctly. 560 * the tape is not threaded correctly.
560 */ 561 */
561 dev_warn (&device->cdev->dev, "The tape unit failed to load" 562 dev_warn (&device->cdev->dev, "The tape unit failed to load"
562 " the cartridge\n"); 563 " the cartridge\n");
563 tape_34xx_delete_sbid_from(device, 0); 564 tape_34xx_delete_sbid_from(device, 0);
564 return tape_34xx_erp_failed(request, -EIO); 565 return tape_34xx_erp_failed(request, -EIO);
565 case 0x34: 566 case 0x34:
566 /* 567 /*
567 * Unload failure. The drive cannot maintain tape tension 568 * Unload failure. The drive cannot maintain tape tension
568 * and control tape movement during an unload operation. 569 * and control tape movement during an unload operation.
569 */ 570 */
570 dev_warn (&device->cdev->dev, "Automatic unloading of the tape" 571 dev_warn (&device->cdev->dev, "Automatic unloading of the tape"
571 " cartridge failed\n"); 572 " cartridge failed\n");
572 if (request->op == TO_RUN) 573 if (request->op == TO_RUN)
573 return tape_34xx_erp_failed(request, -EIO); 574 return tape_34xx_erp_failed(request, -EIO);
574 return tape_34xx_erp_bug(device, request, irb, sense[3]); 575 return tape_34xx_erp_bug(device, request, irb, sense[3]);
575 case 0x35: 576 case 0x35:
576 /* 577 /*
577 * Drive equipment check. One of the following: 578 * Drive equipment check. One of the following:
578 * - cu cannot recover from a drive detected error 579 * - cu cannot recover from a drive detected error
579 * - a check code message is shown on drive display 580 * - a check code message is shown on drive display
580 * - the cartridge loader does not respond correctly 581 * - the cartridge loader does not respond correctly
581 * - a failure occurs during an index, load, or unload cycle 582 * - a failure occurs during an index, load, or unload cycle
582 */ 583 */
583 dev_warn (&device->cdev->dev, "An equipment check has occurred" 584 dev_warn (&device->cdev->dev, "An equipment check has occurred"
584 " on the tape unit\n"); 585 " on the tape unit\n");
585 return tape_34xx_erp_failed(request, -EIO); 586 return tape_34xx_erp_failed(request, -EIO);
586 case 0x36: 587 case 0x36:
587 if (device->cdev->id.driver_info == tape_3490) 588 if (device->cdev->id.driver_info == tape_3490)
588 /* End of data. */ 589 /* End of data. */
589 return tape_34xx_erp_failed(request, -EIO); 590 return tape_34xx_erp_failed(request, -EIO);
590 /* This erpa is reserved for 3480 */ 591 /* This erpa is reserved for 3480 */
591 return tape_34xx_erp_bug(device, request, irb, sense[3]); 592 return tape_34xx_erp_bug(device, request, irb, sense[3]);
592 case 0x37: 593 case 0x37:
593 /* 594 /*
594 * Tape length error. The tape is shorter than reported in 595 * Tape length error. The tape is shorter than reported in
595 * the beginning-of-tape data. 596 * the beginning-of-tape data.
596 */ 597 */
597 dev_warn (&device->cdev->dev, "The tape information states an" 598 dev_warn (&device->cdev->dev, "The tape information states an"
598 " incorrect length\n"); 599 " incorrect length\n");
599 return tape_34xx_erp_failed(request, -EIO); 600 return tape_34xx_erp_failed(request, -EIO);
600 case 0x38: 601 case 0x38:
601 /* 602 /*
602 * Physical end of tape. A read/write operation reached 603 * Physical end of tape. A read/write operation reached
603 * the physical end of tape. 604 * the physical end of tape.
604 */ 605 */
605 if (request->op==TO_WRI || 606 if (request->op==TO_WRI ||
606 request->op==TO_DSE || 607 request->op==TO_DSE ||
607 request->op==TO_WTM) 608 request->op==TO_WTM)
608 return tape_34xx_erp_failed(request, -ENOSPC); 609 return tape_34xx_erp_failed(request, -ENOSPC);
609 return tape_34xx_erp_failed(request, -EIO); 610 return tape_34xx_erp_failed(request, -EIO);
610 case 0x39: 611 case 0x39:
611 /* Backward at Beginning of tape. */ 612 /* Backward at Beginning of tape. */
612 return tape_34xx_erp_failed(request, -EIO); 613 return tape_34xx_erp_failed(request, -EIO);
613 case 0x3a: 614 case 0x3a:
614 /* Drive switched to not ready. */ 615 /* Drive switched to not ready. */
615 dev_warn (&device->cdev->dev, "The tape unit is not ready\n"); 616 dev_warn (&device->cdev->dev, "The tape unit is not ready\n");
616 return tape_34xx_erp_failed(request, -EIO); 617 return tape_34xx_erp_failed(request, -EIO);
617 case 0x3b: 618 case 0x3b:
618 /* Manual rewind or unload. This causes an I/O error. */ 619 /* Manual rewind or unload. This causes an I/O error. */
619 dev_warn (&device->cdev->dev, "The tape medium has been " 620 dev_warn (&device->cdev->dev, "The tape medium has been "
620 "rewound or unloaded manually\n"); 621 "rewound or unloaded manually\n");
621 tape_34xx_delete_sbid_from(device, 0); 622 tape_34xx_delete_sbid_from(device, 0);
622 return tape_34xx_erp_failed(request, -EIO); 623 return tape_34xx_erp_failed(request, -EIO);
623 case 0x42: 624 case 0x42:
624 /* 625 /*
625 * Degraded mode. A condition that can cause degraded 626 * Degraded mode. A condition that can cause degraded
626 * performance is detected. 627 * performance is detected.
627 */ 628 */
628 dev_warn (&device->cdev->dev, "The tape subsystem is running " 629 dev_warn (&device->cdev->dev, "The tape subsystem is running "
629 "in degraded mode\n"); 630 "in degraded mode\n");
630 return tape_34xx_erp_retry(request); 631 return tape_34xx_erp_retry(request);
631 case 0x43: 632 case 0x43:
632 /* Drive not ready. */ 633 /* Drive not ready. */
633 tape_34xx_delete_sbid_from(device, 0); 634 tape_34xx_delete_sbid_from(device, 0);
634 tape_med_state_set(device, MS_UNLOADED); 635 tape_med_state_set(device, MS_UNLOADED);
635 /* Some commands commands are successful even in this case */ 636 /* Some commands commands are successful even in this case */
636 if (sense[1] & SENSE_DRIVE_ONLINE) { 637 if (sense[1] & SENSE_DRIVE_ONLINE) {
637 switch(request->op) { 638 switch(request->op) {
638 case TO_ASSIGN: 639 case TO_ASSIGN:
639 case TO_UNASSIGN: 640 case TO_UNASSIGN:
640 case TO_DIS: 641 case TO_DIS:
641 case TO_NOP: 642 case TO_NOP:
642 return tape_34xx_done(request); 643 return tape_34xx_done(request);
643 break; 644 break;
644 default: 645 default:
645 break; 646 break;
646 } 647 }
647 } 648 }
648 return tape_34xx_erp_failed(request, -ENOMEDIUM); 649 return tape_34xx_erp_failed(request, -ENOMEDIUM);
649 case 0x44: 650 case 0x44:
650 /* Locate Block unsuccessful. */ 651 /* Locate Block unsuccessful. */
651 if (request->op != TO_BLOCK && request->op != TO_LBL) 652 if (request->op != TO_BLOCK && request->op != TO_LBL)
652 /* No locate block was issued. */ 653 /* No locate block was issued. */
653 return tape_34xx_erp_bug(device, request, 654 return tape_34xx_erp_bug(device, request,
654 irb, sense[3]); 655 irb, sense[3]);
655 return tape_34xx_erp_failed(request, -EIO); 656 return tape_34xx_erp_failed(request, -EIO);
656 case 0x45: 657 case 0x45:
657 /* The drive is assigned to a different channel path. */ 658 /* The drive is assigned to a different channel path. */
658 dev_warn (&device->cdev->dev, "The tape unit is already " 659 dev_warn (&device->cdev->dev, "The tape unit is already "
659 "assigned\n"); 660 "assigned\n");
660 return tape_34xx_erp_failed(request, -EIO); 661 return tape_34xx_erp_failed(request, -EIO);
661 case 0x46: 662 case 0x46:
662 /* 663 /*
663 * Drive not on-line. Drive may be switched offline, 664 * Drive not on-line. Drive may be switched offline,
664 * the power supply may be switched off or 665 * the power supply may be switched off or
665 * the drive address may not be set correctly. 666 * the drive address may not be set correctly.
666 */ 667 */
667 dev_warn (&device->cdev->dev, "The tape unit is not online\n"); 668 dev_warn (&device->cdev->dev, "The tape unit is not online\n");
668 return tape_34xx_erp_failed(request, -EIO); 669 return tape_34xx_erp_failed(request, -EIO);
669 case 0x47: 670 case 0x47:
670 /* Volume fenced. CU reports volume integrity is lost. */ 671 /* Volume fenced. CU reports volume integrity is lost. */
671 dev_warn (&device->cdev->dev, "The control unit has fenced " 672 dev_warn (&device->cdev->dev, "The control unit has fenced "
672 "access to the tape volume\n"); 673 "access to the tape volume\n");
673 tape_34xx_delete_sbid_from(device, 0); 674 tape_34xx_delete_sbid_from(device, 0);
674 return tape_34xx_erp_failed(request, -EIO); 675 return tape_34xx_erp_failed(request, -EIO);
675 case 0x48: 676 case 0x48:
676 /* Log sense data and retry request. */ 677 /* Log sense data and retry request. */
677 return tape_34xx_erp_retry(request); 678 return tape_34xx_erp_retry(request);
678 case 0x49: 679 case 0x49:
679 /* Bus out check. A parity check error on the bus was found. */ 680 /* Bus out check. A parity check error on the bus was found. */
680 dev_warn (&device->cdev->dev, "A parity error occurred on the " 681 dev_warn (&device->cdev->dev, "A parity error occurred on the "
681 "tape bus\n"); 682 "tape bus\n");
682 return tape_34xx_erp_failed(request, -EIO); 683 return tape_34xx_erp_failed(request, -EIO);
683 case 0x4a: 684 case 0x4a:
684 /* Control unit erp failed. */ 685 /* Control unit erp failed. */
685 dev_warn (&device->cdev->dev, "I/O error recovery failed on " 686 dev_warn (&device->cdev->dev, "I/O error recovery failed on "
686 "the tape control unit\n"); 687 "the tape control unit\n");
687 return tape_34xx_erp_failed(request, -EIO); 688 return tape_34xx_erp_failed(request, -EIO);
688 case 0x4b: 689 case 0x4b:
689 /* 690 /*
690 * CU and drive incompatible. The drive requests micro-program 691 * CU and drive incompatible. The drive requests micro-program
691 * patches, which are not available on the CU. 692 * patches, which are not available on the CU.
692 */ 693 */
693 dev_warn (&device->cdev->dev, "The tape unit requires a " 694 dev_warn (&device->cdev->dev, "The tape unit requires a "
694 "firmware update\n"); 695 "firmware update\n");
695 return tape_34xx_erp_failed(request, -EIO); 696 return tape_34xx_erp_failed(request, -EIO);
696 case 0x4c: 697 case 0x4c:
697 /* 698 /*
698 * Recovered Check-One failure. Cu develops a hardware error, 699 * Recovered Check-One failure. Cu develops a hardware error,
699 * but is able to recover. 700 * but is able to recover.
700 */ 701 */
701 return tape_34xx_erp_retry(request); 702 return tape_34xx_erp_retry(request);
702 case 0x4d: 703 case 0x4d:
703 if (device->cdev->id.driver_info == tape_3490) 704 if (device->cdev->id.driver_info == tape_3490)
704 /* 705 /*
705 * Resetting event received. Since the driver does 706 * Resetting event received. Since the driver does
706 * not support resetting event recovery (which has to 707 * not support resetting event recovery (which has to
707 * be handled by the I/O Layer), retry our command. 708 * be handled by the I/O Layer), retry our command.
708 */ 709 */
709 return tape_34xx_erp_retry(request); 710 return tape_34xx_erp_retry(request);
710 /* This erpa is reserved for 3480. */ 711 /* This erpa is reserved for 3480. */
711 return tape_34xx_erp_bug(device, request, irb, sense[3]); 712 return tape_34xx_erp_bug(device, request, irb, sense[3]);
712 case 0x4e: 713 case 0x4e:
713 if (device->cdev->id.driver_info == tape_3490) { 714 if (device->cdev->id.driver_info == tape_3490) {
714 /* 715 /*
715 * Maximum block size exceeded. This indicates, that 716 * Maximum block size exceeded. This indicates, that
716 * the block to be written is larger than allowed for 717 * the block to be written is larger than allowed for
717 * buffered mode. 718 * buffered mode.
718 */ 719 */
719 dev_warn (&device->cdev->dev, "The maximum block size" 720 dev_warn (&device->cdev->dev, "The maximum block size"
720 " for buffered mode is exceeded\n"); 721 " for buffered mode is exceeded\n");
721 return tape_34xx_erp_failed(request, -ENOBUFS); 722 return tape_34xx_erp_failed(request, -ENOBUFS);
722 } 723 }
723 /* This erpa is reserved for 3480. */ 724 /* This erpa is reserved for 3480. */
724 return tape_34xx_erp_bug(device, request, irb, sense[3]); 725 return tape_34xx_erp_bug(device, request, irb, sense[3]);
725 case 0x50: 726 case 0x50:
726 /* 727 /*
727 * Read buffered log (Overflow). CU is running in extended 728 * Read buffered log (Overflow). CU is running in extended
728 * buffered log mode, and a counter overflows. This should 729 * buffered log mode, and a counter overflows. This should
729 * never happen, since we're never running in extended 730 * never happen, since we're never running in extended
730 * buffered log mode. 731 * buffered log mode.
731 */ 732 */
732 return tape_34xx_erp_retry(request); 733 return tape_34xx_erp_retry(request);
733 case 0x51: 734 case 0x51:
734 /* 735 /*
735 * Read buffered log (EOV). EOF processing occurs while the 736 * Read buffered log (EOV). EOF processing occurs while the
736 * CU is in extended buffered log mode. This should never 737 * CU is in extended buffered log mode. This should never
737 * happen, since we're never running in extended buffered 738 * happen, since we're never running in extended buffered
738 * log mode. 739 * log mode.
739 */ 740 */
740 return tape_34xx_erp_retry(request); 741 return tape_34xx_erp_retry(request);
741 case 0x52: 742 case 0x52:
742 /* End of Volume complete. Rewind unload completed ok. */ 743 /* End of Volume complete. Rewind unload completed ok. */
743 if (request->op == TO_RUN) { 744 if (request->op == TO_RUN) {
744 tape_med_state_set(device, MS_UNLOADED); 745 tape_med_state_set(device, MS_UNLOADED);
745 tape_34xx_delete_sbid_from(device, 0); 746 tape_34xx_delete_sbid_from(device, 0);
746 return tape_34xx_erp_succeeded(request); 747 return tape_34xx_erp_succeeded(request);
747 } 748 }
748 return tape_34xx_erp_bug(device, request, irb, sense[3]); 749 return tape_34xx_erp_bug(device, request, irb, sense[3]);
749 case 0x53: 750 case 0x53:
750 /* Global command intercept. */ 751 /* Global command intercept. */
751 return tape_34xx_erp_retry(request); 752 return tape_34xx_erp_retry(request);
752 case 0x54: 753 case 0x54:
753 /* Channel interface recovery (temporary). */ 754 /* Channel interface recovery (temporary). */
754 return tape_34xx_erp_retry(request); 755 return tape_34xx_erp_retry(request);
755 case 0x55: 756 case 0x55:
756 /* Channel interface recovery (permanent). */ 757 /* Channel interface recovery (permanent). */
757 dev_warn (&device->cdev->dev, "A channel interface error cannot be" 758 dev_warn (&device->cdev->dev, "A channel interface error cannot be"
758 " recovered\n"); 759 " recovered\n");
759 return tape_34xx_erp_failed(request, -EIO); 760 return tape_34xx_erp_failed(request, -EIO);
760 case 0x56: 761 case 0x56:
761 /* Channel protocol error. */ 762 /* Channel protocol error. */
762 dev_warn (&device->cdev->dev, "A channel protocol error " 763 dev_warn (&device->cdev->dev, "A channel protocol error "
763 "occurred\n"); 764 "occurred\n");
764 return tape_34xx_erp_failed(request, -EIO); 765 return tape_34xx_erp_failed(request, -EIO);
765 case 0x57: 766 case 0x57:
766 if (device->cdev->id.driver_info == tape_3480) { 767 if (device->cdev->id.driver_info == tape_3480) {
767 /* Attention intercept. */ 768 /* Attention intercept. */
768 return tape_34xx_erp_retry(request); 769 return tape_34xx_erp_retry(request);
769 } else { 770 } else {
770 /* Global status intercept. */ 771 /* Global status intercept. */
771 return tape_34xx_erp_retry(request); 772 return tape_34xx_erp_retry(request);
772 } 773 }
773 case 0x5a: 774 case 0x5a:
774 /* 775 /*
775 * Tape length incompatible. The tape inserted is too long, 776 * Tape length incompatible. The tape inserted is too long,
776 * which could cause damage to the tape or the drive. 777 * which could cause damage to the tape or the drive.
777 */ 778 */
778 dev_warn (&device->cdev->dev, "The tape unit does not support " 779 dev_warn (&device->cdev->dev, "The tape unit does not support "
779 "the tape length\n"); 780 "the tape length\n");
780 return tape_34xx_erp_failed(request, -EIO); 781 return tape_34xx_erp_failed(request, -EIO);
781 case 0x5b: 782 case 0x5b:
782 /* Format 3480 XF incompatible */ 783 /* Format 3480 XF incompatible */
783 if (sense[1] & SENSE_BEGINNING_OF_TAPE) 784 if (sense[1] & SENSE_BEGINNING_OF_TAPE)
784 /* The tape will get overwritten. */ 785 /* The tape will get overwritten. */
785 return tape_34xx_erp_retry(request); 786 return tape_34xx_erp_retry(request);
786 dev_warn (&device->cdev->dev, "The tape unit does not support" 787 dev_warn (&device->cdev->dev, "The tape unit does not support"
787 " format 3480 XF\n"); 788 " format 3480 XF\n");
788 return tape_34xx_erp_failed(request, -EIO); 789 return tape_34xx_erp_failed(request, -EIO);
789 case 0x5c: 790 case 0x5c:
790 /* Format 3480-2 XF incompatible */ 791 /* Format 3480-2 XF incompatible */
791 dev_warn (&device->cdev->dev, "The tape unit does not support tape " 792 dev_warn (&device->cdev->dev, "The tape unit does not support tape "
792 "format 3480-2 XF\n"); 793 "format 3480-2 XF\n");
793 return tape_34xx_erp_failed(request, -EIO); 794 return tape_34xx_erp_failed(request, -EIO);
794 case 0x5d: 795 case 0x5d:
795 /* Tape length violation. */ 796 /* Tape length violation. */
796 dev_warn (&device->cdev->dev, "The tape unit does not support" 797 dev_warn (&device->cdev->dev, "The tape unit does not support"
797 " the current tape length\n"); 798 " the current tape length\n");
798 return tape_34xx_erp_failed(request, -EMEDIUMTYPE); 799 return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
799 case 0x5e: 800 case 0x5e:
800 /* Compaction algorithm incompatible. */ 801 /* Compaction algorithm incompatible. */
801 dev_warn (&device->cdev->dev, "The tape unit does not support" 802 dev_warn (&device->cdev->dev, "The tape unit does not support"
802 " the compaction algorithm\n"); 803 " the compaction algorithm\n");
803 return tape_34xx_erp_failed(request, -EMEDIUMTYPE); 804 return tape_34xx_erp_failed(request, -EMEDIUMTYPE);
804 805
805 /* The following erpas should have been covered earlier. */ 806 /* The following erpas should have been covered earlier. */
806 case 0x23: /* Read data check. */ 807 case 0x23: /* Read data check. */
807 case 0x25: /* Write data check. */ 808 case 0x25: /* Write data check. */
808 case 0x26: /* Data check (read opposite). */ 809 case 0x26: /* Data check (read opposite). */
809 case 0x28: /* Write id mark check. */ 810 case 0x28: /* Write id mark check. */
810 case 0x31: /* Tape void. */ 811 case 0x31: /* Tape void. */
811 case 0x40: /* Overrun error. */ 812 case 0x40: /* Overrun error. */
812 case 0x41: /* Record sequence error. */ 813 case 0x41: /* Record sequence error. */
813 /* All other erpas are reserved for future use. */ 814 /* All other erpas are reserved for future use. */
814 default: 815 default:
815 return tape_34xx_erp_bug(device, request, irb, sense[3]); 816 return tape_34xx_erp_bug(device, request, irb, sense[3]);
816 } 817 }
817 } 818 }
818 819
819 /* 820 /*
820 * 3480/3490 interrupt handler 821 * 3480/3490 interrupt handler
821 */ 822 */
822 static int 823 static int
823 tape_34xx_irq(struct tape_device *device, struct tape_request *request, 824 tape_34xx_irq(struct tape_device *device, struct tape_request *request,
824 struct irb *irb) 825 struct irb *irb)
825 { 826 {
826 if (request == NULL) 827 if (request == NULL)
827 return tape_34xx_unsolicited_irq(device, irb); 828 return tape_34xx_unsolicited_irq(device, irb);
828 829
829 if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) && 830 if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) &&
830 (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) && 831 (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) &&
831 (request->op == TO_WRI)) { 832 (request->op == TO_WRI)) {
832 /* Write at end of volume */ 833 /* Write at end of volume */
833 return tape_34xx_erp_failed(request, -ENOSPC); 834 return tape_34xx_erp_failed(request, -ENOSPC);
834 } 835 }
835 836
836 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) 837 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
837 return tape_34xx_unit_check(device, request, irb); 838 return tape_34xx_unit_check(device, request, irb);
838 839
839 if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) { 840 if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
840 /* 841 /*
841 * A unit exception occurs on skipping over a tapemark block. 842 * A unit exception occurs on skipping over a tapemark block.
842 */ 843 */
843 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) { 844 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
844 if (request->op == TO_BSB || request->op == TO_FSB) 845 if (request->op == TO_BSB || request->op == TO_FSB)
845 request->rescnt++; 846 request->rescnt++;
846 else 847 else
847 DBF_EVENT(5, "Unit Exception!\n"); 848 DBF_EVENT(5, "Unit Exception!\n");
848 } 849 }
849 return tape_34xx_done(request); 850 return tape_34xx_done(request);
850 } 851 }
851 852
852 DBF_EVENT(6, "xunknownirq\n"); 853 DBF_EVENT(6, "xunknownirq\n");
853 tape_dump_sense_dbf(device, request, irb); 854 tape_dump_sense_dbf(device, request, irb);
854 return TAPE_IO_STOP; 855 return TAPE_IO_STOP;
855 } 856 }
856 857
857 /* 858 /*
858 * ioctl_overload 859 * ioctl_overload
859 */ 860 */
860 static int 861 static int
861 tape_34xx_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg) 862 tape_34xx_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg)
862 { 863 {
863 if (cmd == TAPE390_DISPLAY) { 864 if (cmd == TAPE390_DISPLAY) {
864 struct display_struct disp; 865 struct display_struct disp;
865 866
866 if (copy_from_user(&disp, (char __user *) arg, sizeof(disp)) != 0) 867 if (copy_from_user(&disp, (char __user *) arg, sizeof(disp)) != 0)
867 return -EFAULT; 868 return -EFAULT;
868 869
869 return tape_std_display(device, &disp); 870 return tape_std_display(device, &disp);
870 } else 871 } else
871 return -EINVAL; 872 return -EINVAL;
872 } 873 }
873 874
874 static inline void 875 static inline void
875 tape_34xx_append_new_sbid(struct tape_34xx_block_id bid, struct list_head *l) 876 tape_34xx_append_new_sbid(struct tape_34xx_block_id bid, struct list_head *l)
876 { 877 {
877 struct tape_34xx_sbid * new_sbid; 878 struct tape_34xx_sbid * new_sbid;
878 879
879 new_sbid = kmalloc(sizeof(*new_sbid), GFP_ATOMIC); 880 new_sbid = kmalloc(sizeof(*new_sbid), GFP_ATOMIC);
880 if (!new_sbid) 881 if (!new_sbid)
881 return; 882 return;
882 883
883 new_sbid->bid = bid; 884 new_sbid->bid = bid;
884 list_add(&new_sbid->list, l); 885 list_add(&new_sbid->list, l);
885 } 886 }
886 887
/*
 * Build up the search block ID list. The block ID consists of a logical
 * block number and a hardware specific part. The hardware specific part
 * helps the tape drive to speed up searching for a specific block.
 * The list is kept sorted by logical block number with at most one
 * entry per wrap/segment combination.
 */
static void
tape_34xx_add_sbid(struct tape_device *device, struct tape_34xx_block_id bid)
{
	struct list_head *	sbid_list;
	struct tape_34xx_sbid *	sbid;
	struct list_head *	l;

	/*
	 * immediately return if there is no list at all or the block to add
	 * is located in segment 1 of wrap 0 because this position is used
	 * if no hardware position data is supplied.
	 */
	sbid_list = (struct list_head *) device->discdata;
	if (!sbid_list || (bid.segment < 2 && bid.wrap == 0))
		return;

	/*
	 * Search the position where to insert the new entry. Hardware
	 * acceleration uses only the segment and wrap number. So we
	 * need only one entry for a specific wrap/segment combination.
	 * If there is a block with a lower number but the same hard-
	 * ware position data we just update the block number in the
	 * existing entry.
	 */
	list_for_each(l, sbid_list) {
		sbid = list_entry(l, struct tape_34xx_sbid, list);

		if (
			(sbid->bid.segment == bid.segment) &&
			(sbid->bid.wrap    == bid.wrap)
		) {
			/* Same hardware position: keep the lowest block
			 * number, otherwise the list is already correct. */
			if (bid.block < sbid->bid.block)
				sbid->bid = bid;
			else return;
			break;
		}

		/* Sort in according to logical block number. */
		if (bid.block < sbid->bid.block) {
			/* Insert before the first bigger entry. */
			tape_34xx_append_new_sbid(bid, l->prev);
			break;
		}
	}
	/*
	 * List empty or new block bigger than last entry: the loop above
	 * ran to completion (l wrapped back to the list head), so append
	 * at the tail of the list.
	 */
	if (l == sbid_list)
		tape_34xx_append_new_sbid(bid, l->prev);

	/* Dump the resulting list to the debug feature (trace only). */
	DBF_LH(4, "Current list is:\n");
	list_for_each(l, sbid_list) {
		sbid = list_entry(l, struct tape_34xx_sbid, list);
		DBF_LH(4, "%d:%03d@%05d\n",
			sbid->bid.wrap,
			sbid->bid.segment,
			sbid->bid.block
		);
	}
}
949 950
950 /* 951 /*
951 * Delete all entries from the search block ID list that belong to tape blocks 952 * Delete all entries from the search block ID list that belong to tape blocks
952 * equal or higher than the given number. 953 * equal or higher than the given number.
953 */ 954 */
954 static void 955 static void
955 tape_34xx_delete_sbid_from(struct tape_device *device, int from) 956 tape_34xx_delete_sbid_from(struct tape_device *device, int from)
956 { 957 {
957 struct list_head * sbid_list; 958 struct list_head * sbid_list;
958 struct tape_34xx_sbid * sbid; 959 struct tape_34xx_sbid * sbid;
959 struct list_head * l; 960 struct list_head * l;
960 struct list_head * n; 961 struct list_head * n;
961 962
962 sbid_list = (struct list_head *) device->discdata; 963 sbid_list = (struct list_head *) device->discdata;
963 if (!sbid_list) 964 if (!sbid_list)
964 return; 965 return;
965 966
966 list_for_each_safe(l, n, sbid_list) { 967 list_for_each_safe(l, n, sbid_list) {
967 sbid = list_entry(l, struct tape_34xx_sbid, list); 968 sbid = list_entry(l, struct tape_34xx_sbid, list);
968 if (sbid->bid.block >= from) { 969 if (sbid->bid.block >= from) {
969 DBF_LH(4, "Delete sbid %d:%03d@%05d\n", 970 DBF_LH(4, "Delete sbid %d:%03d@%05d\n",
970 sbid->bid.wrap, 971 sbid->bid.wrap,
971 sbid->bid.segment, 972 sbid->bid.segment,
972 sbid->bid.block 973 sbid->bid.block
973 ); 974 );
974 list_del(l); 975 list_del(l);
975 kfree(sbid); 976 kfree(sbid);
976 } 977 }
977 } 978 }
978 } 979 }
979 980
980 /* 981 /*
981 * Merge hardware position data into a block id. 982 * Merge hardware position data into a block id.
982 */ 983 */
983 static void 984 static void
984 tape_34xx_merge_sbid( 985 tape_34xx_merge_sbid(
985 struct tape_device * device, 986 struct tape_device * device,
986 struct tape_34xx_block_id * bid 987 struct tape_34xx_block_id * bid
987 ) { 988 ) {
988 struct tape_34xx_sbid * sbid; 989 struct tape_34xx_sbid * sbid;
989 struct tape_34xx_sbid * sbid_to_use; 990 struct tape_34xx_sbid * sbid_to_use;
990 struct list_head * sbid_list; 991 struct list_head * sbid_list;
991 struct list_head * l; 992 struct list_head * l;
992 993
993 sbid_list = (struct list_head *) device->discdata; 994 sbid_list = (struct list_head *) device->discdata;
994 bid->wrap = 0; 995 bid->wrap = 0;
995 bid->segment = 1; 996 bid->segment = 1;
996 997
997 if (!sbid_list || list_empty(sbid_list)) 998 if (!sbid_list || list_empty(sbid_list))
998 return; 999 return;
999 1000
1000 sbid_to_use = NULL; 1001 sbid_to_use = NULL;
1001 list_for_each(l, sbid_list) { 1002 list_for_each(l, sbid_list) {
1002 sbid = list_entry(l, struct tape_34xx_sbid, list); 1003 sbid = list_entry(l, struct tape_34xx_sbid, list);
1003 1004
1004 if (sbid->bid.block >= bid->block) 1005 if (sbid->bid.block >= bid->block)
1005 break; 1006 break;
1006 sbid_to_use = sbid; 1007 sbid_to_use = sbid;
1007 } 1008 }
1008 if (sbid_to_use) { 1009 if (sbid_to_use) {
1009 bid->wrap = sbid_to_use->bid.wrap; 1010 bid->wrap = sbid_to_use->bid.wrap;
1010 bid->segment = sbid_to_use->bid.segment; 1011 bid->segment = sbid_to_use->bid.segment;
1011 DBF_LH(4, "Use %d:%03d@%05d for %05d\n", 1012 DBF_LH(4, "Use %d:%03d@%05d for %05d\n",
1012 sbid_to_use->bid.wrap, 1013 sbid_to_use->bid.wrap,
1013 sbid_to_use->bid.segment, 1014 sbid_to_use->bid.segment,
1014 sbid_to_use->bid.block, 1015 sbid_to_use->bid.block,
1015 bid->block 1016 bid->block
1016 ); 1017 );
1017 } 1018 }
1018 } 1019 }
1019 1020
1020 static int 1021 static int
1021 tape_34xx_setup_device(struct tape_device * device) 1022 tape_34xx_setup_device(struct tape_device * device)
1022 { 1023 {
1023 int rc; 1024 int rc;
1024 struct list_head * discdata; 1025 struct list_head * discdata;
1025 1026
1026 DBF_EVENT(6, "34xx device setup\n"); 1027 DBF_EVENT(6, "34xx device setup\n");
1027 if ((rc = tape_std_assign(device)) == 0) { 1028 if ((rc = tape_std_assign(device)) == 0) {
1028 if ((rc = tape_34xx_medium_sense(device)) != 0) { 1029 if ((rc = tape_34xx_medium_sense(device)) != 0) {
1029 DBF_LH(3, "34xx medium sense returned %d\n", rc); 1030 DBF_LH(3, "34xx medium sense returned %d\n", rc);
1030 } 1031 }
1031 } 1032 }
1032 discdata = kmalloc(sizeof(struct list_head), GFP_KERNEL); 1033 discdata = kmalloc(sizeof(struct list_head), GFP_KERNEL);
1033 if (discdata) { 1034 if (discdata) {
1034 INIT_LIST_HEAD(discdata); 1035 INIT_LIST_HEAD(discdata);
1035 device->discdata = discdata; 1036 device->discdata = discdata;
1036 } 1037 }
1037 1038
1038 return rc; 1039 return rc;
1039 } 1040 }
1040 1041
1041 static void 1042 static void
1042 tape_34xx_cleanup_device(struct tape_device *device) 1043 tape_34xx_cleanup_device(struct tape_device *device)
1043 { 1044 {
1044 tape_std_unassign(device); 1045 tape_std_unassign(device);
1045 1046
1046 if (device->discdata) { 1047 if (device->discdata) {
1047 tape_34xx_delete_sbid_from(device, 0); 1048 tape_34xx_delete_sbid_from(device, 0);
1048 kfree(device->discdata); 1049 kfree(device->discdata);
1049 device->discdata = NULL; 1050 device->discdata = NULL;
1050 } 1051 }
1051 } 1052 }
1052 1053
1053 1054
/*
 * MTTELL: Tell block. Return the number of block relative to current file.
 *
 * Issues a Read Block ID to the drive; the channel block id (cbid) is
 * cached in the segment bid table so a later MTSEEK can be sped up.
 * Returns the block number (>= 0) or a negative error code.
 */
static int
tape_34xx_mttell(struct tape_device *device, int mt_count)
{
	/* Read Block ID returns two packed 4-byte ids: channel + device. */
	struct {
		struct tape_34xx_block_id	cbid;
		struct tape_34xx_block_id	dbid;
	} __attribute__ ((packed)) block_id;
	int rc;

	rc = tape_std_read_block_id(device, (__u64 *) &block_id);
	if (rc)
		return rc;

	/* Remember the channel block id for later locate optimization. */
	tape_34xx_add_sbid(device, block_id.cbid);
	return block_id.cbid.block;
}
1073 1074
/*
 * MTSEEK: seek to the specified block.
 *
 * Builds a three-ccw chain (mode set, locate, nop) and executes it.
 * The block id field is only 22 bits wide, hence the 0x3fffff limit.
 * Returns 0 on success or a negative error code.
 */
static int
tape_34xx_mtseek(struct tape_device *device, int mt_count)
{
	struct tape_request *request;
	struct tape_34xx_block_id *	bid;

	/* Block numbers are limited to 22 bits in the locate block id. */
	if (mt_count > 0x3fffff) {
		DBF_EXCEPTION(6, "xsee parm\n");
		return -EINVAL;
	}
	request = tape_alloc_request(3, 4);
	if (IS_ERR(request))
		return PTR_ERR(request);

	/* setup ccws */
	request->op = TO_LBL;
	bid = (struct tape_34xx_block_id *) request->cpdata;
	/* Bit 0x08 of the modeset byte selects the 3480-2 XF format. */
	bid->format = (*device->modeset_byte & 0x08) ?
			TAPE34XX_FMT_3480_XF : TAPE34XX_FMT_3480;
	bid->block = mt_count;
	/* Fill in wrap/segment from the cached segment bid table. */
	tape_34xx_merge_sbid(device, bid);

	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
	tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);

	/* execute it */
	return tape_do_io_free(device, request);
}
1106 1107
#ifdef CONFIG_S390_TAPE_BLOCK
/*
 * Tape block read for 34xx.
 *
 * Translates a block-layer read request into a tape ccw chain:
 * mode set + two nop slots (filled by tape_34xx_check_locate when a
 * seek is required) followed by one READ_FORWARD ccw per hardware
 * sector, terminated by a nop.  Returns the built tape_request or an
 * ERR_PTR on allocation failure.
 */
static struct tape_request *
tape_34xx_bread(struct tape_device *device, struct request *req)
{
	struct tape_request *request;
	struct ccw1 *ccw;
	int count = 0;
	unsigned off;
	char *dst;
	struct bio_vec *bv;
	struct req_iterator iter;
	struct tape_34xx_block_id *	start_block;

	DBF_EVENT(6, "xBREDid:");

	/* Count the number of blocks for the request. */
	rq_for_each_segment(bv, req, iter)
		count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);

	/* Allocate the ccw request. */
	/* 3 leading ccws + one READ_FORWARD per block + trailing nop. */
	request = tape_alloc_request(3+count+1, 8);
	if (IS_ERR(request))
		return request;

	/* Setup ccws. */
	request->op = TO_BLOCK;
	start_block = (struct tape_34xx_block_id *) request->cpdata;
	/* Convert the 512-byte sector position to hardware sectors. */
	start_block->block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
	DBF_EVENT(6, "start_block = %i\n", start_block->block);

	ccw = request->cpaddr;
	ccw = tape_ccw_cc(ccw, MODE_SET_DB, 1, device->modeset_byte);

	/*
	 * We always setup a nop after the mode set ccw. This slot is
	 * used in tape_std_check_locate to insert a locate ccw if the
	 * current tape position doesn't match the start block to be read.
	 * The second nop will be filled with a read block id which is in
	 * turn used by tape_34xx_free_bread to populate the segment bid
	 * table.
	 */
	ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
	ccw = tape_ccw_cc(ccw, NOP, 0, NULL);

	/* One READ_FORWARD ccw per hardware sector of each bio segment. */
	rq_for_each_segment(bv, req, iter) {
		dst = kmap(bv->bv_page) + bv->bv_offset;
		for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) {
			ccw->flags = CCW_FLAG_CC;
			ccw->cmd_code = READ_FORWARD;
			ccw->count = TAPEBLOCK_HSEC_SIZE;
			set_normalized_cda(ccw, (void*) __pa(dst));
			ccw++;
			dst += TAPEBLOCK_HSEC_SIZE;
		}
	}

	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
	DBF_EVENT(6, "xBREDccwg\n");
	return request;
}
1170 1171
/*
 * Free a block-read request built by tape_34xx_bread.  If check_locate
 * converted the second nop slot into a READ_BLOCK_ID, harvest the
 * returned channel block id into the segment bid table first, then
 * release all normalized CDAs and the request itself.
 */
static void
tape_34xx_free_bread (struct tape_request *request)
{
	struct ccw1* ccw;

	ccw = request->cpaddr;
	/* Slot 2 holds READ_BLOCK_ID only when check_locate inserted it. */
	if ((ccw + 2)->cmd_code == READ_BLOCK_ID) {
		struct {
			struct tape_34xx_block_id	cbid;
			struct tape_34xx_block_id	dbid;
		} __attribute__ ((packed)) *rbi_data;

		rbi_data = request->cpdata;

		if (request->device)
			tape_34xx_add_sbid(request->device, rbi_data->cbid);
	}

	/* Last ccw is a nop and doesn't need clear_normalized_cda */
	for (; ccw->flags & CCW_FLAG_CC; ccw++)
		if (ccw->cmd_code == READ_FORWARD)
			clear_normalized_cda(ccw);
	tape_free_request(request);
}
1195 1196
/*
 * check_locate is called just before the tape request is passed to
 * the common io layer for execution. It has to check the current
 * tape position and insert a locate ccw if it doesn't match the
 * start block for the request.
 */
static void
tape_34xx_check_locate(struct tape_device *device, struct tape_request *request)
{
	struct tape_34xx_block_id *	start_block;

	start_block = (struct tape_34xx_block_id *) request->cpdata;
	/* Already positioned correctly - leave the two nop slots alone. */
	if (start_block->block == device->blk_data.block_position)
		return;

	DBF_LH(4, "Block seek(%06d+%06d)\n", start_block->block, device->bof);
	start_block->wrap = 0;
	start_block->segment = 1;
	/* Bit 0x08 of the modeset byte selects the 3480-2 XF format. */
	start_block->format = (*device->modeset_byte & 0x08) ?
				TAPE34XX_FMT_3480_XF :
				TAPE34XX_FMT_3480;
	/* Skip the blocks occupied by the volume label (begin of file). */
	start_block->block = start_block->block + device->bof;
	tape_34xx_merge_sbid(device, start_block);
	/* Replace the nop slots: locate + read block id (harvested later
	 * by tape_34xx_free_bread). */
	tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
	tape_ccw_cc(request->cpaddr + 2, READ_BLOCK_ID, 8, request->cpdata);
}
#endif
1223 1224
/*
 * List of 3480/3490 magnetic tape commands.
 *
 * Dispatch table indexed by MT operation code.  Most operations use
 * the standard implementations; MTSEEK/MTTELL use the 34xx-specific
 * block-id aware versions.  NULL entries are unsupported operations.
 */
static tape_mtop_fn tape_34xx_mtop[TAPE_NR_MTOPS] = {
	[MTRESET]	 = tape_std_mtreset,
	[MTFSF]		 = tape_std_mtfsf,
	[MTBSF]		 = tape_std_mtbsf,
	[MTFSR]		 = tape_std_mtfsr,
	[MTBSR]		 = tape_std_mtbsr,
	[MTWEOF]	 = tape_std_mtweof,
	[MTREW]		 = tape_std_mtrew,
	[MTOFFL]	 = tape_std_mtoffl,
	[MTNOP]		 = tape_std_mtnop,
	[MTRETEN]	 = tape_std_mtreten,
	[MTBSFM]	 = tape_std_mtbsfm,
	[MTFSFM]	 = tape_std_mtfsfm,
	[MTEOM]		 = tape_std_mteom,
	[MTERASE]	 = tape_std_mterase,
	[MTRAS1]	 = NULL,
	[MTRAS2]	 = NULL,
	[MTRAS3]	 = NULL,
	[MTSETBLK]	 = tape_std_mtsetblk,
	[MTSETDENSITY]	 = NULL,
	[MTSEEK]	 = tape_34xx_mtseek,
	[MTTELL]	 = tape_34xx_mttell,
	[MTSETDRVBUFFER] = NULL,
	[MTFSS]		 = NULL,
	[MTBSS]		 = NULL,
	[MTWSM]		 = NULL,
	[MTLOCK]	 = NULL,
	[MTUNLOCK]	 = NULL,
	[MTLOAD]	 = tape_std_mtload,
	[MTUNLOAD]	 = tape_std_mtunload,
	[MTCOMPRESSION]	 = tape_std_mtcompression,
	[MTSETPART]	 = NULL,
	[MTMKPART]	 = NULL
};
1261 1262
/*
 * Tape discipline structure for 3480 and 3490.
 *
 * Hooks this driver's setup/cleanup, interrupt and I/O routines into
 * the generic tape core; the block-device callbacks are only present
 * when tape block support is configured.
 */
static struct tape_discipline tape_discipline_34xx = {
	.owner = THIS_MODULE,
	.setup_device = tape_34xx_setup_device,
	.cleanup_device = tape_34xx_cleanup_device,
	.process_eov = tape_std_process_eov,
	.irq = tape_34xx_irq,
	.read_block = tape_std_read_block,
	.write_block = tape_std_write_block,
#ifdef CONFIG_S390_TAPE_BLOCK
	.bread = tape_34xx_bread,
	.free_bread = tape_34xx_free_bread,
	.check_locate = tape_34xx_check_locate,
#endif
	.ioctl_fn = tape_34xx_ioctl,
	.mtop_array = tape_34xx_mtop
};
1281 1282
/* Device/control-unit type pairs this driver binds to. */
static struct ccw_device_id tape_34xx_ids[] = {
	{ CCW_DEVICE_DEVTYPE(0x3480, 0, 0x3480, 0), .driver_info = tape_3480},
	{ CCW_DEVICE_DEVTYPE(0x3490, 0, 0x3490, 0), .driver_info = tape_3490},
	{ /* end of list */ },
};
1287 1288
1288 static int 1289 static int
1289 tape_34xx_online(struct ccw_device *cdev) 1290 tape_34xx_online(struct ccw_device *cdev)
1290 { 1291 {
1291 return tape_generic_online( 1292 return tape_generic_online(
1292 dev_get_drvdata(&cdev->dev), 1293 dev_get_drvdata(&cdev->dev),
1293 &tape_discipline_34xx 1294 &tape_discipline_34xx
1294 ); 1295 );
1295 } 1296 }
1296 1297
/* ccw driver glue: probe/remove/online/offline and PM suspend hooks. */
static struct ccw_driver tape_34xx_driver = {
	.name = "tape_34xx",
	.owner = THIS_MODULE,
	.ids = tape_34xx_ids,
	.probe = tape_generic_probe,
	.remove = tape_generic_remove,
	.set_online = tape_34xx_online,
	.set_offline = tape_generic_offline,
	.freeze = tape_generic_pm_suspend,
};
1307 1308
1308 static int 1309 static int
1309 tape_34xx_init (void) 1310 tape_34xx_init (void)
1310 { 1311 {
1311 int rc; 1312 int rc;
1312 1313
1313 TAPE_DBF_AREA = debug_register ( "tape_34xx", 2, 2, 4*sizeof(long)); 1314 TAPE_DBF_AREA = debug_register ( "tape_34xx", 2, 2, 4*sizeof(long));
1314 debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view); 1315 debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
1315 #ifdef DBF_LIKE_HELL 1316 #ifdef DBF_LIKE_HELL
1316 debug_set_level(TAPE_DBF_AREA, 6); 1317 debug_set_level(TAPE_DBF_AREA, 6);
1317 #endif 1318 #endif
1318 1319
1319 DBF_EVENT(3, "34xx init\n"); 1320 DBF_EVENT(3, "34xx init\n");
1320 /* Register driver for 3480/3490 tapes. */ 1321 /* Register driver for 3480/3490 tapes. */
1321 rc = ccw_driver_register(&tape_34xx_driver); 1322 rc = ccw_driver_register(&tape_34xx_driver);
1322 if (rc) 1323 if (rc)
1323 DBF_EVENT(3, "34xx init failed\n"); 1324 DBF_EVENT(3, "34xx init failed\n");
1324 else 1325 else
1325 DBF_EVENT(3, "34xx registered\n"); 1326 DBF_EVENT(3, "34xx registered\n");
1326 return rc; 1327 return rc;
1327 } 1328 }
1328 1329
1329 static void 1330 static void
1330 tape_34xx_exit(void) 1331 tape_34xx_exit(void)
1331 { 1332 {
1332 ccw_driver_unregister(&tape_34xx_driver); 1333 ccw_driver_unregister(&tape_34xx_driver);
1333 1334
1334 debug_unregister(TAPE_DBF_AREA); 1335 debug_unregister(TAPE_DBF_AREA);
1335 } 1336 }
1336 1337
/* Module metadata and entry points. */
MODULE_DEVICE_TABLE(ccw, tape_34xx_ids);
MODULE_AUTHOR("(C) 2001-2002 IBM Deutschland Entwicklung GmbH");
MODULE_DESCRIPTION("Linux on zSeries channel attached 3480 tape device driver");
MODULE_LICENSE("GPL");

module_init(tape_34xx_init);
module_exit(tape_34xx_exit);
1344 1345
drivers/s390/char/tape_3590.c
1 /* 1 /*
2 * drivers/s390/char/tape_3590.c 2 * drivers/s390/char/tape_3590.c
3 * tape device discipline for 3590 tapes. 3 * tape device discipline for 3590 tapes.
4 * 4 *
5 * Copyright IBM Corp. 2001, 2009 5 * Copyright IBM Corp. 2001, 2009
6 * Author(s): Stefan Bader <shbader@de.ibm.com> 6 * Author(s): Stefan Bader <shbader@de.ibm.com>
7 * Michael Holzheu <holzheu@de.ibm.com> 7 * Michael Holzheu <holzheu@de.ibm.com>
8 * Martin Schwidefsky <schwidefsky@de.ibm.com> 8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
9 */ 9 */
10 10
11 #define KMSG_COMPONENT "tape_3590" 11 #define KMSG_COMPONENT "tape_3590"
12 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12 13
13 #include <linux/module.h> 14 #include <linux/module.h>
14 #include <linux/init.h> 15 #include <linux/init.h>
15 #include <linux/bio.h> 16 #include <linux/bio.h>
16 #include <asm/ebcdic.h> 17 #include <asm/ebcdic.h>
17 18
18 #define TAPE_DBF_AREA tape_3590_dbf 19 #define TAPE_DBF_AREA tape_3590_dbf
19 #define BUFSIZE 512 /* size of buffers for dynamic generated messages */ 20 #define BUFSIZE 512 /* size of buffers for dynamic generated messages */
20 21
21 #include "tape.h" 22 #include "tape.h"
22 #include "tape_std.h" 23 #include "tape_std.h"
23 #include "tape_3590.h" 24 #include "tape_3590.h"
24 25
25 /* 26 /*
26 * Pointer to debug area. 27 * Pointer to debug area.
27 */ 28 */
28 debug_info_t *TAPE_DBF_AREA = NULL; 29 debug_info_t *TAPE_DBF_AREA = NULL;
29 EXPORT_SYMBOL(TAPE_DBF_AREA); 30 EXPORT_SYMBOL(TAPE_DBF_AREA);
30 31
31 /******************************************************************* 32 /*******************************************************************
32 * Error Recovery fuctions: 33 * Error Recovery fuctions:
33 * - Read Opposite: implemented 34 * - Read Opposite: implemented
34 * - Read Device (buffered) log: BRA 35 * - Read Device (buffered) log: BRA
35 * - Read Library log: BRA 36 * - Read Library log: BRA
36 * - Swap Devices: BRA 37 * - Swap Devices: BRA
37 * - Long Busy: implemented 38 * - Long Busy: implemented
38 * - Special Intercept: BRA 39 * - Special Intercept: BRA
39 * - Read Alternate: implemented 40 * - Read Alternate: implemented
40 *******************************************************************/ 41 *******************************************************************/
41 42
/* Human-readable text for 3590 message codes, indexed by code value.
 * Gaps in the index range are NULL (unknown/unused codes). */
static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = {
	[0x00] = "",
	[0x10] = "Lost Sense",
	[0x11] = "Assigned Elsewhere",
	[0x12] = "Allegiance Reset",
	[0x13] = "Shared Access Violation",
	[0x20] = "Command Reject",
	[0x21] = "Configuration Error",
	[0x22] = "Protection Exception",
	[0x23] = "Write Protect",
	[0x24] = "Write Length",
	[0x25] = "Read-Only Format",
	[0x31] = "Beginning of Partition",
	[0x33] = "End of Partition",
	[0x34] = "End of Data",
	[0x35] = "Block not found",
	[0x40] = "Device Intervention",
	[0x41] = "Loader Intervention",
	[0x42] = "Library Intervention",
	[0x50] = "Write Error",
	[0x51] = "Erase Error",
	[0x52] = "Formatting Error",
	[0x53] = "Read Error",
	[0x54] = "Unsupported Format",
	[0x55] = "No Formatting",
	[0x56] = "Positioning lost",
	[0x57] = "Read Length",
	[0x60] = "Unsupported Medium",
	[0x61] = "Medium Length Error",
	[0x62] = "Medium removed",
	[0x64] = "Load Check",
	[0x65] = "Unload Check",
	[0x70] = "Equipment Check",
	[0x71] = "Bus out Check",
	[0x72] = "Protocol Error",
	[0x73] = "Interface Error",
	[0x74] = "Overrun",
	[0x75] = "Halt Signal",
	[0x90] = "Device fenced",
	[0x91] = "Device Path fenced",
	[0xa0] = "Volume misplaced",
	[0xa1] = "Volume inaccessible",
	[0xa2] = "Volume in input",
	[0xa3] = "Volume ejected",
	[0xa4] = "All categories reserved",
	[0xa5] = "Duplicate Volume",
	[0xa6] = "Library Manager Offline",
	[0xa7] = "Library Output Station full",
	[0xa8] = "Vision System non-operational",
	[0xa9] = "Library Manager Equipment Check",
	[0xaa] = "Library Equipment Check",
	[0xab] = "All Library Cells full",
	[0xac] = "No Cleaner Volumes in Library",
	[0xad] = "I/O Station door open",
	[0xae] = "Subsystem environmental alert",
};
98 99
/* Non-zero if the drive hardware supports encryption (3592 crypto). */
static int crypt_supported(struct tape_device *device)
{
	return TAPE390_CRYPT_SUPPORTED(TAPE_3590_CRYPT_INFO(device));
}
103 104
/* Non-zero if encryption is currently switched on for the device. */
static int crypt_enabled(struct tape_device *device)
{
	return TAPE390_CRYPT_ON(TAPE_3590_CRYPT_INFO(device));
}
108 109
/*
 * Convert an external (user-visible) KEKL to the internal 3592 wire
 * format: encode the key types as flag bits, space-pad the label and
 * translate it from ASCII to EBCDIC.
 *
 * NOTE(review): the copy length 64 is assumed to equal
 * sizeof(out->label) — confirm against struct tape3592_kekl.
 */
static void ext_to_int_kekl(struct tape390_kekl *in,
			    struct tape3592_kekl *out)
{
	int i;

	memset(out, 0, sizeof(*out));
	/* 0x40: key is a hash; 0x80: key stored on tape is a hash. */
	if (in->type == TAPE390_KEKL_TYPE_HASH)
		out->flags |= 0x40;
	if (in->type_on_tape == TAPE390_KEKL_TYPE_HASH)
		out->flags |= 0x80;
	strncpy(out->label, in->label, 64);
	/* Pad the remainder with blanks (EBCDIC labels are space-padded). */
	for (i = strlen(in->label); i < sizeof(out->label); i++)
		out->label[i] = ' ';
	ASCEBC(out->label, sizeof(out->label));
}
124 125
125 static void int_to_ext_kekl(struct tape3592_kekl *in, 126 static void int_to_ext_kekl(struct tape3592_kekl *in,
126 struct tape390_kekl *out) 127 struct tape390_kekl *out)
127 { 128 {
128 memset(out, 0, sizeof(*out)); 129 memset(out, 0, sizeof(*out));
129 if(in->flags & 0x40) 130 if(in->flags & 0x40)
130 out->type = TAPE390_KEKL_TYPE_HASH; 131 out->type = TAPE390_KEKL_TYPE_HASH;
131 else 132 else
132 out->type = TAPE390_KEKL_TYPE_LABEL; 133 out->type = TAPE390_KEKL_TYPE_LABEL;
133 if(in->flags & 0x80) 134 if(in->flags & 0x80)
134 out->type_on_tape = TAPE390_KEKL_TYPE_HASH; 135 out->type_on_tape = TAPE390_KEKL_TYPE_HASH;
135 else 136 else
136 out->type_on_tape = TAPE390_KEKL_TYPE_LABEL; 137 out->type_on_tape = TAPE390_KEKL_TYPE_LABEL;
137 memcpy(out->label, in->label, sizeof(in->label)); 138 memcpy(out->label, in->label, sizeof(in->label));
138 EBCASC(out->label, sizeof(in->label)); 139 EBCASC(out->label, sizeof(in->label));
139 strstrip(out->label); 140 strstrip(out->label);
140 } 141 }
141 142
142 static void int_to_ext_kekl_pair(struct tape3592_kekl_pair *in, 143 static void int_to_ext_kekl_pair(struct tape3592_kekl_pair *in,
143 struct tape390_kekl_pair *out) 144 struct tape390_kekl_pair *out)
144 { 145 {
145 if (in->count == 0) { 146 if (in->count == 0) {
146 out->kekl[0].type = TAPE390_KEKL_TYPE_NONE; 147 out->kekl[0].type = TAPE390_KEKL_TYPE_NONE;
147 out->kekl[0].type_on_tape = TAPE390_KEKL_TYPE_NONE; 148 out->kekl[0].type_on_tape = TAPE390_KEKL_TYPE_NONE;
148 out->kekl[1].type = TAPE390_KEKL_TYPE_NONE; 149 out->kekl[1].type = TAPE390_KEKL_TYPE_NONE;
149 out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE; 150 out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE;
150 } else if (in->count == 1) { 151 } else if (in->count == 1) {
151 int_to_ext_kekl(&in->kekl[0], &out->kekl[0]); 152 int_to_ext_kekl(&in->kekl[0], &out->kekl[0]);
152 out->kekl[1].type = TAPE390_KEKL_TYPE_NONE; 153 out->kekl[1].type = TAPE390_KEKL_TYPE_NONE;
153 out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE; 154 out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE;
154 } else if (in->count == 2) { 155 } else if (in->count == 2) {
155 int_to_ext_kekl(&in->kekl[0], &out->kekl[0]); 156 int_to_ext_kekl(&in->kekl[0], &out->kekl[0]);
156 int_to_ext_kekl(&in->kekl[1], &out->kekl[1]); 157 int_to_ext_kekl(&in->kekl[1], &out->kekl[1]);
157 } else { 158 } else {
158 printk("Invalid KEKL number: %d\n", in->count); 159 printk("Invalid KEKL number: %d\n", in->count);
159 BUG(); 160 BUG();
160 } 161 }
161 } 162 }
162 163
163 static int check_ext_kekl(struct tape390_kekl *kekl) 164 static int check_ext_kekl(struct tape390_kekl *kekl)
164 { 165 {
165 if (kekl->type == TAPE390_KEKL_TYPE_NONE) 166 if (kekl->type == TAPE390_KEKL_TYPE_NONE)
166 goto invalid; 167 goto invalid;
167 if (kekl->type > TAPE390_KEKL_TYPE_HASH) 168 if (kekl->type > TAPE390_KEKL_TYPE_HASH)
168 goto invalid; 169 goto invalid;
169 if (kekl->type_on_tape == TAPE390_KEKL_TYPE_NONE) 170 if (kekl->type_on_tape == TAPE390_KEKL_TYPE_NONE)
170 goto invalid; 171 goto invalid;
171 if (kekl->type_on_tape > TAPE390_KEKL_TYPE_HASH) 172 if (kekl->type_on_tape > TAPE390_KEKL_TYPE_HASH)
172 goto invalid; 173 goto invalid;
173 if ((kekl->type == TAPE390_KEKL_TYPE_HASH) && 174 if ((kekl->type == TAPE390_KEKL_TYPE_HASH) &&
174 (kekl->type_on_tape == TAPE390_KEKL_TYPE_LABEL)) 175 (kekl->type_on_tape == TAPE390_KEKL_TYPE_LABEL))
175 goto invalid; 176 goto invalid;
176 177
177 return 0; 178 return 0;
178 invalid: 179 invalid:
179 return -EINVAL; 180 return -EINVAL;
180 } 181 }
181 182
182 static int check_ext_kekl_pair(struct tape390_kekl_pair *kekls) 183 static int check_ext_kekl_pair(struct tape390_kekl_pair *kekls)
183 { 184 {
184 if (check_ext_kekl(&kekls->kekl[0])) 185 if (check_ext_kekl(&kekls->kekl[0]))
185 goto invalid; 186 goto invalid;
186 if (check_ext_kekl(&kekls->kekl[1])) 187 if (check_ext_kekl(&kekls->kekl[1]))
187 goto invalid; 188 goto invalid;
188 189
189 return 0; 190 return 0;
190 invalid: 191 invalid:
191 return -EINVAL; 192 return -EINVAL;
192 } 193 }
193 194
/*
 * Query KEKLs
 *
 * Issues a Perform Subsystem Function (order 0xe2) followed by a
 * Read Subsystem Data to fetch up to two KEKLs from the drive, then
 * converts them into the external representation in *ext_kekls.
 * Returns 0 on success or a negative error code; uses goto-based
 * cleanup to release the request and the DMA buffer on all paths.
 */
static int tape_3592_kekl_query(struct tape_device *device,
				struct tape390_kekl_pair *ext_kekls)
{
	struct tape_request *request;
	struct tape3592_kekl_query_order *order;
	struct tape3592_kekl_query_data *int_kekls;
	int rc;

	DBF_EVENT(6, "tape3592_kekl_query\n");
	/* GFP_DMA: the buffer is a channel-program data target. */
	int_kekls = kmalloc(sizeof(*int_kekls), GFP_KERNEL|GFP_DMA);
	if (!int_kekls)
		return -ENOMEM;
	request = tape_alloc_request(2, sizeof(*order));
	if (IS_ERR(request)) {
		rc = PTR_ERR(request);
		goto fail_malloc;
	}
	order = request->cpdata;
	memset(order,0,sizeof(*order));
	order->code = 0xe2;	/* KEKL query subsystem function */
	order->max_count = 2;
	request->op = TO_KEKL_QUERY;
	tape_ccw_cc(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order);
	tape_ccw_end(request->cpaddr + 1, READ_SS_DATA, sizeof(*int_kekls),
		     int_kekls);
	rc = tape_do_io(device, request);
	if (rc)
		goto fail_request;
	int_to_ext_kekl_pair(&int_kekls->kekls, ext_kekls);

	rc = 0;
fail_request:
	tape_free_request(request);
fail_malloc:
	kfree(int_kekls);
	return rc;
}
234 235
235 /* 236 /*
236 * IOCTL: Query KEKLs 237 * IOCTL: Query KEKLs
237 */ 238 */
238 static int tape_3592_ioctl_kekl_query(struct tape_device *device, 239 static int tape_3592_ioctl_kekl_query(struct tape_device *device,
239 unsigned long arg) 240 unsigned long arg)
240 { 241 {
241 int rc; 242 int rc;
242 struct tape390_kekl_pair *ext_kekls; 243 struct tape390_kekl_pair *ext_kekls;
243 244
244 DBF_EVENT(6, "tape_3592_ioctl_kekl_query\n"); 245 DBF_EVENT(6, "tape_3592_ioctl_kekl_query\n");
245 if (!crypt_supported(device)) 246 if (!crypt_supported(device))
246 return -ENOSYS; 247 return -ENOSYS;
247 if (!crypt_enabled(device)) 248 if (!crypt_enabled(device))
248 return -EUNATCH; 249 return -EUNATCH;
249 ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL); 250 ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL);
250 if (!ext_kekls) 251 if (!ext_kekls)
251 return -ENOMEM; 252 return -ENOMEM;
252 rc = tape_3592_kekl_query(device, ext_kekls); 253 rc = tape_3592_kekl_query(device, ext_kekls);
253 if (rc != 0) 254 if (rc != 0)
254 goto fail; 255 goto fail;
255 if (copy_to_user((char __user *) arg, ext_kekls, sizeof(*ext_kekls))) { 256 if (copy_to_user((char __user *) arg, ext_kekls, sizeof(*ext_kekls))) {
256 rc = -EFAULT; 257 rc = -EFAULT;
257 goto fail; 258 goto fail;
258 } 259 }
259 rc = 0; 260 rc = 0;
260 fail: 261 fail:
261 kfree(ext_kekls); 262 kfree(ext_kekls);
262 return rc; 263 return rc;
263 } 264 }
264 265
265 static int tape_3590_mttell(struct tape_device *device, int mt_count); 266 static int tape_3590_mttell(struct tape_device *device, int mt_count);
266 267
/*
 * Set KEKLs
 *
 * Program the drive with a pair of key-encrypting key labels
 * (subsystem function order 0xe3). The labels are converted from the
 * ioctl (external) format to the channel (internal) format first.
 * Setting KEKLs is only permitted at the start of the volume;
 * otherwise -EBADSLT is returned.
 */
static int tape_3592_kekl_set(struct tape_device *device,
			      struct tape390_kekl_pair *ext_kekls)
{
	struct tape_request *request;
	struct tape3592_kekl_set_order *order;

	DBF_EVENT(6, "tape3592_kekl_set\n");
	/* Validate the user-supplied label pair before touching the drive. */
	if (check_ext_kekl_pair(ext_kekls)) {
		DBF_EVENT(6, "invalid kekls\n");
		return -EINVAL;
	}
	/* Refuse unless the tape is positioned at block 0. */
	if (tape_3590_mttell(device, 0) != 0)
		return -EBADSLT;
	request = tape_alloc_request(1, sizeof(*order));
	if (IS_ERR(request))
		return PTR_ERR(request);
	order = request->cpdata;
	memset(order, 0, sizeof(*order));
	order->code = 0xe3;	/* KEKL set subsystem function */
	order->kekls.count = 2;
	ext_to_int_kekl(&ext_kekls->kekl[0], &order->kekls.kekl[0]);
	ext_to_int_kekl(&ext_kekls->kekl[1], &order->kekls.kekl[1]);
	request->op = TO_KEKL_SET;
	tape_ccw_end(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order);

	return tape_do_io_free(device, request);
}
297 298
298 /* 299 /*
299 * IOCTL: Set KEKLs 300 * IOCTL: Set KEKLs
300 */ 301 */
301 static int tape_3592_ioctl_kekl_set(struct tape_device *device, 302 static int tape_3592_ioctl_kekl_set(struct tape_device *device,
302 unsigned long arg) 303 unsigned long arg)
303 { 304 {
304 int rc; 305 int rc;
305 struct tape390_kekl_pair *ext_kekls; 306 struct tape390_kekl_pair *ext_kekls;
306 307
307 DBF_EVENT(6, "tape_3592_ioctl_kekl_set\n"); 308 DBF_EVENT(6, "tape_3592_ioctl_kekl_set\n");
308 if (!crypt_supported(device)) 309 if (!crypt_supported(device))
309 return -ENOSYS; 310 return -ENOSYS;
310 if (!crypt_enabled(device)) 311 if (!crypt_enabled(device))
311 return -EUNATCH; 312 return -EUNATCH;
312 ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL); 313 ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL);
313 if (!ext_kekls) 314 if (!ext_kekls)
314 return -ENOMEM; 315 return -ENOMEM;
315 if (copy_from_user(ext_kekls, (char __user *)arg, sizeof(*ext_kekls))) { 316 if (copy_from_user(ext_kekls, (char __user *)arg, sizeof(*ext_kekls))) {
316 rc = -EFAULT; 317 rc = -EFAULT;
317 goto out; 318 goto out;
318 } 319 }
319 rc = tape_3592_kekl_set(device, ext_kekls); 320 rc = tape_3592_kekl_set(device, ext_kekls);
320 out: 321 out:
321 kfree(ext_kekls); 322 kfree(ext_kekls);
322 return rc; 323 return rc;
323 } 324 }
324 325
325 /* 326 /*
326 * Enable encryption 327 * Enable encryption
327 */ 328 */
328 static int tape_3592_enable_crypt(struct tape_device *device) 329 static int tape_3592_enable_crypt(struct tape_device *device)
329 { 330 {
330 struct tape_request *request; 331 struct tape_request *request;
331 char *data; 332 char *data;
332 333
333 DBF_EVENT(6, "tape_3592_enable_crypt\n"); 334 DBF_EVENT(6, "tape_3592_enable_crypt\n");
334 if (!crypt_supported(device)) 335 if (!crypt_supported(device))
335 return -ENOSYS; 336 return -ENOSYS;
336 request = tape_alloc_request(2, 72); 337 request = tape_alloc_request(2, 72);
337 if (IS_ERR(request)) 338 if (IS_ERR(request))
338 return PTR_ERR(request); 339 return PTR_ERR(request);
339 data = request->cpdata; 340 data = request->cpdata;
340 memset(data,0,72); 341 memset(data,0,72);
341 342
342 data[0] = 0x05; 343 data[0] = 0x05;
343 data[36 + 0] = 0x03; 344 data[36 + 0] = 0x03;
344 data[36 + 1] = 0x03; 345 data[36 + 1] = 0x03;
345 data[36 + 4] = 0x40; 346 data[36 + 4] = 0x40;
346 data[36 + 6] = 0x01; 347 data[36 + 6] = 0x01;
347 data[36 + 14] = 0x2f; 348 data[36 + 14] = 0x2f;
348 data[36 + 18] = 0xc3; 349 data[36 + 18] = 0xc3;
349 data[36 + 35] = 0x72; 350 data[36 + 35] = 0x72;
350 request->op = TO_CRYPT_ON; 351 request->op = TO_CRYPT_ON;
351 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); 352 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
352 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); 353 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
353 return tape_do_io_free(device, request); 354 return tape_do_io_free(device, request);
354 } 355 }
355 356
356 /* 357 /*
357 * Disable encryption 358 * Disable encryption
358 */ 359 */
359 static int tape_3592_disable_crypt(struct tape_device *device) 360 static int tape_3592_disable_crypt(struct tape_device *device)
360 { 361 {
361 struct tape_request *request; 362 struct tape_request *request;
362 char *data; 363 char *data;
363 364
364 DBF_EVENT(6, "tape_3592_disable_crypt\n"); 365 DBF_EVENT(6, "tape_3592_disable_crypt\n");
365 if (!crypt_supported(device)) 366 if (!crypt_supported(device))
366 return -ENOSYS; 367 return -ENOSYS;
367 request = tape_alloc_request(2, 72); 368 request = tape_alloc_request(2, 72);
368 if (IS_ERR(request)) 369 if (IS_ERR(request))
369 return PTR_ERR(request); 370 return PTR_ERR(request);
370 data = request->cpdata; 371 data = request->cpdata;
371 memset(data,0,72); 372 memset(data,0,72);
372 373
373 data[0] = 0x05; 374 data[0] = 0x05;
374 data[36 + 0] = 0x03; 375 data[36 + 0] = 0x03;
375 data[36 + 1] = 0x03; 376 data[36 + 1] = 0x03;
376 data[36 + 35] = 0x32; 377 data[36 + 35] = 0x32;
377 378
378 request->op = TO_CRYPT_OFF; 379 request->op = TO_CRYPT_OFF;
379 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); 380 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
380 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); 381 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
381 382
382 return tape_do_io_free(device, request); 383 return tape_do_io_free(device, request);
383 } 384 }
384 385
385 /* 386 /*
386 * IOCTL: Set encryption status 387 * IOCTL: Set encryption status
387 */ 388 */
388 static int tape_3592_ioctl_crypt_set(struct tape_device *device, 389 static int tape_3592_ioctl_crypt_set(struct tape_device *device,
389 unsigned long arg) 390 unsigned long arg)
390 { 391 {
391 struct tape390_crypt_info info; 392 struct tape390_crypt_info info;
392 393
393 DBF_EVENT(6, "tape_3592_ioctl_crypt_set\n"); 394 DBF_EVENT(6, "tape_3592_ioctl_crypt_set\n");
394 if (!crypt_supported(device)) 395 if (!crypt_supported(device))
395 return -ENOSYS; 396 return -ENOSYS;
396 if (copy_from_user(&info, (char __user *)arg, sizeof(info))) 397 if (copy_from_user(&info, (char __user *)arg, sizeof(info)))
397 return -EFAULT; 398 return -EFAULT;
398 if (info.status & ~TAPE390_CRYPT_ON_MASK) 399 if (info.status & ~TAPE390_CRYPT_ON_MASK)
399 return -EINVAL; 400 return -EINVAL;
400 if (info.status & TAPE390_CRYPT_ON_MASK) 401 if (info.status & TAPE390_CRYPT_ON_MASK)
401 return tape_3592_enable_crypt(device); 402 return tape_3592_enable_crypt(device);
402 else 403 else
403 return tape_3592_disable_crypt(device); 404 return tape_3592_disable_crypt(device);
404 } 405 }
405 406
406 static int tape_3590_sense_medium(struct tape_device *device); 407 static int tape_3590_sense_medium(struct tape_device *device);
407 408
408 /* 409 /*
409 * IOCTL: Query enryption status 410 * IOCTL: Query enryption status
410 */ 411 */
411 static int tape_3592_ioctl_crypt_query(struct tape_device *device, 412 static int tape_3592_ioctl_crypt_query(struct tape_device *device,
412 unsigned long arg) 413 unsigned long arg)
413 { 414 {
414 DBF_EVENT(6, "tape_3592_ioctl_crypt_query\n"); 415 DBF_EVENT(6, "tape_3592_ioctl_crypt_query\n");
415 if (!crypt_supported(device)) 416 if (!crypt_supported(device))
416 return -ENOSYS; 417 return -ENOSYS;
417 tape_3590_sense_medium(device); 418 tape_3590_sense_medium(device);
418 if (copy_to_user((char __user *) arg, &TAPE_3590_CRYPT_INFO(device), 419 if (copy_to_user((char __user *) arg, &TAPE_3590_CRYPT_INFO(device),
419 sizeof(TAPE_3590_CRYPT_INFO(device)))) 420 sizeof(TAPE_3590_CRYPT_INFO(device))))
420 return -EFAULT; 421 return -EFAULT;
421 else 422 else
422 return 0; 423 return 0;
423 } 424 }
424 425
425 /* 426 /*
426 * 3590 IOCTL Overload 427 * 3590 IOCTL Overload
427 */ 428 */
428 static int 429 static int
429 tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg) 430 tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg)
430 { 431 {
431 switch (cmd) { 432 switch (cmd) {
432 case TAPE390_DISPLAY: { 433 case TAPE390_DISPLAY: {
433 struct display_struct disp; 434 struct display_struct disp;
434 435
435 if (copy_from_user(&disp, (char __user *) arg, sizeof(disp))) 436 if (copy_from_user(&disp, (char __user *) arg, sizeof(disp)))
436 return -EFAULT; 437 return -EFAULT;
437 438
438 return tape_std_display(device, &disp); 439 return tape_std_display(device, &disp);
439 } 440 }
440 case TAPE390_KEKL_SET: 441 case TAPE390_KEKL_SET:
441 return tape_3592_ioctl_kekl_set(device, arg); 442 return tape_3592_ioctl_kekl_set(device, arg);
442 case TAPE390_KEKL_QUERY: 443 case TAPE390_KEKL_QUERY:
443 return tape_3592_ioctl_kekl_query(device, arg); 444 return tape_3592_ioctl_kekl_query(device, arg);
444 case TAPE390_CRYPT_SET: 445 case TAPE390_CRYPT_SET:
445 return tape_3592_ioctl_crypt_set(device, arg); 446 return tape_3592_ioctl_crypt_set(device, arg);
446 case TAPE390_CRYPT_QUERY: 447 case TAPE390_CRYPT_QUERY:
447 return tape_3592_ioctl_crypt_query(device, arg); 448 return tape_3592_ioctl_crypt_query(device, arg);
448 default: 449 default:
449 return -EINVAL; /* no additional ioctls */ 450 return -EINVAL; /* no additional ioctls */
450 } 451 }
451 } 452 }
452 453
/*
 * SENSE Medium: Get Sense data about medium state
 *
 * Issues a MEDIUM_SENSE ccw reading 128 bytes of sense data into the
 * request's cpdata buffer. The result is evaluated when the request
 * completes (tape_3590_done() handles TO_MSEN via
 * tape_3590_med_state_set()).
 */
static int
tape_3590_sense_medium(struct tape_device *device)
{
	struct tape_request *request;

	request = tape_alloc_request(1, 128);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_MSEN;
	tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata);
	return tape_do_io_free(device, request);
}
468 469
469 /* 470 /*
470 * MTTELL: Tell block. Return the number of block relative to current file. 471 * MTTELL: Tell block. Return the number of block relative to current file.
471 */ 472 */
472 static int 473 static int
473 tape_3590_mttell(struct tape_device *device, int mt_count) 474 tape_3590_mttell(struct tape_device *device, int mt_count)
474 { 475 {
475 __u64 block_id; 476 __u64 block_id;
476 int rc; 477 int rc;
477 478
478 rc = tape_std_read_block_id(device, &block_id); 479 rc = tape_std_read_block_id(device, &block_id);
479 if (rc) 480 if (rc)
480 return rc; 481 return rc;
481 return block_id >> 32; 482 return block_id >> 32;
482 } 483 }
483 484
/*
 * MTSEEK: seek to the specified block.
 *
 * Channel program: mode set, LOCATE with the 32-bit block id as
 * argument, trailing NOP.
 */
static int
tape_3590_mtseek(struct tape_device *device, int count)
{
	struct tape_request *request;

	DBF_EVENT(6, "xsee id: %x\n", count);
	request = tape_alloc_request(3, 4);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_LBL;
	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
	/* The LOCATE argument is the block id in the 4-byte cpdata area. */
	*(__u32 *) request->cpdata = count;
	tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
	return tape_do_io_free(device, request);
}
503 504
/*
 * Read Opposite Error Recovery Function:
 * Used, when Read Forward does not work
 *
 * Rewrites the failed read request in place into a read-backward
 * (or whatever data->read_back_op specifies) followed by a forward
 * space block, so the tape ends up positioned as if the forward read
 * had succeeded.
 */
static void
tape_3590_read_opposite(struct tape_device *device,
			struct tape_request *request)
{
	struct tape_3590_disc_data *data;

	/*
	 * We have allocated 4 ccws in tape_std_read, so we can now
	 * transform the request to a read backward, followed by a
	 * forward space block.
	 */
	request->op = TO_RBA;
	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
	data = device->discdata;
	/* Reuse the original request's IDAL buffer for the backward read. */
	tape_ccw_cc_idal(request->cpaddr + 1, data->read_back_op,
			 device->char_data.idal_buf);
	tape_ccw_cc(request->cpaddr + 2, FORSPACEBLOCK, 0, NULL);
	tape_ccw_end(request->cpaddr + 3, NOP, 0, NULL);
	DBF_EVENT(6, "xrop ccwg\n");
}
528 529
/*
 * Read Attention Msg
 * This should be done after an interrupt with attention bit (0x80)
 * in device state.
 *
 * After a "read attention message" request there are two possible
 * results:
 *
 * 1. A unit check is presented, when attention sense is present (e.g. when
 * a medium has been unloaded). The attention sense comes then
 * together with the unit check. The recovery action is either "retry"
 * (in case there is an attention message pending) or "permanent error".
 *
 * 2. The attention msg is written to the "read subsystem data" buffer.
 * In this case we probably should print it to the console.
 */
static int
tape_3590_read_attmsg(struct tape_device *device)
{
	struct tape_request *request;
	char *buf;

	/* 12 bytes of order data followed by the subsystem data buffer. */
	request = tape_alloc_request(3, 4096);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_READ_ATTMSG;
	buf = request->cpdata;
	buf[0] = PREP_RD_SS_DATA;
	buf[6] = RD_ATTMSG;	/* read att msg */
	tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf);
	tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12);
	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
	return tape_do_io_free(device, request);
}
563 564
/*
 * These functions are used to schedule follow-up actions from within an
 * interrupt context (like unsolicited interrupts).
 */
struct work_handler_data {
	/* Device reference taken in tape_3590_schedule_work() and
	 * released by tape_3590_work_handler(). */
	struct tape_device *device;
	enum tape_op op;	/* deferred operation to perform */
	struct work_struct work;
};
573 574
/*
 * Workqueue handler: performs the operation recorded in the
 * work_handler_data in process context, then drops the device
 * reference and frees the data.
 */
static void
tape_3590_work_handler(struct work_struct *work)
{
	struct work_handler_data *p =
		container_of(work, struct work_handler_data, work);

	switch (p->op) {
	case TO_MSEN:
		tape_3590_sense_medium(p->device);
		break;
	case TO_READ_ATTMSG:
		tape_3590_read_attmsg(p->device);
		break;
	case TO_CRYPT_ON:
		tape_3592_enable_crypt(p->device);
		break;
	case TO_CRYPT_OFF:
		tape_3592_disable_crypt(p->device);
		break;
	default:
		DBF_EVENT(3, "T3590: work handler undefined for "
			  "operation 0x%02x\n", p->op);
	}
	/* Drop the reference taken in tape_3590_schedule_work(). */
	tape_put_device(p->device);
	kfree(p);
}
600 601
601 static int 602 static int
602 tape_3590_schedule_work(struct tape_device *device, enum tape_op op) 603 tape_3590_schedule_work(struct tape_device *device, enum tape_op op)
603 { 604 {
604 struct work_handler_data *p; 605 struct work_handler_data *p;
605 606
606 if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL) 607 if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
607 return -ENOMEM; 608 return -ENOMEM;
608 609
609 INIT_WORK(&p->work, tape_3590_work_handler); 610 INIT_WORK(&p->work, tape_3590_work_handler);
610 611
611 p->device = tape_get_device(device); 612 p->device = tape_get_device(device);
612 p->op = op; 613 p->op = op;
613 614
614 schedule_work(&p->work); 615 schedule_work(&p->work);
615 return 0; 616 return 0;
616 } 617 }
617 618
618 #ifdef CONFIG_S390_TAPE_BLOCK 619 #ifdef CONFIG_S390_TAPE_BLOCK
/*
 * Tape Block READ
 *
 * Build the channel program for a block-device read request: mode
 * set, a NOP placeholder for an optional LOCATE (see
 * tape_3590_check_locate()), one READ_FORWARD ccw per hardware
 * sector, and a terminating NOP.
 */
static struct tape_request *
tape_3590_bread(struct tape_device *device, struct request *req)
{
	struct tape_request *request;
	struct ccw1 *ccw;
	int count = 0, start_block;
	unsigned off;
	char *dst;
	struct bio_vec *bv;
	struct req_iterator iter;

	DBF_EVENT(6, "xBREDid:");
	/* Convert the 512-byte sector position to hardware sectors. */
	start_block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
	DBF_EVENT(6, "start_block = %i\n", start_block);

	/* Count the hardware sectors covered by the request. */
	rq_for_each_segment(bv, req, iter)
		count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);

	/* mode set + locate slot + one read ccw per sector + final nop */
	request = tape_alloc_request(2 + count + 1, 4);
	if (IS_ERR(request))
		return request;
	request->op = TO_BLOCK;
	*(__u32 *) request->cpdata = start_block;
	ccw = request->cpaddr;
	ccw = tape_ccw_cc(ccw, MODE_SET_DB, 1, device->modeset_byte);

	/*
	 * We always setup a nop after the mode set ccw. This slot is
	 * used in tape_std_check_locate to insert a locate ccw if the
	 * current tape position doesn't match the start block to be read.
	 */
	ccw = tape_ccw_cc(ccw, NOP, 0, NULL);

	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv->bv_page) + bv->bv_offset;
		for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) {
			ccw->flags = CCW_FLAG_CC;
			ccw->cmd_code = READ_FORWARD;
			ccw->count = TAPEBLOCK_HSEC_SIZE;
			/* Translated address; undone in tape_3590_free_bread. */
			set_normalized_cda(ccw, (void *) __pa(dst));
			ccw++;
			dst += TAPEBLOCK_HSEC_SIZE;
		}
		/* Segment lengths must be a multiple of the sector size. */
		BUG_ON(off > bv->bv_len);
	}
	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
	DBF_EVENT(6, "xBREDccwg\n");
	return request;
}
671 672
/*
 * Free a request built by tape_3590_bread(), releasing the normalized
 * data addresses of all READ_FORWARD ccws first.
 */
static void
tape_3590_free_bread(struct tape_request *request)
{
	struct ccw1 *ccw;

	/* Last ccw is a nop and doesn't need clear_normalized_cda */
	for (ccw = request->cpaddr; ccw->flags & CCW_FLAG_CC; ccw++)
		if (ccw->cmd_code == READ_FORWARD)
			clear_normalized_cda(ccw);
	tape_free_request(request);
}
683 684
/*
 * check_locate is called just before the tape request is passed to
 * the common io layer for execution. It has to check the current
 * tape position and insert a locate ccw if it doesn't match the
 * start block for the request.
 */
static void
tape_3590_check_locate(struct tape_device *device, struct tape_request *request)
{
	__u32 *start_block;

	/* The requested start block was stored in cpdata by bread(). */
	start_block = (__u32 *) request->cpdata;
	if (*start_block != device->blk_data.block_position) {
		/* Add the start offset of the file to get the real block. */
		*start_block += device->bof;
		/* Overwrite the NOP placeholder with the LOCATE ccw. */
		tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
	}
}
702 #endif 703 #endif
703 704
704 static void tape_3590_med_state_set(struct tape_device *device, 705 static void tape_3590_med_state_set(struct tape_device *device,
705 struct tape_3590_med_sense *sense) 706 struct tape_3590_med_sense *sense)
706 { 707 {
707 struct tape390_crypt_info *c_info; 708 struct tape390_crypt_info *c_info;
708 709
709 c_info = &TAPE_3590_CRYPT_INFO(device); 710 c_info = &TAPE_3590_CRYPT_INFO(device);
710 711
711 DBF_EVENT(6, "medium state: %x:%x\n", sense->macst, sense->masst); 712 DBF_EVENT(6, "medium state: %x:%x\n", sense->macst, sense->masst);
712 switch (sense->macst) { 713 switch (sense->macst) {
713 case 0x04: 714 case 0x04:
714 case 0x05: 715 case 0x05:
715 case 0x06: 716 case 0x06:
716 tape_med_state_set(device, MS_UNLOADED); 717 tape_med_state_set(device, MS_UNLOADED);
717 TAPE_3590_CRYPT_INFO(device).medium_status = 0; 718 TAPE_3590_CRYPT_INFO(device).medium_status = 0;
718 return; 719 return;
719 case 0x08: 720 case 0x08:
720 case 0x09: 721 case 0x09:
721 tape_med_state_set(device, MS_LOADED); 722 tape_med_state_set(device, MS_LOADED);
722 break; 723 break;
723 default: 724 default:
724 tape_med_state_set(device, MS_UNKNOWN); 725 tape_med_state_set(device, MS_UNKNOWN);
725 return; 726 return;
726 } 727 }
727 c_info->medium_status |= TAPE390_MEDIUM_LOADED_MASK; 728 c_info->medium_status |= TAPE390_MEDIUM_LOADED_MASK;
728 if (sense->flags & MSENSE_CRYPT_MASK) { 729 if (sense->flags & MSENSE_CRYPT_MASK) {
729 DBF_EVENT(6, "Medium is encrypted (%04x)\n", sense->flags); 730 DBF_EVENT(6, "Medium is encrypted (%04x)\n", sense->flags);
730 c_info->medium_status |= TAPE390_MEDIUM_ENCRYPTED_MASK; 731 c_info->medium_status |= TAPE390_MEDIUM_ENCRYPTED_MASK;
731 } else { 732 } else {
732 DBF_EVENT(6, "Medium is not encrypted %04x\n", sense->flags); 733 DBF_EVENT(6, "Medium is not encrypted %04x\n", sense->flags);
733 c_info->medium_status &= ~TAPE390_MEDIUM_ENCRYPTED_MASK; 734 c_info->medium_status &= ~TAPE390_MEDIUM_ENCRYPTED_MASK;
734 } 735 }
735 } 736 }
736 737
737 /* 738 /*
738 * The done handler is called at device/channel end and wakes up the sleeping 739 * The done handler is called at device/channel end and wakes up the sleeping
739 * process 740 * process
740 */ 741 */
741 static int 742 static int
742 tape_3590_done(struct tape_device *device, struct tape_request *request) 743 tape_3590_done(struct tape_device *device, struct tape_request *request)
743 { 744 {
744 struct tape_3590_disc_data *disc_data; 745 struct tape_3590_disc_data *disc_data;
745 746
746 DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]); 747 DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]);
747 disc_data = device->discdata; 748 disc_data = device->discdata;
748 749
749 switch (request->op) { 750 switch (request->op) {
750 case TO_BSB: 751 case TO_BSB:
751 case TO_BSF: 752 case TO_BSF:
752 case TO_DSE: 753 case TO_DSE:
753 case TO_FSB: 754 case TO_FSB:
754 case TO_FSF: 755 case TO_FSF:
755 case TO_LBL: 756 case TO_LBL:
756 case TO_RFO: 757 case TO_RFO:
757 case TO_RBA: 758 case TO_RBA:
758 case TO_REW: 759 case TO_REW:
759 case TO_WRI: 760 case TO_WRI:
760 case TO_WTM: 761 case TO_WTM:
761 case TO_BLOCK: 762 case TO_BLOCK:
762 case TO_LOAD: 763 case TO_LOAD:
763 tape_med_state_set(device, MS_LOADED); 764 tape_med_state_set(device, MS_LOADED);
764 break; 765 break;
765 case TO_RUN: 766 case TO_RUN:
766 tape_med_state_set(device, MS_UNLOADED); 767 tape_med_state_set(device, MS_UNLOADED);
767 tape_3590_schedule_work(device, TO_CRYPT_OFF); 768 tape_3590_schedule_work(device, TO_CRYPT_OFF);
768 break; 769 break;
769 case TO_MSEN: 770 case TO_MSEN:
770 tape_3590_med_state_set(device, request->cpdata); 771 tape_3590_med_state_set(device, request->cpdata);
771 break; 772 break;
772 case TO_CRYPT_ON: 773 case TO_CRYPT_ON:
773 TAPE_3590_CRYPT_INFO(device).status 774 TAPE_3590_CRYPT_INFO(device).status
774 |= TAPE390_CRYPT_ON_MASK; 775 |= TAPE390_CRYPT_ON_MASK;
775 *(device->modeset_byte) |= 0x03; 776 *(device->modeset_byte) |= 0x03;
776 break; 777 break;
777 case TO_CRYPT_OFF: 778 case TO_CRYPT_OFF:
778 TAPE_3590_CRYPT_INFO(device).status 779 TAPE_3590_CRYPT_INFO(device).status
779 &= ~TAPE390_CRYPT_ON_MASK; 780 &= ~TAPE390_CRYPT_ON_MASK;
780 *(device->modeset_byte) &= ~0x03; 781 *(device->modeset_byte) &= ~0x03;
781 break; 782 break;
782 case TO_RBI: /* RBI seems to succeed even without medium loaded. */ 783 case TO_RBI: /* RBI seems to succeed even without medium loaded. */
783 case TO_NOP: /* Same to NOP. */ 784 case TO_NOP: /* Same to NOP. */
784 case TO_READ_CONFIG: 785 case TO_READ_CONFIG:
785 case TO_READ_ATTMSG: 786 case TO_READ_ATTMSG:
786 case TO_DIS: 787 case TO_DIS:
787 case TO_ASSIGN: 788 case TO_ASSIGN:
788 case TO_UNASSIGN: 789 case TO_UNASSIGN:
789 case TO_SIZE: 790 case TO_SIZE:
790 case TO_KEKL_SET: 791 case TO_KEKL_SET:
791 case TO_KEKL_QUERY: 792 case TO_KEKL_QUERY:
792 case TO_RDC: 793 case TO_RDC:
793 break; 794 break;
794 } 795 }
795 return TAPE_IO_SUCCESS; 796 return TAPE_IO_SUCCESS;
796 } 797 }
797 798
/*
 * This function is called when error recovery was successful.
 * (Name keeps the historical "succeded" spelling; callers depend on it.)
 * Completes the request through the normal done path.
 */
static inline int
tape_3590_erp_succeded(struct tape_device *device, struct tape_request *request)
{
	DBF_EVENT(3, "Error Recovery successful for %s\n",
		  tape_op_verbose[request->op]);
	return tape_3590_done(device, request);
}
808 809
/*
 * This function is called when error recovery was not successful:
 * record the sense data in the debug feature and propagate the
 * caller-supplied return code (typically a negative errno or a
 * TAPE_IO_* status) unchanged.
 */
static inline int
tape_3590_erp_failed(struct tape_device *device, struct tape_request *request,
		     struct irb *irb, int rc)
{
	DBF_EVENT(3, "Error Recovery failed for %s\n",
		tape_op_verbose[request->op]);
	/* Dump the sense bytes to the debug area for later analysis. */
	tape_dump_sense_dbf(device, request, irb);
	return rc;
}
821 822
/*
 * Error recovery: log the sense data and ask the tape core to
 * resubmit the failed request (TAPE_IO_RETRY).
 */
static inline int
tape_3590_erp_retry(struct tape_device *device, struct tape_request *request,
		    struct irb *irb)
{
	DBF_EVENT(2, "Retry: %s\n", tape_op_verbose[request->op]);
	tape_dump_sense_dbf(device, request, irb);
	return TAPE_IO_RETRY;
}
833 834
834 /* 835 /*
835 * Handle unsolicited interrupts 836 * Handle unsolicited interrupts
836 */ 837 */
837 static int 838 static int
838 tape_3590_unsolicited_irq(struct tape_device *device, struct irb *irb) 839 tape_3590_unsolicited_irq(struct tape_device *device, struct irb *irb)
839 { 840 {
840 if (irb->scsw.cmd.dstat == DEV_STAT_CHN_END) 841 if (irb->scsw.cmd.dstat == DEV_STAT_CHN_END)
841 /* Probably result of halt ssch */ 842 /* Probably result of halt ssch */
842 return TAPE_IO_PENDING; 843 return TAPE_IO_PENDING;
843 else if (irb->scsw.cmd.dstat == 0x85) 844 else if (irb->scsw.cmd.dstat == 0x85)
844 /* Device Ready */ 845 /* Device Ready */
845 DBF_EVENT(3, "unsol.irq! tape ready: %08x\n", device->cdev_id); 846 DBF_EVENT(3, "unsol.irq! tape ready: %08x\n", device->cdev_id);
846 else if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { 847 else if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
847 tape_3590_schedule_work(device, TO_READ_ATTMSG); 848 tape_3590_schedule_work(device, TO_READ_ATTMSG);
848 } else { 849 } else {
849 DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id); 850 DBF_EVENT(3, "unsol.irq! dev end: %08x\n", device->cdev_id);
850 tape_dump_sense_dbf(device, NULL, irb); 851 tape_dump_sense_dbf(device, NULL, irb);
851 } 852 }
852 /* check medium state */ 853 /* check medium state */
853 tape_3590_schedule_work(device, TO_MSEN); 854 tape_3590_schedule_work(device, TO_MSEN);
854 return TAPE_IO_SUCCESS; 855 return TAPE_IO_SUCCESS;
855 } 856 }
856 857
857 /* 858 /*
858 * Basic Recovery routine 859 * Basic Recovery routine
859 */ 860 */
860 static int 861 static int
861 tape_3590_erp_basic(struct tape_device *device, struct tape_request *request, 862 tape_3590_erp_basic(struct tape_device *device, struct tape_request *request,
862 struct irb *irb, int rc) 863 struct irb *irb, int rc)
863 { 864 {
864 struct tape_3590_sense *sense; 865 struct tape_3590_sense *sense;
865 866
866 sense = (struct tape_3590_sense *) irb->ecw; 867 sense = (struct tape_3590_sense *) irb->ecw;
867 868
868 switch (sense->bra) { 869 switch (sense->bra) {
869 case SENSE_BRA_PER: 870 case SENSE_BRA_PER:
870 return tape_3590_erp_failed(device, request, irb, rc); 871 return tape_3590_erp_failed(device, request, irb, rc);
871 case SENSE_BRA_CONT: 872 case SENSE_BRA_CONT:
872 return tape_3590_erp_succeded(device, request); 873 return tape_3590_erp_succeded(device, request);
873 case SENSE_BRA_RE: 874 case SENSE_BRA_RE:
874 return tape_3590_erp_retry(device, request, irb); 875 return tape_3590_erp_retry(device, request, irb);
875 case SENSE_BRA_DRE: 876 case SENSE_BRA_DRE:
876 return tape_3590_erp_failed(device, request, irb, rc); 877 return tape_3590_erp_failed(device, request, irb, rc);
877 default: 878 default:
878 BUG(); 879 BUG();
879 return TAPE_IO_STOP; 880 return TAPE_IO_STOP;
880 } 881 }
881 } 882 }
882 883
883 /* 884 /*
884 * RDL: Read Device (buffered) log 885 * RDL: Read Device (buffered) log
885 */ 886 */
886 static int 887 static int
887 tape_3590_erp_read_buf_log(struct tape_device *device, 888 tape_3590_erp_read_buf_log(struct tape_device *device,
888 struct tape_request *request, struct irb *irb) 889 struct tape_request *request, struct irb *irb)
889 { 890 {
890 /* 891 /*
891 * We just do the basic error recovery at the moment (retry). 892 * We just do the basic error recovery at the moment (retry).
892 * Perhaps in the future, we read the log and dump it somewhere... 893 * Perhaps in the future, we read the log and dump it somewhere...
893 */ 894 */
894 return tape_3590_erp_basic(device, request, irb, -EIO); 895 return tape_3590_erp_basic(device, request, irb, -EIO);
895 } 896 }
896 897
897 /* 898 /*
898 * SWAP: Swap Devices 899 * SWAP: Swap Devices
899 */ 900 */
900 static int 901 static int
901 tape_3590_erp_swap(struct tape_device *device, struct tape_request *request, 902 tape_3590_erp_swap(struct tape_device *device, struct tape_request *request,
902 struct irb *irb) 903 struct irb *irb)
903 { 904 {
904 /* 905 /*
905 * This error recovery should swap the tapes 906 * This error recovery should swap the tapes
906 * if the original has a problem. The operation 907 * if the original has a problem. The operation
907 * should proceed with the new tape... this 908 * should proceed with the new tape... this
908 * should probably be done in user space! 909 * should probably be done in user space!
909 */ 910 */
910 dev_warn (&device->cdev->dev, "The tape medium must be loaded into a " 911 dev_warn (&device->cdev->dev, "The tape medium must be loaded into a "
911 "different tape unit\n"); 912 "different tape unit\n");
912 return tape_3590_erp_basic(device, request, irb, -EIO); 913 return tape_3590_erp_basic(device, request, irb, -EIO);
913 } 914 }
914 915
915 /* 916 /*
916 * LBY: Long Busy 917 * LBY: Long Busy
917 */ 918 */
918 static int 919 static int
919 tape_3590_erp_long_busy(struct tape_device *device, 920 tape_3590_erp_long_busy(struct tape_device *device,
920 struct tape_request *request, struct irb *irb) 921 struct tape_request *request, struct irb *irb)
921 { 922 {
922 DBF_EVENT(6, "Device is busy\n"); 923 DBF_EVENT(6, "Device is busy\n");
923 return TAPE_IO_LONG_BUSY; 924 return TAPE_IO_LONG_BUSY;
924 } 925 }
925 926
926 /* 927 /*
927 * SPI: Special Intercept 928 * SPI: Special Intercept
928 */ 929 */
929 static int 930 static int
930 tape_3590_erp_special_interrupt(struct tape_device *device, 931 tape_3590_erp_special_interrupt(struct tape_device *device,
931 struct tape_request *request, struct irb *irb) 932 struct tape_request *request, struct irb *irb)
932 { 933 {
933 return tape_3590_erp_basic(device, request, irb, -EIO); 934 return tape_3590_erp_basic(device, request, irb, -EIO);
934 } 935 }
935 936
936 /* 937 /*
937 * RDA: Read Alternate 938 * RDA: Read Alternate
938 */ 939 */
939 static int 940 static int
940 tape_3590_erp_read_alternate(struct tape_device *device, 941 tape_3590_erp_read_alternate(struct tape_device *device,
941 struct tape_request *request, struct irb *irb) 942 struct tape_request *request, struct irb *irb)
942 { 943 {
943 struct tape_3590_disc_data *data; 944 struct tape_3590_disc_data *data;
944 945
945 /* 946 /*
946 * The issued Read Backward or Read Previous command is not 947 * The issued Read Backward or Read Previous command is not
947 * supported by the device 948 * supported by the device
948 * The recovery action should be to issue another command: 949 * The recovery action should be to issue another command:
949 * Read Revious: if Read Backward is not supported 950 * Read Revious: if Read Backward is not supported
950 * Read Backward: if Read Previous is not supported 951 * Read Backward: if Read Previous is not supported
951 */ 952 */
952 data = device->discdata; 953 data = device->discdata;
953 if (data->read_back_op == READ_PREVIOUS) { 954 if (data->read_back_op == READ_PREVIOUS) {
954 DBF_EVENT(2, "(%08x): No support for READ_PREVIOUS command\n", 955 DBF_EVENT(2, "(%08x): No support for READ_PREVIOUS command\n",
955 device->cdev_id); 956 device->cdev_id);
956 data->read_back_op = READ_BACKWARD; 957 data->read_back_op = READ_BACKWARD;
957 } else { 958 } else {
958 DBF_EVENT(2, "(%08x): No support for READ_BACKWARD command\n", 959 DBF_EVENT(2, "(%08x): No support for READ_BACKWARD command\n",
959 device->cdev_id); 960 device->cdev_id);
960 data->read_back_op = READ_PREVIOUS; 961 data->read_back_op = READ_PREVIOUS;
961 } 962 }
962 tape_3590_read_opposite(device, request); 963 tape_3590_read_opposite(device, request);
963 return tape_3590_erp_retry(device, request, irb); 964 return tape_3590_erp_retry(device, request, irb);
964 } 965 }
965 966
966 /* 967 /*
967 * Error Recovery read opposite 968 * Error Recovery read opposite
968 */ 969 */
969 static int 970 static int
970 tape_3590_erp_read_opposite(struct tape_device *device, 971 tape_3590_erp_read_opposite(struct tape_device *device,
971 struct tape_request *request, struct irb *irb) 972 struct tape_request *request, struct irb *irb)
972 { 973 {
973 switch (request->op) { 974 switch (request->op) {
974 case TO_RFO: 975 case TO_RFO:
975 /* 976 /*
976 * We did read forward, but the data could not be read. 977 * We did read forward, but the data could not be read.
977 * We will read backward and then skip forward again. 978 * We will read backward and then skip forward again.
978 */ 979 */
979 tape_3590_read_opposite(device, request); 980 tape_3590_read_opposite(device, request);
980 return tape_3590_erp_retry(device, request, irb); 981 return tape_3590_erp_retry(device, request, irb);
981 case TO_RBA: 982 case TO_RBA:
982 /* We tried to read forward and backward, but hat no success */ 983 /* We tried to read forward and backward, but hat no success */
983 return tape_3590_erp_failed(device, request, irb, -EIO); 984 return tape_3590_erp_failed(device, request, irb, -EIO);
984 break; 985 break;
985 default: 986 default:
986 return tape_3590_erp_failed(device, request, irb, -EIO); 987 return tape_3590_erp_failed(device, request, irb, -EIO);
987 } 988 }
988 } 989 }
989 990
990 /* 991 /*
991 * Print an MIM (Media Information Message) (message code f0) 992 * Print an MIM (Media Information Message) (message code f0)
992 */ 993 */
993 static void 994 static void
994 tape_3590_print_mim_msg_f0(struct tape_device *device, struct irb *irb) 995 tape_3590_print_mim_msg_f0(struct tape_device *device, struct irb *irb)
995 { 996 {
996 struct tape_3590_sense *sense; 997 struct tape_3590_sense *sense;
997 char *exception, *service; 998 char *exception, *service;
998 999
999 exception = kmalloc(BUFSIZE, GFP_ATOMIC); 1000 exception = kmalloc(BUFSIZE, GFP_ATOMIC);
1000 service = kmalloc(BUFSIZE, GFP_ATOMIC); 1001 service = kmalloc(BUFSIZE, GFP_ATOMIC);
1001 1002
1002 if (!exception || !service) 1003 if (!exception || !service)
1003 goto out_nomem; 1004 goto out_nomem;
1004 1005
1005 sense = (struct tape_3590_sense *) irb->ecw; 1006 sense = (struct tape_3590_sense *) irb->ecw;
1006 /* Exception Message */ 1007 /* Exception Message */
1007 switch (sense->fmt.f70.emc) { 1008 switch (sense->fmt.f70.emc) {
1008 case 0x02: 1009 case 0x02:
1009 snprintf(exception, BUFSIZE, "Data degraded"); 1010 snprintf(exception, BUFSIZE, "Data degraded");
1010 break; 1011 break;
1011 case 0x03: 1012 case 0x03:
1012 snprintf(exception, BUFSIZE, "Data degraded in partion %i", 1013 snprintf(exception, BUFSIZE, "Data degraded in partion %i",
1013 sense->fmt.f70.mp); 1014 sense->fmt.f70.mp);
1014 break; 1015 break;
1015 case 0x04: 1016 case 0x04:
1016 snprintf(exception, BUFSIZE, "Medium degraded"); 1017 snprintf(exception, BUFSIZE, "Medium degraded");
1017 break; 1018 break;
1018 case 0x05: 1019 case 0x05:
1019 snprintf(exception, BUFSIZE, "Medium degraded in partition %i", 1020 snprintf(exception, BUFSIZE, "Medium degraded in partition %i",
1020 sense->fmt.f70.mp); 1021 sense->fmt.f70.mp);
1021 break; 1022 break;
1022 case 0x06: 1023 case 0x06:
1023 snprintf(exception, BUFSIZE, "Block 0 Error"); 1024 snprintf(exception, BUFSIZE, "Block 0 Error");
1024 break; 1025 break;
1025 case 0x07: 1026 case 0x07:
1026 snprintf(exception, BUFSIZE, "Medium Exception 0x%02x", 1027 snprintf(exception, BUFSIZE, "Medium Exception 0x%02x",
1027 sense->fmt.f70.md); 1028 sense->fmt.f70.md);
1028 break; 1029 break;
1029 default: 1030 default:
1030 snprintf(exception, BUFSIZE, "0x%02x", 1031 snprintf(exception, BUFSIZE, "0x%02x",
1031 sense->fmt.f70.emc); 1032 sense->fmt.f70.emc);
1032 break; 1033 break;
1033 } 1034 }
1034 /* Service Message */ 1035 /* Service Message */
1035 switch (sense->fmt.f70.smc) { 1036 switch (sense->fmt.f70.smc) {
1036 case 0x02: 1037 case 0x02:
1037 snprintf(service, BUFSIZE, "Reference Media maintenance " 1038 snprintf(service, BUFSIZE, "Reference Media maintenance "
1038 "procedure %i", sense->fmt.f70.md); 1039 "procedure %i", sense->fmt.f70.md);
1039 break; 1040 break;
1040 default: 1041 default:
1041 snprintf(service, BUFSIZE, "0x%02x", 1042 snprintf(service, BUFSIZE, "0x%02x",
1042 sense->fmt.f70.smc); 1043 sense->fmt.f70.smc);
1043 break; 1044 break;
1044 } 1045 }
1045 1046
1046 dev_warn (&device->cdev->dev, "Tape media information: exception %s, " 1047 dev_warn (&device->cdev->dev, "Tape media information: exception %s, "
1047 "service %s\n", exception, service); 1048 "service %s\n", exception, service);
1048 1049
1049 out_nomem: 1050 out_nomem:
1050 kfree(exception); 1051 kfree(exception);
1051 kfree(service); 1052 kfree(service);
1052 } 1053 }
1053 1054
1054 /* 1055 /*
1055 * Print an I/O Subsystem Service Information Message (message code f1) 1056 * Print an I/O Subsystem Service Information Message (message code f1)
1056 */ 1057 */
1057 static void 1058 static void
1058 tape_3590_print_io_sim_msg_f1(struct tape_device *device, struct irb *irb) 1059 tape_3590_print_io_sim_msg_f1(struct tape_device *device, struct irb *irb)
1059 { 1060 {
1060 struct tape_3590_sense *sense; 1061 struct tape_3590_sense *sense;
1061 char *exception, *service; 1062 char *exception, *service;
1062 1063
1063 exception = kmalloc(BUFSIZE, GFP_ATOMIC); 1064 exception = kmalloc(BUFSIZE, GFP_ATOMIC);
1064 service = kmalloc(BUFSIZE, GFP_ATOMIC); 1065 service = kmalloc(BUFSIZE, GFP_ATOMIC);
1065 1066
1066 if (!exception || !service) 1067 if (!exception || !service)
1067 goto out_nomem; 1068 goto out_nomem;
1068 1069
1069 sense = (struct tape_3590_sense *) irb->ecw; 1070 sense = (struct tape_3590_sense *) irb->ecw;
1070 /* Exception Message */ 1071 /* Exception Message */
1071 switch (sense->fmt.f71.emc) { 1072 switch (sense->fmt.f71.emc) {
1072 case 0x01: 1073 case 0x01:
1073 snprintf(exception, BUFSIZE, "Effect of failure is unknown"); 1074 snprintf(exception, BUFSIZE, "Effect of failure is unknown");
1074 break; 1075 break;
1075 case 0x02: 1076 case 0x02:
1076 snprintf(exception, BUFSIZE, "CU Exception - no performance " 1077 snprintf(exception, BUFSIZE, "CU Exception - no performance "
1077 "impact"); 1078 "impact");
1078 break; 1079 break;
1079 case 0x03: 1080 case 0x03:
1080 snprintf(exception, BUFSIZE, "CU Exception on channel " 1081 snprintf(exception, BUFSIZE, "CU Exception on channel "
1081 "interface 0x%02x", sense->fmt.f71.md[0]); 1082 "interface 0x%02x", sense->fmt.f71.md[0]);
1082 break; 1083 break;
1083 case 0x04: 1084 case 0x04:
1084 snprintf(exception, BUFSIZE, "CU Exception on device path " 1085 snprintf(exception, BUFSIZE, "CU Exception on device path "
1085 "0x%02x", sense->fmt.f71.md[0]); 1086 "0x%02x", sense->fmt.f71.md[0]);
1086 break; 1087 break;
1087 case 0x05: 1088 case 0x05:
1088 snprintf(exception, BUFSIZE, "CU Exception on library path " 1089 snprintf(exception, BUFSIZE, "CU Exception on library path "
1089 "0x%02x", sense->fmt.f71.md[0]); 1090 "0x%02x", sense->fmt.f71.md[0]);
1090 break; 1091 break;
1091 case 0x06: 1092 case 0x06:
1092 snprintf(exception, BUFSIZE, "CU Exception on node 0x%02x", 1093 snprintf(exception, BUFSIZE, "CU Exception on node 0x%02x",
1093 sense->fmt.f71.md[0]); 1094 sense->fmt.f71.md[0]);
1094 break; 1095 break;
1095 case 0x07: 1096 case 0x07:
1096 snprintf(exception, BUFSIZE, "CU Exception on partition " 1097 snprintf(exception, BUFSIZE, "CU Exception on partition "
1097 "0x%02x", sense->fmt.f71.md[0]); 1098 "0x%02x", sense->fmt.f71.md[0]);
1098 break; 1099 break;
1099 default: 1100 default:
1100 snprintf(exception, BUFSIZE, "0x%02x", 1101 snprintf(exception, BUFSIZE, "0x%02x",
1101 sense->fmt.f71.emc); 1102 sense->fmt.f71.emc);
1102 } 1103 }
1103 /* Service Message */ 1104 /* Service Message */
1104 switch (sense->fmt.f71.smc) { 1105 switch (sense->fmt.f71.smc) {
1105 case 0x01: 1106 case 0x01:
1106 snprintf(service, BUFSIZE, "Repair impact is unknown"); 1107 snprintf(service, BUFSIZE, "Repair impact is unknown");
1107 break; 1108 break;
1108 case 0x02: 1109 case 0x02:
1109 snprintf(service, BUFSIZE, "Repair will not impact cu " 1110 snprintf(service, BUFSIZE, "Repair will not impact cu "
1110 "performance"); 1111 "performance");
1111 break; 1112 break;
1112 case 0x03: 1113 case 0x03:
1113 if (sense->fmt.f71.mdf == 0) 1114 if (sense->fmt.f71.mdf == 0)
1114 snprintf(service, BUFSIZE, "Repair will disable node " 1115 snprintf(service, BUFSIZE, "Repair will disable node "
1115 "0x%x on CU", sense->fmt.f71.md[1]); 1116 "0x%x on CU", sense->fmt.f71.md[1]);
1116 else 1117 else
1117 snprintf(service, BUFSIZE, "Repair will disable " 1118 snprintf(service, BUFSIZE, "Repair will disable "
1118 "nodes (0x%x-0x%x) on CU", sense->fmt.f71.md[1], 1119 "nodes (0x%x-0x%x) on CU", sense->fmt.f71.md[1],
1119 sense->fmt.f71.md[2]); 1120 sense->fmt.f71.md[2]);
1120 break; 1121 break;
1121 case 0x04: 1122 case 0x04:
1122 if (sense->fmt.f71.mdf == 0) 1123 if (sense->fmt.f71.mdf == 0)
1123 snprintf(service, BUFSIZE, "Repair will disable " 1124 snprintf(service, BUFSIZE, "Repair will disable "
1124 "channel path 0x%x on CU", 1125 "channel path 0x%x on CU",
1125 sense->fmt.f71.md[1]); 1126 sense->fmt.f71.md[1]);
1126 else 1127 else
1127 snprintf(service, BUFSIZE, "Repair will disable cannel" 1128 snprintf(service, BUFSIZE, "Repair will disable cannel"
1128 " paths (0x%x-0x%x) on CU", 1129 " paths (0x%x-0x%x) on CU",
1129 sense->fmt.f71.md[1], sense->fmt.f71.md[2]); 1130 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1130 break; 1131 break;
1131 case 0x05: 1132 case 0x05:
1132 if (sense->fmt.f71.mdf == 0) 1133 if (sense->fmt.f71.mdf == 0)
1133 snprintf(service, BUFSIZE, "Repair will disable device" 1134 snprintf(service, BUFSIZE, "Repair will disable device"
1134 " path 0x%x on CU", sense->fmt.f71.md[1]); 1135 " path 0x%x on CU", sense->fmt.f71.md[1]);
1135 else 1136 else
1136 snprintf(service, BUFSIZE, "Repair will disable device" 1137 snprintf(service, BUFSIZE, "Repair will disable device"
1137 " paths (0x%x-0x%x) on CU", 1138 " paths (0x%x-0x%x) on CU",
1138 sense->fmt.f71.md[1], sense->fmt.f71.md[2]); 1139 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1139 break; 1140 break;
1140 case 0x06: 1141 case 0x06:
1141 if (sense->fmt.f71.mdf == 0) 1142 if (sense->fmt.f71.mdf == 0)
1142 snprintf(service, BUFSIZE, "Repair will disable " 1143 snprintf(service, BUFSIZE, "Repair will disable "
1143 "library path 0x%x on CU", 1144 "library path 0x%x on CU",
1144 sense->fmt.f71.md[1]); 1145 sense->fmt.f71.md[1]);
1145 else 1146 else
1146 snprintf(service, BUFSIZE, "Repair will disable " 1147 snprintf(service, BUFSIZE, "Repair will disable "
1147 "library paths (0x%x-0x%x) on CU", 1148 "library paths (0x%x-0x%x) on CU",
1148 sense->fmt.f71.md[1], sense->fmt.f71.md[2]); 1149 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1149 break; 1150 break;
1150 case 0x07: 1151 case 0x07:
1151 snprintf(service, BUFSIZE, "Repair will disable access to CU"); 1152 snprintf(service, BUFSIZE, "Repair will disable access to CU");
1152 break; 1153 break;
1153 default: 1154 default:
1154 snprintf(service, BUFSIZE, "0x%02x", 1155 snprintf(service, BUFSIZE, "0x%02x",
1155 sense->fmt.f71.smc); 1156 sense->fmt.f71.smc);
1156 } 1157 }
1157 1158
1158 dev_warn (&device->cdev->dev, "I/O subsystem information: exception" 1159 dev_warn (&device->cdev->dev, "I/O subsystem information: exception"
1159 " %s, service %s\n", exception, service); 1160 " %s, service %s\n", exception, service);
1160 out_nomem: 1161 out_nomem:
1161 kfree(exception); 1162 kfree(exception);
1162 kfree(service); 1163 kfree(service);
1163 } 1164 }
1164 1165
1165 /* 1166 /*
1166 * Print an Device Subsystem Service Information Message (message code f2) 1167 * Print an Device Subsystem Service Information Message (message code f2)
1167 */ 1168 */
1168 static void 1169 static void
1169 tape_3590_print_dev_sim_msg_f2(struct tape_device *device, struct irb *irb) 1170 tape_3590_print_dev_sim_msg_f2(struct tape_device *device, struct irb *irb)
1170 { 1171 {
1171 struct tape_3590_sense *sense; 1172 struct tape_3590_sense *sense;
1172 char *exception, *service; 1173 char *exception, *service;
1173 1174
1174 exception = kmalloc(BUFSIZE, GFP_ATOMIC); 1175 exception = kmalloc(BUFSIZE, GFP_ATOMIC);
1175 service = kmalloc(BUFSIZE, GFP_ATOMIC); 1176 service = kmalloc(BUFSIZE, GFP_ATOMIC);
1176 1177
1177 if (!exception || !service) 1178 if (!exception || !service)
1178 goto out_nomem; 1179 goto out_nomem;
1179 1180
1180 sense = (struct tape_3590_sense *) irb->ecw; 1181 sense = (struct tape_3590_sense *) irb->ecw;
1181 /* Exception Message */ 1182 /* Exception Message */
1182 switch (sense->fmt.f71.emc) { 1183 switch (sense->fmt.f71.emc) {
1183 case 0x01: 1184 case 0x01:
1184 snprintf(exception, BUFSIZE, "Effect of failure is unknown"); 1185 snprintf(exception, BUFSIZE, "Effect of failure is unknown");
1185 break; 1186 break;
1186 case 0x02: 1187 case 0x02:
1187 snprintf(exception, BUFSIZE, "DV Exception - no performance" 1188 snprintf(exception, BUFSIZE, "DV Exception - no performance"
1188 " impact"); 1189 " impact");
1189 break; 1190 break;
1190 case 0x03: 1191 case 0x03:
1191 snprintf(exception, BUFSIZE, "DV Exception on channel " 1192 snprintf(exception, BUFSIZE, "DV Exception on channel "
1192 "interface 0x%02x", sense->fmt.f71.md[0]); 1193 "interface 0x%02x", sense->fmt.f71.md[0]);
1193 break; 1194 break;
1194 case 0x04: 1195 case 0x04:
1195 snprintf(exception, BUFSIZE, "DV Exception on loader 0x%02x", 1196 snprintf(exception, BUFSIZE, "DV Exception on loader 0x%02x",
1196 sense->fmt.f71.md[0]); 1197 sense->fmt.f71.md[0]);
1197 break; 1198 break;
1198 case 0x05: 1199 case 0x05:
1199 snprintf(exception, BUFSIZE, "DV Exception on message display" 1200 snprintf(exception, BUFSIZE, "DV Exception on message display"
1200 " 0x%02x", sense->fmt.f71.md[0]); 1201 " 0x%02x", sense->fmt.f71.md[0]);
1201 break; 1202 break;
1202 case 0x06: 1203 case 0x06:
1203 snprintf(exception, BUFSIZE, "DV Exception in tape path"); 1204 snprintf(exception, BUFSIZE, "DV Exception in tape path");
1204 break; 1205 break;
1205 case 0x07: 1206 case 0x07:
1206 snprintf(exception, BUFSIZE, "DV Exception in drive"); 1207 snprintf(exception, BUFSIZE, "DV Exception in drive");
1207 break; 1208 break;
1208 default: 1209 default:
1209 snprintf(exception, BUFSIZE, "0x%02x", 1210 snprintf(exception, BUFSIZE, "0x%02x",
1210 sense->fmt.f71.emc); 1211 sense->fmt.f71.emc);
1211 } 1212 }
1212 /* Service Message */ 1213 /* Service Message */
1213 switch (sense->fmt.f71.smc) { 1214 switch (sense->fmt.f71.smc) {
1214 case 0x01: 1215 case 0x01:
1215 snprintf(service, BUFSIZE, "Repair impact is unknown"); 1216 snprintf(service, BUFSIZE, "Repair impact is unknown");
1216 break; 1217 break;
1217 case 0x02: 1218 case 0x02:
1218 snprintf(service, BUFSIZE, "Repair will not impact device " 1219 snprintf(service, BUFSIZE, "Repair will not impact device "
1219 "performance"); 1220 "performance");
1220 break; 1221 break;
1221 case 0x03: 1222 case 0x03:
1222 if (sense->fmt.f71.mdf == 0) 1223 if (sense->fmt.f71.mdf == 0)
1223 snprintf(service, BUFSIZE, "Repair will disable " 1224 snprintf(service, BUFSIZE, "Repair will disable "
1224 "channel path 0x%x on DV", 1225 "channel path 0x%x on DV",
1225 sense->fmt.f71.md[1]); 1226 sense->fmt.f71.md[1]);
1226 else 1227 else
1227 snprintf(service, BUFSIZE, "Repair will disable " 1228 snprintf(service, BUFSIZE, "Repair will disable "
1228 "channel path (0x%x-0x%x) on DV", 1229 "channel path (0x%x-0x%x) on DV",
1229 sense->fmt.f71.md[1], sense->fmt.f71.md[2]); 1230 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1230 break; 1231 break;
1231 case 0x04: 1232 case 0x04:
1232 if (sense->fmt.f71.mdf == 0) 1233 if (sense->fmt.f71.mdf == 0)
1233 snprintf(service, BUFSIZE, "Repair will disable " 1234 snprintf(service, BUFSIZE, "Repair will disable "
1234 "interface 0x%x on DV", sense->fmt.f71.md[1]); 1235 "interface 0x%x on DV", sense->fmt.f71.md[1]);
1235 else 1236 else
1236 snprintf(service, BUFSIZE, "Repair will disable " 1237 snprintf(service, BUFSIZE, "Repair will disable "
1237 "interfaces (0x%x-0x%x) on DV", 1238 "interfaces (0x%x-0x%x) on DV",
1238 sense->fmt.f71.md[1], sense->fmt.f71.md[2]); 1239 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1239 break; 1240 break;
1240 case 0x05: 1241 case 0x05:
1241 if (sense->fmt.f71.mdf == 0) 1242 if (sense->fmt.f71.mdf == 0)
1242 snprintf(service, BUFSIZE, "Repair will disable loader" 1243 snprintf(service, BUFSIZE, "Repair will disable loader"
1243 " 0x%x on DV", sense->fmt.f71.md[1]); 1244 " 0x%x on DV", sense->fmt.f71.md[1]);
1244 else 1245 else
1245 snprintf(service, BUFSIZE, "Repair will disable loader" 1246 snprintf(service, BUFSIZE, "Repair will disable loader"
1246 " (0x%x-0x%x) on DV", 1247 " (0x%x-0x%x) on DV",
1247 sense->fmt.f71.md[1], sense->fmt.f71.md[2]); 1248 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1248 break; 1249 break;
1249 case 0x07: 1250 case 0x07:
1250 snprintf(service, BUFSIZE, "Repair will disable access to DV"); 1251 snprintf(service, BUFSIZE, "Repair will disable access to DV");
1251 break; 1252 break;
1252 case 0x08: 1253 case 0x08:
1253 if (sense->fmt.f71.mdf == 0) 1254 if (sense->fmt.f71.mdf == 0)
1254 snprintf(service, BUFSIZE, "Repair will disable " 1255 snprintf(service, BUFSIZE, "Repair will disable "
1255 "message display 0x%x on DV", 1256 "message display 0x%x on DV",
1256 sense->fmt.f71.md[1]); 1257 sense->fmt.f71.md[1]);
1257 else 1258 else
1258 snprintf(service, BUFSIZE, "Repair will disable " 1259 snprintf(service, BUFSIZE, "Repair will disable "
1259 "message displays (0x%x-0x%x) on DV", 1260 "message displays (0x%x-0x%x) on DV",
1260 sense->fmt.f71.md[1], sense->fmt.f71.md[2]); 1261 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1261 break; 1262 break;
1262 case 0x09: 1263 case 0x09:
1263 snprintf(service, BUFSIZE, "Clean DV"); 1264 snprintf(service, BUFSIZE, "Clean DV");
1264 break; 1265 break;
1265 default: 1266 default:
1266 snprintf(service, BUFSIZE, "0x%02x", 1267 snprintf(service, BUFSIZE, "0x%02x",
1267 sense->fmt.f71.smc); 1268 sense->fmt.f71.smc);
1268 } 1269 }
1269 1270
1270 dev_warn (&device->cdev->dev, "Device subsystem information: exception" 1271 dev_warn (&device->cdev->dev, "Device subsystem information: exception"
1271 " %s, service %s\n", exception, service); 1272 " %s, service %s\n", exception, service);
1272 out_nomem: 1273 out_nomem:
1273 kfree(exception); 1274 kfree(exception);
1274 kfree(service); 1275 kfree(service);
1275 } 1276 }
1276 1277
1277 /* 1278 /*
1278 * Print standard ERA Message 1279 * Print standard ERA Message
1279 */ 1280 */
1280 static void 1281 static void
1281 tape_3590_print_era_msg(struct tape_device *device, struct irb *irb) 1282 tape_3590_print_era_msg(struct tape_device *device, struct irb *irb)
1282 { 1283 {
1283 struct tape_3590_sense *sense; 1284 struct tape_3590_sense *sense;
1284 1285
1285 sense = (struct tape_3590_sense *) irb->ecw; 1286 sense = (struct tape_3590_sense *) irb->ecw;
1286 if (sense->mc == 0) 1287 if (sense->mc == 0)
1287 return; 1288 return;
1288 if ((sense->mc > 0) && (sense->mc < TAPE_3590_MAX_MSG)) { 1289 if ((sense->mc > 0) && (sense->mc < TAPE_3590_MAX_MSG)) {
1289 if (tape_3590_msg[sense->mc] != NULL) 1290 if (tape_3590_msg[sense->mc] != NULL)
1290 dev_warn (&device->cdev->dev, "The tape unit has " 1291 dev_warn (&device->cdev->dev, "The tape unit has "
1291 "issued sense message %s\n", 1292 "issued sense message %s\n",
1292 tape_3590_msg[sense->mc]); 1293 tape_3590_msg[sense->mc]);
1293 else 1294 else
1294 dev_warn (&device->cdev->dev, "The tape unit has " 1295 dev_warn (&device->cdev->dev, "The tape unit has "
1295 "issued an unknown sense message code 0x%x\n", 1296 "issued an unknown sense message code 0x%x\n",
1296 sense->mc); 1297 sense->mc);
1297 return; 1298 return;
1298 } 1299 }
1299 if (sense->mc == 0xf0) { 1300 if (sense->mc == 0xf0) {
1300 /* Standard Media Information Message */ 1301 /* Standard Media Information Message */
1301 dev_warn (&device->cdev->dev, "MIM SEV=%i, MC=%02x, ES=%x/%x, " 1302 dev_warn (&device->cdev->dev, "MIM SEV=%i, MC=%02x, ES=%x/%x, "
1302 "RC=%02x-%04x-%02x\n", sense->fmt.f70.sev, sense->mc, 1303 "RC=%02x-%04x-%02x\n", sense->fmt.f70.sev, sense->mc,
1303 sense->fmt.f70.emc, sense->fmt.f70.smc, 1304 sense->fmt.f70.emc, sense->fmt.f70.smc,
1304 sense->fmt.f70.refcode, sense->fmt.f70.mid, 1305 sense->fmt.f70.refcode, sense->fmt.f70.mid,
1305 sense->fmt.f70.fid); 1306 sense->fmt.f70.fid);
1306 tape_3590_print_mim_msg_f0(device, irb); 1307 tape_3590_print_mim_msg_f0(device, irb);
1307 return; 1308 return;
1308 } 1309 }
1309 if (sense->mc == 0xf1) { 1310 if (sense->mc == 0xf1) {
1310 /* Standard I/O Subsystem Service Information Message */ 1311 /* Standard I/O Subsystem Service Information Message */
1311 dev_warn (&device->cdev->dev, "IOSIM SEV=%i, DEVTYPE=3590/%02x," 1312 dev_warn (&device->cdev->dev, "IOSIM SEV=%i, DEVTYPE=3590/%02x,"
1312 " MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n", 1313 " MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n",
1313 sense->fmt.f71.sev, device->cdev->id.dev_model, 1314 sense->fmt.f71.sev, device->cdev->id.dev_model,
1314 sense->mc, sense->fmt.f71.emc, sense->fmt.f71.smc, 1315 sense->mc, sense->fmt.f71.emc, sense->fmt.f71.smc,
1315 sense->fmt.f71.refcode1, sense->fmt.f71.refcode2, 1316 sense->fmt.f71.refcode1, sense->fmt.f71.refcode2,
1316 sense->fmt.f71.refcode3); 1317 sense->fmt.f71.refcode3);
1317 tape_3590_print_io_sim_msg_f1(device, irb); 1318 tape_3590_print_io_sim_msg_f1(device, irb);
1318 return; 1319 return;
1319 } 1320 }
1320 if (sense->mc == 0xf2) { 1321 if (sense->mc == 0xf2) {
1321 /* Standard Device Service Information Message */ 1322 /* Standard Device Service Information Message */
1322 dev_warn (&device->cdev->dev, "DEVSIM SEV=%i, DEVTYPE=3590/%02x" 1323 dev_warn (&device->cdev->dev, "DEVSIM SEV=%i, DEVTYPE=3590/%02x"
1323 ", MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n", 1324 ", MC=%02x, ES=%x/%x, REF=0x%04x-0x%04x-0x%04x\n",
1324 sense->fmt.f71.sev, device->cdev->id.dev_model, 1325 sense->fmt.f71.sev, device->cdev->id.dev_model,
1325 sense->mc, sense->fmt.f71.emc, sense->fmt.f71.smc, 1326 sense->mc, sense->fmt.f71.emc, sense->fmt.f71.smc,
1326 sense->fmt.f71.refcode1, sense->fmt.f71.refcode2, 1327 sense->fmt.f71.refcode1, sense->fmt.f71.refcode2,
1327 sense->fmt.f71.refcode3); 1328 sense->fmt.f71.refcode3);
1328 tape_3590_print_dev_sim_msg_f2(device, irb); 1329 tape_3590_print_dev_sim_msg_f2(device, irb);
1329 return; 1330 return;
1330 } 1331 }
1331 if (sense->mc == 0xf3) { 1332 if (sense->mc == 0xf3) {
1332 /* Standard Library Service Information Message */ 1333 /* Standard Library Service Information Message */
1333 return; 1334 return;
1334 } 1335 }
1335 dev_warn (&device->cdev->dev, "The tape unit has issued an unknown " 1336 dev_warn (&device->cdev->dev, "The tape unit has issued an unknown "
1336 "sense message code %x\n", sense->mc); 1337 "sense message code %x\n", sense->mc);
1337 } 1338 }
1338 1339
1339 static int tape_3590_crypt_error(struct tape_device *device, 1340 static int tape_3590_crypt_error(struct tape_device *device,
1340 struct tape_request *request, struct irb *irb) 1341 struct tape_request *request, struct irb *irb)
1341 { 1342 {
1342 u8 cu_rc, ekm_rc1; 1343 u8 cu_rc, ekm_rc1;
1343 u16 ekm_rc2; 1344 u16 ekm_rc2;
1344 u32 drv_rc; 1345 u32 drv_rc;
1345 const char *bus_id; 1346 const char *bus_id;
1346 char *sense; 1347 char *sense;
1347 1348
1348 sense = ((struct tape_3590_sense *) irb->ecw)->fmt.data; 1349 sense = ((struct tape_3590_sense *) irb->ecw)->fmt.data;
1349 bus_id = dev_name(&device->cdev->dev); 1350 bus_id = dev_name(&device->cdev->dev);
1350 cu_rc = sense[0]; 1351 cu_rc = sense[0];
1351 drv_rc = *((u32*) &sense[5]) & 0xffffff; 1352 drv_rc = *((u32*) &sense[5]) & 0xffffff;
1352 ekm_rc1 = sense[9]; 1353 ekm_rc1 = sense[9];
1353 ekm_rc2 = *((u16*) &sense[10]); 1354 ekm_rc2 = *((u16*) &sense[10]);
1354 if ((cu_rc == 0) && (ekm_rc2 == 0xee31)) 1355 if ((cu_rc == 0) && (ekm_rc2 == 0xee31))
1355 /* key not defined on EKM */ 1356 /* key not defined on EKM */
1356 return tape_3590_erp_basic(device, request, irb, -EKEYREJECTED); 1357 return tape_3590_erp_basic(device, request, irb, -EKEYREJECTED);
1357 if ((cu_rc == 1) || (cu_rc == 2)) 1358 if ((cu_rc == 1) || (cu_rc == 2))
1358 /* No connection to EKM */ 1359 /* No connection to EKM */
1359 return tape_3590_erp_basic(device, request, irb, -ENOTCONN); 1360 return tape_3590_erp_basic(device, request, irb, -ENOTCONN);
1360 1361
1361 dev_err (&device->cdev->dev, "The tape unit failed to obtain the " 1362 dev_err (&device->cdev->dev, "The tape unit failed to obtain the "
1362 "encryption key from EKM\n"); 1363 "encryption key from EKM\n");
1363 1364
1364 return tape_3590_erp_basic(device, request, irb, -ENOKEY); 1365 return tape_3590_erp_basic(device, request, irb, -ENOKEY);
1365 } 1366 }
1366 1367
/*
 * 3590 error Recovery routine:
 * If possible, it tries to recover from the error. If this is not possible,
 * inform the user about the problem.
 *
 * Dispatches on the RC-QRC code from the sense data and delegates to the
 * matching ERP helper; the errno passed to tape_3590_erp_basic() is what
 * the failed request ultimately completes with.
 */
static int
tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
		     struct irb *irb)
{
	struct tape_3590_sense *sense;
	int rc;

#ifdef CONFIG_S390_TAPE_BLOCK
	if (request->op == TO_BLOCK) {
		/*
		 * Recovery for block device requests. Set the block_position
		 * to something invalid and retry.
		 */
		device->blk_data.block_position = -1;
		if (request->retries-- <= 0)
			return tape_3590_erp_failed(device, request, irb, -EIO);
		else
			return tape_3590_erp_retry(device, request, irb);
	}
#endif

	sense = (struct tape_3590_sense *) irb->ecw;

	DBF_EVENT(6, "Unit Check: RQC = %x\n", sense->rc_rqc);

	/*
	 * First check all RC-QRCs where we want to do something special
	 * - "break": basic error recovery is done
	 * - "goto out:": just print error message if available
	 */
	/*
	 * NOTE(review): every case below returns directly, so 'rc' is never
	 * read after this assignment; it looks like a leftover from an older
	 * break/goto-out structure -- confirm before removing.
	 */
	rc = -EIO;
	switch (sense->rc_rqc) {

	case 0x1110:
		tape_3590_print_era_msg(device, irb);
		return tape_3590_erp_read_buf_log(device, request, irb);

	case 0x2011:
		tape_3590_print_era_msg(device, irb);
		return tape_3590_erp_read_alternate(device, request, irb);

	case 0x2230:
	case 0x2231:
		tape_3590_print_era_msg(device, irb);
		return tape_3590_erp_special_interrupt(device, request, irb);
	case 0x2240:
		/* Crypto failure: decode sense and map to an errno. */
		return tape_3590_crypt_error(device, request, irb);

	case 0x3010:
		DBF_EVENT(2, "(%08x): Backward at Beginning of Partition\n",
			  device->cdev_id);
		return tape_3590_erp_basic(device, request, irb, -ENOSPC);
	case 0x3012:
		DBF_EVENT(2, "(%08x): Forward at End of Partition\n",
			  device->cdev_id);
		return tape_3590_erp_basic(device, request, irb, -ENOSPC);
	case 0x3020:
		DBF_EVENT(2, "(%08x): End of Data Mark\n", device->cdev_id);
		return tape_3590_erp_basic(device, request, irb, -ENOSPC);

	case 0x3122:
		DBF_EVENT(2, "(%08x): Rewind Unload initiated\n",
			  device->cdev_id);
		return tape_3590_erp_basic(device, request, irb, -EIO);
	case 0x3123:
		DBF_EVENT(2, "(%08x): Rewind Unload complete\n",
			  device->cdev_id);
		/* Medium is gone: note it and schedule crypto teardown. */
		tape_med_state_set(device, MS_UNLOADED);
		tape_3590_schedule_work(device, TO_CRYPT_OFF);
		return tape_3590_erp_basic(device, request, irb, 0);

	case 0x4010:
		/*
		 * print additional msg since default msg
		 * "device intervention" is not very meaningful
		 */
		tape_med_state_set(device, MS_UNLOADED);
		tape_3590_schedule_work(device, TO_CRYPT_OFF);
		return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM);
	case 0x4012: /* Device Long Busy */
		/* XXX: Also use long busy handling here? */
		DBF_EVENT(6, "(%08x): LONG BUSY\n", device->cdev_id);
		tape_3590_print_era_msg(device, irb);
		return tape_3590_erp_basic(device, request, irb, -EBUSY);
	case 0x4014:
		DBF_EVENT(6, "(%08x): Crypto LONG BUSY\n", device->cdev_id);
		return tape_3590_erp_long_busy(device, request, irb);

	case 0x5010:
		if (sense->rac == 0xd0) {
			/* Swap */
			tape_3590_print_era_msg(device, irb);
			return tape_3590_erp_swap(device, request, irb);
		}
		if (sense->rac == 0x26) {
			/* Read Opposite */
			tape_3590_print_era_msg(device, irb);
			return tape_3590_erp_read_opposite(device, request,
							   irb);
		}
		return tape_3590_erp_basic(device, request, irb, -EIO);
	case 0x5020:
	case 0x5021:
	case 0x5022:
	case 0x5040:
	case 0x5041:
	case 0x5042:
		tape_3590_print_era_msg(device, irb);
		return tape_3590_erp_swap(device, request, irb);

	case 0x5110:
	case 0x5111:
		return tape_3590_erp_basic(device, request, irb, -EMEDIUMTYPE);

	case 0x5120:
	case 0x1120:
		/* No medium present: note it and schedule crypto teardown. */
		tape_med_state_set(device, MS_UNLOADED);
		tape_3590_schedule_work(device, TO_CRYPT_OFF);
		return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM);

	case 0x6020:
		return tape_3590_erp_basic(device, request, irb, -EMEDIUMTYPE);

	case 0x8011:
		return tape_3590_erp_basic(device, request, irb, -EPERM);
	case 0x8013:
		dev_warn (&device->cdev->dev, "A different host has privileged"
			" access to the tape unit\n");
		return tape_3590_erp_basic(device, request, irb, -EPERM);
	default:
		return tape_3590_erp_basic(device, request, irb, -EIO);
	}
}
1505 1506
1506 /* 1507 /*
1507 * 3590 interrupt handler: 1508 * 3590 interrupt handler:
1508 */ 1509 */
1509 static int 1510 static int
1510 tape_3590_irq(struct tape_device *device, struct tape_request *request, 1511 tape_3590_irq(struct tape_device *device, struct tape_request *request,
1511 struct irb *irb) 1512 struct irb *irb)
1512 { 1513 {
1513 if (request == NULL) 1514 if (request == NULL)
1514 return tape_3590_unsolicited_irq(device, irb); 1515 return tape_3590_unsolicited_irq(device, irb);
1515 1516
1516 if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) && 1517 if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) &&
1517 (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) && 1518 (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) &&
1518 (request->op == TO_WRI)) { 1519 (request->op == TO_WRI)) {
1519 /* Write at end of volume */ 1520 /* Write at end of volume */
1520 DBF_EVENT(2, "End of volume\n"); 1521 DBF_EVENT(2, "End of volume\n");
1521 return tape_3590_erp_failed(device, request, irb, -ENOSPC); 1522 return tape_3590_erp_failed(device, request, irb, -ENOSPC);
1522 } 1523 }
1523 1524
1524 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) 1525 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
1525 return tape_3590_unit_check(device, request, irb); 1526 return tape_3590_unit_check(device, request, irb);
1526 1527
1527 if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) { 1528 if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
1528 if (irb->scsw.cmd.dstat == DEV_STAT_UNIT_EXCEP) { 1529 if (irb->scsw.cmd.dstat == DEV_STAT_UNIT_EXCEP) {
1529 if (request->op == TO_FSB || request->op == TO_BSB) 1530 if (request->op == TO_FSB || request->op == TO_BSB)
1530 request->rescnt++; 1531 request->rescnt++;
1531 else 1532 else
1532 DBF_EVENT(5, "Unit Exception!\n"); 1533 DBF_EVENT(5, "Unit Exception!\n");
1533 } 1534 }
1534 1535
1535 return tape_3590_done(device, request); 1536 return tape_3590_done(device, request);
1536 } 1537 }
1537 1538
1538 if (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) { 1539 if (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) {
1539 DBF_EVENT(2, "cannel end\n"); 1540 DBF_EVENT(2, "cannel end\n");
1540 return TAPE_IO_PENDING; 1541 return TAPE_IO_PENDING;
1541 } 1542 }
1542 1543
1543 if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { 1544 if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
1544 DBF_EVENT(2, "Unit Attention when busy..\n"); 1545 DBF_EVENT(2, "Unit Attention when busy..\n");
1545 return TAPE_IO_PENDING; 1546 return TAPE_IO_PENDING;
1546 } 1547 }
1547 1548
1548 DBF_EVENT(6, "xunknownirq\n"); 1549 DBF_EVENT(6, "xunknownirq\n");
1549 tape_dump_sense_dbf(device, request, irb); 1550 tape_dump_sense_dbf(device, request, irb);
1550 return TAPE_IO_STOP; 1551 return TAPE_IO_STOP;
1551 } 1552 }
1552 1553
1553 1554
1554 static int tape_3590_read_dev_chars(struct tape_device *device, 1555 static int tape_3590_read_dev_chars(struct tape_device *device,
1555 struct tape_3590_rdc_data *rdc_data) 1556 struct tape_3590_rdc_data *rdc_data)
1556 { 1557 {
1557 int rc; 1558 int rc;
1558 struct tape_request *request; 1559 struct tape_request *request;
1559 1560
1560 request = tape_alloc_request(1, sizeof(*rdc_data)); 1561 request = tape_alloc_request(1, sizeof(*rdc_data));
1561 if (IS_ERR(request)) 1562 if (IS_ERR(request))
1562 return PTR_ERR(request); 1563 return PTR_ERR(request);
1563 request->op = TO_RDC; 1564 request->op = TO_RDC;
1564 tape_ccw_end(request->cpaddr, CCW_CMD_RDC, sizeof(*rdc_data), 1565 tape_ccw_end(request->cpaddr, CCW_CMD_RDC, sizeof(*rdc_data),
1565 request->cpdata); 1566 request->cpdata);
1566 rc = tape_do_io(device, request); 1567 rc = tape_do_io(device, request);
1567 if (rc == 0) 1568 if (rc == 0)
1568 memcpy(rdc_data, request->cpdata, sizeof(*rdc_data)); 1569 memcpy(rdc_data, request->cpdata, sizeof(*rdc_data));
1569 tape_free_request(request); 1570 tape_free_request(request);
1570 return rc; 1571 return rc;
1571 } 1572 }
1572 1573
1573 /* 1574 /*
1574 * Setup device function 1575 * Setup device function
1575 */ 1576 */
1576 static int 1577 static int
1577 tape_3590_setup_device(struct tape_device *device) 1578 tape_3590_setup_device(struct tape_device *device)
1578 { 1579 {
1579 int rc; 1580 int rc;
1580 struct tape_3590_disc_data *data; 1581 struct tape_3590_disc_data *data;
1581 struct tape_3590_rdc_data *rdc_data; 1582 struct tape_3590_rdc_data *rdc_data;
1582 1583
1583 DBF_EVENT(6, "3590 device setup\n"); 1584 DBF_EVENT(6, "3590 device setup\n");
1584 data = kzalloc(sizeof(struct tape_3590_disc_data), GFP_KERNEL | GFP_DMA); 1585 data = kzalloc(sizeof(struct tape_3590_disc_data), GFP_KERNEL | GFP_DMA);
1585 if (data == NULL) 1586 if (data == NULL)
1586 return -ENOMEM; 1587 return -ENOMEM;
1587 data->read_back_op = READ_PREVIOUS; 1588 data->read_back_op = READ_PREVIOUS;
1588 device->discdata = data; 1589 device->discdata = data;
1589 1590
1590 rdc_data = kmalloc(sizeof(*rdc_data), GFP_KERNEL | GFP_DMA); 1591 rdc_data = kmalloc(sizeof(*rdc_data), GFP_KERNEL | GFP_DMA);
1591 if (!rdc_data) { 1592 if (!rdc_data) {
1592 rc = -ENOMEM; 1593 rc = -ENOMEM;
1593 goto fail_kmalloc; 1594 goto fail_kmalloc;
1594 } 1595 }
1595 rc = tape_3590_read_dev_chars(device, rdc_data); 1596 rc = tape_3590_read_dev_chars(device, rdc_data);
1596 if (rc) { 1597 if (rc) {
1597 DBF_LH(3, "Read device characteristics failed!\n"); 1598 DBF_LH(3, "Read device characteristics failed!\n");
1598 goto fail_rdc_data; 1599 goto fail_rdc_data;
1599 } 1600 }
1600 rc = tape_std_assign(device); 1601 rc = tape_std_assign(device);
1601 if (rc) 1602 if (rc)
1602 goto fail_rdc_data; 1603 goto fail_rdc_data;
1603 if (rdc_data->data[31] == 0x13) { 1604 if (rdc_data->data[31] == 0x13) {
1604 data->crypt_info.capability |= TAPE390_CRYPT_SUPPORTED_MASK; 1605 data->crypt_info.capability |= TAPE390_CRYPT_SUPPORTED_MASK;
1605 tape_3592_disable_crypt(device); 1606 tape_3592_disable_crypt(device);
1606 } else { 1607 } else {
1607 DBF_EVENT(6, "Device has NO crypto support\n"); 1608 DBF_EVENT(6, "Device has NO crypto support\n");
1608 } 1609 }
1609 /* Try to find out if medium is loaded */ 1610 /* Try to find out if medium is loaded */
1610 rc = tape_3590_sense_medium(device); 1611 rc = tape_3590_sense_medium(device);
1611 if (rc) { 1612 if (rc) {
1612 DBF_LH(3, "3590 medium sense returned %d\n", rc); 1613 DBF_LH(3, "3590 medium sense returned %d\n", rc);
1613 goto fail_rdc_data; 1614 goto fail_rdc_data;
1614 } 1615 }
1615 return 0; 1616 return 0;
1616 1617
1617 fail_rdc_data: 1618 fail_rdc_data:
1618 kfree(rdc_data); 1619 kfree(rdc_data);
1619 fail_kmalloc: 1620 fail_kmalloc:
1620 kfree(data); 1621 kfree(data);
1621 return rc; 1622 return rc;
1622 } 1623 }
1623 1624
/*
 * Cleanup device function
 *
 * Undoes tape_3590_setup_device(): waits for any scheduled work (e.g.
 * TO_CRYPT_OFF) to finish, unassigns the drive and releases the
 * discipline data. Order matters: queued work may still touch discdata.
 */
static void
tape_3590_cleanup_device(struct tape_device *device)
{
	flush_scheduled_work();
	tape_std_unassign(device);

	kfree(device->discdata);
	device->discdata = NULL;
}
1636 1637
/*
 * List of 3590 magnetic tape commands.
 *
 * Maps MTIOCTOP operation codes to their handlers; NULL entries mark
 * operations this discipline does not support. Mostly the generic
 * tape_std_* helpers, with device-specific seek/tell.
 */
static tape_mtop_fn tape_3590_mtop[TAPE_NR_MTOPS] = {
	[MTRESET] = tape_std_mtreset,
	[MTFSF] = tape_std_mtfsf,
	[MTBSF] = tape_std_mtbsf,
	[MTFSR] = tape_std_mtfsr,
	[MTBSR] = tape_std_mtbsr,
	[MTWEOF] = tape_std_mtweof,
	[MTREW] = tape_std_mtrew,
	[MTOFFL] = tape_std_mtoffl,
	[MTNOP] = tape_std_mtnop,
	[MTRETEN] = tape_std_mtreten,
	[MTBSFM] = tape_std_mtbsfm,
	[MTFSFM] = tape_std_mtfsfm,
	[MTEOM] = tape_std_mteom,
	[MTERASE] = tape_std_mterase,
	[MTRAS1] = NULL,
	[MTRAS2] = NULL,
	[MTRAS3] = NULL,
	[MTSETBLK] = tape_std_mtsetblk,
	[MTSETDENSITY] = NULL,
	[MTSEEK] = tape_3590_mtseek,
	[MTTELL] = tape_3590_mttell,
	[MTSETDRVBUFFER] = NULL,
	[MTFSS] = NULL,
	[MTBSS] = NULL,
	[MTWSM] = NULL,
	[MTLOCK] = NULL,
	[MTUNLOCK] = NULL,
	[MTLOAD] = tape_std_mtload,
	[MTUNLOAD] = tape_std_mtunload,
	[MTCOMPRESSION] = tape_std_mtcompression,
	[MTSETPART] = NULL,
	[MTMKPART] = NULL
};
1674 1675
/*
 * Tape discipline structure for 3590.
 *
 * Hooks the 3590-specific setup/cleanup, interrupt handling and ioctl
 * paths into the generic tape core; block-device hooks are only built
 * when CONFIG_S390_TAPE_BLOCK is enabled.
 */
static struct tape_discipline tape_discipline_3590 = {
	.owner = THIS_MODULE,
	.setup_device = tape_3590_setup_device,
	.cleanup_device = tape_3590_cleanup_device,
	.process_eov = tape_std_process_eov,
	.irq = tape_3590_irq,
	.read_block = tape_std_read_block,
	.write_block = tape_std_write_block,
#ifdef CONFIG_S390_TAPE_BLOCK
	.bread = tape_3590_bread,
	.free_bread = tape_3590_free_bread,
	.check_locate = tape_3590_check_locate,
#endif
	.ioctl_fn = tape_3590_ioctl,
	.mtop_array = tape_3590_mtop
};
1694 1695
/* CCW device IDs this driver binds to: 3590 and 3592 tape drives. */
static struct ccw_device_id tape_3590_ids[] = {
	{CCW_DEVICE_DEVTYPE(0x3590, 0, 0x3590, 0), .driver_info = tape_3590},
	{CCW_DEVICE_DEVTYPE(0x3592, 0, 0x3592, 0), .driver_info = tape_3592},
	{ /* end of list */ }
};
1700 1701
1701 static int 1702 static int
1702 tape_3590_online(struct ccw_device *cdev) 1703 tape_3590_online(struct ccw_device *cdev)
1703 { 1704 {
1704 return tape_generic_online(dev_get_drvdata(&cdev->dev), 1705 return tape_generic_online(dev_get_drvdata(&cdev->dev),
1705 &tape_discipline_3590); 1706 &tape_discipline_3590);
1706 } 1707 }
1707 1708
/*
 * CCW driver glueing this discipline to the common I/O layer; probe,
 * remove, offline and power-management are handled by the generic tape
 * core, only set_online selects the 3590 discipline.
 */
static struct ccw_driver tape_3590_driver = {
	.name = "tape_3590",
	.owner = THIS_MODULE,
	.ids = tape_3590_ids,
	.probe = tape_generic_probe,
	.remove = tape_generic_remove,
	.set_offline = tape_generic_offline,
	.set_online = tape_3590_online,
	.freeze = tape_generic_pm_suspend,
};
1718 1719
/*
 * Setup discipline structure.
 *
 * Module init: create the debug area and register the CCW driver.
 * Returns the ccw_driver_register() result.
 */
static int
tape_3590_init(void)
{
	int rc;

	/*
	 * NOTE(review): debug_register() may return NULL; the return values
	 * of it and debug_register_view() are not checked here -- confirm
	 * the s390 debug feature tolerates a NULL handle.
	 */
	TAPE_DBF_AREA = debug_register("tape_3590", 2, 2, 4 * sizeof(long));
	debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
#ifdef DBF_LIKE_HELL
	debug_set_level(TAPE_DBF_AREA, 6);
#endif

	DBF_EVENT(3, "3590 init\n");
	/* Register driver for 3590 tapes. */
	rc = ccw_driver_register(&tape_3590_driver);
	if (rc)
		DBF_EVENT(3, "3590 init failed\n");
	else
		DBF_EVENT(3, "3590 registered\n");
	return rc;
}
1742 1743
/*
 * Module exit: undo tape_3590_init() -- unregister the CCW driver first
 * so no new devices can bind, then release the debug area.
 */
static void
tape_3590_exit(void)
{
	ccw_driver_unregister(&tape_3590_driver);

	debug_unregister(TAPE_DBF_AREA);
}
1750 1751
/* Export the ID table for module autoloading on device match. */
MODULE_DEVICE_TABLE(ccw, tape_3590_ids);
MODULE_AUTHOR("(C) 2001,2006 IBM Corporation");
MODULE_DESCRIPTION("Linux on zSeries channel attached 3590 tape device driver");
MODULE_LICENSE("GPL");

module_init(tape_3590_init);
module_exit(tape_3590_exit);
1758 1759
drivers/s390/char/tape_block.c
1 /* 1 /*
2 * drivers/s390/char/tape_block.c 2 * drivers/s390/char/tape_block.c
3 * block device frontend for tape device driver 3 * block device frontend for tape device driver
4 * 4 *
5 * S390 and zSeries version 5 * S390 and zSeries version
6 * Copyright (C) 2001,2003 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Copyright (C) 2001,2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Tuan Ngo-Anh <ngoanh@de.ibm.com> 8 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
9 * Martin Schwidefsky <schwidefsky@de.ibm.com> 9 * Martin Schwidefsky <schwidefsky@de.ibm.com>
10 * Stefan Bader <shbader@de.ibm.com> 10 * Stefan Bader <shbader@de.ibm.com>
11 */ 11 */
12 12
13 #define KMSG_COMPONENT "tape" 13 #define KMSG_COMPONENT "tape"
14 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
14 15
15 #include <linux/fs.h> 16 #include <linux/fs.h>
16 #include <linux/module.h> 17 #include <linux/module.h>
17 #include <linux/blkdev.h> 18 #include <linux/blkdev.h>
18 #include <linux/interrupt.h> 19 #include <linux/interrupt.h>
19 #include <linux/buffer_head.h> 20 #include <linux/buffer_head.h>
20 #include <linux/kernel.h> 21 #include <linux/kernel.h>
21 22
22 #include <asm/debug.h> 23 #include <asm/debug.h>
23 24
24 #define TAPE_DBF_AREA tape_core_dbf 25 #define TAPE_DBF_AREA tape_core_dbf
25 26
26 #include "tape.h" 27 #include "tape.h"
27 28
28 #define TAPEBLOCK_MAX_SEC 100 29 #define TAPEBLOCK_MAX_SEC 100
29 #define TAPEBLOCK_MIN_REQUEUE 3 30 #define TAPEBLOCK_MIN_REQUEUE 3
30 31
31 /* 32 /*
32 * 2003/11/25 Stefan Bader <shbader@de.ibm.com> 33 * 2003/11/25 Stefan Bader <shbader@de.ibm.com>
33 * 34 *
34 * In 2.5/2.6 the block device request function is very likely to be called 35 * In 2.5/2.6 the block device request function is very likely to be called
35 * with disabled interrupts (e.g. generic_unplug_device). So the driver can't 36 * with disabled interrupts (e.g. generic_unplug_device). So the driver can't
36 * just call any function that tries to allocate CCW requests from that con- 37 * just call any function that tries to allocate CCW requests from that con-
37 * text since it might sleep. There are two choices to work around this: 38 * text since it might sleep. There are two choices to work around this:
38 * a) do not allocate with kmalloc but use its own memory pool 39 * a) do not allocate with kmalloc but use its own memory pool
39 * b) take requests from the queue outside that context, knowing that 40 * b) take requests from the queue outside that context, knowing that
40 * allocation might sleep 41 * allocation might sleep
41 */ 42 */
42 43
43 /* 44 /*
44 * file operation structure for tape block frontend 45 * file operation structure for tape block frontend
45 */ 46 */
46 static int tapeblock_open(struct block_device *, fmode_t); 47 static int tapeblock_open(struct block_device *, fmode_t);
47 static int tapeblock_release(struct gendisk *, fmode_t); 48 static int tapeblock_release(struct gendisk *, fmode_t);
48 static int tapeblock_ioctl(struct block_device *, fmode_t, unsigned int, 49 static int tapeblock_ioctl(struct block_device *, fmode_t, unsigned int,
49 unsigned long); 50 unsigned long);
50 static int tapeblock_medium_changed(struct gendisk *); 51 static int tapeblock_medium_changed(struct gendisk *);
51 static int tapeblock_revalidate_disk(struct gendisk *); 52 static int tapeblock_revalidate_disk(struct gendisk *);
52 53
/*
 * Block-device operations for the tape block frontend; each hook is
 * implemented below / later in this file.
 */
static const struct block_device_operations tapeblock_fops = {
	.owner		 = THIS_MODULE,
	.open		 = tapeblock_open,
	.release	 = tapeblock_release,
	.ioctl		 = tapeblock_ioctl,
	.media_changed   = tapeblock_medium_changed,
	.revalidate_disk = tapeblock_revalidate_disk,
};

/* Block major number; 0 lets register_blkdev() pick one dynamically. */
static int tapeblock_major = 0;
63 64
64 static void 65 static void
65 tapeblock_trigger_requeue(struct tape_device *device) 66 tapeblock_trigger_requeue(struct tape_device *device)
66 { 67 {
67 /* Protect against rescheduling. */ 68 /* Protect against rescheduling. */
68 if (atomic_cmpxchg(&device->blk_data.requeue_scheduled, 0, 1) != 0) 69 if (atomic_cmpxchg(&device->blk_data.requeue_scheduled, 0, 1) != 0)
69 return; 70 return;
70 schedule_work(&device->blk_data.requeue_task); 71 schedule_work(&device->blk_data.requeue_task);
71 } 72 }
72 73
73 /* 74 /*
74 * Post finished request. 75 * Post finished request.
75 */ 76 */
76 static void 77 static void
77 __tapeblock_end_request(struct tape_request *ccw_req, void *data) 78 __tapeblock_end_request(struct tape_request *ccw_req, void *data)
78 { 79 {
79 struct tape_device *device; 80 struct tape_device *device;
80 struct request *req; 81 struct request *req;
81 82
82 DBF_LH(6, "__tapeblock_end_request()\n"); 83 DBF_LH(6, "__tapeblock_end_request()\n");
83 84
84 device = ccw_req->device; 85 device = ccw_req->device;
85 req = (struct request *) data; 86 req = (struct request *) data;
86 blk_end_request_all(req, (ccw_req->rc == 0) ? 0 : -EIO); 87 blk_end_request_all(req, (ccw_req->rc == 0) ? 0 : -EIO);
87 if (ccw_req->rc == 0) 88 if (ccw_req->rc == 0)
88 /* Update position. */ 89 /* Update position. */
89 device->blk_data.block_position = 90 device->blk_data.block_position =
90 (blk_rq_pos(req) + blk_rq_sectors(req)) >> TAPEBLOCK_HSEC_S2B; 91 (blk_rq_pos(req) + blk_rq_sectors(req)) >> TAPEBLOCK_HSEC_S2B;
91 else 92 else
92 /* We lost the position information due to an error. */ 93 /* We lost the position information due to an error. */
93 device->blk_data.block_position = -1; 94 device->blk_data.block_position = -1;
94 device->discipline->free_bread(ccw_req); 95 device->discipline->free_bread(ccw_req);
95 if (!list_empty(&device->req_queue) || 96 if (!list_empty(&device->req_queue) ||
96 blk_peek_request(device->blk_data.request_queue)) 97 blk_peek_request(device->blk_data.request_queue))
97 tapeblock_trigger_requeue(device); 98 tapeblock_trigger_requeue(device);
98 } 99 }
99 100
/*
 * Feed the tape device CCW queue with requests supplied in a list.
 *
 * Builds a CCW chain for one block request via the discipline's
 * bread() callback and starts it asynchronously.  On any failure the
 * block request is completed with -EIO right here; there are no
 * retries on the enqueue path.
 */
static int
tapeblock_start_request(struct tape_device *device, struct request *req)
{
	struct tape_request *	ccw_req;
	int			rc;

	DBF_LH(6, "tapeblock_start_request(%p, %p)\n", device, req);

	/* Let the discipline translate the block request into a CCW chain. */
	ccw_req = device->discipline->bread(device, req);
	if (IS_ERR(ccw_req)) {
		DBF_EVENT(1, "TBLOCK: bread failed\n");
		blk_end_request_all(req, -EIO);
		return PTR_ERR(ccw_req);
	}
	/* Completion is posted asynchronously through the callback. */
	ccw_req->callback = __tapeblock_end_request;
	ccw_req->callback_data = (void *) req;
	ccw_req->retries = TAPEBLOCK_RETRIES;

	rc = tape_do_io_async(device, ccw_req);
	if (rc) {
		/*
		 * Start/enqueueing failed. No retries in
		 * this case.
		 */
		blk_end_request_all(req, -EIO);
		device->discipline->free_bread(ccw_req);
	}

	return rc;
}
133 134
/*
 * Move requests from the block device request queue to the tape device ccw
 * queue.
 *
 * Runs from the requeue work item scheduled by
 * tapeblock_trigger_requeue().  At most TAPEBLOCK_MIN_REQUEUE requests
 * are kept on the CCW queue at a time; WRITE requests are rejected
 * because this block frontend is read-only.
 */
static void
tapeblock_requeue(struct work_struct *work) {
	struct tape_blk_data *	blkdat;
	struct tape_device *	device;
	struct request_queue *	queue;
	int			nr_queued;
	struct request *	req;
	struct list_head *	l;
	int			rc;

	blkdat = container_of(work, struct tape_blk_data, requeue_task);
	device = blkdat->device;
	if (!device)
		return;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	queue = device->blk_data.request_queue;

	/* Count number of requests on ccw queue. */
	nr_queued = 0;
	list_for_each(l, &device->req_queue)
		nr_queued++;
	/* NOTE(review): only the lock is dropped here; interrupts remain
	 * disabled until the request_queue_lock block below re-enables
	 * them - presumably intentional, confirm against locking rules. */
	spin_unlock(get_ccwdev_lock(device->cdev));

	spin_lock_irq(&device->blk_data.request_queue_lock);
	while (
		!blk_queue_plugged(queue) &&
		blk_peek_request(queue) &&
		nr_queued < TAPEBLOCK_MIN_REQUEUE
	) {
		req = blk_fetch_request(queue);
		if (rq_data_dir(req) == WRITE) {
			DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
			/* Drop the queue lock while completing the request. */
			spin_unlock_irq(&device->blk_data.request_queue_lock);
			blk_end_request_all(req, -EIO);
			spin_lock_irq(&device->blk_data.request_queue_lock);
			continue;
		}
		nr_queued++;
		spin_unlock_irq(&device->blk_data.request_queue_lock);
		/* An error return is deliberately ignored here:
		 * tapeblock_start_request() already completes the block
		 * request with -EIO on failure. */
		rc = tapeblock_start_request(device, req);
		spin_lock_irq(&device->blk_data.request_queue_lock);
	}
	spin_unlock_irq(&device->blk_data.request_queue_lock);
	/* Allow tapeblock_trigger_requeue() to schedule us again. */
	atomic_set(&device->blk_data.requeue_scheduled, 0);
}
184 185
185 /* 186 /*
186 * Tape request queue function. Called from ll_rw_blk.c 187 * Tape request queue function. Called from ll_rw_blk.c
187 */ 188 */
188 static void 189 static void
189 tapeblock_request_fn(struct request_queue *queue) 190 tapeblock_request_fn(struct request_queue *queue)
190 { 191 {
191 struct tape_device *device; 192 struct tape_device *device;
192 193
193 device = (struct tape_device *) queue->queuedata; 194 device = (struct tape_device *) queue->queuedata;
194 DBF_LH(6, "tapeblock_request_fn(device=%p)\n", device); 195 DBF_LH(6, "tapeblock_request_fn(device=%p)\n", device);
195 BUG_ON(device == NULL); 196 BUG_ON(device == NULL);
196 tapeblock_trigger_requeue(device); 197 tapeblock_trigger_requeue(device);
197 } 198 }
198 199
/*
 * This function is called for every new tapedevice
 *
 * Sets up the block device frontend for one tape device: request
 * queue, noop elevator, queue limits and the gendisk.  Takes three
 * references on the tape device (disk->private_data, queue->queuedata
 * and one for the requeue work item); tapeblock_cleanup_device()
 * drops them again.
 */
int
tapeblock_setup_device(struct tape_device * device)
{
	struct tape_blk_data *	blkdat;
	struct gendisk *	disk;
	int			rc;

	blkdat = &device->blk_data;
	blkdat->device = device;
	spin_lock_init(&blkdat->request_queue_lock);
	atomic_set(&blkdat->requeue_scheduled, 0);

	blkdat->request_queue = blk_init_queue(
		tapeblock_request_fn,
		&blkdat->request_queue_lock
	);
	if (!blkdat->request_queue)
		return -ENOMEM;

	/* Tapes are strictly sequential; install the noop elevator so
	 * requests are not reordered. */
	elevator_exit(blkdat->request_queue->elevator);
	rc = elevator_init(blkdat->request_queue, "noop");
	if (rc)
		goto cleanup_queue;

	blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE);
	blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC);
	/* -1L: effectively unlimited segments / segment size / boundary. */
	blk_queue_max_phys_segments(blkdat->request_queue, -1L);
	blk_queue_max_hw_segments(blkdat->request_queue, -1L);
	blk_queue_max_segment_size(blkdat->request_queue, -1L);
	blk_queue_segment_boundary(blkdat->request_queue, -1L);

	disk = alloc_disk(1);
	if (!disk) {
		rc = -ENOMEM;
		goto cleanup_queue;
	}

	disk->major = tapeblock_major;
	disk->first_minor = device->first_minor;
	disk->fops = &tapeblock_fops;
	disk->private_data = tape_get_device(device);
	disk->queue = blkdat->request_queue;
	/* Capacity stays 0 until the first revalidation. */
	set_capacity(disk, 0);
	sprintf(disk->disk_name, "btibm%d",
		device->first_minor / TAPE_MINORS_PER_DEV);

	blkdat->disk = disk;
	/* Force tapeblock_revalidate_disk() on the first open. */
	blkdat->medium_changed = 1;
	blkdat->request_queue->queuedata = tape_get_device(device);

	add_disk(disk);

	/* Reference for the requeue work item. */
	tape_get_device(device);
	INIT_WORK(&blkdat->requeue_task, tapeblock_requeue);

	return 0;

cleanup_queue:
	blk_cleanup_queue(blkdat->request_queue);
	blkdat->request_queue = NULL;

	return rc;
}
265 266
/*
 * Tear down the block device frontend of a tape device, dropping the
 * references taken in tapeblock_setup_device() one by one.
 */
void
tapeblock_cleanup_device(struct tape_device *device)
{
	/* Make sure no requeue work item is still running. */
	flush_scheduled_work();
	/* Drop the reference taken for the requeue work item. */
	tape_put_device(device);

	if (!device->blk_data.disk) {
		goto cleanup_queue;
	}

	del_gendisk(device->blk_data.disk);
	/* Drop the reference held via disk->private_data. */
	device->blk_data.disk->private_data = NULL;
	tape_put_device(device);
	put_disk(device->blk_data.disk);

	device->blk_data.disk = NULL;
cleanup_queue:
	/* Drop the reference held via request_queue->queuedata. */
	device->blk_data.request_queue->queuedata = NULL;
	tape_put_device(device);

	blk_cleanup_queue(device->blk_data.request_queue);
	device->blk_data.request_queue = NULL;
}
289 290
/*
 * Detect number of blocks of the tape.
 * FIXME: can we extent this to detect the blocks size as well ?
 *
 * Seeks past the last filemark (MTFSFM), records the position
 * (MTTELL), seeks back to the start of the file (MTBSF) and reads the
 * position again; the difference is the number of recorded blocks,
 * which is then set as the gendisk capacity.
 */
static int
tapeblock_revalidate_disk(struct gendisk *disk)
{
	struct tape_device *	device;
	unsigned int		nr_of_blks;
	int			rc;

	device = (struct tape_device *) disk->private_data;
	BUG_ON(!device);

	/* Nothing to do while the medium is unchanged. */
	if (!device->blk_data.medium_changed)
		return 0;

	/* Forward space past the filemark at the end of the data. */
	rc = tape_mtop(device, MTFSFM, 1);
	if (rc)
		return rc;

	rc = tape_mtop(device, MTTELL, 1);
	if (rc < 0)
		return rc;

	pr_info("%s: Determining the size of the recorded area...\n",
		dev_name(&device->cdev->dev));
	DBF_LH(3, "Image file ends at %d\n", rc);
	nr_of_blks = rc;

	/* This will fail for the first file. Catch the error by checking the
	 * position. */
	tape_mtop(device, MTBSF, 1);

	rc = tape_mtop(device, MTTELL, 1);
	if (rc < 0)
		return rc;

	/* The start position must not lie behind the end position. */
	if (rc > nr_of_blks)
		return -EINVAL;

	DBF_LH(3, "Image file starts at %d\n", rc);
	device->bof = rc;
	nr_of_blks -= rc;

	pr_info("%s: The size of the recorded area is %i blocks\n",
		dev_name(&device->cdev->dev), nr_of_blks);
	/* set_capacity() expects 512-byte sectors. */
	set_capacity(device->blk_data.disk,
		nr_of_blks*(TAPEBLOCK_HSEC_SIZE/512));

	device->blk_data.block_position = 0;
	device->blk_data.medium_changed = 0;
	return 0;
}
344 345
345 static int 346 static int
346 tapeblock_medium_changed(struct gendisk *disk) 347 tapeblock_medium_changed(struct gendisk *disk)
347 { 348 {
348 struct tape_device *device; 349 struct tape_device *device;
349 350
350 device = (struct tape_device *) disk->private_data; 351 device = (struct tape_device *) disk->private_data;
351 DBF_LH(6, "tapeblock_medium_changed(%p) = %d\n", 352 DBF_LH(6, "tapeblock_medium_changed(%p) = %d\n",
352 device, device->blk_data.medium_changed); 353 device, device->blk_data.medium_changed);
353 354
354 return device->blk_data.medium_changed; 355 return device->blk_data.medium_changed;
355 } 356 }
356 357
/*
 * Block frontend tape device open function.
 *
 * Refuses to open while tapemarks still need to be written (a
 * character-device write left the tape unterminated), then opens the
 * tape and revalidates the disk.  The device reference taken here is
 * kept until tapeblock_release().
 */
static int
tapeblock_open(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *	disk = bdev->bd_disk;
	struct tape_device *	device;
	int			rc;

	device = tape_get_device(disk->private_data);

	if (device->required_tapemarks) {
		DBF_EVENT(2, "TBLOCK: missing tapemarks\n");
		pr_warning("%s: Opening the tape failed because of missing "
			   "end-of-file marks\n", dev_name(&device->cdev->dev));
		rc = -EPERM;
		goto put_device;
	}

	rc = tape_open(device);
	if (rc)
		goto put_device;

	rc = tapeblock_revalidate_disk(disk);
	if (rc)
		goto release;

	/*
	 * Note: The reference to <device> is hold until the release function
	 * is called.
	 */
	tape_state_set(device, TS_BLKUSE);
	return 0;

release:
	tape_release(device);
 put_device:
	tape_put_device(device);
	return rc;
}
398 399
399 /* 400 /*
400 * Block frontend tape device release function. 401 * Block frontend tape device release function.
401 * 402 *
402 * Note: One reference to the tape device was made by the open function. So 403 * Note: One reference to the tape device was made by the open function. So
403 * we just get the pointer here and release the reference. 404 * we just get the pointer here and release the reference.
404 */ 405 */
405 static int 406 static int
406 tapeblock_release(struct gendisk *disk, fmode_t mode) 407 tapeblock_release(struct gendisk *disk, fmode_t mode)
407 { 408 {
408 struct tape_device *device = disk->private_data; 409 struct tape_device *device = disk->private_data;
409 410
410 tape_state_set(device, TS_IN_USE); 411 tape_state_set(device, TS_IN_USE);
411 tape_release(device); 412 tape_release(device);
412 tape_put_device(device); 413 tape_put_device(device);
413 414
414 return 0; 415 return 0;
415 } 416 }
416 417
417 /* 418 /*
418 * Support of some generic block device IOCTLs. 419 * Support of some generic block device IOCTLs.
419 */ 420 */
420 static int 421 static int
421 tapeblock_ioctl( 422 tapeblock_ioctl(
422 struct block_device * bdev, 423 struct block_device * bdev,
423 fmode_t mode, 424 fmode_t mode,
424 unsigned int command, 425 unsigned int command,
425 unsigned long arg 426 unsigned long arg
426 ) { 427 ) {
427 int rc; 428 int rc;
428 int minor; 429 int minor;
429 struct gendisk *disk = bdev->bd_disk; 430 struct gendisk *disk = bdev->bd_disk;
430 struct tape_device *device; 431 struct tape_device *device;
431 432
432 rc = 0; 433 rc = 0;
433 BUG_ON(!disk); 434 BUG_ON(!disk);
434 device = disk->private_data; 435 device = disk->private_data;
435 BUG_ON(!device); 436 BUG_ON(!device);
436 minor = MINOR(bdev->bd_dev); 437 minor = MINOR(bdev->bd_dev);
437 438
438 DBF_LH(6, "tapeblock_ioctl(0x%0x)\n", command); 439 DBF_LH(6, "tapeblock_ioctl(0x%0x)\n", command);
439 DBF_LH(6, "device = %d:%d\n", tapeblock_major, minor); 440 DBF_LH(6, "device = %d:%d\n", tapeblock_major, minor);
440 441
441 switch (command) { 442 switch (command) {
442 /* Refuse some IOCTL calls without complaining (mount). */ 443 /* Refuse some IOCTL calls without complaining (mount). */
443 case 0x5310: /* CDROMMULTISESSION */ 444 case 0x5310: /* CDROMMULTISESSION */
444 rc = -EINVAL; 445 rc = -EINVAL;
445 break; 446 break;
446 default: 447 default:
447 rc = -EINVAL; 448 rc = -EINVAL;
448 } 449 }
449 450
450 return rc; 451 return rc;
451 } 452 }
452 453
453 /* 454 /*
454 * Initialize block device frontend. 455 * Initialize block device frontend.
455 */ 456 */
456 int 457 int
457 tapeblock_init(void) 458 tapeblock_init(void)
458 { 459 {
459 int rc; 460 int rc;
460 461
461 /* Register the tape major number to the kernel */ 462 /* Register the tape major number to the kernel */
462 rc = register_blkdev(tapeblock_major, "tBLK"); 463 rc = register_blkdev(tapeblock_major, "tBLK");
463 if (rc < 0) 464 if (rc < 0)
464 return rc; 465 return rc;
465 466
466 if (tapeblock_major == 0) 467 if (tapeblock_major == 0)
467 tapeblock_major = rc; 468 tapeblock_major = rc;
468 return 0; 469 return 0;
469 } 470 }
470 471
/*
 * Deregister major for block device frontend
 */
void
tapeblock_exit(void)
{
	/* Release the "tBLK" major acquired in tapeblock_init(). */
	unregister_blkdev(tapeblock_major, "tBLK");
}
479 480
drivers/s390/char/tape_char.c
1 /* 1 /*
2 * drivers/s390/char/tape_char.c 2 * drivers/s390/char/tape_char.c
3 * character device frontend for tape device driver 3 * character device frontend for tape device driver
4 * 4 *
5 * S390 and zSeries version 5 * S390 and zSeries version
6 * Copyright IBM Corp. 2001,2006 6 * Copyright IBM Corp. 2001,2006
7 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Michael Holzheu <holzheu@de.ibm.com> 8 * Michael Holzheu <holzheu@de.ibm.com>
9 * Tuan Ngo-Anh <ngoanh@de.ibm.com> 9 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
10 * Martin Schwidefsky <schwidefsky@de.ibm.com> 10 * Martin Schwidefsky <schwidefsky@de.ibm.com>
11 */ 11 */
12 12
13 #define KMSG_COMPONENT "tape"
14 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
15
13 #include <linux/module.h> 16 #include <linux/module.h>
14 #include <linux/types.h> 17 #include <linux/types.h>
15 #include <linux/proc_fs.h> 18 #include <linux/proc_fs.h>
16 #include <linux/mtio.h> 19 #include <linux/mtio.h>
17 #include <linux/smp_lock.h> 20 #include <linux/smp_lock.h>
18 21
19 #include <asm/uaccess.h> 22 #include <asm/uaccess.h>
20 23
21 #define TAPE_DBF_AREA tape_core_dbf 24 #define TAPE_DBF_AREA tape_core_dbf
22 25
23 #include "tape.h" 26 #include "tape.h"
24 #include "tape_std.h" 27 #include "tape_std.h"
25 #include "tape_class.h" 28 #include "tape_class.h"
26 29
27 #define TAPECHAR_MAJOR 0 /* get dynamic major */ 30 #define TAPECHAR_MAJOR 0 /* get dynamic major */
28 31
/*
 * file operation structure for tape character frontend
 */
static ssize_t tapechar_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t tapechar_write(struct file *, const char __user *, size_t, loff_t *);
static int tapechar_open(struct inode *,struct file *);
static int tapechar_release(struct inode *,struct file *);
static long tapechar_ioctl(struct file *, unsigned int, unsigned long);
static long tapechar_compat_ioctl(struct file *, unsigned int,
				unsigned long);

static const struct file_operations tape_fops =
{
	.owner = THIS_MODULE,
	.read = tapechar_read,
	.write = tapechar_write,
	.unlocked_ioctl = tapechar_ioctl,
	.compat_ioctl = tapechar_compat_ioctl,
	.open = tapechar_open,
	.release = tapechar_release,
};

/* Character device major; TAPECHAR_MAJOR is 0, i.e. dynamic assignment. */
static int tapechar_major = TAPECHAR_MAJOR;
52 55
/*
 * This function is called for every new tapedevice
 *
 * Registers the two character device nodes of a tape device: the
 * non-rewinding "ntibmN" node and the rewinding "rtibmN" node.
 *
 * NOTE(review): the results of register_tape_dev() are stored without
 * an error check; presumably failures are tolerated here - verify.
 */
int
tapechar_setup_device(struct tape_device * device)
{
	char	device_name[20];

	sprintf(device_name, "ntibm%i", device->first_minor / 2);
	device->nt = register_tape_dev(
		&device->cdev->dev,
		MKDEV(tapechar_major, device->first_minor),
		&tape_fops,
		device_name,
		"non-rewinding"
	);
	/* Reuse the buffer: "ntibmN" becomes "rtibmN". */
	device_name[0] = 'r';
	device->rt = register_tape_dev(
		&device->cdev->dev,
		MKDEV(tapechar_major, device->first_minor + 1),
		&tape_fops,
		device_name,
		"rewinding"
	);

	return 0;
}
80 83
/*
 * Remove the rewinding and non-rewinding character device nodes that
 * tapechar_setup_device() registered.
 */
void
tapechar_cleanup_device(struct tape_device *device)
{
	unregister_tape_dev(&device->cdev->dev, device->rt);
	device->rt = NULL;
	unregister_tape_dev(&device->cdev->dev, device->nt);
	device->nt = NULL;
}
89 92
90 static int 93 static int
91 tapechar_check_idalbuffer(struct tape_device *device, size_t block_size) 94 tapechar_check_idalbuffer(struct tape_device *device, size_t block_size)
92 { 95 {
93 struct idal_buffer *new; 96 struct idal_buffer *new;
94 97
95 if (device->char_data.idal_buf != NULL && 98 if (device->char_data.idal_buf != NULL &&
96 device->char_data.idal_buf->size == block_size) 99 device->char_data.idal_buf->size == block_size)
97 return 0; 100 return 0;
98 101
99 if (block_size > MAX_BLOCKSIZE) { 102 if (block_size > MAX_BLOCKSIZE) {
100 DBF_EVENT(3, "Invalid blocksize (%zd > %d)\n", 103 DBF_EVENT(3, "Invalid blocksize (%zd > %d)\n",
101 block_size, MAX_BLOCKSIZE); 104 block_size, MAX_BLOCKSIZE);
102 return -EINVAL; 105 return -EINVAL;
103 } 106 }
104 107
105 /* The current idal buffer is not correct. Allocate a new one. */ 108 /* The current idal buffer is not correct. Allocate a new one. */
106 new = idal_buffer_alloc(block_size, 0); 109 new = idal_buffer_alloc(block_size, 0);
107 if (IS_ERR(new)) 110 if (IS_ERR(new))
108 return -ENOMEM; 111 return -ENOMEM;
109 112
110 if (device->char_data.idal_buf != NULL) 113 if (device->char_data.idal_buf != NULL)
111 idal_buffer_free(device->char_data.idal_buf); 114 idal_buffer_free(device->char_data.idal_buf);
112 115
113 device->char_data.idal_buf = new; 116 device->char_data.idal_buf = new;
114 117
115 return 0; 118 return 0;
116 } 119 }
117 120
/*
 * Tape device read function
 *
 * Reads one tape block of at most @count bytes into the user buffer.
 * Returns the number of bytes read or a negative error code.
 */
static ssize_t
tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
{
	struct tape_device *device;
	struct tape_request *request;
	size_t block_size;
	int rc;

	DBF_EVENT(6, "TCHAR:read\n");
	device = (struct tape_device *) filp->private_data;

	/*
	 * If the tape isn't terminated yet, do it now. And since we then
	 * are at the end of the tape there wouldn't be anything to read
	 * anyways. So we return immediatly.
	 */
	if(device->required_tapemarks) {
		return tape_std_terminate_write(device);
	}

	/* Find out block size to use */
	if (device->char_data.block_size != 0) {
		if (count < device->char_data.block_size) {
			DBF_EVENT(3, "TCHAR:read smaller than block "
				  "size was requested\n");
			return -EINVAL;
		}
		block_size = device->char_data.block_size;
	} else {
		/* Variable block mode: read whatever the caller asked for. */
		block_size = count;
	}

	rc = tapechar_check_idalbuffer(device, block_size);
	if (rc)
		return rc;

#ifdef CONFIG_S390_TAPE_BLOCK
	/* Changes position. */
	device->blk_data.medium_changed = 1;
#endif

	DBF_EVENT(6, "TCHAR:nbytes: %lx\n", block_size);
	/* Let the discipline build the ccw chain. */
	request = device->discipline->read_block(device, block_size);
	if (IS_ERR(request))
		return PTR_ERR(request);
	/* Execute it. */
	rc = tape_do_io(device, request);
	if (rc == 0) {
		/* rescnt is the residual count; the difference is the
		 * number of bytes actually transferred. */
		rc = block_size - request->rescnt;
		DBF_EVENT(6, "TCHAR:rbytes: %x\n", rc);
		/* Copy data from idal buffer to user space. */
		if (idal_buffer_to_user(device->char_data.idal_buf,
					data, rc) != 0)
			rc = -EFAULT;
	}
	tape_free_request(request);
	return rc;
}
180 183
/*
 * Tape device write function
 *
 * Writes @count bytes from user space as one or more tape blocks.
 * Returns the number of bytes written or a negative error code.
 */
static ssize_t
tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos)
{
	struct tape_device *device;
	struct tape_request *request;
	size_t block_size;
	size_t written;
	int nblocks;
	int i, rc;

	DBF_EVENT(6, "TCHAR:write\n");
	device = (struct tape_device *) filp->private_data;
	/* Find out block size and number of blocks */
	if (device->char_data.block_size != 0) {
		if (count < device->char_data.block_size) {
			DBF_EVENT(3, "TCHAR:write smaller than block "
				  "size was requested\n");
			return -EINVAL;
		}
		block_size = device->char_data.block_size;
		nblocks = count / block_size;
	} else {
		/* Variable block mode: write everything as one block. */
		block_size = count;
		nblocks = 1;
	}

	rc = tapechar_check_idalbuffer(device, block_size);
	if (rc)
		return rc;

#ifdef CONFIG_S390_TAPE_BLOCK
	/* Changes position. */
	device->blk_data.medium_changed = 1;
#endif

	DBF_EVENT(6,"TCHAR:nbytes: %lx\n", block_size);
	DBF_EVENT(6, "TCHAR:nblocks: %x\n", nblocks);
	/* Let the discipline build the ccw chain. */
	request = device->discipline->write_block(device, block_size);
	if (IS_ERR(request))
		return PTR_ERR(request);
	rc = 0;
	written = 0;
	/* The same CCW request is reused for every block. */
	for (i = 0; i < nblocks; i++) {
		/* Copy data from user space to idal buffer. */
		if (idal_buffer_from_user(device->char_data.idal_buf,
					data, block_size)) {
			rc = -EFAULT;
			break;
		}
		rc = tape_do_io(device, request);
		if (rc)
			break;
		DBF_EVENT(6, "TCHAR:wbytes: %lx\n",
			  block_size - request->rescnt);
		written += block_size - request->rescnt;
		/* A non-zero residual count means a short write; stop. */
		if (request->rescnt != 0)
			break;
		data += block_size;
	}
	tape_free_request(request);
	if (rc == -ENOSPC) {
		/*
		 * Ok, the device has no more space. It has NOT written
		 * the block.
		 */
		if (device->discipline->process_eov)
			device->discipline->process_eov(device);
		if (written > 0)
			rc = 0;

	}

	/*
	 * After doing a write we always need two tapemarks to correctly
	 * terminate the tape (one to terminate the file, the second to
	 * flag the end of recorded data.
	 * Since process_eov positions the tape in front of the written
	 * tapemark it doesn't hurt to write two marks again.
	 */
	if (!rc)
		device->required_tapemarks = 2;

	return rc ? rc : written;
}
269 272
270 /* 273 /*
271 * Character frontend tape device open function. 274 * Character frontend tape device open function.
272 */ 275 */
273 static int 276 static int
274 tapechar_open (struct inode *inode, struct file *filp) 277 tapechar_open (struct inode *inode, struct file *filp)
275 { 278 {
276 struct tape_device *device; 279 struct tape_device *device;
277 int minor, rc; 280 int minor, rc;
278 281
279 DBF_EVENT(6, "TCHAR:open: %i:%i\n", 282 DBF_EVENT(6, "TCHAR:open: %i:%i\n",
280 imajor(filp->f_path.dentry->d_inode), 283 imajor(filp->f_path.dentry->d_inode),
281 iminor(filp->f_path.dentry->d_inode)); 284 iminor(filp->f_path.dentry->d_inode));
282 285
283 if (imajor(filp->f_path.dentry->d_inode) != tapechar_major) 286 if (imajor(filp->f_path.dentry->d_inode) != tapechar_major)
284 return -ENODEV; 287 return -ENODEV;
285 288
286 minor = iminor(filp->f_path.dentry->d_inode); 289 minor = iminor(filp->f_path.dentry->d_inode);
287 device = tape_find_device(minor / TAPE_MINORS_PER_DEV); 290 device = tape_find_device(minor / TAPE_MINORS_PER_DEV);
288 if (IS_ERR(device)) { 291 if (IS_ERR(device)) {
289 DBF_EVENT(3, "TCHAR:open: tape_find_device() failed\n"); 292 DBF_EVENT(3, "TCHAR:open: tape_find_device() failed\n");
290 return PTR_ERR(device); 293 return PTR_ERR(device);
291 } 294 }
292 295
293 rc = tape_open(device); 296 rc = tape_open(device);
294 if (rc == 0) { 297 if (rc == 0) {
295 filp->private_data = device; 298 filp->private_data = device;
296 nonseekable_open(inode, filp); 299 nonseekable_open(inode, filp);
297 } else 300 } else
298 tape_put_device(device); 301 tape_put_device(device);
299 302
300 return rc; 303 return rc;
301 } 304 }
302 305
303 /* 306 /*
304 * Character frontend tape device release function. 307 * Character frontend tape device release function.
305 */ 308 */
306 309
307 static int 310 static int
308 tapechar_release(struct inode *inode, struct file *filp) 311 tapechar_release(struct inode *inode, struct file *filp)
309 { 312 {
310 struct tape_device *device; 313 struct tape_device *device;
311 314
312 DBF_EVENT(6, "TCHAR:release: %x\n", iminor(inode)); 315 DBF_EVENT(6, "TCHAR:release: %x\n", iminor(inode));
313 device = (struct tape_device *) filp->private_data; 316 device = (struct tape_device *) filp->private_data;
314 317
315 /* 318 /*
316 * If this is the rewinding tape minor then rewind. In that case we 319 * If this is the rewinding tape minor then rewind. In that case we
317 * write all required tapemarks. Otherwise only one to terminate the 320 * write all required tapemarks. Otherwise only one to terminate the
318 * file. 321 * file.
319 */ 322 */
320 if ((iminor(inode) & 1) != 0) { 323 if ((iminor(inode) & 1) != 0) {
321 if (device->required_tapemarks) 324 if (device->required_tapemarks)
322 tape_std_terminate_write(device); 325 tape_std_terminate_write(device);
323 tape_mtop(device, MTREW, 1); 326 tape_mtop(device, MTREW, 1);
324 } else { 327 } else {
325 if (device->required_tapemarks > 1) { 328 if (device->required_tapemarks > 1) {
326 if (tape_mtop(device, MTWEOF, 1) == 0) 329 if (tape_mtop(device, MTWEOF, 1) == 0)
327 device->required_tapemarks--; 330 device->required_tapemarks--;
328 } 331 }
329 } 332 }
330 333
331 if (device->char_data.idal_buf != NULL) { 334 if (device->char_data.idal_buf != NULL) {
332 idal_buffer_free(device->char_data.idal_buf); 335 idal_buffer_free(device->char_data.idal_buf);
333 device->char_data.idal_buf = NULL; 336 device->char_data.idal_buf = NULL;
334 } 337 }
335 tape_release(device); 338 tape_release(device);
336 filp->private_data = NULL; 339 filp->private_data = NULL;
337 tape_put_device(device); 340 tape_put_device(device);
338 341
339 return 0; 342 return 0;
340 } 343 }
341 344
342 /* 345 /*
343 * Tape device io controls. 346 * Tape device io controls.
344 */ 347 */
345 static int 348 static int
346 __tapechar_ioctl(struct tape_device *device, 349 __tapechar_ioctl(struct tape_device *device,
347 unsigned int no, unsigned long data) 350 unsigned int no, unsigned long data)
348 { 351 {
349 int rc; 352 int rc;
350 353
351 if (no == MTIOCTOP) { 354 if (no == MTIOCTOP) {
352 struct mtop op; 355 struct mtop op;
353 356
354 if (copy_from_user(&op, (char __user *) data, sizeof(op)) != 0) 357 if (copy_from_user(&op, (char __user *) data, sizeof(op)) != 0)
355 return -EFAULT; 358 return -EFAULT;
356 if (op.mt_count < 0) 359 if (op.mt_count < 0)
357 return -EINVAL; 360 return -EINVAL;
358 361
359 /* 362 /*
360 * Operations that change tape position should write final 363 * Operations that change tape position should write final
361 * tapemarks. 364 * tapemarks.
362 */ 365 */
363 switch (op.mt_op) { 366 switch (op.mt_op) {
364 case MTFSF: 367 case MTFSF:
365 case MTBSF: 368 case MTBSF:
366 case MTFSR: 369 case MTFSR:
367 case MTBSR: 370 case MTBSR:
368 case MTREW: 371 case MTREW:
369 case MTOFFL: 372 case MTOFFL:
370 case MTEOM: 373 case MTEOM:
371 case MTRETEN: 374 case MTRETEN:
372 case MTBSFM: 375 case MTBSFM:
373 case MTFSFM: 376 case MTFSFM:
374 case MTSEEK: 377 case MTSEEK:
375 #ifdef CONFIG_S390_TAPE_BLOCK 378 #ifdef CONFIG_S390_TAPE_BLOCK
376 device->blk_data.medium_changed = 1; 379 device->blk_data.medium_changed = 1;
377 #endif 380 #endif
378 if (device->required_tapemarks) 381 if (device->required_tapemarks)
379 tape_std_terminate_write(device); 382 tape_std_terminate_write(device);
380 default: 383 default:
381 ; 384 ;
382 } 385 }
383 rc = tape_mtop(device, op.mt_op, op.mt_count); 386 rc = tape_mtop(device, op.mt_op, op.mt_count);
384 387
385 if (op.mt_op == MTWEOF && rc == 0) { 388 if (op.mt_op == MTWEOF && rc == 0) {
386 if (op.mt_count > device->required_tapemarks) 389 if (op.mt_count > device->required_tapemarks)
387 device->required_tapemarks = 0; 390 device->required_tapemarks = 0;
388 else 391 else
389 device->required_tapemarks -= op.mt_count; 392 device->required_tapemarks -= op.mt_count;
390 } 393 }
391 return rc; 394 return rc;
392 } 395 }
393 if (no == MTIOCPOS) { 396 if (no == MTIOCPOS) {
394 /* MTIOCPOS: query the tape position. */ 397 /* MTIOCPOS: query the tape position. */
395 struct mtpos pos; 398 struct mtpos pos;
396 399
397 rc = tape_mtop(device, MTTELL, 1); 400 rc = tape_mtop(device, MTTELL, 1);
398 if (rc < 0) 401 if (rc < 0)
399 return rc; 402 return rc;
400 pos.mt_blkno = rc; 403 pos.mt_blkno = rc;
401 if (copy_to_user((char __user *) data, &pos, sizeof(pos)) != 0) 404 if (copy_to_user((char __user *) data, &pos, sizeof(pos)) != 0)
402 return -EFAULT; 405 return -EFAULT;
403 return 0; 406 return 0;
404 } 407 }
405 if (no == MTIOCGET) { 408 if (no == MTIOCGET) {
406 /* MTIOCGET: query the tape drive status. */ 409 /* MTIOCGET: query the tape drive status. */
407 struct mtget get; 410 struct mtget get;
408 411
409 memset(&get, 0, sizeof(get)); 412 memset(&get, 0, sizeof(get));
410 get.mt_type = MT_ISUNKNOWN; 413 get.mt_type = MT_ISUNKNOWN;
411 get.mt_resid = 0 /* device->devstat.rescnt */; 414 get.mt_resid = 0 /* device->devstat.rescnt */;
412 get.mt_dsreg = device->tape_state; 415 get.mt_dsreg = device->tape_state;
413 /* FIXME: mt_gstat, mt_erreg, mt_fileno */ 416 /* FIXME: mt_gstat, mt_erreg, mt_fileno */
414 get.mt_gstat = 0; 417 get.mt_gstat = 0;
415 get.mt_erreg = 0; 418 get.mt_erreg = 0;
416 get.mt_fileno = 0; 419 get.mt_fileno = 0;
417 get.mt_gstat = device->tape_generic_status; 420 get.mt_gstat = device->tape_generic_status;
418 421
419 if (device->medium_state == MS_LOADED) { 422 if (device->medium_state == MS_LOADED) {
420 rc = tape_mtop(device, MTTELL, 1); 423 rc = tape_mtop(device, MTTELL, 1);
421 424
422 if (rc < 0) 425 if (rc < 0)
423 return rc; 426 return rc;
424 427
425 if (rc == 0) 428 if (rc == 0)
426 get.mt_gstat |= GMT_BOT(~0); 429 get.mt_gstat |= GMT_BOT(~0);
427 430
428 get.mt_blkno = rc; 431 get.mt_blkno = rc;
429 } 432 }
430 433
431 if (copy_to_user((char __user *) data, &get, sizeof(get)) != 0) 434 if (copy_to_user((char __user *) data, &get, sizeof(get)) != 0)
432 return -EFAULT; 435 return -EFAULT;
433 436
434 return 0; 437 return 0;
435 } 438 }
436 /* Try the discipline ioctl function. */ 439 /* Try the discipline ioctl function. */
437 if (device->discipline->ioctl_fn == NULL) 440 if (device->discipline->ioctl_fn == NULL)
438 return -EINVAL; 441 return -EINVAL;
439 return device->discipline->ioctl_fn(device, no, data); 442 return device->discipline->ioctl_fn(device, no, data);
440 } 443 }
441 444
442 static long 445 static long
443 tapechar_ioctl(struct file *filp, unsigned int no, unsigned long data) 446 tapechar_ioctl(struct file *filp, unsigned int no, unsigned long data)
444 { 447 {
445 struct tape_device *device; 448 struct tape_device *device;
446 long rc; 449 long rc;
447 450
448 DBF_EVENT(6, "TCHAR:ioct\n"); 451 DBF_EVENT(6, "TCHAR:ioct\n");
449 452
450 device = (struct tape_device *) filp->private_data; 453 device = (struct tape_device *) filp->private_data;
451 mutex_lock(&device->mutex); 454 mutex_lock(&device->mutex);
452 rc = __tapechar_ioctl(device, no, data); 455 rc = __tapechar_ioctl(device, no, data);
453 mutex_unlock(&device->mutex); 456 mutex_unlock(&device->mutex);
454 return rc; 457 return rc;
455 } 458 }
456 459
457 static long 460 static long
458 tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data) 461 tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data)
459 { 462 {
460 struct tape_device *device = filp->private_data; 463 struct tape_device *device = filp->private_data;
461 int rval = -ENOIOCTLCMD; 464 int rval = -ENOIOCTLCMD;
462 465
463 if (device->discipline->ioctl_fn) { 466 if (device->discipline->ioctl_fn) {
464 mutex_lock(&device->mutex); 467 mutex_lock(&device->mutex);
465 rval = device->discipline->ioctl_fn(device, no, data); 468 rval = device->discipline->ioctl_fn(device, no, data);
466 mutex_unlock(&device->mutex); 469 mutex_unlock(&device->mutex);
467 if (rval == -EINVAL) 470 if (rval == -EINVAL)
468 rval = -ENOIOCTLCMD; 471 rval = -ENOIOCTLCMD;
469 } 472 }
470 473
471 return rval; 474 return rval;
472 } 475 }
473 476
474 /* 477 /*
475 * Initialize character device frontend. 478 * Initialize character device frontend.
476 */ 479 */
477 int 480 int
478 tapechar_init (void) 481 tapechar_init (void)
479 { 482 {
480 dev_t dev; 483 dev_t dev;
481 484
482 if (alloc_chrdev_region(&dev, 0, 256, "tape") != 0) 485 if (alloc_chrdev_region(&dev, 0, 256, "tape") != 0)
483 return -1; 486 return -1;
484 487
485 tapechar_major = MAJOR(dev); 488 tapechar_major = MAJOR(dev);
486 489
487 return 0; 490 return 0;
488 } 491 }
489 492
490 /* 493 /*
491 * cleanup 494 * cleanup
492 */ 495 */
493 void 496 void
494 tapechar_exit(void) 497 tapechar_exit(void)
495 { 498 {
496 unregister_chrdev_region(MKDEV(tapechar_major, 0), 256); 499 unregister_chrdev_region(MKDEV(tapechar_major, 0), 256);
497 } 500 }
498 501
drivers/s390/char/tape_class.c
1 /* 1 /*
2 * (C) Copyright IBM Corp. 2004 2 * (C) Copyright IBM Corp. 2004
3 * tape_class.c 3 * tape_class.c
4 * 4 *
5 * Tape class device support 5 * Tape class device support
6 * 6 *
7 * Author: Stefan Bader <shbader@de.ibm.com> 7 * Author: Stefan Bader <shbader@de.ibm.com>
8 * Based on simple class device code by Greg K-H 8 * Based on simple class device code by Greg K-H
9 */ 9 */
10
11 #define KMSG_COMPONENT "tape"
12 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13
10 #include "tape_class.h" 14 #include "tape_class.h"
11 15
12 MODULE_AUTHOR("Stefan Bader <shbader@de.ibm.com>"); 16 MODULE_AUTHOR("Stefan Bader <shbader@de.ibm.com>");
13 MODULE_DESCRIPTION( 17 MODULE_DESCRIPTION(
14 "(C) Copyright IBM Corp. 2004 All Rights Reserved.\n" 18 "(C) Copyright IBM Corp. 2004 All Rights Reserved.\n"
15 "tape_class.c" 19 "tape_class.c"
16 ); 20 );
17 MODULE_LICENSE("GPL"); 21 MODULE_LICENSE("GPL");
18 22
19 static struct class *tape_class; 23 static struct class *tape_class;
20 24
21 /* 25 /*
22 * Register a tape device and return a pointer to the cdev structure. 26 * Register a tape device and return a pointer to the cdev structure.
23 * 27 *
24 * device 28 * device
25 * The pointer to the struct device of the physical (base) device. 29 * The pointer to the struct device of the physical (base) device.
26 * drivername 30 * drivername
27 * The pointer to the drivers name for it's character devices. 31 * The pointer to the drivers name for it's character devices.
28 * dev 32 * dev
29 * The intended major/minor number. The major number may be 0 to 33 * The intended major/minor number. The major number may be 0 to
30 * get a dynamic major number. 34 * get a dynamic major number.
31 * fops 35 * fops
32 * The pointer to the drivers file operations for the tape device. 36 * The pointer to the drivers file operations for the tape device.
33 * devname 37 * devname
34 * The pointer to the name of the character device. 38 * The pointer to the name of the character device.
35 */ 39 */
36 struct tape_class_device *register_tape_dev( 40 struct tape_class_device *register_tape_dev(
37 struct device * device, 41 struct device * device,
38 dev_t dev, 42 dev_t dev,
39 const struct file_operations *fops, 43 const struct file_operations *fops,
40 char * device_name, 44 char * device_name,
41 char * mode_name) 45 char * mode_name)
42 { 46 {
43 struct tape_class_device * tcd; 47 struct tape_class_device * tcd;
44 int rc; 48 int rc;
45 char * s; 49 char * s;
46 50
47 tcd = kzalloc(sizeof(struct tape_class_device), GFP_KERNEL); 51 tcd = kzalloc(sizeof(struct tape_class_device), GFP_KERNEL);
48 if (!tcd) 52 if (!tcd)
49 return ERR_PTR(-ENOMEM); 53 return ERR_PTR(-ENOMEM);
50 54
51 strncpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN); 55 strncpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN);
52 for (s = strchr(tcd->device_name, '/'); s; s = strchr(s, '/')) 56 for (s = strchr(tcd->device_name, '/'); s; s = strchr(s, '/'))
53 *s = '!'; 57 *s = '!';
54 strncpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN); 58 strncpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN);
55 for (s = strchr(tcd->mode_name, '/'); s; s = strchr(s, '/')) 59 for (s = strchr(tcd->mode_name, '/'); s; s = strchr(s, '/'))
56 *s = '!'; 60 *s = '!';
57 61
58 tcd->char_device = cdev_alloc(); 62 tcd->char_device = cdev_alloc();
59 if (!tcd->char_device) { 63 if (!tcd->char_device) {
60 rc = -ENOMEM; 64 rc = -ENOMEM;
61 goto fail_with_tcd; 65 goto fail_with_tcd;
62 } 66 }
63 67
64 tcd->char_device->owner = fops->owner; 68 tcd->char_device->owner = fops->owner;
65 tcd->char_device->ops = fops; 69 tcd->char_device->ops = fops;
66 tcd->char_device->dev = dev; 70 tcd->char_device->dev = dev;
67 71
68 rc = cdev_add(tcd->char_device, tcd->char_device->dev, 1); 72 rc = cdev_add(tcd->char_device, tcd->char_device->dev, 1);
69 if (rc) 73 if (rc)
70 goto fail_with_cdev; 74 goto fail_with_cdev;
71 75
72 tcd->class_device = device_create(tape_class, device, 76 tcd->class_device = device_create(tape_class, device,
73 tcd->char_device->dev, NULL, 77 tcd->char_device->dev, NULL,
74 "%s", tcd->device_name); 78 "%s", tcd->device_name);
75 rc = IS_ERR(tcd->class_device) ? PTR_ERR(tcd->class_device) : 0; 79 rc = IS_ERR(tcd->class_device) ? PTR_ERR(tcd->class_device) : 0;
76 if (rc) 80 if (rc)
77 goto fail_with_cdev; 81 goto fail_with_cdev;
78 rc = sysfs_create_link( 82 rc = sysfs_create_link(
79 &device->kobj, 83 &device->kobj,
80 &tcd->class_device->kobj, 84 &tcd->class_device->kobj,
81 tcd->mode_name 85 tcd->mode_name
82 ); 86 );
83 if (rc) 87 if (rc)
84 goto fail_with_class_device; 88 goto fail_with_class_device;
85 89
86 return tcd; 90 return tcd;
87 91
88 fail_with_class_device: 92 fail_with_class_device:
89 device_destroy(tape_class, tcd->char_device->dev); 93 device_destroy(tape_class, tcd->char_device->dev);
90 94
91 fail_with_cdev: 95 fail_with_cdev:
92 cdev_del(tcd->char_device); 96 cdev_del(tcd->char_device);
93 97
94 fail_with_tcd: 98 fail_with_tcd:
95 kfree(tcd); 99 kfree(tcd);
96 100
97 return ERR_PTR(rc); 101 return ERR_PTR(rc);
98 } 102 }
99 EXPORT_SYMBOL(register_tape_dev); 103 EXPORT_SYMBOL(register_tape_dev);
100 104
101 void unregister_tape_dev(struct device *device, struct tape_class_device *tcd) 105 void unregister_tape_dev(struct device *device, struct tape_class_device *tcd)
102 { 106 {
103 if (tcd != NULL && !IS_ERR(tcd)) { 107 if (tcd != NULL && !IS_ERR(tcd)) {
104 sysfs_remove_link(&device->kobj, tcd->mode_name); 108 sysfs_remove_link(&device->kobj, tcd->mode_name);
105 device_destroy(tape_class, tcd->char_device->dev); 109 device_destroy(tape_class, tcd->char_device->dev);
106 cdev_del(tcd->char_device); 110 cdev_del(tcd->char_device);
107 kfree(tcd); 111 kfree(tcd);
108 } 112 }
109 } 113 }
110 EXPORT_SYMBOL(unregister_tape_dev); 114 EXPORT_SYMBOL(unregister_tape_dev);
111 115
112 116
113 static int __init tape_init(void) 117 static int __init tape_init(void)
114 { 118 {
115 tape_class = class_create(THIS_MODULE, "tape390"); 119 tape_class = class_create(THIS_MODULE, "tape390");
116 120
117 return 0; 121 return 0;
118 } 122 }
119 123
120 static void __exit tape_exit(void) 124 static void __exit tape_exit(void)
121 { 125 {
122 class_destroy(tape_class); 126 class_destroy(tape_class);
123 tape_class = NULL; 127 tape_class = NULL;
124 } 128 }
125 129
126 postcore_initcall(tape_init); 130 postcore_initcall(tape_init);
127 module_exit(tape_exit); 131 module_exit(tape_exit);
128 132
drivers/s390/char/tape_core.c
1 /* 1 /*
2 * drivers/s390/char/tape_core.c 2 * drivers/s390/char/tape_core.c
3 * basic function of the tape device driver 3 * basic function of the tape device driver
4 * 4 *
5 * S390 and zSeries version 5 * S390 and zSeries version
6 * Copyright IBM Corp. 2001, 2009 6 * Copyright IBM Corp. 2001, 2009
7 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Michael Holzheu <holzheu@de.ibm.com> 8 * Michael Holzheu <holzheu@de.ibm.com>
9 * Tuan Ngo-Anh <ngoanh@de.ibm.com> 9 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
10 * Martin Schwidefsky <schwidefsky@de.ibm.com> 10 * Martin Schwidefsky <schwidefsky@de.ibm.com>
11 * Stefan Bader <shbader@de.ibm.com> 11 * Stefan Bader <shbader@de.ibm.com>
12 */ 12 */
13 13
14 #define KMSG_COMPONENT "tape" 14 #define KMSG_COMPONENT "tape"
15 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
16
15 #include <linux/module.h> 17 #include <linux/module.h>
16 #include <linux/init.h> // for kernel parameters 18 #include <linux/init.h> // for kernel parameters
17 #include <linux/kmod.h> // for requesting modules 19 #include <linux/kmod.h> // for requesting modules
18 #include <linux/spinlock.h> // for locks 20 #include <linux/spinlock.h> // for locks
19 #include <linux/vmalloc.h> 21 #include <linux/vmalloc.h>
20 #include <linux/list.h> 22 #include <linux/list.h>
21 23
22 #include <asm/types.h> // for variable types 24 #include <asm/types.h> // for variable types
23 25
24 #define TAPE_DBF_AREA tape_core_dbf 26 #define TAPE_DBF_AREA tape_core_dbf
25 27
26 #include "tape.h" 28 #include "tape.h"
27 #include "tape_std.h" 29 #include "tape_std.h"
28 30
29 #define LONG_BUSY_TIMEOUT 180 /* seconds */ 31 #define LONG_BUSY_TIMEOUT 180 /* seconds */
30 32
31 static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *); 33 static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
32 static void tape_delayed_next_request(struct work_struct *); 34 static void tape_delayed_next_request(struct work_struct *);
33 static void tape_long_busy_timeout(unsigned long data); 35 static void tape_long_busy_timeout(unsigned long data);
34 36
35 /* 37 /*
36 * One list to contain all tape devices of all disciplines, so 38 * One list to contain all tape devices of all disciplines, so
37 * we can assign the devices to minor numbers of the same major 39 * we can assign the devices to minor numbers of the same major
38 * The list is protected by the rwlock 40 * The list is protected by the rwlock
39 */ 41 */
40 static LIST_HEAD(tape_device_list); 42 static LIST_HEAD(tape_device_list);
41 static DEFINE_RWLOCK(tape_device_lock); 43 static DEFINE_RWLOCK(tape_device_lock);
42 44
43 /* 45 /*
44 * Pointer to debug area. 46 * Pointer to debug area.
45 */ 47 */
46 debug_info_t *TAPE_DBF_AREA = NULL; 48 debug_info_t *TAPE_DBF_AREA = NULL;
47 EXPORT_SYMBOL(TAPE_DBF_AREA); 49 EXPORT_SYMBOL(TAPE_DBF_AREA);
48 50
49 /* 51 /*
50 * Printable strings for tape enumerations. 52 * Printable strings for tape enumerations.
51 */ 53 */
52 const char *tape_state_verbose[TS_SIZE] = 54 const char *tape_state_verbose[TS_SIZE] =
53 { 55 {
54 [TS_UNUSED] = "UNUSED", 56 [TS_UNUSED] = "UNUSED",
55 [TS_IN_USE] = "IN_USE", 57 [TS_IN_USE] = "IN_USE",
56 [TS_BLKUSE] = "BLKUSE", 58 [TS_BLKUSE] = "BLKUSE",
57 [TS_INIT] = "INIT ", 59 [TS_INIT] = "INIT ",
58 [TS_NOT_OPER] = "NOT_OP" 60 [TS_NOT_OPER] = "NOT_OP"
59 }; 61 };
60 62
61 const char *tape_op_verbose[TO_SIZE] = 63 const char *tape_op_verbose[TO_SIZE] =
62 { 64 {
63 [TO_BLOCK] = "BLK", [TO_BSB] = "BSB", 65 [TO_BLOCK] = "BLK", [TO_BSB] = "BSB",
64 [TO_BSF] = "BSF", [TO_DSE] = "DSE", 66 [TO_BSF] = "BSF", [TO_DSE] = "DSE",
65 [TO_FSB] = "FSB", [TO_FSF] = "FSF", 67 [TO_FSB] = "FSB", [TO_FSF] = "FSF",
66 [TO_LBL] = "LBL", [TO_NOP] = "NOP", 68 [TO_LBL] = "LBL", [TO_NOP] = "NOP",
67 [TO_RBA] = "RBA", [TO_RBI] = "RBI", 69 [TO_RBA] = "RBA", [TO_RBI] = "RBI",
68 [TO_RFO] = "RFO", [TO_REW] = "REW", 70 [TO_RFO] = "RFO", [TO_REW] = "REW",
69 [TO_RUN] = "RUN", [TO_WRI] = "WRI", 71 [TO_RUN] = "RUN", [TO_WRI] = "WRI",
70 [TO_WTM] = "WTM", [TO_MSEN] = "MSN", 72 [TO_WTM] = "WTM", [TO_MSEN] = "MSN",
71 [TO_LOAD] = "LOA", [TO_READ_CONFIG] = "RCF", 73 [TO_LOAD] = "LOA", [TO_READ_CONFIG] = "RCF",
72 [TO_READ_ATTMSG] = "RAT", 74 [TO_READ_ATTMSG] = "RAT",
73 [TO_DIS] = "DIS", [TO_ASSIGN] = "ASS", 75 [TO_DIS] = "DIS", [TO_ASSIGN] = "ASS",
74 [TO_UNASSIGN] = "UAS", [TO_CRYPT_ON] = "CON", 76 [TO_UNASSIGN] = "UAS", [TO_CRYPT_ON] = "CON",
75 [TO_CRYPT_OFF] = "COF", [TO_KEKL_SET] = "KLS", 77 [TO_CRYPT_OFF] = "COF", [TO_KEKL_SET] = "KLS",
76 [TO_KEKL_QUERY] = "KLQ",[TO_RDC] = "RDC", 78 [TO_KEKL_QUERY] = "KLQ",[TO_RDC] = "RDC",
77 }; 79 };
78 80
79 static int devid_to_int(struct ccw_dev_id *dev_id) 81 static int devid_to_int(struct ccw_dev_id *dev_id)
80 { 82 {
81 return dev_id->devno + (dev_id->ssid << 16); 83 return dev_id->devno + (dev_id->ssid << 16);
82 } 84 }
83 85
84 /* 86 /*
85 * Some channel attached tape specific attributes. 87 * Some channel attached tape specific attributes.
86 * 88 *
87 * FIXME: In the future the first_minor and blocksize attribute should be 89 * FIXME: In the future the first_minor and blocksize attribute should be
88 * replaced by a link to the cdev tree. 90 * replaced by a link to the cdev tree.
89 */ 91 */
90 static ssize_t 92 static ssize_t
91 tape_medium_state_show(struct device *dev, struct device_attribute *attr, char *buf) 93 tape_medium_state_show(struct device *dev, struct device_attribute *attr, char *buf)
92 { 94 {
93 struct tape_device *tdev; 95 struct tape_device *tdev;
94 96
95 tdev = dev_get_drvdata(dev); 97 tdev = dev_get_drvdata(dev);
96 return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state); 98 return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state);
97 } 99 }
98 100
99 static 101 static
100 DEVICE_ATTR(medium_state, 0444, tape_medium_state_show, NULL); 102 DEVICE_ATTR(medium_state, 0444, tape_medium_state_show, NULL);
101 103
102 static ssize_t 104 static ssize_t
103 tape_first_minor_show(struct device *dev, struct device_attribute *attr, char *buf) 105 tape_first_minor_show(struct device *dev, struct device_attribute *attr, char *buf)
104 { 106 {
105 struct tape_device *tdev; 107 struct tape_device *tdev;
106 108
107 tdev = dev_get_drvdata(dev); 109 tdev = dev_get_drvdata(dev);
108 return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor); 110 return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor);
109 } 111 }
110 112
111 static 113 static
112 DEVICE_ATTR(first_minor, 0444, tape_first_minor_show, NULL); 114 DEVICE_ATTR(first_minor, 0444, tape_first_minor_show, NULL);
113 115
114 static ssize_t 116 static ssize_t
115 tape_state_show(struct device *dev, struct device_attribute *attr, char *buf) 117 tape_state_show(struct device *dev, struct device_attribute *attr, char *buf)
116 { 118 {
117 struct tape_device *tdev; 119 struct tape_device *tdev;
118 120
119 tdev = dev_get_drvdata(dev); 121 tdev = dev_get_drvdata(dev);
120 return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ? 122 return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ?
121 "OFFLINE" : tape_state_verbose[tdev->tape_state]); 123 "OFFLINE" : tape_state_verbose[tdev->tape_state]);
122 } 124 }
123 125
124 static 126 static
125 DEVICE_ATTR(state, 0444, tape_state_show, NULL); 127 DEVICE_ATTR(state, 0444, tape_state_show, NULL);
126 128
127 static ssize_t 129 static ssize_t
128 tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf) 130 tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf)
129 { 131 {
130 struct tape_device *tdev; 132 struct tape_device *tdev;
131 ssize_t rc; 133 ssize_t rc;
132 134
133 tdev = dev_get_drvdata(dev); 135 tdev = dev_get_drvdata(dev);
134 if (tdev->first_minor < 0) 136 if (tdev->first_minor < 0)
135 return scnprintf(buf, PAGE_SIZE, "N/A\n"); 137 return scnprintf(buf, PAGE_SIZE, "N/A\n");
136 138
137 spin_lock_irq(get_ccwdev_lock(tdev->cdev)); 139 spin_lock_irq(get_ccwdev_lock(tdev->cdev));
138 if (list_empty(&tdev->req_queue)) 140 if (list_empty(&tdev->req_queue))
139 rc = scnprintf(buf, PAGE_SIZE, "---\n"); 141 rc = scnprintf(buf, PAGE_SIZE, "---\n");
140 else { 142 else {
141 struct tape_request *req; 143 struct tape_request *req;
142 144
143 req = list_entry(tdev->req_queue.next, struct tape_request, 145 req = list_entry(tdev->req_queue.next, struct tape_request,
144 list); 146 list);
145 rc = scnprintf(buf,PAGE_SIZE, "%s\n", tape_op_verbose[req->op]); 147 rc = scnprintf(buf,PAGE_SIZE, "%s\n", tape_op_verbose[req->op]);
146 } 148 }
147 spin_unlock_irq(get_ccwdev_lock(tdev->cdev)); 149 spin_unlock_irq(get_ccwdev_lock(tdev->cdev));
148 return rc; 150 return rc;
149 } 151 }
150 152
151 static 153 static
152 DEVICE_ATTR(operation, 0444, tape_operation_show, NULL); 154 DEVICE_ATTR(operation, 0444, tape_operation_show, NULL);
153 155
154 static ssize_t 156 static ssize_t
155 tape_blocksize_show(struct device *dev, struct device_attribute *attr, char *buf) 157 tape_blocksize_show(struct device *dev, struct device_attribute *attr, char *buf)
156 { 158 {
157 struct tape_device *tdev; 159 struct tape_device *tdev;
158 160
159 tdev = dev_get_drvdata(dev); 161 tdev = dev_get_drvdata(dev);
160 162
161 return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size); 163 return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size);
162 } 164 }
163 165
164 static 166 static
165 DEVICE_ATTR(blocksize, 0444, tape_blocksize_show, NULL); 167 DEVICE_ATTR(blocksize, 0444, tape_blocksize_show, NULL);
166 168
167 static struct attribute *tape_attrs[] = { 169 static struct attribute *tape_attrs[] = {
168 &dev_attr_medium_state.attr, 170 &dev_attr_medium_state.attr,
169 &dev_attr_first_minor.attr, 171 &dev_attr_first_minor.attr,
170 &dev_attr_state.attr, 172 &dev_attr_state.attr,
171 &dev_attr_operation.attr, 173 &dev_attr_operation.attr,
172 &dev_attr_blocksize.attr, 174 &dev_attr_blocksize.attr,
173 NULL 175 NULL
174 }; 176 };
175 177
176 static struct attribute_group tape_attr_group = { 178 static struct attribute_group tape_attr_group = {
177 .attrs = tape_attrs, 179 .attrs = tape_attrs,
178 }; 180 };
179 181
180 /* 182 /*
181 * Tape state functions 183 * Tape state functions
182 */ 184 */
183 void 185 void
184 tape_state_set(struct tape_device *device, enum tape_state newstate) 186 tape_state_set(struct tape_device *device, enum tape_state newstate)
185 { 187 {
186 const char *str; 188 const char *str;
187 189
188 if (device->tape_state == TS_NOT_OPER) { 190 if (device->tape_state == TS_NOT_OPER) {
189 DBF_EVENT(3, "ts_set err: not oper\n"); 191 DBF_EVENT(3, "ts_set err: not oper\n");
190 return; 192 return;
191 } 193 }
192 DBF_EVENT(4, "ts. dev: %x\n", device->first_minor); 194 DBF_EVENT(4, "ts. dev: %x\n", device->first_minor);
193 DBF_EVENT(4, "old ts:\t\n"); 195 DBF_EVENT(4, "old ts:\t\n");
194 if (device->tape_state < TS_SIZE && device->tape_state >=0 ) 196 if (device->tape_state < TS_SIZE && device->tape_state >=0 )
195 str = tape_state_verbose[device->tape_state]; 197 str = tape_state_verbose[device->tape_state];
196 else 198 else
197 str = "UNKNOWN TS"; 199 str = "UNKNOWN TS";
198 DBF_EVENT(4, "%s\n", str); 200 DBF_EVENT(4, "%s\n", str);
199 DBF_EVENT(4, "new ts:\t\n"); 201 DBF_EVENT(4, "new ts:\t\n");
200 if (newstate < TS_SIZE && newstate >= 0) 202 if (newstate < TS_SIZE && newstate >= 0)
201 str = tape_state_verbose[newstate]; 203 str = tape_state_verbose[newstate];
202 else 204 else
203 str = "UNKNOWN TS"; 205 str = "UNKNOWN TS";
204 DBF_EVENT(4, "%s\n", str); 206 DBF_EVENT(4, "%s\n", str);
205 device->tape_state = newstate; 207 device->tape_state = newstate;
206 wake_up(&device->state_change_wq); 208 wake_up(&device->state_change_wq);
207 } 209 }
208 210
209 void 211 void
210 tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate) 212 tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
211 { 213 {
212 if (device->medium_state == newstate) 214 if (device->medium_state == newstate)
213 return; 215 return;
214 switch(newstate){ 216 switch(newstate){
215 case MS_UNLOADED: 217 case MS_UNLOADED:
216 device->tape_generic_status |= GMT_DR_OPEN(~0); 218 device->tape_generic_status |= GMT_DR_OPEN(~0);
217 if (device->medium_state == MS_LOADED) 219 if (device->medium_state == MS_LOADED)
218 pr_info("%s: The tape cartridge has been successfully " 220 pr_info("%s: The tape cartridge has been successfully "
219 "unloaded\n", dev_name(&device->cdev->dev)); 221 "unloaded\n", dev_name(&device->cdev->dev));
220 break; 222 break;
221 case MS_LOADED: 223 case MS_LOADED:
222 device->tape_generic_status &= ~GMT_DR_OPEN(~0); 224 device->tape_generic_status &= ~GMT_DR_OPEN(~0);
223 if (device->medium_state == MS_UNLOADED) 225 if (device->medium_state == MS_UNLOADED)
224 pr_info("%s: A tape cartridge has been mounted\n", 226 pr_info("%s: A tape cartridge has been mounted\n",
225 dev_name(&device->cdev->dev)); 227 dev_name(&device->cdev->dev));
226 break; 228 break;
227 default: 229 default:
228 // print nothing 230 // print nothing
229 break; 231 break;
230 } 232 }
231 device->medium_state = newstate; 233 device->medium_state = newstate;
232 wake_up(&device->state_change_wq); 234 wake_up(&device->state_change_wq);
233 } 235 }
234 236
/*
 * Stop running ccw. Has to be called with the device lock held.
 *
 * Returns 0 if the request was terminated or its cancellation was
 * deferred to the delayed-work handler, otherwise the last error code
 * from ccw_device_clear after five failed attempts.
 */
static int
__tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
	int retries;
	int rc;

	/* Check if interrupt has already been processed */
	if (request->callback == NULL)
		return 0;

	rc = 0;
	/* Try at most five times to clear the running channel program. */
	for (retries = 0; retries < 5; retries++) {
		rc = ccw_device_clear(device->cdev, (long) request);

		switch (rc) {
		case 0:
			/* Clear succeeded; the request is finished. */
			request->status = TAPE_REQUEST_DONE;
			return 0;
		case -EBUSY:
			/*
			 * The common I/O layer is busy right now: mark the
			 * request for cancellation and let the delayed work
			 * (tape_delayed_next_request) retry the cancel.
			 */
			request->status = TAPE_REQUEST_CANCEL;
			schedule_delayed_work(&device->tape_dnr, 0);
			return 0;
		case -ENODEV:
			DBF_EXCEPTION(2, "device gone, retry\n");
			break;
		case -EIO:
			DBF_EXCEPTION(2, "I/O error, retry\n");
			break;
		default:
			/* ccw_device_clear must not return anything else. */
			BUG();
		}
	}

	/* All retries failed; hand the last error back to the caller. */
	return rc;
}
273 275
274 /* 276 /*
275 * Add device into the sorted list, giving it the first 277 * Add device into the sorted list, giving it the first
276 * available minor number. 278 * available minor number.
277 */ 279 */
278 static int 280 static int
279 tape_assign_minor(struct tape_device *device) 281 tape_assign_minor(struct tape_device *device)
280 { 282 {
281 struct tape_device *tmp; 283 struct tape_device *tmp;
282 int minor; 284 int minor;
283 285
284 minor = 0; 286 minor = 0;
285 write_lock(&tape_device_lock); 287 write_lock(&tape_device_lock);
286 list_for_each_entry(tmp, &tape_device_list, node) { 288 list_for_each_entry(tmp, &tape_device_list, node) {
287 if (minor < tmp->first_minor) 289 if (minor < tmp->first_minor)
288 break; 290 break;
289 minor += TAPE_MINORS_PER_DEV; 291 minor += TAPE_MINORS_PER_DEV;
290 } 292 }
291 if (minor >= 256) { 293 if (minor >= 256) {
292 write_unlock(&tape_device_lock); 294 write_unlock(&tape_device_lock);
293 return -ENODEV; 295 return -ENODEV;
294 } 296 }
295 device->first_minor = minor; 297 device->first_minor = minor;
296 list_add_tail(&device->node, &tmp->node); 298 list_add_tail(&device->node, &tmp->node);
297 write_unlock(&tape_device_lock); 299 write_unlock(&tape_device_lock);
298 return 0; 300 return 0;
299 } 301 }
300 302
/*
 * Remove device from the sorted device list and release its minor
 * number.  Counterpart of tape_assign_minor.
 */
static void
tape_remove_minor(struct tape_device *device)
{
	write_lock(&tape_device_lock);
	list_del_init(&device->node);
	/* -1 marks the device as having no minor number assigned. */
	device->first_minor = -1;
	write_unlock(&tape_device_lock);
}
310 312
/*
 * Set a device online.
 *
 * This function is called by the common I/O layer to move a device from the
 * detected but offline into the online state.
 * If we return an error (RC < 0) the device remains in the offline state. This
 * can happen if the device is assigned somewhere else, for example.
 */
int
tape_generic_online(struct tape_device *device,
		   struct tape_discipline *discipline)
{
	int rc;

	DBF_LH(6, "tape_enable_device(%p, %p)\n", device, discipline);

	/* Only a freshly probed device (TS_INIT) may be set online. */
	if (device->tape_state != TS_INIT) {
		DBF_LH(3, "Tapestate not INIT (%d)\n", device->tape_state);
		return -EINVAL;
	}

	/* Timer used to recover from long-busy conditions. */
	init_timer(&device->lb_timeout);
	device->lb_timeout.function = tape_long_busy_timeout;

	/* Let the discipline have a go at the device. */
	device->discipline = discipline;
	if (!try_module_get(discipline->owner)) {
		return -EINVAL;
	}

	/* Set up in order: discipline, minor number, char and block interface. */
	rc = discipline->setup_device(device);
	if (rc)
		goto out;
	rc = tape_assign_minor(device);
	if (rc)
		goto out_discipline;

	rc = tapechar_setup_device(device);
	if (rc)
		goto out_minor;
	rc = tapeblock_setup_device(device);
	if (rc)
		goto out_char;

	tape_state_set(device, TS_UNUSED);

	DBF_LH(3, "(%08x): Drive set online\n", device->cdev_id);

	return 0;

	/* Error unwind: undo the completed setup steps in reverse order. */
out_char:
	tapechar_cleanup_device(device);
out_minor:
	tape_remove_minor(device);
out_discipline:
	device->discipline->cleanup_device(device);
	device->discipline = NULL;
out:
	module_put(discipline->owner);
	return rc;
}
372 374
/*
 * Tear down everything set up by tape_generic_online: block and
 * character interfaces, the discipline (and its module reference),
 * the minor number, and finally reset the medium state to unknown.
 */
static void
tape_cleanup_device(struct tape_device *device)
{
	tapeblock_cleanup_device(device);
	tapechar_cleanup_device(device);
	device->discipline->cleanup_device(device);
	module_put(device->discipline->owner);
	tape_remove_minor(device);
	tape_med_state_set(device, MS_UNKNOWN);
}
383 385
/*
 * Suspend device.
 *
 * Called by the common I/O layer if the drive should be suspended on user
 * request. We refuse to suspend if the device is loaded or in use for the
 * following reason:
 * While the Linux guest is suspended, it might be logged off which causes
 * devices to be detached. Tape devices are automatically rewound and unloaded
 * during DETACH processing (unless the tape device was attached with the
 * NOASSIGN or MULTIUSER option). After rewind/unload, there is no way to
 * resume the original state of the tape device, since we would need to
 * manually re-load the cartridge which was active at suspend time.
 *
 * Returns 0 if the device may be suspended, -ENODEV if no tape device is
 * attached to the ccw device, -EBUSY if a cartridge is loaded or the
 * device is in use.
 */
int tape_generic_pm_suspend(struct ccw_device *cdev)
{
	struct tape_device *device;

	device = dev_get_drvdata(&cdev->dev);
	if (!device) {
		return -ENODEV;
	}

	DBF_LH(3, "(%08x): tape_generic_pm_suspend(%p)\n",
		device->cdev_id, device);

	/* A loaded cartridge cannot be restored after resume - refuse. */
	if (device->medium_state != MS_UNLOADED) {
		pr_err("A cartridge is loaded in tape device %s, "
		       "refusing to suspend\n", dev_name(&cdev->dev));
		return -EBUSY;
	}

	/* The tape state is checked under the ccw device lock. */
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
	case TS_NOT_OPER:
	case TS_UNUSED:
		/* Idle states: suspending is safe. */
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	default:
		/* Device is open or running I/O - refuse. */
		pr_err("Tape device %s is busy, refusing to "
			"suspend\n", dev_name(&cdev->dev));
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return -EBUSY;
	}

	DBF_LH(3, "(%08x): Drive suspended.\n", device->cdev_id);
	return 0;
}
432 434
/*
 * Set device offline.
 *
 * Called by the common I/O layer if the drive should set offline on user
 * request. We may prevent this by returning an error.
 * Manual offline is only allowed while the drive is not in use.
 *
 * Returns 0 on success, -ENODEV if no tape device is attached to the ccw
 * device, -EBUSY if the drive is currently in use.
 */
int
tape_generic_offline(struct ccw_device *cdev)
{
	struct tape_device *device;

	device = dev_get_drvdata(&cdev->dev);
	if (!device) {
		return -ENODEV;
	}

	DBF_LH(3, "(%08x): tape_generic_offline(%p)\n",
		device->cdev_id, device);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
	case TS_NOT_OPER:
		/* Never fully set up - nothing to tear down. */
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	case TS_UNUSED:
		/*
		 * Online but idle: mark it TS_INIT under the lock, then do
		 * the (sleeping) cleanup outside the lock.
		 */
		tape_state_set(device, TS_INIT);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
		break;
	default:
		/* Drive is open or has I/O outstanding - refuse. */
		DBF_EVENT(3, "(%08x): Set offline failed "
			"- drive in use.\n",
			device->cdev_id);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return -EBUSY;
	}

	DBF_LH(3, "(%08x): Drive set offline.\n", device->cdev_id);
	return 0;
}
475 477
476 /* 478 /*
477 * Allocate memory for a new device structure. 479 * Allocate memory for a new device structure.
478 */ 480 */
479 static struct tape_device * 481 static struct tape_device *
480 tape_alloc_device(void) 482 tape_alloc_device(void)
481 { 483 {
482 struct tape_device *device; 484 struct tape_device *device;
483 485
484 device = kzalloc(sizeof(struct tape_device), GFP_KERNEL); 486 device = kzalloc(sizeof(struct tape_device), GFP_KERNEL);
485 if (device == NULL) { 487 if (device == NULL) {
486 DBF_EXCEPTION(2, "ti:no mem\n"); 488 DBF_EXCEPTION(2, "ti:no mem\n");
487 return ERR_PTR(-ENOMEM); 489 return ERR_PTR(-ENOMEM);
488 } 490 }
489 device->modeset_byte = kmalloc(1, GFP_KERNEL | GFP_DMA); 491 device->modeset_byte = kmalloc(1, GFP_KERNEL | GFP_DMA);
490 if (device->modeset_byte == NULL) { 492 if (device->modeset_byte == NULL) {
491 DBF_EXCEPTION(2, "ti:no mem\n"); 493 DBF_EXCEPTION(2, "ti:no mem\n");
492 kfree(device); 494 kfree(device);
493 return ERR_PTR(-ENOMEM); 495 return ERR_PTR(-ENOMEM);
494 } 496 }
495 mutex_init(&device->mutex); 497 mutex_init(&device->mutex);
496 INIT_LIST_HEAD(&device->req_queue); 498 INIT_LIST_HEAD(&device->req_queue);
497 INIT_LIST_HEAD(&device->node); 499 INIT_LIST_HEAD(&device->node);
498 init_waitqueue_head(&device->state_change_wq); 500 init_waitqueue_head(&device->state_change_wq);
499 init_waitqueue_head(&device->wait_queue); 501 init_waitqueue_head(&device->wait_queue);
500 device->tape_state = TS_INIT; 502 device->tape_state = TS_INIT;
501 device->medium_state = MS_UNKNOWN; 503 device->medium_state = MS_UNKNOWN;
502 *device->modeset_byte = 0; 504 *device->modeset_byte = 0;
503 device->first_minor = -1; 505 device->first_minor = -1;
504 atomic_set(&device->ref_count, 1); 506 atomic_set(&device->ref_count, 1);
505 INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request); 507 INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request);
506 508
507 return device; 509 return device;
508 } 510 }
509 511
510 /* 512 /*
511 * Get a reference to an existing device structure. This will automatically 513 * Get a reference to an existing device structure. This will automatically
512 * increment the reference count. 514 * increment the reference count.
513 */ 515 */
514 struct tape_device * 516 struct tape_device *
515 tape_get_device(struct tape_device *device) 517 tape_get_device(struct tape_device *device)
516 { 518 {
517 int count; 519 int count;
518 520
519 count = atomic_inc_return(&device->ref_count); 521 count = atomic_inc_return(&device->ref_count);
520 DBF_EVENT(4, "tape_get_device(%p) = %i\n", device, count); 522 DBF_EVENT(4, "tape_get_device(%p) = %i\n", device, count);
521 return device; 523 return device;
522 } 524 }
523 525
524 /* 526 /*
525 * Decrease the reference counter of a devices structure. If the 527 * Decrease the reference counter of a devices structure. If the
526 * reference counter reaches zero free the device structure. 528 * reference counter reaches zero free the device structure.
527 * The function returns a NULL pointer to be used by the caller 529 * The function returns a NULL pointer to be used by the caller
528 * for clearing reference pointers. 530 * for clearing reference pointers.
529 */ 531 */
530 void 532 void
531 tape_put_device(struct tape_device *device) 533 tape_put_device(struct tape_device *device)
532 { 534 {
533 int count; 535 int count;
534 536
535 count = atomic_dec_return(&device->ref_count); 537 count = atomic_dec_return(&device->ref_count);
536 DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, count); 538 DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, count);
537 BUG_ON(count < 0); 539 BUG_ON(count < 0);
538 if (count == 0) { 540 if (count == 0) {
539 kfree(device->modeset_byte); 541 kfree(device->modeset_byte);
540 kfree(device); 542 kfree(device);
541 } 543 }
542 } 544 }
543 545
544 /* 546 /*
545 * Find tape device by a device index. 547 * Find tape device by a device index.
546 */ 548 */
547 struct tape_device * 549 struct tape_device *
548 tape_find_device(int devindex) 550 tape_find_device(int devindex)
549 { 551 {
550 struct tape_device *device, *tmp; 552 struct tape_device *device, *tmp;
551 553
552 device = ERR_PTR(-ENODEV); 554 device = ERR_PTR(-ENODEV);
553 read_lock(&tape_device_lock); 555 read_lock(&tape_device_lock);
554 list_for_each_entry(tmp, &tape_device_list, node) { 556 list_for_each_entry(tmp, &tape_device_list, node) {
555 if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) { 557 if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) {
556 device = tape_get_device(tmp); 558 device = tape_get_device(tmp);
557 break; 559 break;
558 } 560 }
559 } 561 }
560 read_unlock(&tape_device_lock); 562 read_unlock(&tape_device_lock);
561 return device; 563 return device;
562 } 564 }
563 565
/*
 * Driverfs tape probe function.
 *
 * Allocates the tape device structure, creates the sysfs attribute
 * group and wires the interrupt handler to the ccw device.  Returns 0
 * on success, -ENODEV on allocation failure or the sysfs error code.
 */
int
tape_generic_probe(struct ccw_device *cdev)
{
	struct tape_device *device;
	int ret;
	struct ccw_dev_id dev_id;

	device = tape_alloc_device();
	if (IS_ERR(device))
		return -ENODEV;
	/* Enable path grouping and multipath for the subchannel. */
	ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
				     CCWDEV_DO_MULTIPATH);
	ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
	if (ret) {
		/* Drops the initial reference and frees the structure. */
		tape_put_device(device);
		return ret;
	}
	dev_set_drvdata(&cdev->dev, device);
	cdev->handler = __tape_do_irq;
	device->cdev = cdev;
	ccw_device_get_id(cdev, &dev_id);
	device->cdev_id = devid_to_int(&dev_id);
	return ret;
}
591 593
592 static void 594 static void
593 __tape_discard_requests(struct tape_device *device) 595 __tape_discard_requests(struct tape_device *device)
594 { 596 {
595 struct tape_request * request; 597 struct tape_request * request;
596 struct list_head * l, *n; 598 struct list_head * l, *n;
597 599
598 list_for_each_safe(l, n, &device->req_queue) { 600 list_for_each_safe(l, n, &device->req_queue) {
599 request = list_entry(l, struct tape_request, list); 601 request = list_entry(l, struct tape_request, list);
600 if (request->status == TAPE_REQUEST_IN_IO) 602 if (request->status == TAPE_REQUEST_IN_IO)
601 request->status = TAPE_REQUEST_DONE; 603 request->status = TAPE_REQUEST_DONE;
602 list_del(&request->list); 604 list_del(&request->list);
603 605
604 /* Decrease ref_count for removed request. */ 606 /* Decrease ref_count for removed request. */
605 request->device = NULL; 607 request->device = NULL;
606 tape_put_device(device); 608 tape_put_device(device);
607 request->rc = -EIO; 609 request->rc = -EIO;
608 if (request->callback != NULL) 610 if (request->callback != NULL)
609 request->callback(request, request->callback_data); 611 request->callback(request, request->callback_data);
610 } 612 }
611 } 613 }
612 614
/*
 * Driverfs tape remove function.
 *
 * This function is called whenever the common I/O layer detects the device
 * gone. This can happen at any time and we cannot refuse.
 */
void
tape_generic_remove(struct ccw_device *cdev)
{
	struct tape_device *	device;

	device = dev_get_drvdata(&cdev->dev);
	if (!device) {
		return;
	}
	DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
		tape_state_set(device, TS_NOT_OPER);
		/* fallthrough */
	case TS_NOT_OPER:
		/*
		 * Nothing to do.
		 */
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	case TS_UNUSED:
		/*
		 * Need only to release the device.
		 */
		tape_state_set(device, TS_NOT_OPER);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
		break;
	default:
		/*
		 * There may be requests on the queue. We will not get
		 * an interrupt for a request that was running. So we
		 * just post them all as I/O errors.
		 */
		DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
			device->cdev_id);
		pr_warning("%s: A tape unit was detached while in "
			   "use\n", dev_name(&device->cdev->dev));
		tape_state_set(device, TS_NOT_OPER);
		__tape_discard_requests(device);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
	}

	/* Finally detach the tape device from the ccw device. */
	device = dev_get_drvdata(&cdev->dev);
	if (device) {
		sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
		dev_set_drvdata(&cdev->dev, NULL);
		tape_put_device(device);
	}
}
671 673
672 /* 674 /*
673 * Allocate a new tape ccw request 675 * Allocate a new tape ccw request
674 */ 676 */
675 struct tape_request * 677 struct tape_request *
676 tape_alloc_request(int cplength, int datasize) 678 tape_alloc_request(int cplength, int datasize)
677 { 679 {
678 struct tape_request *request; 680 struct tape_request *request;
679 681
680 BUG_ON(datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE); 682 BUG_ON(datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
681 683
682 DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize); 684 DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize);
683 685
684 request = kzalloc(sizeof(struct tape_request), GFP_KERNEL); 686 request = kzalloc(sizeof(struct tape_request), GFP_KERNEL);
685 if (request == NULL) { 687 if (request == NULL) {
686 DBF_EXCEPTION(1, "cqra nomem\n"); 688 DBF_EXCEPTION(1, "cqra nomem\n");
687 return ERR_PTR(-ENOMEM); 689 return ERR_PTR(-ENOMEM);
688 } 690 }
689 /* allocate channel program */ 691 /* allocate channel program */
690 if (cplength > 0) { 692 if (cplength > 0) {
691 request->cpaddr = kcalloc(cplength, sizeof(struct ccw1), 693 request->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
692 GFP_ATOMIC | GFP_DMA); 694 GFP_ATOMIC | GFP_DMA);
693 if (request->cpaddr == NULL) { 695 if (request->cpaddr == NULL) {
694 DBF_EXCEPTION(1, "cqra nomem\n"); 696 DBF_EXCEPTION(1, "cqra nomem\n");
695 kfree(request); 697 kfree(request);
696 return ERR_PTR(-ENOMEM); 698 return ERR_PTR(-ENOMEM);
697 } 699 }
698 } 700 }
699 /* alloc small kernel buffer */ 701 /* alloc small kernel buffer */
700 if (datasize > 0) { 702 if (datasize > 0) {
701 request->cpdata = kzalloc(datasize, GFP_KERNEL | GFP_DMA); 703 request->cpdata = kzalloc(datasize, GFP_KERNEL | GFP_DMA);
702 if (request->cpdata == NULL) { 704 if (request->cpdata == NULL) {
703 DBF_EXCEPTION(1, "cqra nomem\n"); 705 DBF_EXCEPTION(1, "cqra nomem\n");
704 kfree(request->cpaddr); 706 kfree(request->cpaddr);
705 kfree(request); 707 kfree(request);
706 return ERR_PTR(-ENOMEM); 708 return ERR_PTR(-ENOMEM);
707 } 709 }
708 } 710 }
709 DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr, 711 DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr,
710 request->cpdata); 712 request->cpdata);
711 713
712 return request; 714 return request;
713 } 715 }
714 716
/*
 * Free tape ccw request: drops the device reference the request may
 * still hold, then releases the data buffer, the channel program and
 * the request structure itself.
 */
void
tape_free_request (struct tape_request * request)
{
	DBF_LH(6, "Free request %p\n", request);

	if (request->device)
		tape_put_device(request->device);
	kfree(request->cpdata);
	kfree(request->cpaddr);
	kfree(request);
}
729 731
/*
 * Start the channel program of a request on the device.  Called with
 * the ccw device lock held (see tape_delayed_next_request).
 *
 * Returns 0 if the request was started or queued for a later retry,
 * otherwise the error from ccw_device_start.
 */
static int
__tape_start_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

#ifdef CONFIG_S390_TAPE_BLOCK
	/* Block requests get their locate CCW checked by the discipline. */
	if (request->op == TO_BLOCK)
		device->discipline->check_locate(device, request);
#endif
	rc = ccw_device_start(
		device->cdev,
		request->cpaddr,
		(unsigned long) request,	/* intparm identifies the request */
		0x00,
		request->options
	);
	if (rc == 0) {
		request->status = TAPE_REQUEST_IN_IO;
	} else if (rc == -EBUSY) {
		/* The common I/O subsystem is currently busy. Retry later. */
		request->status = TAPE_REQUEST_QUEUED;
		schedule_delayed_work(&device->tape_dnr, 0);
		rc = 0;
	} else {
		/* Start failed. Remove request and indicate failure. */
		DBF_EVENT(1, "tape: start request failed with RC = %i\n", rc);
	}
	return rc;
}
759 761
/*
 * Walk the request queue and start requests until one is started
 * successfully.  Requests that fail to start are completed with the
 * error code.  Called with the ccw device lock held.
 */
static void
__tape_start_next_request(struct tape_device *device)
{
	struct list_head *l, *n;
	struct tape_request *request;
	int rc;

	DBF_LH(6, "__tape_start_next_request(%p)\n", device);
	/*
	 * Try to start each request on request queue until one is
	 * started successful.
	 */
	list_for_each_safe(l, n, &device->req_queue) {
		request = list_entry(l, struct tape_request, list);

		/*
		 * Avoid race condition if bottom-half was triggered more than
		 * once.
		 */
		if (request->status == TAPE_REQUEST_IN_IO)
			return;
		/*
		 * Request has already been stopped. We have to wait until
		 * the request is removed from the queue in the interrupt
		 * handling.
		 */
		if (request->status == TAPE_REQUEST_DONE)
			return;

		/*
		 * We wanted to cancel the request but the common I/O layer
		 * was busy at that time. This can only happen if this
		 * function is called by delayed_next_request.
		 * Otherwise we start the next request on the queue.
		 */
		if (request->status == TAPE_REQUEST_CANCEL) {
			rc = __tape_cancel_io(device, request);
		} else {
			rc = __tape_start_io(device, request);
		}
		if (rc == 0)
			return;

		/* Start failed: complete the request with the error code. */

		/* Set ending status. */
		request->rc = rc;
		request->status = TAPE_REQUEST_DONE;

		/* Remove from request queue. */
		list_del(&request->list);

		/* Do callback. */
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}
}
815 817
816 static void 818 static void
817 tape_delayed_next_request(struct work_struct *work) 819 tape_delayed_next_request(struct work_struct *work)
818 { 820 {
819 struct tape_device *device = 821 struct tape_device *device =
820 container_of(work, struct tape_device, tape_dnr.work); 822 container_of(work, struct tape_device, tape_dnr.work);
821 823
822 DBF_LH(6, "tape_delayed_next_request(%p)\n", device); 824 DBF_LH(6, "tape_delayed_next_request(%p)\n", device);
823 spin_lock_irq(get_ccwdev_lock(device->cdev)); 825 spin_lock_irq(get_ccwdev_lock(device->cdev));
824 __tape_start_next_request(device); 826 __tape_start_next_request(device);
825 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 827 spin_unlock_irq(get_ccwdev_lock(device->cdev));
826 } 828 }
827 829
/*
 * Timer callback armed by __tape_do_irq for TAPE_IO_LONG_BUSY: the device
 * did not report ready within LONG_BUSY_TIMEOUT, so restart the queue.
 * 'data' is the tape_device pointer, referenced when the timer was set.
 */
static void tape_long_busy_timeout(unsigned long data)
{
	struct tape_request *request;
	struct tape_device *device;

	device = (struct tape_device *) data;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* The queue head must be the request that went long-busy. */
	request = list_entry(device->req_queue.next, struct tape_request, list);
	BUG_ON(request->status != TAPE_REQUEST_LONG_BUSY);
	DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id);
	__tape_start_next_request(device);
	/* Drop the device reference taken when the timer was armed. */
	device->lb_timeout.data = 0UL;
	tape_put_device(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
}
843 845
844 static void 846 static void
845 __tape_end_request( 847 __tape_end_request(
846 struct tape_device * device, 848 struct tape_device * device,
847 struct tape_request * request, 849 struct tape_request * request,
848 int rc) 850 int rc)
849 { 851 {
850 DBF_LH(6, "__tape_end_request(%p, %p, %i)\n", device, request, rc); 852 DBF_LH(6, "__tape_end_request(%p, %p, %i)\n", device, request, rc);
851 if (request) { 853 if (request) {
852 request->rc = rc; 854 request->rc = rc;
853 request->status = TAPE_REQUEST_DONE; 855 request->status = TAPE_REQUEST_DONE;
854 856
855 /* Remove from request queue. */ 857 /* Remove from request queue. */
856 list_del(&request->list); 858 list_del(&request->list);
857 859
858 /* Do callback. */ 860 /* Do callback. */
859 if (request->callback != NULL) 861 if (request->callback != NULL)
860 request->callback(request, request->callback_data); 862 request->callback(request, request->callback_data);
861 } 863 }
862 864
863 /* Start next request. */ 865 /* Start next request. */
864 if (!list_empty(&device->req_queue)) 866 if (!list_empty(&device->req_queue))
865 __tape_start_next_request(device); 867 __tape_start_next_request(device);
866 } 868 }
867 869
868 /* 870 /*
869 * Write sense data to dbf 871 * Write sense data to dbf
870 */ 872 */
871 void 873 void
872 tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request, 874 tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
873 struct irb *irb) 875 struct irb *irb)
874 { 876 {
875 unsigned int *sptr; 877 unsigned int *sptr;
876 const char* op; 878 const char* op;
877 879
878 if (request != NULL) 880 if (request != NULL)
879 op = tape_op_verbose[request->op]; 881 op = tape_op_verbose[request->op];
880 else 882 else
881 op = "---"; 883 op = "---";
882 DBF_EVENT(3, "DSTAT : %02x CSTAT: %02x\n", 884 DBF_EVENT(3, "DSTAT : %02x CSTAT: %02x\n",
883 irb->scsw.cmd.dstat, irb->scsw.cmd.cstat); 885 irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
884 DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op); 886 DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op);
885 sptr = (unsigned int *) irb->ecw; 887 sptr = (unsigned int *) irb->ecw;
886 DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]); 888 DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
887 DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]); 889 DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]);
888 DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]); 890 DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]);
889 DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]); 891 DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]);
890 } 892 }
891 893
/*
 * I/O helper function. Adds the request to the request queue
 * and starts it if the tape is idle. Has to be called with
 * the device lock held.
 *
 * Returns 0 on success, -ENODEV if the device state does not allow the
 * request, or the error from __tape_start_io.
 */
static int
__tape_start_request(struct tape_device *device, struct tape_request *request)
{
	int rc;

	/* Check whether the request is allowed in the current state. */
	switch (request->op) {
	case TO_MSEN:
	case TO_ASSIGN:
	case TO_UNASSIGN:
	case TO_READ_ATTMSG:
	case TO_RDC:
		/* These requests may also run while the device is still
		 * initializing or not yet opened. */
		if (device->tape_state == TS_INIT)
			break;
		if (device->tape_state == TS_UNUSED)
			break;
		/* fallthrough - otherwise apply the default state check */
	default:
		if (device->tape_state == TS_BLKUSE)
			break;
		if (device->tape_state != TS_IN_USE)
			return -ENODEV;
	}

	/* Increase use count of device for the added request. */
	request->device = tape_get_device(device);

	if (list_empty(&device->req_queue)) {
		/* No other requests are on the queue. Start this one. */
		rc = __tape_start_io(device, request);
		if (rc)
			return rc;

		DBF_LH(5, "Request %p added for execution.\n", request);
		list_add(&request->list, &device->req_queue);
	} else {
		/* Busy: queue the request behind the running one. */
		DBF_LH(5, "Request %p add to queue.\n", request);
		request->status = TAPE_REQUEST_QUEUED;
		list_add_tail(&request->list, &device->req_queue);
	}
	return 0;
}
937 939
938 /* 940 /*
939 * Add the request to the request queue, try to start it if the 941 * Add the request to the request queue, try to start it if the
940 * tape is idle. Return without waiting for end of i/o. 942 * tape is idle. Return without waiting for end of i/o.
941 */ 943 */
942 int 944 int
943 tape_do_io_async(struct tape_device *device, struct tape_request *request) 945 tape_do_io_async(struct tape_device *device, struct tape_request *request)
944 { 946 {
945 int rc; 947 int rc;
946 948
947 DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request); 949 DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request);
948 950
949 spin_lock_irq(get_ccwdev_lock(device->cdev)); 951 spin_lock_irq(get_ccwdev_lock(device->cdev));
950 /* Add request to request queue and try to start it. */ 952 /* Add request to request queue and try to start it. */
951 rc = __tape_start_request(device, request); 953 rc = __tape_start_request(device, request);
952 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 954 spin_unlock_irq(get_ccwdev_lock(device->cdev));
953 return rc; 955 return rc;
954 } 956 }
955 957
956 /* 958 /*
957 * tape_do_io/__tape_wake_up 959 * tape_do_io/__tape_wake_up
958 * Add the request to the request queue, try to start it if the 960 * Add the request to the request queue, try to start it if the
959 * tape is idle and wait uninterruptible for its completion. 961 * tape is idle and wait uninterruptible for its completion.
960 */ 962 */
961 static void 963 static void
962 __tape_wake_up(struct tape_request *request, void *data) 964 __tape_wake_up(struct tape_request *request, void *data)
963 { 965 {
964 request->callback = NULL; 966 request->callback = NULL;
965 wake_up((wait_queue_head_t *) data); 967 wake_up((wait_queue_head_t *) data);
966 } 968 }
967 969
968 int 970 int
969 tape_do_io(struct tape_device *device, struct tape_request *request) 971 tape_do_io(struct tape_device *device, struct tape_request *request)
970 { 972 {
971 int rc; 973 int rc;
972 974
973 spin_lock_irq(get_ccwdev_lock(device->cdev)); 975 spin_lock_irq(get_ccwdev_lock(device->cdev));
974 /* Setup callback */ 976 /* Setup callback */
975 request->callback = __tape_wake_up; 977 request->callback = __tape_wake_up;
976 request->callback_data = &device->wait_queue; 978 request->callback_data = &device->wait_queue;
977 /* Add request to request queue and try to start it. */ 979 /* Add request to request queue and try to start it. */
978 rc = __tape_start_request(device, request); 980 rc = __tape_start_request(device, request);
979 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 981 spin_unlock_irq(get_ccwdev_lock(device->cdev));
980 if (rc) 982 if (rc)
981 return rc; 983 return rc;
982 /* Request added to the queue. Wait for its completion. */ 984 /* Request added to the queue. Wait for its completion. */
983 wait_event(device->wait_queue, (request->callback == NULL)); 985 wait_event(device->wait_queue, (request->callback == NULL));
984 /* Get rc from request */ 986 /* Get rc from request */
985 return request->rc; 987 return request->rc;
986 } 988 }
987 989
988 /* 990 /*
989 * tape_do_io_interruptible/__tape_wake_up_interruptible 991 * tape_do_io_interruptible/__tape_wake_up_interruptible
990 * Add the request to the request queue, try to start it if the 992 * Add the request to the request queue, try to start it if the
991 * tape is idle and wait uninterruptible for its completion. 993 * tape is idle and wait uninterruptible for its completion.
992 */ 994 */
993 static void 995 static void
994 __tape_wake_up_interruptible(struct tape_request *request, void *data) 996 __tape_wake_up_interruptible(struct tape_request *request, void *data)
995 { 997 {
996 request->callback = NULL; 998 request->callback = NULL;
997 wake_up_interruptible((wait_queue_head_t *) data); 999 wake_up_interruptible((wait_queue_head_t *) data);
998 } 1000 }
999 1001
/*
 * Queue the request, start it if the tape is idle and wait interruptibly
 * for its completion. If the wait is interrupted by a signal the request
 * is cancelled and -ERESTARTSYS is returned.
 */
int
tape_do_io_interruptible(struct tape_device *device,
			 struct tape_request *request)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Setup callback */
	request->callback = __tape_wake_up_interruptible;
	request->callback_data = &device->wait_queue;
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc)
		return rc;
	/* Request added to the queue. Wait for its completion. */
	rc = wait_event_interruptible(device->wait_queue,
				      (request->callback == NULL));
	if (rc != -ERESTARTSYS)
		/* Request finished normally. */
		return request->rc;

	/* Interrupted by a signal. We have to stop the current request. */
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = __tape_cancel_io(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc == 0) {
		/* Wait for the interrupt that acknowledges the halt.
		 * This wait must not be cut short by further signals,
		 * hence the retry loop on -ERESTARTSYS. */
		do {
			rc = wait_event_interruptible(
				device->wait_queue,
				(request->callback == NULL)
			);
		} while (rc == -ERESTARTSYS);

		DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id);
		rc = -ERESTARTSYS;
	}
	return rc;
}
1039 1041
1040 /* 1042 /*
1041 * Stop running ccw. 1043 * Stop running ccw.
1042 */ 1044 */
1043 int 1045 int
1044 tape_cancel_io(struct tape_device *device, struct tape_request *request) 1046 tape_cancel_io(struct tape_device *device, struct tape_request *request)
1045 { 1047 {
1046 int rc; 1048 int rc;
1047 1049
1048 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1050 spin_lock_irq(get_ccwdev_lock(device->cdev));
1049 rc = __tape_cancel_io(device, request); 1051 rc = __tape_cancel_io(device, request);
1050 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1052 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1051 return rc; 1053 return rc;
1052 } 1054 }
1053 1055
/*
 * Tape interrupt routine, called from the ccw_device layer
 */
static void
__tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
	struct tape_device *device;
	struct tape_request *request;
	int rc;

	device = dev_get_drvdata(&cdev->dev);
	if (device == NULL) {
		return;
	}
	/* intparm carries the request this interrupt belongs to; it is
	 * zero for unsolicited interrupts. */
	request = (struct tape_request *) intparm;

	DBF_LH(6, "__tape_do_irq(device=%p, request=%p)\n", device, request);

	/* On special conditions irb is an error pointer */
	if (IS_ERR(irb)) {
		/* FIXME: What to do with the request? */
		switch (PTR_ERR(irb)) {
			case -ETIMEDOUT:
				DBF_LH(1, "(%s): Request timed out\n",
					dev_name(&cdev->dev));
				/* fallthrough - a timed out request is
				 * finished with -EIO as well */
			case -EIO:
				__tape_end_request(device, request, -EIO);
				break;
			default:
				DBF_LH(1, "(%s): Unexpected i/o error %li\n",
					dev_name(&cdev->dev),
					PTR_ERR(irb));
		}
		return;
	}

	/*
	 * If the condition code is not zero and the start function bit is
	 * still set, this is an deferred error and the last start I/O did
	 * not succeed. At this point the condition that caused the deferred
	 * error might still apply. So we just schedule the request to be
	 * started later.
	 */
	/* NOTE(review): request is dereferenced without a NULL check in the
	 * condition below; presumably a deferred cc only occurs for
	 * solicited interrupts (intparm != 0) - confirm. */
	if (irb->scsw.cmd.cc != 0 &&
	    (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
	    (request->status == TAPE_REQUEST_IN_IO)) {
		DBF_EVENT(3,"(%08x): deferred cc=%i, fctl=%i. restarting\n",
			device->cdev_id, irb->scsw.cmd.cc, irb->scsw.cmd.fctl);
		request->status = TAPE_REQUEST_QUEUED;
		schedule_delayed_work(&device->tape_dnr, HZ);
		return;
	}

	/* May be an unsolicited irq */
	if(request != NULL)
		request->rescnt = irb->scsw.cmd.count;
	else if ((irb->scsw.cmd.dstat == 0x85 || irb->scsw.cmd.dstat == 0x80) &&
		 !list_empty(&device->req_queue)) {
		/* Not Ready to Ready after long busy ? */
		struct tape_request *req;
		req = list_entry(device->req_queue.next,
				 struct tape_request, list);
		if (req->status == TAPE_REQUEST_LONG_BUSY) {
			DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id);
			if (del_timer(&device->lb_timeout)) {
				/* Timer was still pending: drop the device
				 * reference it held and restart the queue. */
				device->lb_timeout.data = 0UL;
				tape_put_device(device);
				__tape_start_next_request(device);
			}
			return;
		}
	}
	if (irb->scsw.cmd.dstat != 0x0c) {
		/* Set the 'ONLINE' flag depending on sense byte 1 */
		if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
			device->tape_generic_status |= GMT_ONLINE(~0);
		else
			device->tape_generic_status &= ~GMT_ONLINE(~0);

		/*
		 * Any request that does not come back with channel end
		 * and device end is unusual. Log the sense data.
		 */
		DBF_EVENT(3,"-- Tape Interrupthandler --\n");
		tape_dump_sense_dbf(device, request, irb);
	} else {
		/* Upon normal completion the device _is_ online */
		device->tape_generic_status |= GMT_ONLINE(~0);
	}
	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(6, "tape:device is not operational\n");
		return;
	}

	/*
	 * Request that were canceled still come back with an interrupt.
	 * To detect these request the state will be set to TAPE_REQUEST_DONE.
	 */
	if(request != NULL && request->status == TAPE_REQUEST_DONE) {
		__tape_end_request(device, request, -EIO);
		return;
	}

	/* Let the discipline (3480/3490/3590) interpret the interrupt. */
	rc = device->discipline->irq(device, request, irb);
	/*
	 * rc < 0 : request finished unsuccessfully.
	 * rc == TAPE_IO_SUCCESS: request finished successfully.
	 * rc == TAPE_IO_PENDING: request is still running. Ignore rc.
	 * rc == TAPE_IO_RETRY: request finished but needs another go.
	 * rc == TAPE_IO_STOP: request needs to get terminated.
	 */
	switch (rc) {
		case TAPE_IO_SUCCESS:
			/* Upon normal completion the device _is_ online */
			device->tape_generic_status |= GMT_ONLINE(~0);
			__tape_end_request(device, request, rc);
			break;
		case TAPE_IO_PENDING:
			break;
		case TAPE_IO_LONG_BUSY:
			/* Take a device reference for the timer; it is
			 * released in tape_long_busy_timeout. */
			device->lb_timeout.data =
				(unsigned long) tape_get_device(device);
			device->lb_timeout.expires = jiffies +
				LONG_BUSY_TIMEOUT * HZ;
			DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id);
			add_timer(&device->lb_timeout);
			request->status = TAPE_REQUEST_LONG_BUSY;
			break;
		case TAPE_IO_RETRY:
			rc = __tape_start_io(device, request);
			if (rc)
				__tape_end_request(device, request, rc);
			break;
		case TAPE_IO_STOP:
			rc = __tape_cancel_io(device, request);
			if (rc)
				__tape_end_request(device, request, rc);
			break;
		default:
			if (rc > 0) {
				DBF_EVENT(6, "xunknownrc\n");
				__tape_end_request(device, request, -EIO);
			} else {
				__tape_end_request(device, request, rc);
			}
			break;
	}
}
1202 1204
1203 /* 1205 /*
1204 * Tape device open function used by tape_char & tape_block frontends. 1206 * Tape device open function used by tape_char & tape_block frontends.
1205 */ 1207 */
1206 int 1208 int
1207 tape_open(struct tape_device *device) 1209 tape_open(struct tape_device *device)
1208 { 1210 {
1209 int rc; 1211 int rc;
1210 1212
1211 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1213 spin_lock_irq(get_ccwdev_lock(device->cdev));
1212 if (device->tape_state == TS_NOT_OPER) { 1214 if (device->tape_state == TS_NOT_OPER) {
1213 DBF_EVENT(6, "TAPE:nodev\n"); 1215 DBF_EVENT(6, "TAPE:nodev\n");
1214 rc = -ENODEV; 1216 rc = -ENODEV;
1215 } else if (device->tape_state == TS_IN_USE) { 1217 } else if (device->tape_state == TS_IN_USE) {
1216 DBF_EVENT(6, "TAPE:dbusy\n"); 1218 DBF_EVENT(6, "TAPE:dbusy\n");
1217 rc = -EBUSY; 1219 rc = -EBUSY;
1218 } else if (device->tape_state == TS_BLKUSE) { 1220 } else if (device->tape_state == TS_BLKUSE) {
1219 DBF_EVENT(6, "TAPE:dbusy\n"); 1221 DBF_EVENT(6, "TAPE:dbusy\n");
1220 rc = -EBUSY; 1222 rc = -EBUSY;
1221 } else if (device->discipline != NULL && 1223 } else if (device->discipline != NULL &&
1222 !try_module_get(device->discipline->owner)) { 1224 !try_module_get(device->discipline->owner)) {
1223 DBF_EVENT(6, "TAPE:nodisc\n"); 1225 DBF_EVENT(6, "TAPE:nodisc\n");
1224 rc = -ENODEV; 1226 rc = -ENODEV;
1225 } else { 1227 } else {
1226 tape_state_set(device, TS_IN_USE); 1228 tape_state_set(device, TS_IN_USE);
1227 rc = 0; 1229 rc = 0;
1228 } 1230 }
1229 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1231 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1230 return rc; 1232 return rc;
1231 } 1233 }
1232 1234
1233 /* 1235 /*
1234 * Tape device release function used by tape_char & tape_block frontends. 1236 * Tape device release function used by tape_char & tape_block frontends.
1235 */ 1237 */
1236 int 1238 int
1237 tape_release(struct tape_device *device) 1239 tape_release(struct tape_device *device)
1238 { 1240 {
1239 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1241 spin_lock_irq(get_ccwdev_lock(device->cdev));
1240 if (device->tape_state == TS_IN_USE) 1242 if (device->tape_state == TS_IN_USE)
1241 tape_state_set(device, TS_UNUSED); 1243 tape_state_set(device, TS_UNUSED);
1242 module_put(device->discipline->owner); 1244 module_put(device->discipline->owner);
1243 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1245 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1244 return 0; 1246 return 0;
1245 } 1247 }
1246 1248
1247 /* 1249 /*
1248 * Execute a magnetic tape command a number of times. 1250 * Execute a magnetic tape command a number of times.
1249 */ 1251 */
1250 int 1252 int
1251 tape_mtop(struct tape_device *device, int mt_op, int mt_count) 1253 tape_mtop(struct tape_device *device, int mt_op, int mt_count)
1252 { 1254 {
1253 tape_mtop_fn fn; 1255 tape_mtop_fn fn;
1254 int rc; 1256 int rc;
1255 1257
1256 DBF_EVENT(6, "TAPE:mtio\n"); 1258 DBF_EVENT(6, "TAPE:mtio\n");
1257 DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op); 1259 DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op);
1258 DBF_EVENT(6, "TAPE:arg: %x\n", mt_count); 1260 DBF_EVENT(6, "TAPE:arg: %x\n", mt_count);
1259 1261
1260 if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS) 1262 if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS)
1261 return -EINVAL; 1263 return -EINVAL;
1262 fn = device->discipline->mtop_array[mt_op]; 1264 fn = device->discipline->mtop_array[mt_op];
1263 if (fn == NULL) 1265 if (fn == NULL)
1264 return -EINVAL; 1266 return -EINVAL;
1265 1267
1266 /* We assume that the backends can handle count up to 500. */ 1268 /* We assume that the backends can handle count up to 500. */
1267 if (mt_op == MTBSR || mt_op == MTFSR || mt_op == MTFSF || 1269 if (mt_op == MTBSR || mt_op == MTFSR || mt_op == MTFSF ||
1268 mt_op == MTBSF || mt_op == MTFSFM || mt_op == MTBSFM) { 1270 mt_op == MTBSF || mt_op == MTFSFM || mt_op == MTBSFM) {
1269 rc = 0; 1271 rc = 0;
1270 for (; mt_count > 500; mt_count -= 500) 1272 for (; mt_count > 500; mt_count -= 500)
1271 if ((rc = fn(device, 500)) != 0) 1273 if ((rc = fn(device, 500)) != 0)
1272 break; 1274 break;
1273 if (rc == 0) 1275 if (rc == 0)
1274 rc = fn(device, mt_count); 1276 rc = fn(device, mt_count);
1275 } else 1277 } else
1276 rc = fn(device, mt_count); 1278 rc = fn(device, mt_count);
1277 return rc; 1279 return rc;
1278 1280
1279 } 1281 }
1280 1282
/*
 * Tape init function. Registers the debug feature area and brings up
 * the procfs, character and block frontends.
 */
static int
tape_init (void)
{
	/* "tape" debug area: 2 pages, 2 areas, 4 longs per entry. */
	TAPE_DBF_AREA = debug_register ( "tape", 2, 2, 4*sizeof(long));
	/* NOTE(review): the return values of debug_register and of the
	 * frontend init calls below are ignored; presumably failures are
	 * tolerated here - confirm. */
	debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
#ifdef DBF_LIKE_HELL
	debug_set_level(TAPE_DBF_AREA, 6);
#endif
	DBF_EVENT(3, "tape init\n");
	tape_proc_init();
	tapechar_init ();
	tapeblock_init ();
	return 0;
}
1298 1300
/*
 * Tape exit function. Tears down the frontends, the procfs entry and
 * the debug feature area in reverse order of registration.
 */
static void
tape_exit(void)
{
	DBF_EVENT(6, "tape exit\n");

	/* Get rid of the frontends */
	tapechar_exit();
	tapeblock_exit();
	tape_proc_cleanup();
	debug_unregister (TAPE_DBF_AREA);
}
1313 1315
/* Module metadata. */
MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and "
	      "Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)");
MODULE_DESCRIPTION("Linux on zSeries channel attached tape device driver");
MODULE_LICENSE("GPL");

module_init(tape_init);
module_exit(tape_exit);

/* Core services used by the tape discipline modules (tape_34xx etc.)
 * and by the character/block frontends. */
EXPORT_SYMBOL(tape_generic_remove);
EXPORT_SYMBOL(tape_generic_probe);
EXPORT_SYMBOL(tape_generic_online);
EXPORT_SYMBOL(tape_generic_offline);
EXPORT_SYMBOL(tape_generic_pm_suspend);
EXPORT_SYMBOL(tape_put_device);
EXPORT_SYMBOL(tape_get_device);
EXPORT_SYMBOL(tape_state_verbose);
EXPORT_SYMBOL(tape_op_verbose);
EXPORT_SYMBOL(tape_state_set);
EXPORT_SYMBOL(tape_med_state_set);
EXPORT_SYMBOL(tape_alloc_request);
EXPORT_SYMBOL(tape_free_request);
EXPORT_SYMBOL(tape_dump_sense_dbf);
EXPORT_SYMBOL(tape_do_io);
EXPORT_SYMBOL(tape_do_io_async);
EXPORT_SYMBOL(tape_do_io_interruptible);
EXPORT_SYMBOL(tape_cancel_io);
EXPORT_SYMBOL(tape_mtop);
1341 1343
drivers/s390/char/tape_proc.c
1 /* 1 /*
2 * drivers/s390/char/tape.c 2 * drivers/s390/char/tape.c
3 * tape device driver for S/390 and zSeries tapes. 3 * tape device driver for S/390 and zSeries tapes.
4 * 4 *
5 * S390 and zSeries version 5 * S390 and zSeries version
6 * Copyright (C) 2001 IBM Corporation 6 * Copyright (C) 2001 IBM Corporation
7 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Michael Holzheu <holzheu@de.ibm.com> 8 * Michael Holzheu <holzheu@de.ibm.com>
9 * Tuan Ngo-Anh <ngoanh@de.ibm.com> 9 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
10 * 10 *
11 * PROCFS Functions 11 * PROCFS Functions
12 */ 12 */
13 13
14 #define KMSG_COMPONENT "tape"
15 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
16
14 #include <linux/module.h> 17 #include <linux/module.h>
15 #include <linux/vmalloc.h> 18 #include <linux/vmalloc.h>
16 #include <linux/seq_file.h> 19 #include <linux/seq_file.h>
17 #include <linux/proc_fs.h> 20 #include <linux/proc_fs.h>
18 21
19 #define TAPE_DBF_AREA tape_core_dbf 22 #define TAPE_DBF_AREA tape_core_dbf
20 23
21 #include "tape.h" 24 #include "tape.h"
22 25
23 static const char *tape_med_st_verbose[MS_SIZE] = 26 static const char *tape_med_st_verbose[MS_SIZE] =
24 { 27 {
25 [MS_UNKNOWN] = "UNKNOWN ", 28 [MS_UNKNOWN] = "UNKNOWN ",
26 [MS_LOADED] = "LOADED ", 29 [MS_LOADED] = "LOADED ",
27 [MS_UNLOADED] = "UNLOADED" 30 [MS_UNLOADED] = "UNLOADED"
28 }; 31 };
29 32
30 /* our proc tapedevices entry */ 33 /* our proc tapedevices entry */
31 static struct proc_dir_entry *tape_proc_devices; 34 static struct proc_dir_entry *tape_proc_devices;
32 35
33 /* 36 /*
34 * Show function for /proc/tapedevices 37 * Show function for /proc/tapedevices
35 */ 38 */
36 static int tape_proc_show(struct seq_file *m, void *v) 39 static int tape_proc_show(struct seq_file *m, void *v)
37 { 40 {
38 struct tape_device *device; 41 struct tape_device *device;
39 struct tape_request *request; 42 struct tape_request *request;
40 const char *str; 43 const char *str;
41 unsigned long n; 44 unsigned long n;
42 45
43 n = (unsigned long) v - 1; 46 n = (unsigned long) v - 1;
44 if (!n) { 47 if (!n) {
45 seq_printf(m, "TapeNo\tBusID CuType/Model\t" 48 seq_printf(m, "TapeNo\tBusID CuType/Model\t"
46 "DevType/Model\tBlkSize\tState\tOp\tMedState\n"); 49 "DevType/Model\tBlkSize\tState\tOp\tMedState\n");
47 } 50 }
48 device = tape_find_device(n); 51 device = tape_find_device(n);
49 if (IS_ERR(device)) 52 if (IS_ERR(device))
50 return 0; 53 return 0;
51 spin_lock_irq(get_ccwdev_lock(device->cdev)); 54 spin_lock_irq(get_ccwdev_lock(device->cdev));
52 seq_printf(m, "%d\t", (int) n); 55 seq_printf(m, "%d\t", (int) n);
53 seq_printf(m, "%-10.10s ", dev_name(&device->cdev->dev)); 56 seq_printf(m, "%-10.10s ", dev_name(&device->cdev->dev));
54 seq_printf(m, "%04X/", device->cdev->id.cu_type); 57 seq_printf(m, "%04X/", device->cdev->id.cu_type);
55 seq_printf(m, "%02X\t", device->cdev->id.cu_model); 58 seq_printf(m, "%02X\t", device->cdev->id.cu_model);
56 seq_printf(m, "%04X/", device->cdev->id.dev_type); 59 seq_printf(m, "%04X/", device->cdev->id.dev_type);
57 seq_printf(m, "%02X\t\t", device->cdev->id.dev_model); 60 seq_printf(m, "%02X\t\t", device->cdev->id.dev_model);
58 if (device->char_data.block_size == 0) 61 if (device->char_data.block_size == 0)
59 seq_printf(m, "auto\t"); 62 seq_printf(m, "auto\t");
60 else 63 else
61 seq_printf(m, "%i\t", device->char_data.block_size); 64 seq_printf(m, "%i\t", device->char_data.block_size);
62 if (device->tape_state >= 0 && 65 if (device->tape_state >= 0 &&
63 device->tape_state < TS_SIZE) 66 device->tape_state < TS_SIZE)
64 str = tape_state_verbose[device->tape_state]; 67 str = tape_state_verbose[device->tape_state];
65 else 68 else
66 str = "UNKNOWN"; 69 str = "UNKNOWN";
67 seq_printf(m, "%s\t", str); 70 seq_printf(m, "%s\t", str);
68 if (!list_empty(&device->req_queue)) { 71 if (!list_empty(&device->req_queue)) {
69 request = list_entry(device->req_queue.next, 72 request = list_entry(device->req_queue.next,
70 struct tape_request, list); 73 struct tape_request, list);
71 str = tape_op_verbose[request->op]; 74 str = tape_op_verbose[request->op];
72 } else 75 } else
73 str = "---"; 76 str = "---";
74 seq_printf(m, "%s\t", str); 77 seq_printf(m, "%s\t", str);
75 seq_printf(m, "%s\n", tape_med_st_verbose[device->medium_state]); 78 seq_printf(m, "%s\n", tape_med_st_verbose[device->medium_state]);
76 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 79 spin_unlock_irq(get_ccwdev_lock(device->cdev));
77 tape_put_device(device); 80 tape_put_device(device);
78 return 0; 81 return 0;
79 } 82 }
80 83
81 static void *tape_proc_start(struct seq_file *m, loff_t *pos) 84 static void *tape_proc_start(struct seq_file *m, loff_t *pos)
82 { 85 {
83 if (*pos >= 256 / TAPE_MINORS_PER_DEV) 86 if (*pos >= 256 / TAPE_MINORS_PER_DEV)
84 return NULL; 87 return NULL;
85 return (void *)((unsigned long) *pos + 1); 88 return (void *)((unsigned long) *pos + 1);
86 } 89 }
87 90
88 static void *tape_proc_next(struct seq_file *m, void *v, loff_t *pos) 91 static void *tape_proc_next(struct seq_file *m, void *v, loff_t *pos)
89 { 92 {
90 ++*pos; 93 ++*pos;
91 return tape_proc_start(m, pos); 94 return tape_proc_start(m, pos);
92 } 95 }
93 96
94 static void tape_proc_stop(struct seq_file *m, void *v) 97 static void tape_proc_stop(struct seq_file *m, void *v)
95 { 98 {
96 } 99 }
97 100
98 static const struct seq_operations tape_proc_seq = { 101 static const struct seq_operations tape_proc_seq = {
99 .start = tape_proc_start, 102 .start = tape_proc_start,
100 .next = tape_proc_next, 103 .next = tape_proc_next,
101 .stop = tape_proc_stop, 104 .stop = tape_proc_stop,
102 .show = tape_proc_show, 105 .show = tape_proc_show,
103 }; 106 };
104 107
105 static int tape_proc_open(struct inode *inode, struct file *file) 108 static int tape_proc_open(struct inode *inode, struct file *file)
106 { 109 {
107 return seq_open(file, &tape_proc_seq); 110 return seq_open(file, &tape_proc_seq);
108 } 111 }
109 112
110 static const struct file_operations tape_proc_ops = 113 static const struct file_operations tape_proc_ops =
111 { 114 {
112 .owner = THIS_MODULE, 115 .owner = THIS_MODULE,
113 .open = tape_proc_open, 116 .open = tape_proc_open,
114 .read = seq_read, 117 .read = seq_read,
115 .llseek = seq_lseek, 118 .llseek = seq_lseek,
116 .release = seq_release, 119 .release = seq_release,
117 }; 120 };
118 121
119 /* 122 /*
120 * Initialize procfs stuff on startup 123 * Initialize procfs stuff on startup
121 */ 124 */
122 void 125 void
123 tape_proc_init(void) 126 tape_proc_init(void)
124 { 127 {
125 tape_proc_devices = 128 tape_proc_devices =
126 proc_create("tapedevices", S_IFREG | S_IRUGO | S_IWUSR, NULL, 129 proc_create("tapedevices", S_IFREG | S_IRUGO | S_IWUSR, NULL,
127 &tape_proc_ops); 130 &tape_proc_ops);
128 if (tape_proc_devices == NULL) { 131 if (tape_proc_devices == NULL) {
129 return; 132 return;
130 } 133 }
131 } 134 }
132 135
133 /* 136 /*
134 * Cleanup all stuff registered to the procfs 137 * Cleanup all stuff registered to the procfs
135 */ 138 */
136 void 139 void
137 tape_proc_cleanup(void) 140 tape_proc_cleanup(void)
138 { 141 {
139 if (tape_proc_devices != NULL) 142 if (tape_proc_devices != NULL)
140 remove_proc_entry ("tapedevices", NULL); 143 remove_proc_entry ("tapedevices", NULL);
141 } 144 }
142 145
drivers/s390/char/tape_std.c
1 /* 1 /*
2 * drivers/s390/char/tape_std.c 2 * drivers/s390/char/tape_std.c
3 * standard tape device functions for ibm tapes. 3 * standard tape device functions for ibm tapes.
4 * 4 *
5 * S390 and zSeries version 5 * S390 and zSeries version
6 * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
7 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Michael Holzheu <holzheu@de.ibm.com> 8 * Michael Holzheu <holzheu@de.ibm.com>
9 * Tuan Ngo-Anh <ngoanh@de.ibm.com> 9 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
10 * Martin Schwidefsky <schwidefsky@de.ibm.com> 10 * Martin Schwidefsky <schwidefsky@de.ibm.com>
11 * Stefan Bader <shbader@de.ibm.com> 11 * Stefan Bader <shbader@de.ibm.com>
12 */ 12 */
13 13
14 #define KMSG_COMPONENT "tape"
15 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
16
14 #include <linux/stddef.h> 17 #include <linux/stddef.h>
15 #include <linux/kernel.h> 18 #include <linux/kernel.h>
16 #include <linux/bio.h> 19 #include <linux/bio.h>
17 #include <linux/timer.h> 20 #include <linux/timer.h>
18 21
19 #include <asm/types.h> 22 #include <asm/types.h>
20 #include <asm/idals.h> 23 #include <asm/idals.h>
21 #include <asm/ebcdic.h> 24 #include <asm/ebcdic.h>
22 #include <asm/tape390.h> 25 #include <asm/tape390.h>
23 26
24 #define TAPE_DBF_AREA tape_core_dbf 27 #define TAPE_DBF_AREA tape_core_dbf
25 28
26 #include "tape.h" 29 #include "tape.h"
27 #include "tape_std.h" 30 #include "tape_std.h"
28 31
29 /* 32 /*
30 * tape_std_assign 33 * tape_std_assign
31 */ 34 */
32 static void 35 static void
33 tape_std_assign_timeout(unsigned long data) 36 tape_std_assign_timeout(unsigned long data)
34 { 37 {
35 struct tape_request * request; 38 struct tape_request * request;
36 struct tape_device * device; 39 struct tape_device * device;
37 int rc; 40 int rc;
38 41
39 request = (struct tape_request *) data; 42 request = (struct tape_request *) data;
40 device = request->device; 43 device = request->device;
41 BUG_ON(!device); 44 BUG_ON(!device);
42 45
43 DBF_EVENT(3, "%08x: Assignment timeout. Device busy.\n", 46 DBF_EVENT(3, "%08x: Assignment timeout. Device busy.\n",
44 device->cdev_id); 47 device->cdev_id);
45 rc = tape_cancel_io(device, request); 48 rc = tape_cancel_io(device, request);
46 if(rc) 49 if(rc)
47 DBF_EVENT(3, "(%s): Assign timeout: Cancel failed with rc = %i\n", 50 DBF_EVENT(3, "(%s): Assign timeout: Cancel failed with rc = %i\n",
48 dev_name(&device->cdev->dev), rc); 51 dev_name(&device->cdev->dev), rc);
49 } 52 }
50 53
51 int 54 int
52 tape_std_assign(struct tape_device *device) 55 tape_std_assign(struct tape_device *device)
53 { 56 {
54 int rc; 57 int rc;
55 struct timer_list timeout; 58 struct timer_list timeout;
56 struct tape_request *request; 59 struct tape_request *request;
57 60
58 request = tape_alloc_request(2, 11); 61 request = tape_alloc_request(2, 11);
59 if (IS_ERR(request)) 62 if (IS_ERR(request))
60 return PTR_ERR(request); 63 return PTR_ERR(request);
61 64
62 request->op = TO_ASSIGN; 65 request->op = TO_ASSIGN;
63 tape_ccw_cc(request->cpaddr, ASSIGN, 11, request->cpdata); 66 tape_ccw_cc(request->cpaddr, ASSIGN, 11, request->cpdata);
64 tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL); 67 tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
65 68
66 /* 69 /*
67 * The assign command sometimes blocks if the device is assigned 70 * The assign command sometimes blocks if the device is assigned
68 * to another host (actually this shouldn't happen but it does). 71 * to another host (actually this shouldn't happen but it does).
69 * So we set up a timeout for this call. 72 * So we set up a timeout for this call.
70 */ 73 */
71 init_timer_on_stack(&timeout); 74 init_timer_on_stack(&timeout);
72 timeout.function = tape_std_assign_timeout; 75 timeout.function = tape_std_assign_timeout;
73 timeout.data = (unsigned long) request; 76 timeout.data = (unsigned long) request;
74 timeout.expires = jiffies + 2 * HZ; 77 timeout.expires = jiffies + 2 * HZ;
75 add_timer(&timeout); 78 add_timer(&timeout);
76 79
77 rc = tape_do_io_interruptible(device, request); 80 rc = tape_do_io_interruptible(device, request);
78 81
79 del_timer(&timeout); 82 del_timer(&timeout);
80 83
81 if (rc != 0) { 84 if (rc != 0) {
82 DBF_EVENT(3, "%08x: assign failed - device might be busy\n", 85 DBF_EVENT(3, "%08x: assign failed - device might be busy\n",
83 device->cdev_id); 86 device->cdev_id);
84 } else { 87 } else {
85 DBF_EVENT(3, "%08x: Tape assigned\n", device->cdev_id); 88 DBF_EVENT(3, "%08x: Tape assigned\n", device->cdev_id);
86 } 89 }
87 tape_free_request(request); 90 tape_free_request(request);
88 return rc; 91 return rc;
89 } 92 }
90 93
91 /* 94 /*
92 * tape_std_unassign 95 * tape_std_unassign
93 */ 96 */
94 int 97 int
95 tape_std_unassign (struct tape_device *device) 98 tape_std_unassign (struct tape_device *device)
96 { 99 {
97 int rc; 100 int rc;
98 struct tape_request *request; 101 struct tape_request *request;
99 102
100 if (device->tape_state == TS_NOT_OPER) { 103 if (device->tape_state == TS_NOT_OPER) {
101 DBF_EVENT(3, "(%08x): Can't unassign device\n", 104 DBF_EVENT(3, "(%08x): Can't unassign device\n",
102 device->cdev_id); 105 device->cdev_id);
103 return -EIO; 106 return -EIO;
104 } 107 }
105 108
106 request = tape_alloc_request(2, 11); 109 request = tape_alloc_request(2, 11);
107 if (IS_ERR(request)) 110 if (IS_ERR(request))
108 return PTR_ERR(request); 111 return PTR_ERR(request);
109 112
110 request->op = TO_UNASSIGN; 113 request->op = TO_UNASSIGN;
111 tape_ccw_cc(request->cpaddr, UNASSIGN, 11, request->cpdata); 114 tape_ccw_cc(request->cpaddr, UNASSIGN, 11, request->cpdata);
112 tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL); 115 tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
113 116
114 if ((rc = tape_do_io(device, request)) != 0) { 117 if ((rc = tape_do_io(device, request)) != 0) {
115 DBF_EVENT(3, "%08x: Unassign failed\n", device->cdev_id); 118 DBF_EVENT(3, "%08x: Unassign failed\n", device->cdev_id);
116 } else { 119 } else {
117 DBF_EVENT(3, "%08x: Tape unassigned\n", device->cdev_id); 120 DBF_EVENT(3, "%08x: Tape unassigned\n", device->cdev_id);
118 } 121 }
119 tape_free_request(request); 122 tape_free_request(request);
120 return rc; 123 return rc;
121 } 124 }
122 125
123 /* 126 /*
124 * TAPE390_DISPLAY: Show a string on the tape display. 127 * TAPE390_DISPLAY: Show a string on the tape display.
125 */ 128 */
126 int 129 int
127 tape_std_display(struct tape_device *device, struct display_struct *disp) 130 tape_std_display(struct tape_device *device, struct display_struct *disp)
128 { 131 {
129 struct tape_request *request; 132 struct tape_request *request;
130 int rc; 133 int rc;
131 134
132 request = tape_alloc_request(2, 17); 135 request = tape_alloc_request(2, 17);
133 if (IS_ERR(request)) { 136 if (IS_ERR(request)) {
134 DBF_EVENT(3, "TAPE: load display failed\n"); 137 DBF_EVENT(3, "TAPE: load display failed\n");
135 return PTR_ERR(request); 138 return PTR_ERR(request);
136 } 139 }
137 request->op = TO_DIS; 140 request->op = TO_DIS;
138 141
139 *(unsigned char *) request->cpdata = disp->cntrl; 142 *(unsigned char *) request->cpdata = disp->cntrl;
140 DBF_EVENT(5, "TAPE: display cntrl=%04x\n", disp->cntrl); 143 DBF_EVENT(5, "TAPE: display cntrl=%04x\n", disp->cntrl);
141 memcpy(((unsigned char *) request->cpdata) + 1, disp->message1, 8); 144 memcpy(((unsigned char *) request->cpdata) + 1, disp->message1, 8);
142 memcpy(((unsigned char *) request->cpdata) + 9, disp->message2, 8); 145 memcpy(((unsigned char *) request->cpdata) + 9, disp->message2, 8);
143 ASCEBC(((unsigned char*) request->cpdata) + 1, 16); 146 ASCEBC(((unsigned char*) request->cpdata) + 1, 16);
144 147
145 tape_ccw_cc(request->cpaddr, LOAD_DISPLAY, 17, request->cpdata); 148 tape_ccw_cc(request->cpaddr, LOAD_DISPLAY, 17, request->cpdata);
146 tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL); 149 tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
147 150
148 rc = tape_do_io_interruptible(device, request); 151 rc = tape_do_io_interruptible(device, request);
149 tape_free_request(request); 152 tape_free_request(request);
150 return rc; 153 return rc;
151 } 154 }
152 155
153 /* 156 /*
154 * Read block id. 157 * Read block id.
155 */ 158 */
156 int 159 int
157 tape_std_read_block_id(struct tape_device *device, __u64 *id) 160 tape_std_read_block_id(struct tape_device *device, __u64 *id)
158 { 161 {
159 struct tape_request *request; 162 struct tape_request *request;
160 int rc; 163 int rc;
161 164
162 request = tape_alloc_request(3, 8); 165 request = tape_alloc_request(3, 8);
163 if (IS_ERR(request)) 166 if (IS_ERR(request))
164 return PTR_ERR(request); 167 return PTR_ERR(request);
165 request->op = TO_RBI; 168 request->op = TO_RBI;
166 /* setup ccws */ 169 /* setup ccws */
167 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); 170 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
168 tape_ccw_cc(request->cpaddr + 1, READ_BLOCK_ID, 8, request->cpdata); 171 tape_ccw_cc(request->cpaddr + 1, READ_BLOCK_ID, 8, request->cpdata);
169 tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); 172 tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
170 /* execute it */ 173 /* execute it */
171 rc = tape_do_io(device, request); 174 rc = tape_do_io(device, request);
172 if (rc == 0) 175 if (rc == 0)
173 /* Get result from read buffer. */ 176 /* Get result from read buffer. */
174 *id = *(__u64 *) request->cpdata; 177 *id = *(__u64 *) request->cpdata;
175 tape_free_request(request); 178 tape_free_request(request);
176 return rc; 179 return rc;
177 } 180 }
178 181
179 int 182 int
180 tape_std_terminate_write(struct tape_device *device) 183 tape_std_terminate_write(struct tape_device *device)
181 { 184 {
182 int rc; 185 int rc;
183 186
184 if(device->required_tapemarks == 0) 187 if(device->required_tapemarks == 0)
185 return 0; 188 return 0;
186 189
187 DBF_LH(5, "tape%d: terminate write %dxEOF\n", device->first_minor, 190 DBF_LH(5, "tape%d: terminate write %dxEOF\n", device->first_minor,
188 device->required_tapemarks); 191 device->required_tapemarks);
189 192
190 rc = tape_mtop(device, MTWEOF, device->required_tapemarks); 193 rc = tape_mtop(device, MTWEOF, device->required_tapemarks);
191 if (rc) 194 if (rc)
192 return rc; 195 return rc;
193 196
194 device->required_tapemarks = 0; 197 device->required_tapemarks = 0;
195 return tape_mtop(device, MTBSR, 1); 198 return tape_mtop(device, MTBSR, 1);
196 } 199 }
197 200
198 /* 201 /*
199 * MTLOAD: Loads the tape. 202 * MTLOAD: Loads the tape.
200 * The default implementation just wait until the tape medium state changes 203 * The default implementation just wait until the tape medium state changes
201 * to MS_LOADED. 204 * to MS_LOADED.
202 */ 205 */
203 int 206 int
204 tape_std_mtload(struct tape_device *device, int count) 207 tape_std_mtload(struct tape_device *device, int count)
205 { 208 {
206 return wait_event_interruptible(device->state_change_wq, 209 return wait_event_interruptible(device->state_change_wq,
207 (device->medium_state == MS_LOADED)); 210 (device->medium_state == MS_LOADED));
208 } 211 }
209 212
210 /* 213 /*
211 * MTSETBLK: Set block size. 214 * MTSETBLK: Set block size.
212 */ 215 */
213 int 216 int
214 tape_std_mtsetblk(struct tape_device *device, int count) 217 tape_std_mtsetblk(struct tape_device *device, int count)
215 { 218 {
216 struct idal_buffer *new; 219 struct idal_buffer *new;
217 220
218 DBF_LH(6, "tape_std_mtsetblk(%d)\n", count); 221 DBF_LH(6, "tape_std_mtsetblk(%d)\n", count);
219 if (count <= 0) { 222 if (count <= 0) {
220 /* 223 /*
221 * Just set block_size to 0. tapechar_read/tapechar_write 224 * Just set block_size to 0. tapechar_read/tapechar_write
222 * will realloc the idal buffer if a bigger one than the 225 * will realloc the idal buffer if a bigger one than the
223 * current is needed. 226 * current is needed.
224 */ 227 */
225 device->char_data.block_size = 0; 228 device->char_data.block_size = 0;
226 return 0; 229 return 0;
227 } 230 }
228 if (device->char_data.idal_buf != NULL && 231 if (device->char_data.idal_buf != NULL &&
229 device->char_data.idal_buf->size == count) 232 device->char_data.idal_buf->size == count)
230 /* We already have a idal buffer of that size. */ 233 /* We already have a idal buffer of that size. */
231 return 0; 234 return 0;
232 235
233 if (count > MAX_BLOCKSIZE) { 236 if (count > MAX_BLOCKSIZE) {
234 DBF_EVENT(3, "Invalid block size (%d > %d) given.\n", 237 DBF_EVENT(3, "Invalid block size (%d > %d) given.\n",
235 count, MAX_BLOCKSIZE); 238 count, MAX_BLOCKSIZE);
236 return -EINVAL; 239 return -EINVAL;
237 } 240 }
238 241
239 /* Allocate a new idal buffer. */ 242 /* Allocate a new idal buffer. */
240 new = idal_buffer_alloc(count, 0); 243 new = idal_buffer_alloc(count, 0);
241 if (IS_ERR(new)) 244 if (IS_ERR(new))
242 return -ENOMEM; 245 return -ENOMEM;
243 if (device->char_data.idal_buf != NULL) 246 if (device->char_data.idal_buf != NULL)
244 idal_buffer_free(device->char_data.idal_buf); 247 idal_buffer_free(device->char_data.idal_buf);
245 device->char_data.idal_buf = new; 248 device->char_data.idal_buf = new;
246 device->char_data.block_size = count; 249 device->char_data.block_size = count;
247 250
248 DBF_LH(6, "new blocksize is %d\n", device->char_data.block_size); 251 DBF_LH(6, "new blocksize is %d\n", device->char_data.block_size);
249 252
250 return 0; 253 return 0;
251 } 254 }
252 255
253 /* 256 /*
254 * MTRESET: Set block size to 0. 257 * MTRESET: Set block size to 0.
255 */ 258 */
256 int 259 int
257 tape_std_mtreset(struct tape_device *device, int count) 260 tape_std_mtreset(struct tape_device *device, int count)
258 { 261 {
259 DBF_EVENT(6, "TCHAR:devreset:\n"); 262 DBF_EVENT(6, "TCHAR:devreset:\n");
260 device->char_data.block_size = 0; 263 device->char_data.block_size = 0;
261 return 0; 264 return 0;
262 } 265 }
263 266
264 /* 267 /*
265 * MTFSF: Forward space over 'count' file marks. The tape is positioned 268 * MTFSF: Forward space over 'count' file marks. The tape is positioned
266 * at the EOT (End of Tape) side of the file mark. 269 * at the EOT (End of Tape) side of the file mark.
267 */ 270 */
268 int 271 int
269 tape_std_mtfsf(struct tape_device *device, int mt_count) 272 tape_std_mtfsf(struct tape_device *device, int mt_count)
270 { 273 {
271 struct tape_request *request; 274 struct tape_request *request;
272 struct ccw1 *ccw; 275 struct ccw1 *ccw;
273 276
274 request = tape_alloc_request(mt_count + 2, 0); 277 request = tape_alloc_request(mt_count + 2, 0);
275 if (IS_ERR(request)) 278 if (IS_ERR(request))
276 return PTR_ERR(request); 279 return PTR_ERR(request);
277 request->op = TO_FSF; 280 request->op = TO_FSF;
278 /* setup ccws */ 281 /* setup ccws */
279 ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, 282 ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
280 device->modeset_byte); 283 device->modeset_byte);
281 ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count); 284 ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count);
282 ccw = tape_ccw_end(ccw, NOP, 0, NULL); 285 ccw = tape_ccw_end(ccw, NOP, 0, NULL);
283 286
284 /* execute it */ 287 /* execute it */
285 return tape_do_io_free(device, request); 288 return tape_do_io_free(device, request);
286 } 289 }
287 290
288 /* 291 /*
289 * MTFSR: Forward space over 'count' tape blocks (blocksize is set 292 * MTFSR: Forward space over 'count' tape blocks (blocksize is set
290 * via MTSETBLK. 293 * via MTSETBLK.
291 */ 294 */
292 int 295 int
293 tape_std_mtfsr(struct tape_device *device, int mt_count) 296 tape_std_mtfsr(struct tape_device *device, int mt_count)
294 { 297 {
295 struct tape_request *request; 298 struct tape_request *request;
296 struct ccw1 *ccw; 299 struct ccw1 *ccw;
297 int rc; 300 int rc;
298 301
299 request = tape_alloc_request(mt_count + 2, 0); 302 request = tape_alloc_request(mt_count + 2, 0);
300 if (IS_ERR(request)) 303 if (IS_ERR(request))
301 return PTR_ERR(request); 304 return PTR_ERR(request);
302 request->op = TO_FSB; 305 request->op = TO_FSB;
303 /* setup ccws */ 306 /* setup ccws */
304 ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, 307 ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
305 device->modeset_byte); 308 device->modeset_byte);
306 ccw = tape_ccw_repeat(ccw, FORSPACEBLOCK, mt_count); 309 ccw = tape_ccw_repeat(ccw, FORSPACEBLOCK, mt_count);
307 ccw = tape_ccw_end(ccw, NOP, 0, NULL); 310 ccw = tape_ccw_end(ccw, NOP, 0, NULL);
308 311
309 /* execute it */ 312 /* execute it */
310 rc = tape_do_io(device, request); 313 rc = tape_do_io(device, request);
311 if (rc == 0 && request->rescnt > 0) { 314 if (rc == 0 && request->rescnt > 0) {
312 DBF_LH(3, "FSR over tapemark\n"); 315 DBF_LH(3, "FSR over tapemark\n");
313 rc = 1; 316 rc = 1;
314 } 317 }
315 tape_free_request(request); 318 tape_free_request(request);
316 319
317 return rc; 320 return rc;
318 } 321 }
319 322
320 /* 323 /*
321 * MTBSR: Backward space over 'count' tape blocks. 324 * MTBSR: Backward space over 'count' tape blocks.
322 * (blocksize is set via MTSETBLK. 325 * (blocksize is set via MTSETBLK.
323 */ 326 */
324 int 327 int
325 tape_std_mtbsr(struct tape_device *device, int mt_count) 328 tape_std_mtbsr(struct tape_device *device, int mt_count)
326 { 329 {
327 struct tape_request *request; 330 struct tape_request *request;
328 struct ccw1 *ccw; 331 struct ccw1 *ccw;
329 int rc; 332 int rc;
330 333
331 request = tape_alloc_request(mt_count + 2, 0); 334 request = tape_alloc_request(mt_count + 2, 0);
332 if (IS_ERR(request)) 335 if (IS_ERR(request))
333 return PTR_ERR(request); 336 return PTR_ERR(request);
334 request->op = TO_BSB; 337 request->op = TO_BSB;
335 /* setup ccws */ 338 /* setup ccws */
336 ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, 339 ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
337 device->modeset_byte); 340 device->modeset_byte);
338 ccw = tape_ccw_repeat(ccw, BACKSPACEBLOCK, mt_count); 341 ccw = tape_ccw_repeat(ccw, BACKSPACEBLOCK, mt_count);
339 ccw = tape_ccw_end(ccw, NOP, 0, NULL); 342 ccw = tape_ccw_end(ccw, NOP, 0, NULL);
340 343
341 /* execute it */ 344 /* execute it */
342 rc = tape_do_io(device, request); 345 rc = tape_do_io(device, request);
343 if (rc == 0 && request->rescnt > 0) { 346 if (rc == 0 && request->rescnt > 0) {
344 DBF_LH(3, "BSR over tapemark\n"); 347 DBF_LH(3, "BSR over tapemark\n");
345 rc = 1; 348 rc = 1;
346 } 349 }
347 tape_free_request(request); 350 tape_free_request(request);
348 351
349 return rc; 352 return rc;
350 } 353 }
351 354
352 /* 355 /*
353 * MTWEOF: Write 'count' file marks at the current position. 356 * MTWEOF: Write 'count' file marks at the current position.
354 */ 357 */
355 int 358 int
356 tape_std_mtweof(struct tape_device *device, int mt_count) 359 tape_std_mtweof(struct tape_device *device, int mt_count)
357 { 360 {
358 struct tape_request *request; 361 struct tape_request *request;
359 struct ccw1 *ccw; 362 struct ccw1 *ccw;
360 363
361 request = tape_alloc_request(mt_count + 2, 0); 364 request = tape_alloc_request(mt_count + 2, 0);
362 if (IS_ERR(request)) 365 if (IS_ERR(request))
363 return PTR_ERR(request); 366 return PTR_ERR(request);
364 request->op = TO_WTM; 367 request->op = TO_WTM;
365 /* setup ccws */ 368 /* setup ccws */
366 ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, 369 ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
367 device->modeset_byte); 370 device->modeset_byte);
368 ccw = tape_ccw_repeat(ccw, WRITETAPEMARK, mt_count); 371 ccw = tape_ccw_repeat(ccw, WRITETAPEMARK, mt_count);
369 ccw = tape_ccw_end(ccw, NOP, 0, NULL); 372 ccw = tape_ccw_end(ccw, NOP, 0, NULL);
370 373
371 /* execute it */ 374 /* execute it */
372 return tape_do_io_free(device, request); 375 return tape_do_io_free(device, request);
373 } 376 }
374 377
375 /* 378 /*
376 * MTBSFM: Backward space over 'count' file marks. 379 * MTBSFM: Backward space over 'count' file marks.
377 * The tape is positioned at the BOT (Begin Of Tape) side of the 380 * The tape is positioned at the BOT (Begin Of Tape) side of the
378 * last skipped file mark. 381 * last skipped file mark.
379 */ 382 */
380 int 383 int
381 tape_std_mtbsfm(struct tape_device *device, int mt_count) 384 tape_std_mtbsfm(struct tape_device *device, int mt_count)
382 { 385 {
383 struct tape_request *request; 386 struct tape_request *request;
384 struct ccw1 *ccw; 387 struct ccw1 *ccw;
385 388
386 request = tape_alloc_request(mt_count + 2, 0); 389 request = tape_alloc_request(mt_count + 2, 0);
387 if (IS_ERR(request)) 390 if (IS_ERR(request))
388 return PTR_ERR(request); 391 return PTR_ERR(request);
389 request->op = TO_BSF; 392 request->op = TO_BSF;
390 /* setup ccws */ 393 /* setup ccws */
391 ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, 394 ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
392 device->modeset_byte); 395 device->modeset_byte);
393 ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count); 396 ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count);
394 ccw = tape_ccw_end(ccw, NOP, 0, NULL); 397 ccw = tape_ccw_end(ccw, NOP, 0, NULL);
395 398
396 /* execute it */ 399 /* execute it */
397 return tape_do_io_free(device, request); 400 return tape_do_io_free(device, request);
398 } 401 }
399 402
400 /* 403 /*
401 * MTBSF: Backward space over 'count' file marks. The tape is positioned at 404 * MTBSF: Backward space over 'count' file marks. The tape is positioned at
402 * the EOT (End of Tape) side of the last skipped file mark. 405 * the EOT (End of Tape) side of the last skipped file mark.
403 */ 406 */
404 int 407 int
405 tape_std_mtbsf(struct tape_device *device, int mt_count) 408 tape_std_mtbsf(struct tape_device *device, int mt_count)
406 { 409 {
407 struct tape_request *request; 410 struct tape_request *request;
408 struct ccw1 *ccw; 411 struct ccw1 *ccw;
409 int rc; 412 int rc;
410 413
411 request = tape_alloc_request(mt_count + 2, 0); 414 request = tape_alloc_request(mt_count + 2, 0);
412 if (IS_ERR(request)) 415 if (IS_ERR(request))
413 return PTR_ERR(request); 416 return PTR_ERR(request);
414 request->op = TO_BSF; 417 request->op = TO_BSF;
415 /* setup ccws */ 418 /* setup ccws */
416 ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, 419 ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
417 device->modeset_byte); 420 device->modeset_byte);
418 ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count); 421 ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count);
419 ccw = tape_ccw_end(ccw, NOP, 0, NULL); 422 ccw = tape_ccw_end(ccw, NOP, 0, NULL);
420 /* execute it */ 423 /* execute it */
421 rc = tape_do_io_free(device, request); 424 rc = tape_do_io_free(device, request);
422 if (rc == 0) { 425 if (rc == 0) {
423 rc = tape_mtop(device, MTFSR, 1); 426 rc = tape_mtop(device, MTFSR, 1);
424 if (rc > 0) 427 if (rc > 0)
425 rc = 0; 428 rc = 0;
426 } 429 }
427 return rc; 430 return rc;
428 } 431 }
429 432
430 /* 433 /*
431 * MTFSFM: Forward space over 'count' file marks. 434 * MTFSFM: Forward space over 'count' file marks.
432 * The tape is positioned at the BOT (Begin Of Tape) side 435 * The tape is positioned at the BOT (Begin Of Tape) side
433 * of the last skipped file mark. 436 * of the last skipped file mark.
434 */ 437 */
435 int 438 int
436 tape_std_mtfsfm(struct tape_device *device, int mt_count) 439 tape_std_mtfsfm(struct tape_device *device, int mt_count)
437 { 440 {
438 struct tape_request *request; 441 struct tape_request *request;
439 struct ccw1 *ccw; 442 struct ccw1 *ccw;
440 int rc; 443 int rc;
441 444
442 request = tape_alloc_request(mt_count + 2, 0); 445 request = tape_alloc_request(mt_count + 2, 0);
443 if (IS_ERR(request)) 446 if (IS_ERR(request))
444 return PTR_ERR(request); 447 return PTR_ERR(request);
445 request->op = TO_FSF; 448 request->op = TO_FSF;
446 /* setup ccws */ 449 /* setup ccws */
447 ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, 450 ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
448 device->modeset_byte); 451 device->modeset_byte);
449 ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count); 452 ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count);
450 ccw = tape_ccw_end(ccw, NOP, 0, NULL); 453 ccw = tape_ccw_end(ccw, NOP, 0, NULL);
451 /* execute it */ 454 /* execute it */
452 rc = tape_do_io_free(device, request); 455 rc = tape_do_io_free(device, request);
453 if (rc == 0) { 456 if (rc == 0) {
454 rc = tape_mtop(device, MTBSR, 1); 457 rc = tape_mtop(device, MTBSR, 1);
455 if (rc > 0) 458 if (rc > 0)
456 rc = 0; 459 rc = 0;
457 } 460 }
458 461
459 return rc; 462 return rc;
460 } 463 }
461 464
/*
 * MTREW: Rewind the tape.
 */
int
tape_std_mtrew(struct tape_device *device, int mt_count)
{
	struct tape_request *request;

	/* mt_count is not used - a rewind cannot be repeated. */
	request = tape_alloc_request(3, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_REW;
	/* setup ccws: mode set, rewind, terminating NOP */
	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
		    device->modeset_byte);
	tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL);
	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);

	/* execute it */
	return tape_do_io_free(device, request);
}
483 486
/*
 * MTOFFL: Rewind the tape and put the drive off-line.
 * Implement 'rewind unload'
 */
int
tape_std_mtoffl(struct tape_device *device, int mt_count)
{
	struct tape_request *request;

	/* mt_count is not used - the operation cannot be repeated. */
	request = tape_alloc_request(3, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_RUN;
	/* setup ccws: mode set, rewind unload, terminating NOP */
	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
	tape_ccw_cc(request->cpaddr + 1, REWIND_UNLOAD, 0, NULL);
	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);

	/* execute it */
	return tape_do_io_free(device, request);
}
505 508
/*
 * MTNOP: 'No operation'.
 */
int
tape_std_mtnop(struct tape_device *device, int mt_count)
{
	struct tape_request *request;

	/* mt_count is not used - a NOP has no repeat semantics. */
	request = tape_alloc_request(2, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_NOP;
	/* setup ccws: mode set followed by the NOP itself */
	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
	/* execute it */
	return tape_do_io_free(device, request);
}
524 527
525 /* 528 /*
526 * MTEOM: positions at the end of the portion of the tape already used 529 * MTEOM: positions at the end of the portion of the tape already used
527 * for recordind data. MTEOM positions after the last file mark, ready for 530 * for recordind data. MTEOM positions after the last file mark, ready for
528 * appending another file. 531 * appending another file.
529 */ 532 */
530 int 533 int
531 tape_std_mteom(struct tape_device *device, int mt_count) 534 tape_std_mteom(struct tape_device *device, int mt_count)
532 { 535 {
533 int rc; 536 int rc;
534 537
535 /* 538 /*
536 * Seek from the beginning of tape (rewind). 539 * Seek from the beginning of tape (rewind).
537 */ 540 */
538 if ((rc = tape_mtop(device, MTREW, 1)) < 0) 541 if ((rc = tape_mtop(device, MTREW, 1)) < 0)
539 return rc; 542 return rc;
540 543
541 /* 544 /*
542 * The logical end of volume is given by two sewuential tapemarks. 545 * The logical end of volume is given by two sewuential tapemarks.
543 * Look for this by skipping to the next file (over one tapemark) 546 * Look for this by skipping to the next file (over one tapemark)
544 * and then test for another one (fsr returns 1 if a tapemark was 547 * and then test for another one (fsr returns 1 if a tapemark was
545 * encountered). 548 * encountered).
546 */ 549 */
547 do { 550 do {
548 if ((rc = tape_mtop(device, MTFSF, 1)) < 0) 551 if ((rc = tape_mtop(device, MTFSF, 1)) < 0)
549 return rc; 552 return rc;
550 if ((rc = tape_mtop(device, MTFSR, 1)) < 0) 553 if ((rc = tape_mtop(device, MTFSR, 1)) < 0)
551 return rc; 554 return rc;
552 } while (rc == 0); 555 } while (rc == 0);
553 556
554 return tape_mtop(device, MTBSR, 1); 557 return tape_mtop(device, MTBSR, 1);
555 } 558 }
556 559
557 /* 560 /*
558 * MTRETEN: Retension the tape, i.e. forward space to end of tape and rewind. 561 * MTRETEN: Retension the tape, i.e. forward space to end of tape and rewind.
559 */ 562 */
560 int 563 int
561 tape_std_mtreten(struct tape_device *device, int mt_count) 564 tape_std_mtreten(struct tape_device *device, int mt_count)
562 { 565 {
563 struct tape_request *request; 566 struct tape_request *request;
564 int rc; 567 int rc;
565 568
566 request = tape_alloc_request(4, 0); 569 request = tape_alloc_request(4, 0);
567 if (IS_ERR(request)) 570 if (IS_ERR(request))
568 return PTR_ERR(request); 571 return PTR_ERR(request);
569 request->op = TO_FSF; 572 request->op = TO_FSF;
570 /* setup ccws */ 573 /* setup ccws */
571 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); 574 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
572 tape_ccw_cc(request->cpaddr + 1,FORSPACEFILE, 0, NULL); 575 tape_ccw_cc(request->cpaddr + 1,FORSPACEFILE, 0, NULL);
573 tape_ccw_cc(request->cpaddr + 2, NOP, 0, NULL); 576 tape_ccw_cc(request->cpaddr + 2, NOP, 0, NULL);
574 tape_ccw_end(request->cpaddr + 3, CCW_CMD_TIC, 0, request->cpaddr); 577 tape_ccw_end(request->cpaddr + 3, CCW_CMD_TIC, 0, request->cpaddr);
575 /* execute it, MTRETEN rc gets ignored */ 578 /* execute it, MTRETEN rc gets ignored */
576 rc = tape_do_io_interruptible(device, request); 579 rc = tape_do_io_interruptible(device, request);
577 tape_free_request(request); 580 tape_free_request(request);
578 return tape_mtop(device, MTREW, 1); 581 return tape_mtop(device, MTREW, 1);
579 } 582 }
580 583
/*
 * MTERASE: erases the tape.
 */
int
tape_std_mterase(struct tape_device *device, int mt_count)
{
	struct tape_request *request;

	/* mt_count is not used - the whole tape is always erased. */
	request = tape_alloc_request(6, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_DSE;
	/*
	 * setup ccws: rewind first, erase a gap, issue the data security
	 * erase, then rewind again and terminate with a NOP.
	 */
	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
	tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL);
	tape_ccw_cc(request->cpaddr + 2, ERASE_GAP, 0, NULL);
	tape_ccw_cc(request->cpaddr + 3, DATA_SEC_ERASE, 0, NULL);
	tape_ccw_cc(request->cpaddr + 4, REWIND, 0, NULL);
	tape_ccw_end(request->cpaddr + 5, NOP, 0, NULL);

	/* execute it */
	return tape_do_io_free(device, request);
}
604 607
/*
 * MTUNLOAD: Rewind the tape and unload it.
 * Implemented as a plain alias for MTOFFL (rewind unload).
 */
int
tape_std_mtunload(struct tape_device *device, int mt_count)
{
	return tape_mtop(device, MTOFFL, mt_count);
}
613 616
614 /* 617 /*
615 * MTCOMPRESSION: used to enable compression. 618 * MTCOMPRESSION: used to enable compression.
616 * Sets the IDRC on/off. 619 * Sets the IDRC on/off.
617 */ 620 */
618 int 621 int
619 tape_std_mtcompression(struct tape_device *device, int mt_count) 622 tape_std_mtcompression(struct tape_device *device, int mt_count)
620 { 623 {
621 struct tape_request *request; 624 struct tape_request *request;
622 625
623 if (mt_count < 0 || mt_count > 1) { 626 if (mt_count < 0 || mt_count > 1) {
624 DBF_EXCEPTION(6, "xcom parm\n"); 627 DBF_EXCEPTION(6, "xcom parm\n");
625 return -EINVAL; 628 return -EINVAL;
626 } 629 }
627 request = tape_alloc_request(2, 0); 630 request = tape_alloc_request(2, 0);
628 if (IS_ERR(request)) 631 if (IS_ERR(request))
629 return PTR_ERR(request); 632 return PTR_ERR(request);
630 request->op = TO_NOP; 633 request->op = TO_NOP;
631 /* setup ccws */ 634 /* setup ccws */
632 if (mt_count == 0) 635 if (mt_count == 0)
633 *device->modeset_byte &= ~0x08; 636 *device->modeset_byte &= ~0x08;
634 else 637 else
635 *device->modeset_byte |= 0x08; 638 *device->modeset_byte |= 0x08;
636 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); 639 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
637 tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL); 640 tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
638 /* execute it */ 641 /* execute it */
639 return tape_do_io_free(device, request); 642 return tape_do_io_free(device, request);
640 } 643 }
641 644
/*
 * Read Block
 */
struct tape_request *
tape_std_read_block(struct tape_device *device, size_t count)
{
	struct tape_request *request;

	/*
	 * We have to alloc 4 ccws in order to be able to transform request
	 * into a read backward request in error case
	 * (see tape_std_read_backward).
	 */
	request = tape_alloc_request(4, 0);
	if (IS_ERR(request)) {
		DBF_EXCEPTION(6, "xrbl fail");
		return request;
	}
	request->op = TO_RFO;
	/* mode set, then read forward into the device's idal buffer */
	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
	tape_ccw_end_idal(request->cpaddr + 1, READ_FORWARD,
			  device->char_data.idal_buf);
	DBF_EVENT(6, "xrbl ccwg\n");
	/*
	 * NOTE(review): count is unused here; the transfer length
	 * presumably comes from the idal buffer - confirm with callers.
	 */
	return request;
}
666 669
667 /* 670 /*
668 * Read Block backward transformation function. 671 * Read Block backward transformation function.
669 */ 672 */
670 void 673 void
671 tape_std_read_backward(struct tape_device *device, struct tape_request *request) 674 tape_std_read_backward(struct tape_device *device, struct tape_request *request)
672 { 675 {
673 /* 676 /*
674 * We have allocated 4 ccws in tape_std_read, so we can now 677 * We have allocated 4 ccws in tape_std_read, so we can now
675 * transform the request to a read backward, followed by a 678 * transform the request to a read backward, followed by a
676 * forward space block. 679 * forward space block.
677 */ 680 */
678 request->op = TO_RBA; 681 request->op = TO_RBA;
679 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); 682 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
680 tape_ccw_cc_idal(request->cpaddr + 1, READ_BACKWARD, 683 tape_ccw_cc_idal(request->cpaddr + 1, READ_BACKWARD,
681 device->char_data.idal_buf); 684 device->char_data.idal_buf);
682 tape_ccw_cc(request->cpaddr + 2, FORSPACEBLOCK, 0, NULL); 685 tape_ccw_cc(request->cpaddr + 2, FORSPACEBLOCK, 0, NULL);
683 tape_ccw_end(request->cpaddr + 3, NOP, 0, NULL); 686 tape_ccw_end(request->cpaddr + 3, NOP, 0, NULL);
684 DBF_EVENT(6, "xrop ccwg");} 687 DBF_EVENT(6, "xrop ccwg");}
685 688
/*
 * Write Block
 */
struct tape_request *
tape_std_write_block(struct tape_device *device, size_t count)
{
	struct tape_request *request;

	request = tape_alloc_request(2, 0);
	if (IS_ERR(request)) {
		DBF_EXCEPTION(6, "xwbl fail\n");
		return request;
	}
	request->op = TO_WRI;
	/* mode set, then write from the device's idal buffer */
	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
	tape_ccw_end_idal(request->cpaddr + 1, WRITE_CMD,
			  device->char_data.idal_buf);
	DBF_EVENT(6, "xwbl ccwg\n");
	/*
	 * NOTE(review): count is unused here; the transfer length
	 * presumably comes from the idal buffer - confirm with callers.
	 */
	return request;
}
706 709
/*
 * This routine is called by frontend after an ENOSP on write
 */
void
tape_std_process_eov(struct tape_device *device)
{
	/*
	 * End of volume: We have to backspace the last written record, then
	 * we TRY to write a tapemark and then backspace over the written TM
	 */
	if (tape_mtop(device, MTBSR, 1) == 0 &&
	    tape_mtop(device, MTWEOF, 1) == 0) {
		tape_mtop(device, MTBSR, 1);
	}
	/* Best effort only: failures of any step are ignored. */
}
722 725
/* Exported entry points of the standard tape discipline. */
EXPORT_SYMBOL(tape_std_assign);
EXPORT_SYMBOL(tape_std_unassign);
EXPORT_SYMBOL(tape_std_display);
EXPORT_SYMBOL(tape_std_read_block_id);
EXPORT_SYMBOL(tape_std_mtload);
EXPORT_SYMBOL(tape_std_mtsetblk);
EXPORT_SYMBOL(tape_std_mtreset);
EXPORT_SYMBOL(tape_std_mtfsf);
EXPORT_SYMBOL(tape_std_mtfsr);
EXPORT_SYMBOL(tape_std_mtbsr);
EXPORT_SYMBOL(tape_std_mtweof);
EXPORT_SYMBOL(tape_std_mtbsfm);
EXPORT_SYMBOL(tape_std_mtbsf);
EXPORT_SYMBOL(tape_std_mtfsfm);
EXPORT_SYMBOL(tape_std_mtrew);
EXPORT_SYMBOL(tape_std_mtoffl);
EXPORT_SYMBOL(tape_std_mtnop);
EXPORT_SYMBOL(tape_std_mteom);
EXPORT_SYMBOL(tape_std_mtreten);
EXPORT_SYMBOL(tape_std_mterase);
EXPORT_SYMBOL(tape_std_mtunload);
EXPORT_SYMBOL(tape_std_mtcompression);
EXPORT_SYMBOL(tape_std_read_block);
EXPORT_SYMBOL(tape_std_read_backward);
EXPORT_SYMBOL(tape_std_write_block);
EXPORT_SYMBOL(tape_std_process_eov);
749 752