drivers/ide/ide-dma.c

/*
 * IDE DMA support (including IDE PCI BM-DMA).
 *
 * Copyright (C) 1995-1998	Mark Lord
 * Copyright (C) 1999-2000	Andre Hedrick <andre@linux-ide.org>
 * Copyright (C) 2004, 2007	Bartlomiej Zolnierkiewicz
 *
 * May be copied or modified under the terms of the GNU General Public License
 *
 * DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies).
 */

/*
 * Special Thanks to Mark for his Six years of work.
 */

/*
 * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for
 * fixing the problem with the BIOS on some Acer motherboards.
 *
 * Thanks to "Benoit Poulot-Cazajous" <poulot@chorus.fr> for testing
 * "TX" chipset compatibility and for providing patches for the "TX" chipset.
 *
 * Thanks to Christian Brunner <chb@muc.de> for taking a good first crack
 * at generic DMA -- his patches were referred to when preparing this code.
 *
 * Most importantly, thanks to Robert Bringman <rob@mars.trion.com>
 * for supplying a Promise UDMA board & WD UDMA drive for this work!
 */

#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ide.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
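
/*
 * Model/firmware pairs for drives known to work well with DMA (the
 * whitelist) and for drives known to misbehave (the blacklist); a NULL
 * firmware entry matches any firmware revision.
 */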
static const struct drive_list_entry drive_whitelist[] = {
	{ "Micropolis 2112A"			, NULL },
	{ "CONNER CTMA 4000"			, NULL },
	{ "CONNER CTT8000-A"			, NULL },
	{ "ST34342A"				, NULL },
	{ NULL					, NULL }
};

static const struct drive_list_entry drive_blacklist[] = {
	{ "WDC AC11000H"			, NULL },
	{ "WDC AC22100H"			, NULL },
	{ "WDC AC32500H"			, NULL },
	{ "WDC AC33100H"			, NULL },
	{ "WDC AC31600H"			, NULL },
	{ "WDC AC32100H"			, "24.09P07" },
	{ "WDC AC23200L"			, "21.10N21" },
	{ "Compaq CRD-8241B"			, NULL },
	{ "CRD-8400B"				, NULL },
	{ "CRD-8480B"				, NULL },
	{ "CRD-8482B"				, NULL },
	{ "CRD-84"				, NULL },
	{ "SanDisk SDP3B"			, NULL },
	{ "SanDisk SDP3B-64"			, NULL },
	{ "SANYO CD-ROM CRD"			, NULL },
	{ "HITACHI CDR-8"			, NULL },
	{ "HITACHI CDR-8335"			, NULL },
	{ "HITACHI CDR-8435"			, NULL },
	{ "Toshiba CD-ROM XM-6202B"		, NULL },
	{ "TOSHIBA CD-ROM XM-1702BC"		, NULL },
	{ "CD-532E-A"				, NULL },
	{ "E-IDE CD-ROM CR-840"			, NULL },
	{ "CD-ROM Drive/F5A"			, NULL },
	{ "WPI CDD-820"				, NULL },
	{ "SAMSUNG CD-ROM SC-148C"		, NULL },
	{ "SAMSUNG CD-ROM SC"			, NULL },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM"	, NULL },
	{ "_NEC DV5800A"			, NULL },
	{ "SAMSUNG CD-ROM SN-124"		, "N001" },
	{ "Seagate STT20000A"			, NULL },
	{ "CD-ROM CDR_U200"			, "1.09" },
	{ NULL					, NULL }
};

/**
 *	ide_dma_intr	-	IDE DMA interrupt handler
 *	@drive: the drive the interrupt is for
 *
 *	Handle an interrupt completing a read/write DMA transfer on an
 *	IDE device
 */

ide_startstop_t ide_dma_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_cmd *cmd = &hwif->cmd;
	u8 stat = 0, dma_stat = 0;

	drive->waiting_for_dma = 0;
	dma_stat = hwif->dma_ops->dma_end(drive);
	ide_dma_unmap_sg(drive, cmd);
	stat = hwif->tp_ops->read_status(hwif);

	if (OK_STAT(stat, DRIVE_READY, drive->bad_wstat | ATA_DRQ)) {
		if (!dma_stat) {
			if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
				ide_finish_cmd(drive, cmd, stat);
			else
				ide_complete_rq(drive, 0,
						blk_rq_sectors(cmd->rq) << 9);
			return ide_stopped;
		}
		printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n",
			drive->name, __func__, dma_stat);
	}
	return ide_error(drive, "dma_intr", stat);
}

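/* Check whether the drive is on the DMA whitelist above. */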
int ide_dma_good_drive(ide_drive_t *drive)
{
	return ide_in_drive_list(drive->id, drive_whitelist);
}

/**
 *	ide_dma_map_sg	-	map IDE scatter gather for DMA I/O
 *	@drive: the drive to map the DMA table for
 *	@cmd: command
 *
 *	Perform the DMA mapping magic necessary to access the source or
 *	target buffers of a request via DMA.  The lower layers of the
 *	kernel provide the necessary cache management so that we can
 *	operate in a portable fashion.
 */

static int ide_dma_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;
	int i;

	if (cmd->tf_flags & IDE_TFLAG_WRITE)
		cmd->sg_dma_direction = DMA_TO_DEVICE;
	else
		cmd->sg_dma_direction = DMA_FROM_DEVICE;

	i = dma_map_sg(hwif->dev, sg, cmd->sg_nents, cmd->sg_dma_direction);
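	/*
	 * dma_map_sg() may coalesce adjacent entries: keep the mapped
	 * count in sg_nents and preserve the original count in
	 * orig_sg_nents for the later dma_unmap_sg() call.
	 */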
	if (i) {
		cmd->orig_sg_nents = cmd->sg_nents;
		cmd->sg_nents = i;
	}

	return i;
}
|
/**
 *	ide_dma_unmap_sg	-	clean up DMA mapping
 *	@drive: the drive to unmap
 *	@cmd: command
 *
 *	Teardown mappings after DMA has completed.  This must be called
 *	after the completion of each use of ide_dma_map_sg and before
 *	the next use of ide_dma_map_sg.  Failure to do so will cause
 *	an oops as only one mapping can be live for each target at a given
 *	time.
 */

void ide_dma_unmap_sg(ide_drive_t *drive, struct ide_cmd *cmd)
{
	ide_hwif_t *hwif = drive->hwif;

	dma_unmap_sg(hwif->dev, hwif->sg_table, cmd->orig_sg_nents,
		     cmd->sg_dma_direction);
}
EXPORT_SYMBOL_GPL(ide_dma_unmap_sg);

/**
 *	ide_dma_off_quietly	-	Generic DMA kill
 *	@drive: drive to control
 *
 *	Turn off the current DMA on this IDE controller.
 */

void ide_dma_off_quietly(ide_drive_t *drive)
{
	drive->dev_flags &= ~IDE_DFLAG_USING_DMA;
	ide_toggle_bounce(drive, 0);
	drive->hwif->dma_ops->dma_host_set(drive, 0);
}
EXPORT_SYMBOL(ide_dma_off_quietly);

/**
 *	ide_dma_off	-	disable DMA on a device
 *	@drive: drive to disable DMA on
 *
 *	Disable IDE DMA for a device on this IDE controller.
 *	Inform the user that DMA has been disabled.
 */

void ide_dma_off(ide_drive_t *drive)
{
	printk(KERN_INFO "%s: DMA disabled\n", drive->name);
	ide_dma_off_quietly(drive);
}
EXPORT_SYMBOL(ide_dma_off);

/**
 *	ide_dma_on	-	Enable DMA on a device
 *	@drive: drive to enable DMA on
 *
 *	Enable IDE DMA for a device on this IDE controller.
 */

void ide_dma_on(ide_drive_t *drive)
{
	drive->dev_flags |= IDE_DFLAG_USING_DMA;
	ide_toggle_bounce(drive, 1);
	drive->hwif->dma_ops->dma_host_set(drive, 1);
}

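/*
 * Consult the blacklist above; logs a warning and returns nonzero when
 * (U)DMA should be disabled for the drive.
 */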
int __ide_dma_bad_drive(ide_drive_t *drive)
{
	u16 *id = drive->id;

	int blacklist = ide_in_drive_list(id, drive_blacklist);
	if (blacklist) {
		printk(KERN_WARNING "%s: Disabling (U)DMA for %s (blacklisted)\n",
				    drive->name, (char *)&id[ATA_ID_PROD]);
		return blacklist;
	}
	return 0;
}
EXPORT_SYMBOL(__ide_dma_bad_drive);

static const u8 xfer_mode_bases[] = {
	XFER_UDMA_0,
	XFER_MW_DMA_0,
	XFER_SW_DMA_0,
};

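/*
 * Build the bitmask of DMA modes in the given base class (UDMA, MWDMA
 * or SWDMA) that both the drive and the host support, applying any
 * port-specific mode filters.
 */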
static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
{
	u16 *id = drive->id;
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_port_ops *port_ops = hwif->port_ops;
	unsigned int mask = 0;

	switch (base) {
	case XFER_UDMA_0:
		if ((id[ATA_ID_FIELD_VALID] & 4) == 0)
			break;
		mask = id[ATA_ID_UDMA_MODES];
		if (port_ops && port_ops->udma_filter)
			mask &= port_ops->udma_filter(drive);
		else
			mask &= hwif->ultra_mask;

		/*
		 * Avoid a false cable warning from eighty_ninty_three():
		 * UDMA3 and above (mask & 0x78) require an 80-wire cable.
		 * If none was detected, clip the mask to UDMA0-2 (0x07).
		 */
		if (req_mode > XFER_UDMA_2) {
			if ((mask & 0x78) && (eighty_ninty_three(drive) == 0))
				mask &= 0x07;
		}
		break;
	case XFER_MW_DMA_0:
		mask = id[ATA_ID_MWDMA_MODES];

		/* Also look for the CF specific MWDMA modes... */
		if (ata_id_is_cfa(id) && (id[ATA_ID_CFA_MODES] & 0x38)) {
			u8 mode = ((id[ATA_ID_CFA_MODES] & 0x38) >> 3) - 1;

			mask |= ((2 << mode) - 1) << 3;
		}
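		/*
		 * Worked example of the CF math above: a CFA device
		 * reporting 2 in bits 3-5 of ATA_ID_CFA_MODES gives
		 * mode = 1, and ((2 << 1) - 1) << 3 = 0x18 ORs in bits
		 * 3 and 4, i.e. MWDMA3 and MWDMA4.
		 */
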
		if (port_ops && port_ops->mdma_filter)
			mask &= port_ops->mdma_filter(drive);
		else
			mask &= hwif->mwdma_mask;
		break;
	case XFER_SW_DMA_0:
		mask = id[ATA_ID_SWDMA_MODES];
		if (!(mask & ATA_SWDMA2) && (id[ATA_ID_OLD_DMA_MODES] >> 8)) {
			u8 mode = id[ATA_ID_OLD_DMA_MODES] >> 8;

			/*
			 * if the mode is valid convert it to the mask
			 * (the maximum allowed mode is XFER_SW_DMA_2)
			 */
			if (mode <= 2)
				mask = (2 << mode) - 1;
		}
		mask &= hwif->swdma_mask;
		break;
	default:
		BUG();
		break;
	}

	return mask;
}

/**
 *	ide_find_dma_mode	-	compute DMA speed
 *	@drive: IDE device
 *	@req_mode: requested mode
 *
 *	Checks the drive/host capabilities and finds the speed to use for
 *	the DMA transfer.  The speed is then limited by the requested mode.
 *
 *	Returns 0 if the drive/host combination is incapable of DMA transfers
 *	or if the requested mode is not a DMA mode.
 */

u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode)
{
	ide_hwif_t *hwif = drive->hwif;
	unsigned int mask;
	int x, i;
	u8 mode = 0;

	if (drive->media != ide_disk) {
		if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
			return 0;
	}

	for (i = 0; i < ARRAY_SIZE(xfer_mode_bases); i++) {
		if (req_mode < xfer_mode_bases[i])
			continue;
		mask = ide_get_mode_mask(drive, xfer_mode_bases[i], req_mode);
		x = fls(mask) - 1;
		if (x >= 0) {
			mode = xfer_mode_bases[i] + x;
			break;
		}
	}

	if (hwif->chipset == ide_acorn && mode == 0) {
		/*
		 * is this correct?
		 */
		if (ide_dma_good_drive(drive) &&
		    drive->id[ATA_ID_EIDE_DMA_TIME] < 150)
			mode = XFER_MW_DMA_1;
	}

	mode = min(mode, req_mode);

	printk(KERN_INFO "%s: %s mode selected\n", drive->name,
	       mode ? ide_xfer_verbose(mode) : "no DMA");

	return mode;
}

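/*
 * Pick the fastest usable DMA mode for the drive and program it, unless
 * the host trusts the BIOS setup.  Returns nonzero when DMA was
 * configured.
 */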
static int ide_tune_dma(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 speed;

	if (ata_id_has_dma(drive->id) == 0 ||
	    (drive->dev_flags & IDE_DFLAG_NODMA))
		return 0;

	/* consult the list of known "bad" drives */
	if (__ide_dma_bad_drive(drive))
		return 0;

	if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
		return config_drive_for_dma(drive);

	speed = ide_max_dma_mode(drive);
	if (!speed)
		return 0;

	if (ide_set_dma_mode(drive, speed))
		return 0;

	return 1;
}

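/*
 * Returns 0 when DMA was tuned successfully and -1 when the drive
 * should fall back to PIO.
 */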
static int ide_dma_check(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;

	if (ide_tune_dma(drive))
		return 0;

	/* TODO: always do PIO fallback */
	if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
		return -1;

	ide_set_max_pio(drive);

	return -1;
}

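/*
 * Re-run DMA configuration for the drive, starting from a clean DMA-off
 * state.  Returns 0 on success.
 */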
int ide_set_dma(ide_drive_t *drive)
{
	int rc;

	/*
	 * Force DMAing for the beginning of the check.
	 * Some chipsets appear to do interesting
	 * things, if not checked and cleared.
	 *   PARANOIA!!!
	 */
	ide_dma_off_quietly(drive);

	rc = ide_dma_check(drive);
	if (rc)
		return rc;

	ide_dma_on(drive);

	return 0;
}

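/*
 * Called after UDMA CRC errors: step down one UDMA mode, or force PIO4
 * for non-UDMA modes, then re-enable DMA if the resulting speed is
 * still a DMA mode.
 */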
void ide_check_dma_crc(ide_drive_t *drive)
{
	u8 mode;

	ide_dma_off_quietly(drive);
	drive->crc_count = 0;
	mode = drive->current_speed;
	/*
	 * Don't try non Ultra-DMA modes without iCRC's.  Force the
	 * device to PIO and make the user enable SWDMA/MWDMA modes.
	 */
	if (mode > XFER_UDMA_0 && mode <= XFER_UDMA_7)
		mode--;
	else
		mode = XFER_PIO_4;
	ide_set_xfer_rate(drive, mode);
	if (drive->current_speed >= XFER_SW_DMA_0)
		ide_dma_on(drive);
}

void ide_dma_lost_irq(ide_drive_t *drive)
{
	printk(KERN_ERR "%s: DMA interrupt recovery\n", drive->name);
}
EXPORT_SYMBOL_GPL(ide_dma_lost_irq);

/*
 * un-busy the port etc, and clear any pending DMA status. we want to
 * retry the current request in pio mode instead of risking tossing it
 * all away
 */
ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
{
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_dma_ops *dma_ops = hwif->dma_ops;
	struct ide_cmd *cmd = &hwif->cmd;
	ide_startstop_t ret = ide_stopped;

	/*
	 * end current dma transaction
	 */

	if (error < 0) {
		printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
		drive->waiting_for_dma = 0;
		(void)dma_ops->dma_end(drive);
		ide_dma_unmap_sg(drive, cmd);
		ret = ide_error(drive, "dma timeout error",
				hwif->tp_ops->read_status(hwif));
	} else {
		printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
		if (dma_ops->dma_clear)
			dma_ops->dma_clear(drive);
		printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);
		if (dma_ops->dma_test_irq(drive) == 0) {
			ide_dump_status(drive, "DMA timeout",
					hwif->tp_ops->read_status(hwif));
			drive->waiting_for_dma = 0;
			(void)dma_ops->dma_end(drive);
			ide_dma_unmap_sg(drive, cmd);
		}
	}

	/*
	 * disable dma for now, but remember that we did so because of
	 * a timeout -- we'll reenable after we finish this next request
	 * (or rather the first chunk of it) in pio.
	 */
	drive->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY;
	drive->retry_pio++;
	ide_dma_off_quietly(drive);

	/*
	 * make sure request is sane
	 */
	if (hwif->rq)
		hwif->rq->errors = 0;
	return ret;
}

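/* Free the coherent PRD table set up by ide_allocate_dma_engine(). */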
void ide_release_dma_engine(ide_hwif_t *hwif)
{
	if (hwif->dmatable_cpu) {
		int prd_size = hwif->prd_max_nents * hwif->prd_ent_size;

		dma_free_coherent(hwif->dev, prd_size,
				  hwif->dmatable_cpu, hwif->dmatable_dma);
		hwif->dmatable_cpu = NULL;
	}
}
EXPORT_SYMBOL_GPL(ide_release_dma_engine);

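/*
 * Allocate a coherent PRD table for the port, using the default
 * PRD_ENTRIES/PRD_BYTES geometry when the host driver has not set its
 * own.
 */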
int ide_allocate_dma_engine(ide_hwif_t *hwif)
{
	int prd_size;

	if (hwif->prd_max_nents == 0)
		hwif->prd_max_nents = PRD_ENTRIES;
	if (hwif->prd_ent_size == 0)
		hwif->prd_ent_size = PRD_BYTES;

	prd_size = hwif->prd_max_nents * hwif->prd_ent_size;

	hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev, prd_size,
						&hwif->dmatable_dma,
						GFP_ATOMIC);
	if (hwif->dmatable_cpu == NULL) {
		printk(KERN_ERR "%s: unable to allocate PRD table\n",
			hwif->name);
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);

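/*
 * Map the command's scatterlist and hand it to the host's dma_setup()
 * hook.  Returns 0 when the transfer has been set up for DMA and 1 when
 * the caller should fall back to PIO.
 */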
int ide_dma_prepare(ide_drive_t *drive, struct ide_cmd *cmd)
{
	const struct ide_dma_ops *dma_ops = drive->hwif->dma_ops;

	if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0 ||
	    (dma_ops->dma_check && dma_ops->dma_check(drive, cmd)))
		goto out;
	ide_map_sg(drive, cmd);
	if (ide_dma_map_sg(drive, cmd) == 0)
		goto out_map;
	if (dma_ops->dma_setup(drive, cmd))
		goto out_dma_unmap;
	drive->waiting_for_dma = 1;
	return 0;
out_dma_unmap:
	ide_dma_unmap_sg(drive, cmd);
out_map:
	ide_map_sg(drive, cmd);
out:
	return 1;
}