Blame view
drivers/ata/pata_pdc202xx_old.c
9.63 KB
669a5db41
|
1 2 3 |
/* * pata_pdc202xx_old.c - Promise PDC202xx PATA for new ATA layer * (C) 2005 Red Hat Inc |
ab7716300
|
4 |
* Alan Cox <alan@lxorguk.ukuu.org.uk> |
a75032e87
|
5 |
* (C) 2007,2009,2010 Bartlomiej Zolnierkiewicz |
669a5db41
|
6 7 8 9 10 11 |
* * Based in part on linux/drivers/ide/pci/pdc202xx_old.c * * First cut with LBA48/ATAPI * * TODO: |
06b74dd28
|
12 |
* Channel interlock/reset on both required ? |
669a5db41
|
13 |
*/ |
85cd7251b
|
14 |
|
669a5db41
|
15 16 17 18 19 20 21 22 23 24 |
#include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_pdc202xx_old" |
06b74dd28
|
25 |
#define DRV_VERSION "0.4.3" |
669a5db41
|
26 |
|
a0fcdc025
|
27 |
static int pdc2026x_cable_detect(struct ata_port *ap) |
669a5db41
|
28 29 30 |
{ struct pci_dev *pdev = to_pci_dev(ap->host->dev); u16 cis; |
85cd7251b
|
31 |
|
669a5db41
|
32 33 |
pci_read_config_word(pdev, 0x50, &cis); if (cis & (1 << (10 + ap->port_no))) |
a0ac38f16
|
34 35 |
return ATA_CBL_PATA40; return ATA_CBL_PATA80; |
669a5db41
|
36 |
} |
750e519da
|
37 |
/*
 * pdc202xx_exec_command - issue an ATA command
 * @ap: ATA port
 * @tf: taskfile carrying the command byte
 *
 * Write the command register, then wait 400ns before returning.
 */
static void pdc202xx_exec_command(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X ", ap->print_id, tf->command);

	iowrite8(tf->command, ap->ioaddr.command_addr);
	ndelay(400);
}
669a5db41
|
46 |
/**
 *	pdc202xx_configure_piomode	-	set chip PIO timing
 *	@ap: ATA interface
 *	@adev: ATA device
 *	@pio: PIO mode (0-4)
 *
 *	Called to do the PIO mode setup. Our timing registers are shared
 *	so a pdc202xx_set_dmamode call will undo any work we do here and
 *	vice versa.
 */

static void pdc202xx_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	/* Per-device timing register pair: 8 bytes per port, 4 per device */
	int port = 0x60 + 8 * ap->port_no + 4 * adev->devno;
	/* High byte goes into register A, low byte into register B */
	static u16 pio_timing[5] = {
		0x0913, 0x050C , 0x0308, 0x0206, 0x0104
	};
	u8 r_ap, r_bp;

	pci_read_config_byte(pdev, port, &r_ap);
	pci_read_config_byte(pdev, port + 1, &r_bp);
	r_ap &= ~0x3F;	/* Preserve ERRDY_EN, SYNC_IN */
	r_bp &= ~0x1F;	/* Clear the low timing bits only */
	r_ap |= (pio_timing[pio] >> 8);
	r_bp |= (pio_timing[pio] & 0xFF);

	if (ata_pio_need_iordy(adev))
		r_ap |= 0x20;	/* IORDY enable */
	if (adev->class == ATA_DEV_ATA)
		r_ap |= 0x10;	/* FIFO enable */
	pci_write_config_byte(pdev, port, r_ap);
	pci_write_config_byte(pdev, port + 1, r_bp);
}
ada406c82
|
82 |
* pdc202xx_set_piomode - set initial PIO mode data |
669a5db41
|
83 84 85 86 87 88 |
* @ap: ATA interface * @adev: ATA device * * Called to do the PIO mode setup. Our timing registers are shared * but we want to set the PIO timing by default. */ |
85cd7251b
|
89 |
|
ada406c82
|
90 |
static void pdc202xx_set_piomode(struct ata_port *ap, struct ata_device *adev) |
669a5db41
|
91 |
{ |
ada406c82
|
92 |
pdc202xx_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0); |
669a5db41
|
93 94 95 |
} /** |
ada406c82
|
96 |
/**
 *	pdc202xx_set_dmamode	-	set DMA mode in chip
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Load DMA cycle times into the chip ready for a DMA transfer
 *	to occur.
 */

static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	/* Same per-device timing register block used by the PIO setup */
	int port = 0x60 + 8 * ap->port_no + 4 * adev->devno;
	/* UDMA0-5: [0] is ORed into register B, [1] into register C */
	static u8 udma_timing[6][2] = {
		{ 0x60, 0x03 },	/* 33 Mhz Clock */
		{ 0x40, 0x02 },
		{ 0x20, 0x01 },
		{ 0x40, 0x02 },	/* 66 Mhz Clock */
		{ 0x20, 0x01 },
		{ 0x20, 0x01 }
	};
	/* MWDMA0-2 timings, same register layout */
	static u8 mdma_timing[3][2] = {
		{ 0xe0, 0x0f },
		{ 0x60, 0x04 },
		{ 0x60, 0x03 },
	};
	u8 r_bp, r_cp;

	pci_read_config_byte(pdev, port + 1, &r_bp);
	pci_read_config_byte(pdev, port + 2, &r_cp);

	/* Clear only the DMA timing fields; the rest is PIO state */
	r_bp &= ~0xE0;
	r_cp &= ~0x0F;

	if (adev->dma_mode >= XFER_UDMA_0) {
		int speed = adev->dma_mode - XFER_UDMA_0;
		r_bp |= udma_timing[speed][0];
		r_cp |= udma_timing[speed][1];

	} else {
		int speed = adev->dma_mode - XFER_MW_DMA_0;
		r_bp |= mdma_timing[speed][0];
		r_cp |= mdma_timing[speed][1];
	}
	pci_write_config_byte(pdev, port + 1, r_bp);
	pci_write_config_byte(pdev, port + 2, r_cp);

}
06b74dd28
|
150 151 152 |
/**
 *	pdc2026x_bmdma_start		-	DMA engine begin
 *	@qc: ATA command
 *
 *	In UDMA3 or higher we have to clock switch for the duration of the
 *	DMA transfer sequence.
 *
 *	Note: The host lock held by the libata layer protects
 *	us from two channels both trying to set DMA bits at once
 */

static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *adev = qc->dev;
	struct ata_taskfile *tf = &qc->tf;
	/* Clock-select bit for this channel; both live in one register */
	int sel66 = ap->port_no ? 0x08: 0x02;

	/* Shared registers hang off port 0's BMDMA base */
	void __iomem *master = ap->host->ports[0]->ioaddr.bmdma_addr;
	void __iomem *clock = master + 0x11;
	void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no);

	u32 len;

	/* Check we keep host level locking here */
	if (adev->dma_mode > XFER_UDMA_2)
		iowrite8(ioread8(clock) | sel66, clock);
	else
		iowrite8(ioread8(clock) & ~sel66, clock);

	/* The DMA clocks may have been trashed by a reset. FIXME: make conditional
	   and move to qc_issue ? */
	pdc202xx_set_dmamode(ap, qc->dev);

	/* Cases the state machine will not complete correctly without help */
	if ((tf->flags & ATA_TFLAG_LBA48) || tf->protocol == ATAPI_PROT_DMA) {
		/* Transfer length in 16-bit words, plus a direction code
		   in the top byte (0x06 = write, 0x05 = read) */
		len = qc->nbytes / 2;

		if (tf->flags & ATA_TFLAG_WRITE)
			len |= 0x06000000;
		else
			len |= 0x05000000;

		iowrite32(len, atapi_reg);
	}

	/* Activate DMA */
	ata_bmdma_start(qc);
}
06b74dd28
|
199 200 201 |
/**
 *	pdc2026x_bmdma_stop		-	DMA engine stop
 *	@qc: ATA command
 *
 *	After a DMA completes we need to put the clock back to 33MHz for
 *	PIO timings.
 *
 *	Note: The host lock held by the libata layer protects
 *	us from two channels both trying to set DMA bits at once
 */

static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *adev = qc->dev;
	struct ata_taskfile *tf = &qc->tf;

	int sel66 = ap->port_no ? 0x08: 0x02;
	/* The clock bits are in the same register for both channels */
	void __iomem *master = ap->host->ports[0]->ioaddr.bmdma_addr;
	void __iomem *clock = master + 0x11;
	void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no);

	/* Cases the state machine will not complete correctly */
	if (tf->protocol == ATAPI_PROT_DMA || (tf->flags & ATA_TFLAG_LBA48)) {
		iowrite32(0, atapi_reg);
		iowrite8(ioread8(clock) & ~sel66, clock);
	}
	/* Flip back to 33Mhz for PIO */
	if (adev->dma_mode > XFER_UDMA_2)
		iowrite8(ioread8(clock) & ~sel66, clock);
	ata_bmdma_stop(qc);
	/* Restore PIO timings: the shared registers were rewritten by
	   pdc202xx_set_dmamode in bmdma_start */
	pdc202xx_set_piomode(ap, adev);
}
669a5db41
|
230 231 232 233 234 235 |
/**
 *	pdc2026x_dev_config	-	device setup hook
 *	@adev: newly found device
 *
 *	Perform chip specific early setup. We need to lock the transfer
 *	sizes to 8bit to avoid making the state engine on the 2026x cards
 *	barf.
 */

static void pdc2026x_dev_config(struct ata_device *adev)
{
	adev->max_sectors = 256;
}
36906d9be
|
241 242 243 244 245 246 247 248 |
/*
 * pdc2026x_port_start - port setup hook
 * @ap: ATA port being started
 *
 * Turn on BMDMA burst mode (bit 0 of register 0x1f) before handing
 * off to the generic BMDMA port start.
 */
static int pdc2026x_port_start(struct ata_port *ap)
{
	void __iomem *bmdma = ap->ioaddr.bmdma_addr;
	if (bmdma) {
		/* Enable burst mode */
		u8 burst = ioread8(bmdma + 0x1f);
		iowrite8(burst | 0x01, bmdma + 0x1f);
	}
	return ata_bmdma_port_start(ap);
}
aa8f2371c
|
251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 |
/**
 *	pdc2026x_check_atapi_dma - Check whether ATAPI DMA can be supported for this command
 *	@qc: Metadata associated with taskfile to check
 *
 *	Just say no - not supported on older Promise.
 *
 *	LOCKING:
 *	None (inherited from caller).
 *
 *	RETURNS: 0 when ATAPI DMA can be used
 *		 1 otherwise
 */
static int pdc2026x_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return 1;
}
ada406c82
|
268 |
static struct scsi_host_template pdc202xx_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

/* Ops for the older PDC2024x parts: fixed 40-wire cable reporting,
   shared PIO/DMA timing setup, standard BMDMA otherwise */
static struct ata_port_operations pdc2024x_port_ops = {
	.inherits		= &ata_bmdma_port_ops,

	.cable_detect		= ata_cable_40wire,
	.set_piomode		= pdc202xx_set_piomode,
	.set_dmamode		= pdc202xx_set_dmamode,

	.sff_exec_command	= pdc202xx_exec_command,
};
669a5db41
|
281 282 |
/* PDC2026x ops: inherit from 2024x and add cable detection, the
   clock-switching BMDMA start/stop pair, burst-mode port start and
   the 8-bit transfer-size device quirk */
static struct ata_port_operations pdc2026x_port_ops = {
	.inherits		= &pdc2024x_port_ops,

	.check_atapi_dma	= pdc2026x_check_atapi_dma,
	.bmdma_start		= pdc2026x_bmdma_start,
	.bmdma_stop		= pdc2026x_bmdma_stop,

	.cable_detect		= pdc2026x_cable_detect,
	.dev_config		= pdc2026x_dev_config,

	.port_start		= pdc2026x_port_start,

	.sff_exec_command	= pdc202xx_exec_command,
};
669a5db41
|
296 |
|
ada406c82
|
297 |
/*
 * pdc202xx_init_one - PCI probe
 * @dev: PCI device found
 * @id: matching table entry; id->driver_data indexes info[] below
 *
 * Select the port info for the matched chip generation, skip PDC20265
 * functions that sit behind a Promise I2O RAID (Intel i960 bridge),
 * then hand off to the generic BMDMA init.
 */
static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	/* Indexed by id->driver_data:
	   0 = PDC20246 (UDMA2), 1 = PDC20262/20263 (UDMA4),
	   2 = PDC20265/20267 (UDMA5) -- see the pdc202xx[] table */
	static const struct ata_port_info info[3] = {
		{
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = ATA_PIO4,
			.mwdma_mask = ATA_MWDMA2,
			.udma_mask = ATA_UDMA2,
			.port_ops = &pdc2024x_port_ops
		},
		{
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = ATA_PIO4,
			.mwdma_mask = ATA_MWDMA2,
			.udma_mask = ATA_UDMA4,
			.port_ops = &pdc2026x_port_ops
		},
		{
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = ATA_PIO4,
			.mwdma_mask = ATA_MWDMA2,
			.udma_mask = ATA_UDMA5,
			.port_ops = &pdc2026x_port_ops
		}

	};
	const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL };

	if (dev->device == PCI_DEVICE_ID_PROMISE_20265) {
		struct pci_dev *bridge = dev->bus->self;
		/* Don't grab anything behind a Promise I2O RAID */
		if (bridge && bridge->vendor == PCI_VENDOR_ID_INTEL) {
			if (bridge->device == PCI_DEVICE_ID_INTEL_I960)
				return -ENODEV;
			if (bridge->device == PCI_DEVICE_ID_INTEL_I960RM)
				return -ENODEV;
		}
	}
	return ata_pci_bmdma_init_one(dev, ppi, &pdc202xx_sht, NULL, 0);
}
ada406c82
|
337 |
/* PCI IDs; driver_data selects the ata_port_info entry in
   pdc202xx_init_one() */
static const struct pci_device_id pdc202xx[] = {
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20246), 0 },
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20262), 1 },
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20263), 1 },
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20265), 2 },
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20267), 2 },

	{ },
};
ada406c82
|
346 |
static struct pci_driver pdc202xx_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= pdc202xx,
	.probe 		= pdc202xx_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	/* Generic libata PCI suspend/resume handles this hardware */
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
#endif
};
ada406c82
|
356 |
/* Module load: register the PCI driver */
static int __init pdc202xx_init(void)
{
	return pci_register_driver(&pdc202xx_pci_driver);
}
ada406c82
|
360 |
/* Module unload: unregister the PCI driver */
static void __exit pdc202xx_exit(void)
{
	pci_unregister_driver(&pdc202xx_pci_driver);
}
669a5db41
|
364 365 366 |
/* Module metadata */
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Promise 2024x and 20262-20267");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pdc202xx);
MODULE_VERSION(DRV_VERSION);

module_init(pdc202xx_init);
module_exit(pdc202xx_exit);