Commit df423dc7f2a801b9a45d7c501a8eb5c529455ea1

Authored by Linus Torvalds

Merge branch 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jgarzik/libata-dev

* 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jgarzik/libata-dev:
  libata-sff: Reenable Port Multiplier after libata-sff remodeling.
  libata: skip EH autopsy and recovery during suspend
  ahci: AHCI and RAID mode SATA patch for Intel Patsburg DeviceIDs
  ata_piix: IDE Mode SATA patch for Intel Patsburg DeviceIDs
  libata,pata_via: revert ata_wait_idle() removal from ata_sff/via_tf_load()
  ahci: fix hang on failed softreset
  pata_artop: Fix device ID parity check

Showing 10 changed files. Only drivers/ata/ahci.c is reproduced below (the listing is truncated); lines added by this merge are prefixed with '+'.

/*
 *  ahci.c - AHCI SATA support
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                  Please ALWAYS copy linux-ide@vger.kernel.org
 *                  on emails.
 *
 *  Copyright 2004-2005 Red Hat, Inc.
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  AHCI hardware documentation:
 *  http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
 *  http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/dmi.h>
#include <linux/gfp.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include "ahci.h"

#define DRV_NAME        "ahci"
#define DRV_VERSION     "3.0"

enum {
        AHCI_PCI_BAR            = 5,
};

enum board_ids {
        /* board IDs by feature in alphabetical order */
        board_ahci,
        board_ahci_ign_iferr,
        board_ahci_nosntf,
        board_ahci_yes_fbs,

        /* board IDs for specific chipsets in alphabetical order */
        board_ahci_mcp65,
        board_ahci_mcp77,
        board_ahci_mcp89,
        board_ahci_mv,
        board_ahci_sb600,
        board_ahci_sb700,       /* for SB700 and SB800 */
        board_ahci_vt8251,

        /* aliases */
        board_ahci_mcp_linux    = board_ahci_mcp65,
        board_ahci_mcp67        = board_ahci_mcp65,
        board_ahci_mcp73        = board_ahci_mcp65,
        board_ahci_mcp79        = board_ahci_mcp77,
};

static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
                                unsigned long deadline);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
                                 unsigned long deadline);
static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
                                unsigned long deadline);
#ifdef CONFIG_PM
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
static int ahci_pci_device_resume(struct pci_dev *pdev);
#endif

static struct ata_port_operations ahci_vt8251_ops = {
        .inherits               = &ahci_ops,
        .hardreset              = ahci_vt8251_hardreset,
};

static struct ata_port_operations ahci_p5wdh_ops = {
        .inherits               = &ahci_ops,
        .hardreset              = ahci_p5wdh_hardreset,
};

static struct ata_port_operations ahci_sb600_ops = {
        .inherits               = &ahci_ops,
        .softreset              = ahci_sb600_softreset,
        .pmp_softreset          = ahci_sb600_softreset,
};

#define AHCI_HFLAGS(flags)      .private_data = (void *)(flags)

static const struct ata_port_info ahci_port_info[] = {
        /* by features */
        [board_ahci] =
        {
                .flags          = AHCI_FLAG_COMMON,
                .pio_mask       = ATA_PIO4,
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &ahci_ops,
        },
        [board_ahci_ign_iferr] =
        {
                AHCI_HFLAGS     (AHCI_HFLAG_IGN_IRQ_IF_ERR),
                .flags          = AHCI_FLAG_COMMON,
                .pio_mask       = ATA_PIO4,
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &ahci_ops,
        },
        [board_ahci_nosntf] =
        {
                AHCI_HFLAGS     (AHCI_HFLAG_NO_SNTF),
                .flags          = AHCI_FLAG_COMMON,
                .pio_mask       = ATA_PIO4,
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &ahci_ops,
        },
        [board_ahci_yes_fbs] =
        {
                AHCI_HFLAGS     (AHCI_HFLAG_YES_FBS),
                .flags          = AHCI_FLAG_COMMON,
                .pio_mask       = ATA_PIO4,
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &ahci_ops,
        },
        /* by chipsets */
        [board_ahci_mcp65] =
        {
                AHCI_HFLAGS     (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP |
                                 AHCI_HFLAG_YES_NCQ),
                .flags          = AHCI_FLAG_COMMON,
                .pio_mask       = ATA_PIO4,
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &ahci_ops,
        },
        [board_ahci_mcp77] =
        {
                AHCI_HFLAGS     (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP),
                .flags          = AHCI_FLAG_COMMON,
                .pio_mask       = ATA_PIO4,
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &ahci_ops,
        },
        [board_ahci_mcp89] =
        {
                AHCI_HFLAGS     (AHCI_HFLAG_NO_FPDMA_AA),
                .flags          = AHCI_FLAG_COMMON,
                .pio_mask       = ATA_PIO4,
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &ahci_ops,
        },
        [board_ahci_mv] =
        {
                AHCI_HFLAGS     (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
                                 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
                .pio_mask       = ATA_PIO4,
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &ahci_ops,
        },
        [board_ahci_sb600] =
        {
                AHCI_HFLAGS     (AHCI_HFLAG_IGN_SERR_INTERNAL |
                                 AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 |
                                 AHCI_HFLAG_32BIT_ONLY),
                .flags          = AHCI_FLAG_COMMON,
                .pio_mask       = ATA_PIO4,
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &ahci_sb600_ops,
        },
        [board_ahci_sb700] =    /* for SB700 and SB800 */
        {
                AHCI_HFLAGS     (AHCI_HFLAG_IGN_SERR_INTERNAL),
                .flags          = AHCI_FLAG_COMMON,
                .pio_mask       = ATA_PIO4,
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &ahci_sb600_ops,
        },
        [board_ahci_vt8251] =
        {
                AHCI_HFLAGS     (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
                .flags          = AHCI_FLAG_COMMON,
                .pio_mask       = ATA_PIO4,
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &ahci_vt8251_ops,
        },
};

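A note on AHCI_HFLAGS(): struct ata_port_info has no field for driver-private host flags, so the macro smuggles them through the generic .private_data pointer. A minimal sketch of the consuming side, assuming this kernel's ahci_init_one() probe path (not part of the hunk shown here):

        /* probe side: recover the per-board hflags stashed via AHCI_HFLAGS() */
        struct ata_port_info pi = ahci_port_info[ent->driver_data];
        struct ahci_host_priv *hpriv;

        hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
        if (!hpriv)
                return -ENOMEM;
        hpriv->flags |= (unsigned long)pi.private_data; /* e.g. AHCI_HFLAG_NO_PMP */
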
static const struct pci_device_id ahci_pci_tbl[] = {
        /* Intel */
        { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
        { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
        { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
        { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
        { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
        { PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
        { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
        { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
        { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
        { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
        { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
        { PCI_VDEVICE(INTEL, 0x2822), board_ahci_nosntf }, /* ICH8 */
        { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
        { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
        { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
        { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
        { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
        { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
        { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
        { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
        { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
        { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
        { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
        { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
        { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
        { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
        { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
        { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
        { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
        { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
        { PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */
        { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
        { PCI_VDEVICE(INTEL, 0x3b22), board_ahci }, /* PCH AHCI */
        { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
        { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
        { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
        { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */
        { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
        { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
        { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
        { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
        { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
        { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
        { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
        { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
        { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
+       { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
+       { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
+       { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */

        /* JMicron 360/1/3/5/6, match class to avoid IDE function */
        { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
          PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },

        /* ATI */
        { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
        { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
        { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
        { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
        { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
        { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
        { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */

        /* AMD */
        { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
        /* AMD is using RAID class only for ahci controllers */
        { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
          PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },

        /* VIA */
        { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
        { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */

        /* NVIDIA */
        { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 },      /* MCP65 */
        { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 },      /* MCP65 */
        { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 },      /* MCP65 */
        { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 },      /* MCP65 */
        { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 },      /* MCP65 */
        { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 },      /* MCP65 */
        { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 },      /* MCP65 */
        { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 },      /* MCP65 */
        { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_mcp67 },      /* MCP67 */
        { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_mcp67 },      /* MCP67 */
        { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_mcp67 },      /* MCP67 */
        { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_mcp67 },      /* MCP67 */
        { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_mcp67 },      /* MCP67 */
        { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_mcp67 },      /* MCP67 */
        { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_mcp67 },      /* MCP67 */
        { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_mcp67 },      /* MCP67 */
        { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_mcp67 },      /* MCP67 */
        { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_mcp67 },      /* MCP67 */
        { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_mcp67 },      /* MCP67 */
        { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_mcp67 },      /* MCP67 */
        { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_mcp_linux },  /* Linux ID */
        { PCI_VDEVICE(NVIDIA, 0x0581), board_ahci_mcp_linux },  /* Linux ID */
        { PCI_VDEVICE(NVIDIA, 0x0582), board_ahci_mcp_linux },  /* Linux ID */
        { PCI_VDEVICE(NVIDIA, 0x0583), board_ahci_mcp_linux },  /* Linux ID */
        { PCI_VDEVICE(NVIDIA, 0x0584), board_ahci_mcp_linux },  /* Linux ID */
        { PCI_VDEVICE(NVIDIA, 0x0585), board_ahci_mcp_linux },  /* Linux ID */
        { PCI_VDEVICE(NVIDIA, 0x0586), board_ahci_mcp_linux },  /* Linux ID */
        { PCI_VDEVICE(NVIDIA, 0x0587), board_ahci_mcp_linux },  /* Linux ID */
        { PCI_VDEVICE(NVIDIA, 0x0588), board_ahci_mcp_linux },  /* Linux ID */
        { PCI_VDEVICE(NVIDIA, 0x0589), board_ahci_mcp_linux },  /* Linux ID */
        { PCI_VDEVICE(NVIDIA, 0x058a), board_ahci_mcp_linux },  /* Linux ID */
        { PCI_VDEVICE(NVIDIA, 0x058b), board_ahci_mcp_linux },  /* Linux ID */
        { PCI_VDEVICE(NVIDIA, 0x058c), board_ahci_mcp_linux },  /* Linux ID */
        { PCI_VDEVICE(NVIDIA, 0x058d), board_ahci_mcp_linux },  /* Linux ID */
        { PCI_VDEVICE(NVIDIA, 0x058e), board_ahci_mcp_linux },  /* Linux ID */
        { PCI_VDEVICE(NVIDIA, 0x058f), board_ahci_mcp_linux },  /* Linux ID */
        { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_mcp73 },      /* MCP73 */
        { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_mcp73 },      /* MCP73 */
        { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_mcp73 },      /* MCP73 */
        { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_mcp73 },      /* MCP73 */
        { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_mcp73 },      /* MCP73 */
        { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_mcp73 },      /* MCP73 */
        { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_mcp73 },      /* MCP73 */
        { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_mcp73 },      /* MCP73 */
        { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_mcp73 },      /* MCP73 */
        { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_mcp73 },      /* MCP73 */
        { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_mcp73 },      /* MCP73 */
        { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_mcp73 },      /* MCP73 */
        { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci_mcp77 },      /* MCP77 */
        { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci_mcp77 },      /* MCP77 */
        { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci_mcp77 },      /* MCP77 */
        { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci_mcp77 },      /* MCP77 */
        { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci_mcp77 },      /* MCP77 */
        { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci_mcp77 },      /* MCP77 */
        { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci_mcp77 },      /* MCP77 */
        { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci_mcp77 },      /* MCP77 */
        { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci_mcp77 },      /* MCP77 */
        { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci_mcp77 },      /* MCP77 */
        { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci_mcp77 },      /* MCP77 */
        { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci_mcp77 },      /* MCP77 */
        { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci_mcp79 },      /* MCP79 */
        { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci_mcp79 },      /* MCP79 */
        { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci_mcp79 },      /* MCP79 */
        { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci_mcp79 },      /* MCP79 */
        { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci_mcp79 },      /* MCP79 */
        { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci_mcp79 },      /* MCP79 */
        { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci_mcp79 },      /* MCP79 */
        { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci_mcp79 },      /* MCP79 */
        { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci_mcp79 },      /* MCP79 */
        { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci_mcp79 },      /* MCP79 */
        { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci_mcp79 },      /* MCP79 */
        { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci_mcp79 },      /* MCP79 */
        { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci_mcp89 },      /* MCP89 */
        { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci_mcp89 },      /* MCP89 */
        { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci_mcp89 },      /* MCP89 */
        { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci_mcp89 },      /* MCP89 */
        { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci_mcp89 },      /* MCP89 */
        { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci_mcp89 },      /* MCP89 */
        { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci_mcp89 },      /* MCP89 */
        { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci_mcp89 },      /* MCP89 */
        { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci_mcp89 },      /* MCP89 */
        { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci_mcp89 },      /* MCP89 */
        { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci_mcp89 },      /* MCP89 */
        { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci_mcp89 },      /* MCP89 */

        /* SiS */
        { PCI_VDEVICE(SI, 0x1184), board_ahci },                /* SiS 966 */
        { PCI_VDEVICE(SI, 0x1185), board_ahci },                /* SiS 968 */
        { PCI_VDEVICE(SI, 0x0186), board_ahci },                /* SiS 968 */

        /* Marvell */
        { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv },        /* 6145 */
        { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv },        /* 6121 */
        { PCI_DEVICE(0x1b4b, 0x9123),
          .driver_data = board_ahci_yes_fbs },                  /* 88se9128 */

        /* Promise */
        { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci },           /* PDC42819 */

        /* Generic, PCI class code for AHCI */
        { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
          PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },

        { }     /* terminate list */
};
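
The only change this merge makes to this file is the three Patsburg (PBG) entries marked '+' above, matching the "ahci: AHCI and RAID mode SATA patch for Intel Patsburg DeviceIDs" commit in the shortlog. Each PCI_VDEVICE(VENDOR, dev) entry is shorthand for a vendor/device match with the subsystem IDs wildcarded; the first new line is roughly equivalent to:

        { .vendor = PCI_VENDOR_ID_INTEL, .device = 0x1d02,
          .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
          .driver_data = board_ahci }, /* PBG AHCI */

The RAID-mode IDs (0x1d04, 0x1d06) map to plain board_ahci as well: on these parts RAID is a firmware/metadata personality, and the HBA is programmed as a standard AHCI controller either way.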

static struct pci_driver ahci_pci_driver = {
        .name                   = DRV_NAME,
        .id_table               = ahci_pci_tbl,
        .probe                  = ahci_init_one,
        .remove                 = ata_pci_remove_one,
#ifdef CONFIG_PM
        .suspend                = ahci_pci_device_suspend,
        .resume                 = ahci_pci_device_resume,
#endif
};

#if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
static int marvell_enable;
#else
static int marvell_enable = 1;
#endif
module_param(marvell_enable, int, 0644);
MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");

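marvell_enable defaults to 0 only when the pata_marvell driver is also configured, so the PATA half of Marvell 6121/6145 parts is left to that driver; otherwise it defaults to 1 and ahci claims the device. It can be flipped at boot with ahci.marvell_enable=<0|1>. The consumer lives in ahci_init_one(), outside the portion shown here; a sketch, assuming this kernel's probe path:

        /* sketch of the probe-time check (not part of this hunk):
         * refuse the device so pata_marvell can bind instead */
        if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
                return -ENODEV;
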

static void ahci_pci_save_initial_config(struct pci_dev *pdev,
                                         struct ahci_host_priv *hpriv)
{
        unsigned int force_port_map = 0;
        unsigned int mask_port_map = 0;

        if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
                dev_info(&pdev->dev, "JMB361 has only one port\n");
                force_port_map = 1;
        }

        /*
         * Temporary Marvell 6145 hack: PATA port presence
         * is asserted through the standard AHCI port
         * presence register, as bit 4 (counting from 0)
         */
        if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
                if (pdev->device == 0x6121)
                        mask_port_map = 0x3;
                else
                        mask_port_map = 0xf;
                dev_info(&pdev->dev,
                         "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
        }

        ahci_save_initial_config(&pdev->dev, hpriv, force_port_map,
                                 mask_port_map);
}

static int ahci_pci_reset_controller(struct ata_host *host)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);

        ahci_reset_controller(host);

        if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
                struct ahci_host_priv *hpriv = host->private_data;
                u16 tmp16;

                /* configure PCS */
                pci_read_config_word(pdev, 0x92, &tmp16);
                if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
                        tmp16 |= hpriv->port_map;
                        pci_write_config_word(pdev, 0x92, tmp16);
                }
        }

        return 0;
}

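A note on the "configure PCS" step: on Intel AHCI parts, config-space offset 0x92 is the Port Control and Status (PCS) register, whose low bits are per-port enable bits. ORing hpriv->port_map into it after a controller reset ensures every port the HBA reports as implemented is actually enabled, instead of trusting whatever state the BIOS left behind.
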
static void ahci_pci_init_controller(struct ata_host *host)
{
        struct ahci_host_priv *hpriv = host->private_data;
        struct pci_dev *pdev = to_pci_dev(host->dev);
        void __iomem *port_mmio;
        u32 tmp;
        int mv;

        if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
                if (pdev->device == 0x6121)
                        mv = 2;
                else
                        mv = 4;
                port_mmio = __ahci_port_base(host, mv);

                writel(0, port_mmio + PORT_IRQ_MASK);

                /* clear port IRQ */
                tmp = readl(port_mmio + PORT_IRQ_STAT);
                VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
                if (tmp)
                        writel(tmp, port_mmio + PORT_IRQ_STAT);
        }

        ahci_init_controller(host);
}

static int ahci_sb600_check_ready(struct ata_link *link)
{
        void __iomem *port_mmio = ahci_port_base(link->ap);
        u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
        u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);

        /*
         * There is no need to check TFDATA if BAD PMP is found due to HW bug,
         * which can save timeout delay.
         */
        if (irq_status & PORT_IRQ_BAD_PMP)
                return -EIO;

        return ata_check_ready(status);
}

static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
                                unsigned long deadline)
{
        struct ata_port *ap = link->ap;
        void __iomem *port_mmio = ahci_port_base(ap);
        int pmp = sata_srst_pmp(link);
        int rc;
        u32 irq_sts;

        DPRINTK("ENTER\n");

        rc = ahci_do_softreset(link, class, pmp, deadline,
                               ahci_sb600_check_ready);

        /*
         * Soft reset fails on some ATI chips with IPMS set when PMP support
         * is enabled but a SATA HDD/ODD rather than a port multiplier is
         * connected to the port; in that case, retry the soft reset
         * against port 0.
         */
        if (rc == -EIO) {
                irq_sts = readl(port_mmio + PORT_IRQ_STAT);
                if (irq_sts & PORT_IRQ_BAD_PMP) {
                        ata_link_printk(link, KERN_WARNING,
                                        "applying SB600 PMP SRST workaround "
                                        "and retrying\n");
                        rc = ahci_do_softreset(link, class, 0, deadline,
                                               ahci_check_ready);
                }
        }

        return rc;
}

static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
                                 unsigned long deadline)
{
        struct ata_port *ap = link->ap;
        bool online;
        int rc;

        DPRINTK("ENTER\n");

        ahci_stop_engine(ap);

        rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
                                 deadline, &online, NULL);

        ahci_start_engine(ap);

        DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);

        /* vt8251 doesn't clear BSY on signature FIS reception,
         * request follow-up softreset.
         */
        return online ? -EAGAIN : rc;
}

static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
                                unsigned long deadline)
{
        struct ata_port *ap = link->ap;
        struct ahci_port_priv *pp = ap->private_data;
        u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
        struct ata_taskfile tf;
        bool online;
        int rc;

        ahci_stop_engine(ap);

        /* clear D2H reception area to properly wait for D2H FIS */
        ata_tf_init(link->device, &tf);
        tf.command = 0x80;
        ata_tf_to_fis(&tf, 0, 0, d2h_fis);

        rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
                                 deadline, &online, NULL);

        ahci_start_engine(ap);

        /* The pseudo configuration device on the SIMG4726 attached to
         * the ASUS P5W-DH Deluxe doesn't send a signature FIS after
         * hardreset if no device is attached to the first downstream
         * port && the pseudo device locks up on SRST w/ PMP==0.  To
         * work around this, wait for !BSY only briefly.  If BSY isn't
         * cleared, perform CLO and proceed to IDENTIFY (achieved by
         * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
         *
         * Wait for two seconds.  Devices attached to the downstream port
         * which can't process the following IDENTIFY after this will
         * have to be reset again.  For most cases, this should
         * suffice while making probing snappish enough.
         */
        if (online) {
                rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
                                          ahci_check_ready);
                if (rc)
                        ahci_kick_engine(ap);
        }
        return rc;
}

#ifdef CONFIG_PM
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
        struct ata_host *host = dev_get_drvdata(&pdev->dev);
        struct ahci_host_priv *hpriv = host->private_data;
        void __iomem *mmio = hpriv->mmio;
        u32 ctl;

        if (mesg.event & PM_EVENT_SUSPEND &&
            hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
                dev_printk(KERN_ERR, &pdev->dev,
                           "BIOS update required for suspend/resume\n");
                return -EIO;
        }

        if (mesg.event & PM_EVENT_SLEEP) {
                /* AHCI spec rev1.1 section 8.3.3:
                 * Software must disable interrupts prior to requesting a
                 * transition of the HBA to D3 state.
                 */
                ctl = readl(mmio + HOST_CTL);
                ctl &= ~HOST_IRQ_EN;
                writel(ctl, mmio + HOST_CTL);
                readl(mmio + HOST_CTL); /* flush */
        }

        return ata_pci_device_suspend(pdev, mesg);
}

static int ahci_pci_device_resume(struct pci_dev *pdev)
{
        struct ata_host *host = dev_get_drvdata(&pdev->dev);
        int rc;

        rc = ata_pci_device_do_resume(pdev);
        if (rc)
                return rc;

        if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
                rc = ahci_pci_reset_controller(host);
                if (rc)
                        return rc;

                ahci_pci_init_controller(host);
        }

        ata_host_resume(host);

        return 0;
}
#endif

static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
{
        int rc;

        if (using_dac &&
            !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
                if (rc) {
                        rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                        if (rc) {
                                dev_printk(KERN_ERR, &pdev->dev,
                                           "64-bit DMA enable failed\n");
                                return rc;
                        }
                }
        } else {
                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (rc) {
                        dev_printk(KERN_ERR, &pdev->dev,
                                   "32-bit DMA enable failed\n");
                        return rc;
                }
                rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                if (rc) {
                        dev_printk(KERN_ERR, &pdev->dev,
                                   "32-bit consistent DMA enable failed\n");
                        return rc;
                }
        }
        return 0;
}

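The ladder above is the old split-mask idiom: the streaming and coherent DMA masks are negotiated separately, and a 64-bit streaming mask may legitimately end up paired with a 32-bit coherent mask. On later kernels, code that doesn't need that split is usually condensed to a single call; a sketch (not part of this file, assuming the modern DMA API):

        /* sets both the streaming and coherent masks at once */
        if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
                rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
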
static void ahci_pci_print_info(struct ata_host *host)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        u16 cc;
        const char *scc_s;

        pci_read_config_word(pdev, 0x0a, &cc);
        if (cc == PCI_CLASS_STORAGE_IDE)
                scc_s = "IDE";
        else if (cc == PCI_CLASS_STORAGE_SATA)
                scc_s = "SATA";
        else if (cc == PCI_CLASS_STORAGE_RAID)
                scc_s = "RAID";
        else
                scc_s = "unknown";

        ahci_print_info(host, scc_s);
}

/* On the ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
 * hardwired to the on-board SIMG 4726.  The chipset is ICH8 and doesn't
 * support PMP, and the 4726 either directly exports the device
 * attached to its first downstream port or acts as a hardware storage
 * controller and emulates a single ATA device (can be RAID 0/1 or some
 * other configuration).
 *
 * When there's no device attached to the first downstream port of the
 * 4726, "Config Disk" appears, which is a pseudo ATA device used to
 * configure the 4726.  However, the ATA emulation of the device is very
 * lame.  It doesn't send a signature D2H Reg FIS after the initial
 * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues.
 *
 * The following function works around the problem by always using
 * hardreset on the port and not depending on receiving a signature FIS
 * afterward.  If a signature FIS isn't received soon, ATA class is
 * assumed without a follow-up softreset.
 */
static void ahci_p5wdh_workaround(struct ata_host *host)
{
        static struct dmi_system_id sysids[] = {
                {
                        .ident = "P5W DH Deluxe",
                        .matches = {
                                DMI_MATCH(DMI_SYS_VENDOR,
                                          "ASUSTEK COMPUTER INC"),
                                DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
                        },
                },
                { }
        };
        struct pci_dev *pdev = to_pci_dev(host->dev);

        if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
            dmi_check_system(sysids)) {
                struct ata_port *ap = host->ports[1];

                dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
                           "Deluxe on-board SIMG4726 workaround\n");

                ap->ops = &ahci_p5wdh_ops;
                ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
        }
}

750 /* only some SB600 ahci controllers can do 64bit DMA */ 753 /* only some SB600 ahci controllers can do 64bit DMA */
751 static bool ahci_sb600_enable_64bit(struct pci_dev *pdev) 754 static bool ahci_sb600_enable_64bit(struct pci_dev *pdev)
752 { 755 {
753 static const struct dmi_system_id sysids[] = { 756 static const struct dmi_system_id sysids[] = {
754 /* 757 /*
755 * The oldest version known to be broken is 0901 and 758 * The oldest version known to be broken is 0901 and
756 * working is 1501 which was released on 2007-10-26. 759 * working is 1501 which was released on 2007-10-26.
757 * Enable 64bit DMA on 1501 and anything newer. 760 * Enable 64bit DMA on 1501 and anything newer.
758 * 761 *
759 * Please read bko#9412 for more info. 762 * Please read bko#9412 for more info.
760 */ 763 */
761 { 764 {
762 .ident = "ASUS M2A-VM", 765 .ident = "ASUS M2A-VM",
763 .matches = { 766 .matches = {
764 DMI_MATCH(DMI_BOARD_VENDOR, 767 DMI_MATCH(DMI_BOARD_VENDOR,
765 "ASUSTeK Computer INC."), 768 "ASUSTeK Computer INC."),
766 DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"), 769 DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"),
767 }, 770 },
768 .driver_data = "20071026", /* yyyymmdd */ 771 .driver_data = "20071026", /* yyyymmdd */
769 }, 772 },
770 /* 773 /*
771 * All BIOS versions for the MSI K9A2 Platinum (MS-7376) 774 * All BIOS versions for the MSI K9A2 Platinum (MS-7376)
772 * support 64bit DMA. 775 * support 64bit DMA.
773 * 776 *
774 * BIOS versions earlier than 1.5 had the Manufacturer DMI 777 * BIOS versions earlier than 1.5 had the Manufacturer DMI
775 * fields as "MICRO-STAR INTERANTIONAL CO.,LTD". 778 * fields as "MICRO-STAR INTERANTIONAL CO.,LTD".
776 * This spelling mistake was fixed in BIOS version 1.5, so 779 * This spelling mistake was fixed in BIOS version 1.5, so
777 * 1.5 and later have the Manufacturer as 780 * 1.5 and later have the Manufacturer as
778 * "MICRO-STAR INTERNATIONAL CO.,LTD". 781 * "MICRO-STAR INTERNATIONAL CO.,LTD".
779 * So try to match on DMI_BOARD_VENDOR of "MICRO-STAR INTER". 782 * So try to match on DMI_BOARD_VENDOR of "MICRO-STAR INTER".
780 * 783 *
781 * BIOS versions earlier than 1.9 had a Board Product Name 784 * BIOS versions earlier than 1.9 had a Board Product Name
782 * DMI field of "MS-7376". This was changed to be 785 * DMI field of "MS-7376". This was changed to be
783 * "K9A2 Platinum (MS-7376)" in version 1.9, but we can still 786 * "K9A2 Platinum (MS-7376)" in version 1.9, but we can still
784 * match on DMI_BOARD_NAME of "MS-7376". 787 * match on DMI_BOARD_NAME of "MS-7376".
785 */ 788 */
786 { 789 {
787 .ident = "MSI K9A2 Platinum", 790 .ident = "MSI K9A2 Platinum",
788 .matches = { 791 .matches = {
789 DMI_MATCH(DMI_BOARD_VENDOR, 792 DMI_MATCH(DMI_BOARD_VENDOR,
790 "MICRO-STAR INTER"), 793 "MICRO-STAR INTER"),
791 DMI_MATCH(DMI_BOARD_NAME, "MS-7376"), 794 DMI_MATCH(DMI_BOARD_NAME, "MS-7376"),
792 }, 795 },
793 }, 796 },
794 { } 797 { }
795 }; 798 };
796 const struct dmi_system_id *match; 799 const struct dmi_system_id *match;
797 int year, month, date; 800 int year, month, date;
798 char buf[9]; 801 char buf[9];
799 802
800 match = dmi_first_match(sysids); 803 match = dmi_first_match(sysids);
801 if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) || 804 if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) ||
802 !match) 805 !match)
803 return false; 806 return false;
804 807
805 if (!match->driver_data) 808 if (!match->driver_data)
806 goto enable_64bit; 809 goto enable_64bit;
807 810
808 dmi_get_date(DMI_BIOS_DATE, &year, &month, &date); 811 dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
809 snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date); 812 snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
810 813
811 if (strcmp(buf, match->driver_data) >= 0) 814 if (strcmp(buf, match->driver_data) >= 0)
812 goto enable_64bit; 815 goto enable_64bit;
813 else { 816 else {
814 dev_printk(KERN_WARNING, &pdev->dev, "%s: BIOS too old, " 817 dev_printk(KERN_WARNING, &pdev->dev, "%s: BIOS too old, "
815 "forcing 32bit DMA, update BIOS\n", match->ident); 818 "forcing 32bit DMA, update BIOS\n", match->ident);
816 return false; 819 return false;
817 } 820 }
818 821
819 enable_64bit: 822 enable_64bit:
820 dev_printk(KERN_WARNING, &pdev->dev, "%s: enabling 64bit DMA\n", 823 dev_printk(KERN_WARNING, &pdev->dev, "%s: enabling 64bit DMA\n",
821 match->ident); 824 match->ident);
822 return true; 825 return true;
823 } 826 }
824 827
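The BIOS cutoff check above relies on a small trick worth making explicit: because the dmi_get_date() result is rendered as a zero-padded "yyyymmdd" string, a plain lexicographic strcmp() also compares dates chronologically. A minimal standalone sketch (hypothetical helper name, not driver code); ahci_broken_suspend() below uses the same encoding with the comparison inverted:

#include <stdio.h>
#include <string.h>

/* Render a BIOS date the way the quirk does and compare it against a
 * cutoff; zero padding makes string order equal chronological order. */
static int bios_date_ge(int year, int month, int day, const char *cutoff)
{
	char buf[9];	/* "yyyymmdd" + NUL */

	snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, day);
	return strcmp(buf, cutoff) >= 0;
}

int main(void)
{
	/* ASUS M2A-VM cutoff from the table above */
	printf("%d\n", bios_date_ge(2007, 10, 26, "20071026"));	/* 1: enable */
	printf("%d\n", bios_date_ge(2007, 9, 1, "20071026"));	/* 0: too old */
	return 0;
}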
825 static bool ahci_broken_system_poweroff(struct pci_dev *pdev) 828 static bool ahci_broken_system_poweroff(struct pci_dev *pdev)
826 { 829 {
827 static const struct dmi_system_id broken_systems[] = { 830 static const struct dmi_system_id broken_systems[] = {
828 { 831 {
829 .ident = "HP Compaq nx6310", 832 .ident = "HP Compaq nx6310",
830 .matches = { 833 .matches = {
831 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 834 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
832 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6310"), 835 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6310"),
833 }, 836 },
834 /* PCI slot number of the controller */ 837 /* PCI slot number of the controller */
835 .driver_data = (void *)0x1FUL, 838 .driver_data = (void *)0x1FUL,
836 }, 839 },
837 { 840 {
838 .ident = "HP Compaq 6720s", 841 .ident = "HP Compaq 6720s",
839 .matches = { 842 .matches = {
840 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 843 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
841 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6720s"), 844 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6720s"),
842 }, 845 },
843 /* PCI slot number of the controller */ 846 /* PCI slot number of the controller */
844 .driver_data = (void *)0x1FUL, 847 .driver_data = (void *)0x1FUL,
845 }, 848 },
846 849
847 { } /* terminate list */ 850 { } /* terminate list */
848 }; 851 };
849 const struct dmi_system_id *dmi = dmi_first_match(broken_systems); 852 const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
850 853
851 if (dmi) { 854 if (dmi) {
852 unsigned long slot = (unsigned long)dmi->driver_data; 855 unsigned long slot = (unsigned long)dmi->driver_data;
853 /* apply the quirk only to on-board controllers */ 856 /* apply the quirk only to on-board controllers */
854 return slot == PCI_SLOT(pdev->devfn); 857 return slot == PCI_SLOT(pdev->devfn);
855 } 858 }
856 859
857 return false; 860 return false;
858 } 861 }
859 862
860 static bool ahci_broken_suspend(struct pci_dev *pdev) 863 static bool ahci_broken_suspend(struct pci_dev *pdev)
861 { 864 {
862 static const struct dmi_system_id sysids[] = { 865 static const struct dmi_system_id sysids[] = {
863 /* 866 /*
864 * On HP dv[4-6] and HDX18 with earlier BIOSes, the 867 * On HP dv[4-6] and HDX18 with earlier BIOSes, the
865 * link to the hard disk doesn't come online after 868 * link to the hard disk doesn't come online after
866 * resuming from STR. Warn and fail suspend. 869 * resuming from STR. Warn and fail suspend.
867 * 870 *
868 * http://bugzilla.kernel.org/show_bug.cgi?id=12276 871 * http://bugzilla.kernel.org/show_bug.cgi?id=12276
869 * 872 *
870 * Use dates instead of versions to match as HP is 873 * Use dates instead of versions to match as HP is
871 * apparently recycling both product and version 874 * apparently recycling both product and version
872 * strings. 875 * strings.
873 * 876 *
874 * http://bugzilla.kernel.org/show_bug.cgi?id=15462 877 * http://bugzilla.kernel.org/show_bug.cgi?id=15462
875 */ 878 */
876 { 879 {
877 .ident = "dv4", 880 .ident = "dv4",
878 .matches = { 881 .matches = {
879 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 882 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
880 DMI_MATCH(DMI_PRODUCT_NAME, 883 DMI_MATCH(DMI_PRODUCT_NAME,
881 "HP Pavilion dv4 Notebook PC"), 884 "HP Pavilion dv4 Notebook PC"),
882 }, 885 },
883 .driver_data = "20090105", /* F.30 */ 886 .driver_data = "20090105", /* F.30 */
884 }, 887 },
885 { 888 {
886 .ident = "dv5", 889 .ident = "dv5",
887 .matches = { 890 .matches = {
888 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 891 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
889 DMI_MATCH(DMI_PRODUCT_NAME, 892 DMI_MATCH(DMI_PRODUCT_NAME,
890 "HP Pavilion dv5 Notebook PC"), 893 "HP Pavilion dv5 Notebook PC"),
891 }, 894 },
892 .driver_data = "20090506", /* F.16 */ 895 .driver_data = "20090506", /* F.16 */
893 }, 896 },
894 { 897 {
895 .ident = "dv6", 898 .ident = "dv6",
896 .matches = { 899 .matches = {
897 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 900 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
898 DMI_MATCH(DMI_PRODUCT_NAME, 901 DMI_MATCH(DMI_PRODUCT_NAME,
899 "HP Pavilion dv6 Notebook PC"), 902 "HP Pavilion dv6 Notebook PC"),
900 }, 903 },
901 .driver_data = "20090423", /* F.21 */ 904 .driver_data = "20090423", /* F.21 */
902 }, 905 },
903 { 906 {
904 .ident = "HDX18", 907 .ident = "HDX18",
905 .matches = { 908 .matches = {
906 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 909 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
907 DMI_MATCH(DMI_PRODUCT_NAME, 910 DMI_MATCH(DMI_PRODUCT_NAME,
908 "HP HDX18 Notebook PC"), 911 "HP HDX18 Notebook PC"),
909 }, 912 },
910 .driver_data = "20090430", /* F.23 */ 913 .driver_data = "20090430", /* F.23 */
911 }, 914 },
912 /* 915 /*
913 * Acer eMachines G725 has the same problem. BIOS 916 * Acer eMachines G725 has the same problem. BIOS
914 * V1.03 is known to be broken. V3.04 is known to 917 * V1.03 is known to be broken. V3.04 is known to
915 * work. In between, there are V1.06, V2.06 and V3.03 918 * work. In between, there are V1.06, V2.06 and V3.03
916 * that we don't have much idea about. For now, 919 * that we don't have much idea about. For now,
917 * blacklist anything older than V3.04. 920 * blacklist anything older than V3.04.
918 * 921 *
919 * http://bugzilla.kernel.org/show_bug.cgi?id=15104 922 * http://bugzilla.kernel.org/show_bug.cgi?id=15104
920 */ 923 */
921 { 924 {
922 .ident = "G725", 925 .ident = "G725",
923 .matches = { 926 .matches = {
924 DMI_MATCH(DMI_SYS_VENDOR, "eMachines"), 927 DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
925 DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"), 928 DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
926 }, 929 },
927 .driver_data = "20091216", /* V3.04 */ 930 .driver_data = "20091216", /* V3.04 */
928 }, 931 },
929 { } /* terminate list */ 932 { } /* terminate list */
930 }; 933 };
931 const struct dmi_system_id *dmi = dmi_first_match(sysids); 934 const struct dmi_system_id *dmi = dmi_first_match(sysids);
932 int year, month, date; 935 int year, month, date;
933 char buf[9]; 936 char buf[9];
934 937
935 if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2)) 938 if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2))
936 return false; 939 return false;
937 940
938 dmi_get_date(DMI_BIOS_DATE, &year, &month, &date); 941 dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
939 snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date); 942 snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
940 943
941 return strcmp(buf, dmi->driver_data) < 0; 944 return strcmp(buf, dmi->driver_data) < 0;
942 } 945 }
943 946
944 static bool ahci_broken_online(struct pci_dev *pdev) 947 static bool ahci_broken_online(struct pci_dev *pdev)
945 { 948 {
946 #define ENCODE_BUSDEVFN(bus, slot, func) \ 949 #define ENCODE_BUSDEVFN(bus, slot, func) \
947 (void *)(unsigned long)(((bus) << 8) | PCI_DEVFN((slot), (func))) 950 (void *)(unsigned long)(((bus) << 8) | PCI_DEVFN((slot), (func)))
948 static const struct dmi_system_id sysids[] = { 951 static const struct dmi_system_id sysids[] = {
949 /* 952 /*
950 * There are several Gigabyte boards which use 953 * There are several Gigabyte boards which use
951 * SIMG5723s configured as hardware RAID. Certain 954 * SIMG5723s configured as hardware RAID. Certain
952 * 5723 firmware revisions shipped there keep the link 955 * 5723 firmware revisions shipped there keep the link
953 * online but fail to answer properly to SRST or 956 * online but fail to answer properly to SRST or
954 * IDENTIFY when no device is attached downstream, 957 * IDENTIFY when no device is attached downstream,
955 * causing libata to retry quite a few times leading 958 * causing libata to retry quite a few times leading
956 * to excessive detection delay. 959 * to excessive detection delay.
957 * 960 *
958 * As these firmwares respond to the second reset try 961 * As these firmwares respond to the second reset try
959 * with an invalid device signature, treating an unknown 962 * with an invalid device signature, treating an unknown
960 * sig as offline works around the problem acceptably. 963 * sig as offline works around the problem acceptably.
961 */ 964 */
962 { 965 {
963 .ident = "EP45-DQ6", 966 .ident = "EP45-DQ6",
964 .matches = { 967 .matches = {
965 DMI_MATCH(DMI_BOARD_VENDOR, 968 DMI_MATCH(DMI_BOARD_VENDOR,
966 "Gigabyte Technology Co., Ltd."), 969 "Gigabyte Technology Co., Ltd."),
967 DMI_MATCH(DMI_BOARD_NAME, "EP45-DQ6"), 970 DMI_MATCH(DMI_BOARD_NAME, "EP45-DQ6"),
968 }, 971 },
969 .driver_data = ENCODE_BUSDEVFN(0x0a, 0x00, 0), 972 .driver_data = ENCODE_BUSDEVFN(0x0a, 0x00, 0),
970 }, 973 },
971 { 974 {
972 .ident = "EP45-DS5", 975 .ident = "EP45-DS5",
973 .matches = { 976 .matches = {
974 DMI_MATCH(DMI_BOARD_VENDOR, 977 DMI_MATCH(DMI_BOARD_VENDOR,
975 "Gigabyte Technology Co., Ltd."), 978 "Gigabyte Technology Co., Ltd."),
976 DMI_MATCH(DMI_BOARD_NAME, "EP45-DS5"), 979 DMI_MATCH(DMI_BOARD_NAME, "EP45-DS5"),
977 }, 980 },
978 .driver_data = ENCODE_BUSDEVFN(0x03, 0x00, 0), 981 .driver_data = ENCODE_BUSDEVFN(0x03, 0x00, 0),
979 }, 982 },
980 { } /* terminate list */ 983 { } /* terminate list */
981 }; 984 };
982 #undef ENCODE_BUSDEVFN 985 #undef ENCODE_BUSDEVFN
983 const struct dmi_system_id *dmi = dmi_first_match(sysids); 986 const struct dmi_system_id *dmi = dmi_first_match(sysids);
984 unsigned int val; 987 unsigned int val;
985 988
986 if (!dmi) 989 if (!dmi)
987 return false; 990 return false;
988 991
989 val = (unsigned long)dmi->driver_data; 992 val = (unsigned long)dmi->driver_data;
990 993
991 return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff); 994 return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
992 } 995 }
993 996
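ENCODE_BUSDEVFN() above packs a PCI location into the pointer-sized driver_data slot: the bus number goes in bits 15:8 and the kernel's devfn encoding (slot << 3 | function) in bits 7:0. A hypothetical standalone sketch of the round trip, with the devfn macros copied from the kernel's PCI headers:

#include <stdio.h>

#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)		((devfn) & 0x07)

int main(void)
{
	/* EP45-DQ6 entry above: bus 0x0a, slot 0x00, function 0 */
	unsigned long val = (0x0a << 8) | PCI_DEVFN(0x00, 0);
	unsigned int bus = val >> 8, devfn = val & 0xff;

	printf("bus=%#x slot=%#x func=%u\n",
	       bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	return 0;
}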
994 #ifdef CONFIG_ATA_ACPI 997 #ifdef CONFIG_ATA_ACPI
995 static void ahci_gtf_filter_workaround(struct ata_host *host) 998 static void ahci_gtf_filter_workaround(struct ata_host *host)
996 { 999 {
997 static const struct dmi_system_id sysids[] = { 1000 static const struct dmi_system_id sysids[] = {
998 /* 1001 /*
999 * Aspire 3810T issues a bunch of SATA enable commands 1002 * Aspire 3810T issues a bunch of SATA enable commands
1000 * via _GTF including an invalid one and one which is 1003 * via _GTF including an invalid one and one which is
1001 * rejected by the device. Among the successful ones 1004 * rejected by the device. Among the successful ones
1002 * is FPDMA non-zero offset enable which, when enabled 1005 * is FPDMA non-zero offset enable which, when enabled
1003 * only on the drive side, leads to NCQ command 1006 * only on the drive side, leads to NCQ command
1004 * failures. Filter it out. 1007 * failures. Filter it out.
1005 */ 1008 */
1006 { 1009 {
1007 .ident = "Aspire 3810T", 1010 .ident = "Aspire 3810T",
1008 .matches = { 1011 .matches = {
1009 DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 1012 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1010 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3810T"), 1013 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3810T"),
1011 }, 1014 },
1012 .driver_data = (void *)ATA_ACPI_FILTER_FPDMA_OFFSET, 1015 .driver_data = (void *)ATA_ACPI_FILTER_FPDMA_OFFSET,
1013 }, 1016 },
1014 { } 1017 { }
1015 }; 1018 };
1016 const struct dmi_system_id *dmi = dmi_first_match(sysids); 1019 const struct dmi_system_id *dmi = dmi_first_match(sysids);
1017 unsigned int filter; 1020 unsigned int filter;
1018 int i; 1021 int i;
1019 1022
1020 if (!dmi) 1023 if (!dmi)
1021 return; 1024 return;
1022 1025
1023 filter = (unsigned long)dmi->driver_data; 1026 filter = (unsigned long)dmi->driver_data;
1024 dev_printk(KERN_INFO, host->dev, 1027 dev_printk(KERN_INFO, host->dev,
1025 "applying extra ACPI _GTF filter 0x%x for %s\n", 1028 "applying extra ACPI _GTF filter 0x%x for %s\n",
1026 filter, dmi->ident); 1029 filter, dmi->ident);
1027 1030
1028 for (i = 0; i < host->n_ports; i++) { 1031 for (i = 0; i < host->n_ports; i++) {
1029 struct ata_port *ap = host->ports[i]; 1032 struct ata_port *ap = host->ports[i];
1030 struct ata_link *link; 1033 struct ata_link *link;
1031 struct ata_device *dev; 1034 struct ata_device *dev;
1032 1035
1033 ata_for_each_link(link, ap, EDGE) 1036 ata_for_each_link(link, ap, EDGE)
1034 ata_for_each_dev(dev, link, ALL) 1037 ata_for_each_dev(dev, link, ALL)
1035 dev->gtf_filter |= filter; 1038 dev->gtf_filter |= filter;
1036 } 1039 }
1037 } 1040 }
1038 #else 1041 #else
1039 static inline void ahci_gtf_filter_workaround(struct ata_host *host) 1042 static inline void ahci_gtf_filter_workaround(struct ata_host *host)
1040 {} 1043 {}
1041 #endif 1044 #endif
1042 1045
1043 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1046 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1044 { 1047 {
1045 static int printed_version; 1048 static int printed_version;
1046 unsigned int board_id = ent->driver_data; 1049 unsigned int board_id = ent->driver_data;
1047 struct ata_port_info pi = ahci_port_info[board_id]; 1050 struct ata_port_info pi = ahci_port_info[board_id];
1048 const struct ata_port_info *ppi[] = { &pi, NULL }; 1051 const struct ata_port_info *ppi[] = { &pi, NULL };
1049 struct device *dev = &pdev->dev; 1052 struct device *dev = &pdev->dev;
1050 struct ahci_host_priv *hpriv; 1053 struct ahci_host_priv *hpriv;
1051 struct ata_host *host; 1054 struct ata_host *host;
1052 int n_ports, i, rc; 1055 int n_ports, i, rc;
1053 1056
1054 VPRINTK("ENTER\n"); 1057 VPRINTK("ENTER\n");
1055 1058
1056 WARN_ON((int)ATA_MAX_QUEUE > AHCI_MAX_CMDS); 1059 WARN_ON((int)ATA_MAX_QUEUE > AHCI_MAX_CMDS);
1057 1060
1058 if (!printed_version++) 1061 if (!printed_version++)
1059 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 1062 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1060 1063
1061 /* The AHCI driver can only drive the SATA ports; the PATA driver 1064 /* The AHCI driver can only drive the SATA ports; the PATA driver
1062 can drive them all, so if both drivers are selected make sure 1065 can drive them all, so if both drivers are selected make sure
1063 AHCI stays out of the way */ 1066 AHCI stays out of the way */
1064 if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable) 1067 if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
1065 return -ENODEV; 1068 return -ENODEV;
1066 1069
1067 /* 1070 /*
1068 * For some reason, MCP89 on MacBook 7,1 doesn't work with 1071 * For some reason, MCP89 on MacBook 7,1 doesn't work with
1069 * ahci; use ata_generic instead. 1072 * ahci; use ata_generic instead.
1070 */ 1073 */
1071 if (pdev->vendor == PCI_VENDOR_ID_NVIDIA && 1074 if (pdev->vendor == PCI_VENDOR_ID_NVIDIA &&
1072 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA && 1075 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA &&
1073 pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE && 1076 pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
1074 pdev->subsystem_device == 0xcb89) 1077 pdev->subsystem_device == 0xcb89)
1075 return -ENODEV; 1078 return -ENODEV;
1076 1079
1077 /* Promise's PDC42819 is a SAS/SATA controller that has an AHCI mode. 1080 /* Promise's PDC42819 is a SAS/SATA controller that has an AHCI mode.
1078 * At the moment, we can only use the AHCI mode. Let the users know 1081 * At the moment, we can only use the AHCI mode. Let the users know
1079 * that for SAS drives they're out of luck. 1082 * that for SAS drives they're out of luck.
1080 */ 1083 */
1081 if (pdev->vendor == PCI_VENDOR_ID_PROMISE) 1084 if (pdev->vendor == PCI_VENDOR_ID_PROMISE)
1082 dev_printk(KERN_INFO, &pdev->dev, "PDC42819 " 1085 dev_printk(KERN_INFO, &pdev->dev, "PDC42819 "
1083 "can only drive SATA devices with this driver\n"); 1086 "can only drive SATA devices with this driver\n");
1084 1087
1085 /* acquire resources */ 1088 /* acquire resources */
1086 rc = pcim_enable_device(pdev); 1089 rc = pcim_enable_device(pdev);
1087 if (rc) 1090 if (rc)
1088 return rc; 1091 return rc;
1089 1092
1090 /* AHCI controllers often implement an SFF-compatible interface. 1093 /* AHCI controllers often implement an SFF-compatible interface.
1091 * Grab all PCI BARs just in case. 1094 * Grab all PCI BARs just in case.
1092 */ 1095 */
1093 rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME); 1096 rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
1094 if (rc == -EBUSY) 1097 if (rc == -EBUSY)
1095 pcim_pin_device(pdev); 1098 pcim_pin_device(pdev);
1096 if (rc) 1099 if (rc)
1097 return rc; 1100 return rc;
1098 1101
1099 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 1102 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
1100 (pdev->device == 0x2652 || pdev->device == 0x2653)) { 1103 (pdev->device == 0x2652 || pdev->device == 0x2653)) {
1101 u8 map; 1104 u8 map;
1102 1105
1103 /* ICH6s share the same PCI ID for both piix and ahci 1106 /* ICH6s share the same PCI ID for both piix and ahci
1104 * modes. Enabling ahci mode while MAP indicates 1107 * modes. Enabling ahci mode while MAP indicates
1105 * combined mode is a bad idea. Yield to ata_piix. 1108 * combined mode is a bad idea. Yield to ata_piix.
1106 */ 1109 */
1107 pci_read_config_byte(pdev, ICH_MAP, &map); 1110 pci_read_config_byte(pdev, ICH_MAP, &map);
1108 if (map & 0x3) { 1111 if (map & 0x3) {
1109 dev_printk(KERN_INFO, &pdev->dev, "controller is in " 1112 dev_printk(KERN_INFO, &pdev->dev, "controller is in "
1110 "combined mode, can't enable AHCI mode\n"); 1113 "combined mode, can't enable AHCI mode\n");
1111 return -ENODEV; 1114 return -ENODEV;
1112 } 1115 }
1113 } 1116 }
1114 1117
1115 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); 1118 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
1116 if (!hpriv) 1119 if (!hpriv)
1117 return -ENOMEM; 1120 return -ENOMEM;
1118 hpriv->flags |= (unsigned long)pi.private_data; 1121 hpriv->flags |= (unsigned long)pi.private_data;
1119 1122
1120 /* MCP65 revision A1 and A2 can't do MSI */ 1123 /* MCP65 revision A1 and A2 can't do MSI */
1121 if (board_id == board_ahci_mcp65 && 1124 if (board_id == board_ahci_mcp65 &&
1122 (pdev->revision == 0xa1 || pdev->revision == 0xa2)) 1125 (pdev->revision == 0xa1 || pdev->revision == 0xa2))
1123 hpriv->flags |= AHCI_HFLAG_NO_MSI; 1126 hpriv->flags |= AHCI_HFLAG_NO_MSI;
1124 1127
1125 /* SB800 does NOT need the workaround to ignore SERR_INTERNAL */ 1128 /* SB800 does NOT need the workaround to ignore SERR_INTERNAL */
1126 if (board_id == board_ahci_sb700 && pdev->revision >= 0x40) 1129 if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
1127 hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL; 1130 hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
1128 1131
1129 /* only some SB600s can do 64bit DMA */ 1132 /* only some SB600s can do 64bit DMA */
1130 if (ahci_sb600_enable_64bit(pdev)) 1133 if (ahci_sb600_enable_64bit(pdev))
1131 hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY; 1134 hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY;
1132 1135
1133 if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev)) 1136 if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
1134 pci_intx(pdev, 1); 1137 pci_intx(pdev, 1);
1135 1138
1136 hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR]; 1139 hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
1137 1140
1138 /* save initial config */ 1141 /* save initial config */
1139 ahci_pci_save_initial_config(pdev, hpriv); 1142 ahci_pci_save_initial_config(pdev, hpriv);
1140 1143
1141 /* prepare host */ 1144 /* prepare host */
1142 if (hpriv->cap & HOST_CAP_NCQ) { 1145 if (hpriv->cap & HOST_CAP_NCQ) {
1143 pi.flags |= ATA_FLAG_NCQ; 1146 pi.flags |= ATA_FLAG_NCQ;
1144 /* 1147 /*
1145 * Auto-activate optimization is supposed to be 1148 * Auto-activate optimization is supposed to be
1146 * supported on all AHCI controllers indicating NCQ 1149 * supported on all AHCI controllers indicating NCQ
1147 * capability, but it seems to be broken on some 1150 * capability, but it seems to be broken on some
1148 * chipsets, including NVIDIA's. 1151 * chipsets, including NVIDIA's.
1149 */ 1152 */
1150 if (!(hpriv->flags & AHCI_HFLAG_NO_FPDMA_AA)) 1153 if (!(hpriv->flags & AHCI_HFLAG_NO_FPDMA_AA))
1151 pi.flags |= ATA_FLAG_FPDMA_AA; 1154 pi.flags |= ATA_FLAG_FPDMA_AA;
1152 } 1155 }
1153 1156
1154 if (hpriv->cap & HOST_CAP_PMP) 1157 if (hpriv->cap & HOST_CAP_PMP)
1155 pi.flags |= ATA_FLAG_PMP; 1158 pi.flags |= ATA_FLAG_PMP;
1156 1159
1157 ahci_set_em_messages(hpriv, &pi); 1160 ahci_set_em_messages(hpriv, &pi);
1158 1161
1159 if (ahci_broken_system_poweroff(pdev)) { 1162 if (ahci_broken_system_poweroff(pdev)) {
1160 pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN; 1163 pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
1161 dev_info(&pdev->dev, 1164 dev_info(&pdev->dev,
1162 "quirky BIOS, skipping spindown on poweroff\n"); 1165 "quirky BIOS, skipping spindown on poweroff\n");
1163 } 1166 }
1164 1167
1165 if (ahci_broken_suspend(pdev)) { 1168 if (ahci_broken_suspend(pdev)) {
1166 hpriv->flags |= AHCI_HFLAG_NO_SUSPEND; 1169 hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
1167 dev_printk(KERN_WARNING, &pdev->dev, 1170 dev_printk(KERN_WARNING, &pdev->dev,
1168 "BIOS update required for suspend/resume\n"); 1171 "BIOS update required for suspend/resume\n");
1169 } 1172 }
1170 1173
1171 if (ahci_broken_online(pdev)) { 1174 if (ahci_broken_online(pdev)) {
1172 hpriv->flags |= AHCI_HFLAG_SRST_TOUT_IS_OFFLINE; 1175 hpriv->flags |= AHCI_HFLAG_SRST_TOUT_IS_OFFLINE;
1173 dev_info(&pdev->dev, 1176 dev_info(&pdev->dev,
1174 "online status unreliable, applying workaround\n"); 1177 "online status unreliable, applying workaround\n");
1175 } 1178 }
1176 1179
1177 /* CAP.NP sometimes indicates the index of the last enabled 1180 /* CAP.NP sometimes indicates the index of the last enabled
1178 * port and, at other times, that of the last possible port, so 1181 * port and, at other times, that of the last possible port, so
1179 * determining the maximum port number requires looking at 1182 * determining the maximum port number requires looking at
1180 * both CAP.NP and port_map. 1183 * both CAP.NP and port_map.
1181 */ 1184 */
1182 n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map)); 1185 n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
1183 1186
1184 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); 1187 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
1185 if (!host) 1188 if (!host)
1186 return -ENOMEM; 1189 return -ENOMEM;
1187 host->private_data = hpriv; 1190 host->private_data = hpriv;
1188 1191
1189 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss) 1192 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
1190 host->flags |= ATA_HOST_PARALLEL_SCAN; 1193 host->flags |= ATA_HOST_PARALLEL_SCAN;
1191 else 1194 else
1192 printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n"); 1195 printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
1193 1196
1194 if (pi.flags & ATA_FLAG_EM) 1197 if (pi.flags & ATA_FLAG_EM)
1195 ahci_reset_em(host); 1198 ahci_reset_em(host);
1196 1199
1197 for (i = 0; i < host->n_ports; i++) { 1200 for (i = 0; i < host->n_ports; i++) {
1198 struct ata_port *ap = host->ports[i]; 1201 struct ata_port *ap = host->ports[i];
1199 1202
1200 ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar"); 1203 ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
1201 ata_port_pbar_desc(ap, AHCI_PCI_BAR, 1204 ata_port_pbar_desc(ap, AHCI_PCI_BAR,
1202 0x100 + ap->port_no * 0x80, "port"); 1205 0x100 + ap->port_no * 0x80, "port");
1203 1206
1204 /* set initial link pm policy */ 1207 /* set initial link pm policy */
1205 ap->pm_policy = NOT_AVAILABLE; 1208 ap->pm_policy = NOT_AVAILABLE;
1206 1209
1207 /* set enclosure management message type */ 1210 /* set enclosure management message type */
1208 if (ap->flags & ATA_FLAG_EM) 1211 if (ap->flags & ATA_FLAG_EM)
1209 ap->em_message_type = hpriv->em_msg_type; 1212 ap->em_message_type = hpriv->em_msg_type;
1210 1213
1211 1214
1212 /* disabled/not-implemented port */ 1215 /* disabled/not-implemented port */
1213 if (!(hpriv->port_map & (1 << i))) 1216 if (!(hpriv->port_map & (1 << i)))
1214 ap->ops = &ata_dummy_port_ops; 1217 ap->ops = &ata_dummy_port_ops;
1215 } 1218 }
1216 1219
1217 /* apply workaround for ASUS P5W DH Deluxe mainboard */ 1220 /* apply workaround for ASUS P5W DH Deluxe mainboard */
1218 ahci_p5wdh_workaround(host); 1221 ahci_p5wdh_workaround(host);
1219 1222
1220 /* apply gtf filter quirk */ 1223 /* apply gtf filter quirk */
1221 ahci_gtf_filter_workaround(host); 1224 ahci_gtf_filter_workaround(host);
1222 1225
1223 /* initialize adapter */ 1226 /* initialize adapter */
1224 rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64); 1227 rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
1225 if (rc) 1228 if (rc)
1226 return rc; 1229 return rc;
1227 1230
1228 rc = ahci_pci_reset_controller(host); 1231 rc = ahci_pci_reset_controller(host);
1229 if (rc) 1232 if (rc)
1230 return rc; 1233 return rc;
1231 1234
1232 ahci_pci_init_controller(host); 1235 ahci_pci_init_controller(host);
1233 ahci_pci_print_info(host); 1236 ahci_pci_print_info(host);
1234 1237
1235 pci_set_master(pdev); 1238 pci_set_master(pdev);
1236 return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED, 1239 return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
1237 &ahci_sht); 1240 &ahci_sht);
1238 } 1241 }
1239 1242
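The n_ports calculation in ahci_init_one() above needs both inputs because CAP.NP (bits 4:0 of CAP, encoding ports - 1) and the implemented-port bitmap can disagree: one reports the last enabled port, the other the last possible one. A hypothetical standalone sketch (fls_u32() stands in for the kernel's fls()):

#include <stdio.h>

/* Highest set bit, 1-based; 0 when no bits are set -- kernel fls() semantics. */
static int fls_u32(unsigned int x)
{
	int i = 0;

	while (x) {
		i++;
		x >>= 1;
	}
	return i;
}

int main(void)
{
	unsigned int cap = 0x3;		/* CAP.NP = 3 -> 4 ports */
	unsigned int port_map = 0x33;	/* ports 0, 1, 4 and 5 implemented */
	int from_cap = (cap & 0x1f) + 1;	/* what ahci_nr_ports() computes */
	int from_map = fls_u32(port_map);	/* 6 */

	/* max() of the two, as in ahci_init_one(): prints "n_ports = 6" */
	printf("n_ports = %d\n", from_cap > from_map ? from_cap : from_map);
	return 0;
}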
1240 static int __init ahci_init(void) 1243 static int __init ahci_init(void)
1241 { 1244 {
1242 return pci_register_driver(&ahci_pci_driver); 1245 return pci_register_driver(&ahci_pci_driver);
1243 } 1246 }
1244 1247
1245 static void __exit ahci_exit(void) 1248 static void __exit ahci_exit(void)
1246 { 1249 {
1247 pci_unregister_driver(&ahci_pci_driver); 1250 pci_unregister_driver(&ahci_pci_driver);
1248 } 1251 }
1249 1252
1250 1253
1251 MODULE_AUTHOR("Jeff Garzik"); 1254 MODULE_AUTHOR("Jeff Garzik");
1252 MODULE_DESCRIPTION("AHCI SATA low-level driver"); 1255 MODULE_DESCRIPTION("AHCI SATA low-level driver");
1253 MODULE_LICENSE("GPL"); 1256 MODULE_LICENSE("GPL");
1254 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl); 1257 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
1255 MODULE_VERSION(DRV_VERSION); 1258 MODULE_VERSION(DRV_VERSION);
1256 1259
1257 module_init(ahci_init); 1260 module_init(ahci_init);
1258 module_exit(ahci_exit); 1261 module_exit(ahci_exit);
1259 1262
drivers/ata/ata_piix.c
1 /* 1 /*
2 * ata_piix.c - Intel PATA/SATA controllers 2 * ata_piix.c - Intel PATA/SATA controllers
3 * 3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org 5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails. 6 * on emails.
7 * 7 *
8 * 8 *
9 * Copyright 2003-2005 Red Hat Inc 9 * Copyright 2003-2005 Red Hat Inc
10 * Copyright 2003-2005 Jeff Garzik 10 * Copyright 2003-2005 Jeff Garzik
11 * 11 *
12 * 12 *
13 * Copyright header from piix.c: 13 * Copyright header from piix.c:
14 * 14 *
15 * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer 15 * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
16 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org> 16 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
17 * Copyright (C) 2003 Red Hat Inc 17 * Copyright (C) 2003 Red Hat Inc
18 * 18 *
19 * 19 *
20 * This program is free software; you can redistribute it and/or modify 20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by 21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2, or (at your option) 22 * the Free Software Foundation; either version 2, or (at your option)
23 * any later version. 23 * any later version.
24 * 24 *
25 * This program is distributed in the hope that it will be useful, 25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of 26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details. 28 * GNU General Public License for more details.
29 * 29 *
30 * You should have received a copy of the GNU General Public License 30 * You should have received a copy of the GNU General Public License
31 * along with this program; see the file COPYING. If not, write to 31 * along with this program; see the file COPYING. If not, write to
32 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 32 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
33 * 33 *
34 * 34 *
35 * libata documentation is available via 'make {ps|pdf}docs', 35 * libata documentation is available via 'make {ps|pdf}docs',
36 * as Documentation/DocBook/libata.* 36 * as Documentation/DocBook/libata.*
37 * 37 *
38 * Hardware documentation available at http://developer.intel.com/ 38 * Hardware documentation available at http://developer.intel.com/
39 * 39 *
40 * Documentation 40 * Documentation
41 * Publicly available from the Intel web site. Errata documentation 41 * Publicly available from the Intel web site. Errata documentation
42 * is also publicly available. As an aid to anyone hacking on this 42 * is also publicly available. As an aid to anyone hacking on this
43 * driver, the list of relevant errata is below, going back to 43 * driver, the list of relevant errata is below, going back to
44 * PIIX4. Older device documentation is now a bit tricky to find. 44 * PIIX4. Older device documentation is now a bit tricky to find.
45 * 45 *
46 * The chipsets all follow very much the same design. The original Triton 46 * The chipsets all follow very much the same design. The original Triton
47 * series chipsets do _not_ support independent device timings, but this 47 * series chipsets do _not_ support independent device timings, but this
48 * is fixed in Triton II. With the odd mobile exception the chips then 48 * is fixed in Triton II. With the odd mobile exception the chips then
49 * change little except in gaining more modes until SATA arrives. This 49 * change little except in gaining more modes until SATA arrives. This
50 * driver supports only the chips with independent timing (that is those 50 * driver supports only the chips with independent timing (that is those
51 * with SITRE and the 0x44 timing register). See pata_oldpiix and pata_mpiix 51 * with SITRE and the 0x44 timing register). See pata_oldpiix and pata_mpiix
52 * for the early chip drivers. 52 * for the early chip drivers.
53 * 53 *
54 * Errata of note: 54 * Errata of note:
55 * 55 *
56 * Unfixable 56 * Unfixable
57 * PIIX4 errata #9 - Only on ultra obscure hw 57 * PIIX4 errata #9 - Only on ultra obscure hw
58 * ICH3 errata #13 - Not observed to affect real hw 58 * ICH3 errata #13 - Not observed to affect real hw
59 * by Intel 59 * by Intel
60 * 60 *
61 * Things we must deal with 61 * Things we must deal with
62 * PIIX4 errata #10 - BM IDE hang with non UDMA 62 * PIIX4 errata #10 - BM IDE hang with non UDMA
63 * (must stop/start dma to recover) 63 * (must stop/start dma to recover)
64 * 440MX errata #15 - As PIIX4 errata #10 64 * 440MX errata #15 - As PIIX4 errata #10
65 * PIIX4 errata #15 - Must not read control registers 65 * PIIX4 errata #15 - Must not read control registers
66 * during a PIO transfer 66 * during a PIO transfer
67 * 440MX errata #13 - As PIIX4 errata #15 67 * 440MX errata #13 - As PIIX4 errata #15
68 * ICH2 errata #21 - DMA mode 0 doesn't work right 68 * ICH2 errata #21 - DMA mode 0 doesn't work right
69 * ICH0/1 errata #55 - As ICH2 errata #21 69 * ICH0/1 errata #55 - As ICH2 errata #21
70 * ICH2 spec c #9 - Extra operations needed to handle 70 * ICH2 spec c #9 - Extra operations needed to handle
71 * drive hotswap [NOT YET SUPPORTED] 71 * drive hotswap [NOT YET SUPPORTED]
72 * ICH2 spec c #20 - IDE PRD must not cross a 64K boundary 72 * ICH2 spec c #20 - IDE PRD must not cross a 64K boundary
73 * and must be dword aligned 73 * and must be dword aligned
74 * ICH2 spec c #24 - UDMA mode 4,5 t85/86 should be 6ns not 3.3 74 * ICH2 spec c #24 - UDMA mode 4,5 t85/86 should be 6ns not 3.3
75 * ICH7 errata #16 - MWDMA1 timings are incorrect 75 * ICH7 errata #16 - MWDMA1 timings are incorrect
76 * 76 *
77 * Should have been BIOS fixed: 77 * Should have been BIOS fixed:
78 * 450NX: errata #19 - DMA hangs on old 450NX 78 * 450NX: errata #19 - DMA hangs on old 450NX
79 * 450NX: errata #20 - DMA hangs on old 450NX 79 * 450NX: errata #20 - DMA hangs on old 450NX
80 * 450NX: errata #25 - Corruption with DMA on old 450NX 80 * 450NX: errata #25 - Corruption with DMA on old 450NX
81 * ICH3 errata #15 - IDE deadlock under high load 81 * ICH3 errata #15 - IDE deadlock under high load
82 * (BIOS must set dev 31 fn 0 bit 23) 82 * (BIOS must set dev 31 fn 0 bit 23)
83 * ICH3 errata #18 - Don't use native mode 83 * ICH3 errata #18 - Don't use native mode
84 */ 84 */
85 85
86 #include <linux/kernel.h> 86 #include <linux/kernel.h>
87 #include <linux/module.h> 87 #include <linux/module.h>
88 #include <linux/pci.h> 88 #include <linux/pci.h>
89 #include <linux/init.h> 89 #include <linux/init.h>
90 #include <linux/blkdev.h> 90 #include <linux/blkdev.h>
91 #include <linux/delay.h> 91 #include <linux/delay.h>
92 #include <linux/device.h> 92 #include <linux/device.h>
93 #include <linux/gfp.h> 93 #include <linux/gfp.h>
94 #include <scsi/scsi_host.h> 94 #include <scsi/scsi_host.h>
95 #include <linux/libata.h> 95 #include <linux/libata.h>
96 #include <linux/dmi.h> 96 #include <linux/dmi.h>
97 97
98 #define DRV_NAME "ata_piix" 98 #define DRV_NAME "ata_piix"
99 #define DRV_VERSION "2.13" 99 #define DRV_VERSION "2.13"
100 100
101 enum { 101 enum {
102 PIIX_IOCFG = 0x54, /* IDE I/O configuration register */ 102 PIIX_IOCFG = 0x54, /* IDE I/O configuration register */
103 ICH5_PMR = 0x90, /* port mapping register */ 103 ICH5_PMR = 0x90, /* port mapping register */
104 ICH5_PCS = 0x92, /* port control and status */ 104 ICH5_PCS = 0x92, /* port control and status */
105 PIIX_SIDPR_BAR = 5, 105 PIIX_SIDPR_BAR = 5,
106 PIIX_SIDPR_LEN = 16, 106 PIIX_SIDPR_LEN = 16,
107 PIIX_SIDPR_IDX = 0, 107 PIIX_SIDPR_IDX = 0,
108 PIIX_SIDPR_DATA = 4, 108 PIIX_SIDPR_DATA = 4,
109 109
110 PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */ 110 PIIX_FLAG_CHECKINTR = (1 << 28), /* make sure PCI INTx enabled */
111 PIIX_FLAG_SIDPR = (1 << 29), /* SATA idx/data pair regs */ 111 PIIX_FLAG_SIDPR = (1 << 29), /* SATA idx/data pair regs */
112 112
113 PIIX_PATA_FLAGS = ATA_FLAG_SLAVE_POSS, 113 PIIX_PATA_FLAGS = ATA_FLAG_SLAVE_POSS,
114 PIIX_SATA_FLAGS = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR, 114 PIIX_SATA_FLAGS = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR,
115 115
116 PIIX_80C_PRI = (1 << 5) | (1 << 4), 116 PIIX_80C_PRI = (1 << 5) | (1 << 4),
117 PIIX_80C_SEC = (1 << 7) | (1 << 6), 117 PIIX_80C_SEC = (1 << 7) | (1 << 6),
118 118
119 /* constants for mapping table */ 119 /* constants for mapping table */
120 P0 = 0, /* port 0 */ 120 P0 = 0, /* port 0 */
121 P1 = 1, /* port 1 */ 121 P1 = 1, /* port 1 */
122 P2 = 2, /* port 2 */ 122 P2 = 2, /* port 2 */
123 P3 = 3, /* port 3 */ 123 P3 = 3, /* port 3 */
124 IDE = -1, /* IDE */ 124 IDE = -1, /* IDE */
125 NA = -2, /* not available */ 125 NA = -2, /* not available */
126 RV = -3, /* reserved */ 126 RV = -3, /* reserved */
127 127
128 PIIX_AHCI_DEVICE = 6, 128 PIIX_AHCI_DEVICE = 6,
129 129
130 /* host->flags bits */ 130 /* host->flags bits */
131 PIIX_HOST_BROKEN_SUSPEND = (1 << 24), 131 PIIX_HOST_BROKEN_SUSPEND = (1 << 24),
132 }; 132 };
133 133
134 enum piix_controller_ids { 134 enum piix_controller_ids {
135 /* controller IDs */ 135 /* controller IDs */
136 piix_pata_mwdma, /* PIIX3 MWDMA only */ 136 piix_pata_mwdma, /* PIIX3 MWDMA only */
137 piix_pata_33, /* PIIX4 at 33 MHz */ 137 piix_pata_33, /* PIIX4 at 33 MHz */
138 ich_pata_33, /* ICH up to UDMA 33 only */ 138 ich_pata_33, /* ICH up to UDMA 33 only */
139 ich_pata_66, /* ICH up to 66 MHz */ 139 ich_pata_66, /* ICH up to 66 MHz */
140 ich_pata_100, /* ICH up to UDMA 100 */ 140 ich_pata_100, /* ICH up to UDMA 100 */
141 ich_pata_100_nomwdma1, /* ICH up to UDMA 100 but with no MWDMA1 */ 141 ich_pata_100_nomwdma1, /* ICH up to UDMA 100 but with no MWDMA1 */
142 ich5_sata, 142 ich5_sata,
143 ich6_sata, 143 ich6_sata,
144 ich6m_sata, 144 ich6m_sata,
145 ich8_sata, 145 ich8_sata,
146 ich8_2port_sata, 146 ich8_2port_sata,
147 ich8m_apple_sata, /* locks up on second port enable */ 147 ich8m_apple_sata, /* locks up on second port enable */
148 tolapai_sata, 148 tolapai_sata,
149 piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */ 149 piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */
150 }; 150 };
151 151
152 struct piix_map_db { 152 struct piix_map_db {
153 const u32 mask; 153 const u32 mask;
154 const u16 port_enable; 154 const u16 port_enable;
155 const int map[][4]; 155 const int map[][4];
156 }; 156 };
157 157
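The map tables that follow are indexed by the chipset's MAP register value after it is masked with .mask: each row says which ATA port, if any, backs the primary/secondary master and slave slots (PM, PS, SM, SS). A hypothetical standalone sketch of the lookup, using the ich5 table and the P0/P1/IDE/NA/RV constants (redeclared here so the sketch stands alone):

#include <stdio.h>

enum { P0 = 0, P1 = 1, IDE = -1, NA = -2, RV = -3 };

static const int ich5_map[][4] = {
	/* PM   PS   SM   SS         MAP */
	{ P0,  NA,  P1,  NA  },	/* 000b */
	{ P1,  NA,  P0,  NA  },	/* 001b */
	{ RV,  RV,  RV,  RV  },
	{ RV,  RV,  RV,  RV  },
	{ P0,  P1,  IDE, IDE },	/* 100b */
	{ P1,  P0,  IDE, IDE },	/* 101b */
	{ IDE, IDE, P0,  P1  },	/* 110b */
	{ IDE, IDE, P1,  P0  },	/* 111b */
};

int main(void)
{
	unsigned char map_reg = 0x05;	/* e.g. value read via ICH5_PMR */
	const int *row = ich5_map[map_reg & 0x7];	/* ich5 .mask */

	/* prints "PM=1 PS=0 SM=-1 SS=-1": both SATA slots, IDE secondary */
	printf("PM=%d PS=%d SM=%d SS=%d\n", row[0], row[1], row[2], row[3]);
	return 0;
}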
158 struct piix_host_priv { 158 struct piix_host_priv {
159 const int *map; 159 const int *map;
160 u32 saved_iocfg; 160 u32 saved_iocfg;
161 spinlock_t sidpr_lock; /* FIXME: remove once locking in EH is fixed */ 161 spinlock_t sidpr_lock; /* FIXME: remove once locking in EH is fixed */
162 void __iomem *sidpr; 162 void __iomem *sidpr;
163 }; 163 };
164 164
165 static int piix_init_one(struct pci_dev *pdev, 165 static int piix_init_one(struct pci_dev *pdev,
166 const struct pci_device_id *ent); 166 const struct pci_device_id *ent);
167 static void piix_remove_one(struct pci_dev *pdev); 167 static void piix_remove_one(struct pci_dev *pdev);
168 static int piix_pata_prereset(struct ata_link *link, unsigned long deadline); 168 static int piix_pata_prereset(struct ata_link *link, unsigned long deadline);
169 static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev); 169 static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev);
170 static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev); 170 static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev);
171 static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev); 171 static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev);
172 static int ich_pata_cable_detect(struct ata_port *ap); 172 static int ich_pata_cable_detect(struct ata_port *ap);
173 static u8 piix_vmw_bmdma_status(struct ata_port *ap); 173 static u8 piix_vmw_bmdma_status(struct ata_port *ap);
174 static int piix_sidpr_scr_read(struct ata_link *link, 174 static int piix_sidpr_scr_read(struct ata_link *link,
175 unsigned int reg, u32 *val); 175 unsigned int reg, u32 *val);
176 static int piix_sidpr_scr_write(struct ata_link *link, 176 static int piix_sidpr_scr_write(struct ata_link *link,
177 unsigned int reg, u32 val); 177 unsigned int reg, u32 val);
178 static bool piix_irq_check(struct ata_port *ap); 178 static bool piix_irq_check(struct ata_port *ap);
179 #ifdef CONFIG_PM 179 #ifdef CONFIG_PM
180 static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); 180 static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
181 static int piix_pci_device_resume(struct pci_dev *pdev); 181 static int piix_pci_device_resume(struct pci_dev *pdev);
182 #endif 182 #endif
183 183
184 static unsigned int in_module_init = 1; 184 static unsigned int in_module_init = 1;
185 185
186 static const struct pci_device_id piix_pci_tbl[] = { 186 static const struct pci_device_id piix_pci_tbl[] = {
187 /* Intel PIIX3 for the 430HX etc */ 187 /* Intel PIIX3 for the 430HX etc */
188 { 0x8086, 0x7010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_mwdma }, 188 { 0x8086, 0x7010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_mwdma },
189 /* VMware ICH4 */ 189 /* VMware ICH4 */
190 { 0x8086, 0x7111, 0x15ad, 0x1976, 0, 0, piix_pata_vmw }, 190 { 0x8086, 0x7111, 0x15ad, 0x1976, 0, 0, piix_pata_vmw },
191 /* Intel PIIX4 for the 430TX/440BX/MX chipset: UDMA 33 */ 191 /* Intel PIIX4 for the 430TX/440BX/MX chipset: UDMA 33 */
192 /* Also PIIX4E (fn3 rev 2) and PIIX4M (fn3 rev 3) */ 192 /* Also PIIX4E (fn3 rev 2) and PIIX4M (fn3 rev 3) */
193 { 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 }, 193 { 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
194 /* Intel PIIX4 */ 194 /* Intel PIIX4 */
195 { 0x8086, 0x7199, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 }, 195 { 0x8086, 0x7199, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
196 /* Intel PIIX4 */ 196 /* Intel PIIX4 */
197 { 0x8086, 0x7601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 }, 197 { 0x8086, 0x7601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
198 /* Intel PIIX */ 198 /* Intel PIIX */
199 { 0x8086, 0x84CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 }, 199 { 0x8086, 0x84CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
200 /* Intel ICH (i810, i815, i840) UDMA 66 */ 200 /* Intel ICH (i810, i815, i840) UDMA 66 */
201 { 0x8086, 0x2411, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_66 }, 201 { 0x8086, 0x2411, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_66 },
202 /* Intel ICH0: UDMA 33 */ 202 /* Intel ICH0: UDMA 33 */
203 { 0x8086, 0x2421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_33 }, 203 { 0x8086, 0x2421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_33 },
204 /* Intel ICH2M */ 204 /* Intel ICH2M */
205 { 0x8086, 0x244A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, 205 { 0x8086, 0x244A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
206 /* Intel ICH2 (i810E2, i845, 850, 860) UDMA 100 */ 206 /* Intel ICH2 (i810E2, i845, 850, 860) UDMA 100 */
207 { 0x8086, 0x244B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, 207 { 0x8086, 0x244B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
208 /* Intel ICH3M */ 208 /* Intel ICH3M */
209 { 0x8086, 0x248A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, 209 { 0x8086, 0x248A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
210 /* Intel ICH3 (E7500/1) UDMA 100 */ 210 /* Intel ICH3 (E7500/1) UDMA 100 */
211 { 0x8086, 0x248B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, 211 { 0x8086, 0x248B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
212 /* Intel ICH4 (i845GV, i845E, i852, i855) UDMA 100 */ 212 /* Intel ICH4 (i845GV, i845E, i852, i855) UDMA 100 */
213 { 0x8086, 0x24CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, 213 { 0x8086, 0x24CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
214 { 0x8086, 0x24CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, 214 { 0x8086, 0x24CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
215 /* Intel ICH5 */ 215 /* Intel ICH5 */
216 { 0x8086, 0x24DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, 216 { 0x8086, 0x24DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
217 /* C-ICH (i810E2) */ 217 /* C-ICH (i810E2) */
218 { 0x8086, 0x245B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, 218 { 0x8086, 0x245B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
219 /* ESB (855GME/875P + 6300ESB) UDMA 100 */ 219 /* ESB (855GME/875P + 6300ESB) UDMA 100 */
220 { 0x8086, 0x25A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, 220 { 0x8086, 0x25A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
221 /* ICH6 (i915) UDMA 100 */ 221 /* ICH6 (i915) UDMA 100 */
222 { 0x8086, 0x266F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, 222 { 0x8086, 0x266F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
223 /* ICH7/7-R (i945, i975) UDMA 100 */ 223 /* ICH7/7-R (i945, i975) UDMA 100 */
224 { 0x8086, 0x27DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100_nomwdma1 }, 224 { 0x8086, 0x27DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100_nomwdma1 },
225 { 0x8086, 0x269E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100_nomwdma1 }, 225 { 0x8086, 0x269E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100_nomwdma1 },
226 /* ICH8 Mobile PATA Controller */ 226 /* ICH8 Mobile PATA Controller */
227 { 0x8086, 0x2850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, 227 { 0x8086, 0x2850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
228 228
229 /* SATA ports */ 229 /* SATA ports */
230 230
231 /* 82801EB (ICH5) */ 231 /* 82801EB (ICH5) */
232 { 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, 232 { 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
233 /* 82801EB (ICH5) */ 233 /* 82801EB (ICH5) */
234 { 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, 234 { 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
235 /* 6300ESB (ICH5 variant with broken PCS present bits) */ 235 /* 6300ESB (ICH5 variant with broken PCS present bits) */
236 { 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, 236 { 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
237 /* 6300ESB pretending RAID */ 237 /* 6300ESB pretending RAID */
238 { 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, 238 { 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
239 /* 82801FB/FW (ICH6/ICH6W) */ 239 /* 82801FB/FW (ICH6/ICH6W) */
240 { 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata }, 240 { 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
241 /* 82801FR/FRW (ICH6R/ICH6RW) */ 241 /* 82801FR/FRW (ICH6R/ICH6RW) */
242 { 0x8086, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata }, 242 { 0x8086, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
243 /* 82801FBM ICH6M (ICH6R with only ports 0 and 2 implemented). 243 /* 82801FBM ICH6M (ICH6R with only ports 0 and 2 implemented).
244 * Attach iff the controller is in IDE mode. */ 244 * Attach iff the controller is in IDE mode. */
245 { 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 245 { 0x8086, 0x2653, PCI_ANY_ID, PCI_ANY_ID,
246 PCI_CLASS_STORAGE_IDE << 8, 0xffff00, ich6m_sata }, 246 PCI_CLASS_STORAGE_IDE << 8, 0xffff00, ich6m_sata },
247 /* 82801GB/GR/GH (ICH7, identical to ICH6) */ 247 /* 82801GB/GR/GH (ICH7, identical to ICH6) */
248 { 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata }, 248 { 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
249 /* 82801GBM/GHM (ICH7M, identical to ICH6M) */ 249 /* 82801GBM/GHM (ICH7M, identical to ICH6M) */
250 { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata }, 250 { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata },
251 /* Enterprise Southbridge 2 (631xESB/632xESB) */ 251 /* Enterprise Southbridge 2 (631xESB/632xESB) */
252 { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata }, 252 { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
253 /* SATA Controller 1 IDE (ICH8) */ 253 /* SATA Controller 1 IDE (ICH8) */
254 { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, 254 { 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
255 /* SATA Controller 2 IDE (ICH8) */ 255 /* SATA Controller 2 IDE (ICH8) */
256 { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 256 { 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
257 /* Mobile SATA Controller IDE (ICH8M), Apple */ 257 /* Mobile SATA Controller IDE (ICH8M), Apple */
258 { 0x8086, 0x2828, 0x106b, 0x00a0, 0, 0, ich8m_apple_sata }, 258 { 0x8086, 0x2828, 0x106b, 0x00a0, 0, 0, ich8m_apple_sata },
259 { 0x8086, 0x2828, 0x106b, 0x00a1, 0, 0, ich8m_apple_sata }, 259 { 0x8086, 0x2828, 0x106b, 0x00a1, 0, 0, ich8m_apple_sata },
260 { 0x8086, 0x2828, 0x106b, 0x00a3, 0, 0, ich8m_apple_sata }, 260 { 0x8086, 0x2828, 0x106b, 0x00a3, 0, 0, ich8m_apple_sata },
261 /* Mobile SATA Controller IDE (ICH8M) */ 261 /* Mobile SATA Controller IDE (ICH8M) */
262 { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, 262 { 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
263 /* SATA Controller IDE (ICH9) */ 263 /* SATA Controller IDE (ICH9) */
264 { 0x8086, 0x2920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, 264 { 0x8086, 0x2920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
265 /* SATA Controller IDE (ICH9) */ 265 /* SATA Controller IDE (ICH9) */
266 { 0x8086, 0x2921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 266 { 0x8086, 0x2921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
267 /* SATA Controller IDE (ICH9) */ 267 /* SATA Controller IDE (ICH9) */
268 { 0x8086, 0x2926, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 268 { 0x8086, 0x2926, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
269 /* SATA Controller IDE (ICH9M) */ 269 /* SATA Controller IDE (ICH9M) */
270 { 0x8086, 0x2928, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 270 { 0x8086, 0x2928, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
271 /* SATA Controller IDE (ICH9M) */ 271 /* SATA Controller IDE (ICH9M) */
272 { 0x8086, 0x292d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 272 { 0x8086, 0x292d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
273 /* SATA Controller IDE (ICH9M) */ 273 /* SATA Controller IDE (ICH9M) */
274 { 0x8086, 0x292e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, 274 { 0x8086, 0x292e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
275 /* SATA Controller IDE (Tolapai) */ 275 /* SATA Controller IDE (Tolapai) */
276 { 0x8086, 0x5028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, tolapai_sata }, 276 { 0x8086, 0x5028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, tolapai_sata },
277 /* SATA Controller IDE (ICH10) */ 277 /* SATA Controller IDE (ICH10) */
278 { 0x8086, 0x3a00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, 278 { 0x8086, 0x3a00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
279 /* SATA Controller IDE (ICH10) */ 279 /* SATA Controller IDE (ICH10) */
280 { 0x8086, 0x3a06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 280 { 0x8086, 0x3a06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
281 /* SATA Controller IDE (ICH10) */ 281 /* SATA Controller IDE (ICH10) */
282 { 0x8086, 0x3a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, 282 { 0x8086, 0x3a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
283 /* SATA Controller IDE (ICH10) */ 283 /* SATA Controller IDE (ICH10) */
284 { 0x8086, 0x3a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 284 { 0x8086, 0x3a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
285 /* SATA Controller IDE (PCH) */ 285 /* SATA Controller IDE (PCH) */
286 { 0x8086, 0x3b20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, 286 { 0x8086, 0x3b20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
287 /* SATA Controller IDE (PCH) */ 287 /* SATA Controller IDE (PCH) */
288 { 0x8086, 0x3b21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 288 { 0x8086, 0x3b21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
289 /* SATA Controller IDE (PCH) */ 289 /* SATA Controller IDE (PCH) */
290 { 0x8086, 0x3b26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 290 { 0x8086, 0x3b26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
291 /* SATA Controller IDE (PCH) */ 291 /* SATA Controller IDE (PCH) */
292 { 0x8086, 0x3b28, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, 292 { 0x8086, 0x3b28, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
293 /* SATA Controller IDE (PCH) */ 293 /* SATA Controller IDE (PCH) */
294 { 0x8086, 0x3b2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 294 { 0x8086, 0x3b2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
295 /* SATA Controller IDE (PCH) */ 295 /* SATA Controller IDE (PCH) */
296 { 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, 296 { 0x8086, 0x3b2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
297 /* SATA Controller IDE (CPT) */ 297 /* SATA Controller IDE (CPT) */
298 { 0x8086, 0x1c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, 298 { 0x8086, 0x1c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
299 /* SATA Controller IDE (CPT) */ 299 /* SATA Controller IDE (CPT) */
300 { 0x8086, 0x1c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, 300 { 0x8086, 0x1c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
301 /* SATA Controller IDE (CPT) */ 301 /* SATA Controller IDE (CPT) */
302 { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 302 { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
303 /* SATA Controller IDE (CPT) */ 303 /* SATA Controller IDE (CPT) */
304 { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 304 { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
305 /* SATA Controller IDE (PBG) */
306 { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
307 /* SATA Controller IDE (PBG) */
308 { 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
305 { } /* terminate list */ 309 { } /* terminate list */
306 }; 310 };
307 311
308 static struct pci_driver piix_pci_driver = { 312 static struct pci_driver piix_pci_driver = {
309 .name = DRV_NAME, 313 .name = DRV_NAME,
310 .id_table = piix_pci_tbl, 314 .id_table = piix_pci_tbl,
311 .probe = piix_init_one, 315 .probe = piix_init_one,
312 .remove = piix_remove_one, 316 .remove = piix_remove_one,
313 #ifdef CONFIG_PM 317 #ifdef CONFIG_PM
314 .suspend = piix_pci_device_suspend, 318 .suspend = piix_pci_device_suspend,
315 .resume = piix_pci_device_resume, 319 .resume = piix_pci_device_resume,
316 #endif 320 #endif
317 }; 321 };
318 322
319 static struct scsi_host_template piix_sht = { 323 static struct scsi_host_template piix_sht = {
320 ATA_BMDMA_SHT(DRV_NAME), 324 ATA_BMDMA_SHT(DRV_NAME),
321 }; 325 };
322 326
323 static struct ata_port_operations piix_sata_ops = { 327 static struct ata_port_operations piix_sata_ops = {
324 .inherits = &ata_bmdma32_port_ops, 328 .inherits = &ata_bmdma32_port_ops,
325 .sff_irq_check = piix_irq_check, 329 .sff_irq_check = piix_irq_check,
326 }; 330 };
327 331
328 static struct ata_port_operations piix_pata_ops = { 332 static struct ata_port_operations piix_pata_ops = {
329 .inherits = &piix_sata_ops, 333 .inherits = &piix_sata_ops,
330 .cable_detect = ata_cable_40wire, 334 .cable_detect = ata_cable_40wire,
331 .set_piomode = piix_set_piomode, 335 .set_piomode = piix_set_piomode,
332 .set_dmamode = piix_set_dmamode, 336 .set_dmamode = piix_set_dmamode,
333 .prereset = piix_pata_prereset, 337 .prereset = piix_pata_prereset,
334 }; 338 };
335 339
336 static struct ata_port_operations piix_vmw_ops = { 340 static struct ata_port_operations piix_vmw_ops = {
337 .inherits = &piix_pata_ops, 341 .inherits = &piix_pata_ops,
338 .bmdma_status = piix_vmw_bmdma_status, 342 .bmdma_status = piix_vmw_bmdma_status,
339 }; 343 };
340 344
341 static struct ata_port_operations ich_pata_ops = { 345 static struct ata_port_operations ich_pata_ops = {
342 .inherits = &piix_pata_ops, 346 .inherits = &piix_pata_ops,
343 .cable_detect = ich_pata_cable_detect, 347 .cable_detect = ich_pata_cable_detect,
344 .set_dmamode = ich_set_dmamode, 348 .set_dmamode = ich_set_dmamode,
345 }; 349 };
346 350
347 static struct ata_port_operations piix_sidpr_sata_ops = { 351 static struct ata_port_operations piix_sidpr_sata_ops = {
348 .inherits = &piix_sata_ops, 352 .inherits = &piix_sata_ops,
349 .hardreset = sata_std_hardreset, 353 .hardreset = sata_std_hardreset,
350 .scr_read = piix_sidpr_scr_read, 354 .scr_read = piix_sidpr_scr_read,
351 .scr_write = piix_sidpr_scr_write, 355 .scr_write = piix_sidpr_scr_write,
352 }; 356 };
353 357
354 static const struct piix_map_db ich5_map_db = { 358 static const struct piix_map_db ich5_map_db = {
355 .mask = 0x7, 359 .mask = 0x7,
356 .port_enable = 0x3, 360 .port_enable = 0x3,
357 .map = { 361 .map = {
358 /* PM PS SM SS MAP */ 362 /* PM PS SM SS MAP */
359 { P0, NA, P1, NA }, /* 000b */ 363 { P0, NA, P1, NA }, /* 000b */
360 { P1, NA, P0, NA }, /* 001b */ 364 { P1, NA, P0, NA }, /* 001b */
361 { RV, RV, RV, RV }, 365 { RV, RV, RV, RV },
362 { RV, RV, RV, RV }, 366 { RV, RV, RV, RV },
363 { P0, P1, IDE, IDE }, /* 100b */ 367 { P0, P1, IDE, IDE }, /* 100b */
364 { P1, P0, IDE, IDE }, /* 101b */ 368 { P1, P0, IDE, IDE }, /* 101b */
365 { IDE, IDE, P0, P1 }, /* 110b */ 369 { IDE, IDE, P0, P1 }, /* 110b */
366 { IDE, IDE, P1, P0 }, /* 111b */ 370 { IDE, IDE, P1, P0 }, /* 111b */
367 }, 371 },
368 }; 372 };
369 373
370 static const struct piix_map_db ich6_map_db = { 374 static const struct piix_map_db ich6_map_db = {
371 .mask = 0x3, 375 .mask = 0x3,
372 .port_enable = 0xf, 376 .port_enable = 0xf,
373 .map = { 377 .map = {
374 /* PM PS SM SS MAP */ 378 /* PM PS SM SS MAP */
375 { P0, P2, P1, P3 }, /* 00b */ 379 { P0, P2, P1, P3 }, /* 00b */
376 { IDE, IDE, P1, P3 }, /* 01b */ 380 { IDE, IDE, P1, P3 }, /* 01b */
377 { P0, P2, IDE, IDE }, /* 10b */ 381 { P0, P2, IDE, IDE }, /* 10b */
378 { RV, RV, RV, RV }, 382 { RV, RV, RV, RV },
379 }, 383 },
380 }; 384 };
381 385
382 static const struct piix_map_db ich6m_map_db = { 386 static const struct piix_map_db ich6m_map_db = {
383 .mask = 0x3, 387 .mask = 0x3,
384 .port_enable = 0x5, 388 .port_enable = 0x5,
385 389
386 /* Map 01b isn't specified in the doc but some notebooks use 390 /* Map 01b isn't specified in the doc but some notebooks use
387 * it anyway. MAP 01b has been spotted on both ICH6M and 391 * it anyway. MAP 01b has been spotted on both ICH6M and
388 * ICH7M. 392 * ICH7M.
389 */ 393 */
390 .map = { 394 .map = {
391 /* PM PS SM SS MAP */ 395 /* PM PS SM SS MAP */
392 { P0, P2, NA, NA }, /* 00b */ 396 { P0, P2, NA, NA }, /* 00b */
393 { IDE, IDE, P1, P3 }, /* 01b */ 397 { IDE, IDE, P1, P3 }, /* 01b */
394 { P0, P2, IDE, IDE }, /* 10b */ 398 { P0, P2, IDE, IDE }, /* 10b */
395 { RV, RV, RV, RV }, 399 { RV, RV, RV, RV },
396 }, 400 },
397 }; 401 };
398 402
399 static const struct piix_map_db ich8_map_db = { 403 static const struct piix_map_db ich8_map_db = {
400 .mask = 0x3, 404 .mask = 0x3,
401 .port_enable = 0xf, 405 .port_enable = 0xf,
402 .map = { 406 .map = {
403 /* PM PS SM SS MAP */ 407 /* PM PS SM SS MAP */
404 { P0, P2, P1, P3 }, /* 00b (hardwired when in AHCI) */ 408 { P0, P2, P1, P3 }, /* 00b (hardwired when in AHCI) */
405 { RV, RV, RV, RV }, 409 { RV, RV, RV, RV },
406 { P0, P2, IDE, IDE }, /* 10b (IDE mode) */ 410 { P0, P2, IDE, IDE }, /* 10b (IDE mode) */
407 { RV, RV, RV, RV }, 411 { RV, RV, RV, RV },
408 }, 412 },
409 }; 413 };
410 414
411 static const struct piix_map_db ich8_2port_map_db = { 415 static const struct piix_map_db ich8_2port_map_db = {
412 .mask = 0x3, 416 .mask = 0x3,
413 .port_enable = 0x3, 417 .port_enable = 0x3,
414 .map = { 418 .map = {
415 /* PM PS SM SS MAP */ 419 /* PM PS SM SS MAP */
416 { P0, NA, P1, NA }, /* 00b */ 420 { P0, NA, P1, NA }, /* 00b */
417 { RV, RV, RV, RV }, /* 01b */ 421 { RV, RV, RV, RV }, /* 01b */
418 { RV, RV, RV, RV }, /* 10b */ 422 { RV, RV, RV, RV }, /* 10b */
419 { RV, RV, RV, RV }, 423 { RV, RV, RV, RV },
420 }, 424 },
421 }; 425 };
422 426
423 static const struct piix_map_db ich8m_apple_map_db = { 427 static const struct piix_map_db ich8m_apple_map_db = {
424 .mask = 0x3, 428 .mask = 0x3,
425 .port_enable = 0x1, 429 .port_enable = 0x1,
426 .map = { 430 .map = {
427 /* PM PS SM SS MAP */ 431 /* PM PS SM SS MAP */
428 { P0, NA, NA, NA }, /* 00b */ 432 { P0, NA, NA, NA }, /* 00b */
429 { RV, RV, RV, RV }, 433 { RV, RV, RV, RV },
430 { P0, P2, IDE, IDE }, /* 10b */ 434 { P0, P2, IDE, IDE }, /* 10b */
431 { RV, RV, RV, RV }, 435 { RV, RV, RV, RV },
432 }, 436 },
433 }; 437 };
434 438
435 static const struct piix_map_db tolapai_map_db = { 439 static const struct piix_map_db tolapai_map_db = {
436 .mask = 0x3, 440 .mask = 0x3,
437 .port_enable = 0x3, 441 .port_enable = 0x3,
438 .map = { 442 .map = {
439 /* PM PS SM SS MAP */ 443 /* PM PS SM SS MAP */
440 { P0, NA, P1, NA }, /* 00b */ 444 { P0, NA, P1, NA }, /* 00b */
441 { RV, RV, RV, RV }, /* 01b */ 445 { RV, RV, RV, RV }, /* 01b */
442 { RV, RV, RV, RV }, /* 10b */ 446 { RV, RV, RV, RV }, /* 10b */
443 { RV, RV, RV, RV }, 447 { RV, RV, RV, RV },
444 }, 448 },
445 }; 449 };
446 450
447 static const struct piix_map_db *piix_map_db_table[] = { 451 static const struct piix_map_db *piix_map_db_table[] = {
448 [ich5_sata] = &ich5_map_db, 452 [ich5_sata] = &ich5_map_db,
449 [ich6_sata] = &ich6_map_db, 453 [ich6_sata] = &ich6_map_db,
450 [ich6m_sata] = &ich6m_map_db, 454 [ich6m_sata] = &ich6m_map_db,
451 [ich8_sata] = &ich8_map_db, 455 [ich8_sata] = &ich8_map_db,
452 [ich8_2port_sata] = &ich8_2port_map_db, 456 [ich8_2port_sata] = &ich8_2port_map_db,
453 [ich8m_apple_sata] = &ich8m_apple_map_db, 457 [ich8m_apple_sata] = &ich8m_apple_map_db,
454 [tolapai_sata] = &tolapai_map_db, 458 [tolapai_sata] = &tolapai_map_db,
455 }; 459 };
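
The map_db tables above are consulted at probe time: the chip's MAP register is read, masked down to the bits the chip actually implements, and used as a row index; each row says what sits behind the four slots (primary/secondary master/slave). A minimal standalone sketch of that lookup follows, with made-up constants and a made-up register value rather than the driver's:

    #include <stdio.h>

    enum { NA = -1, RV = -2, IDE = -3, P0 = 0, P1 = 1, P2 = 2, P3 = 3 };

    struct map_db {
    	unsigned int mask;	/* MAP register bits the chip implements */
    	int map[4][4];		/* one row per decoded MAP value */
    };

    static const struct map_db example_db = {	/* shaped like ich6_map_db */
    	.mask = 0x3,
    	.map = {
    		/* PM PS SM SS */
    		{ P0, P2, P1, P3 },	/* 00b: four SATA ports */
    		{ IDE, IDE, P1, P3 },	/* 01b: primary channel is PATA */
    		{ P0, P2, IDE, IDE },	/* 10b: secondary channel is PATA */
    		{ RV, RV, RV, RV },	/* 11b: reserved */
    	},
    };

    int main(void)
    {
    	unsigned char map_value = 0x02;	/* pretend the MAP register read back 10b */
    	const int *row = example_db.map[map_value & example_db.mask];

    	for (int i = 0; i < 4; i++)
    		printf("slot %d -> %d\n", i, row[i]);
    	return 0;
    }

The real decode, including the printed "MAP [ ... ]" banner, is in piix_init_sata_map() further down.
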
456 460
457 static struct ata_port_info piix_port_info[] = { 461 static struct ata_port_info piix_port_info[] = {
458 [piix_pata_mwdma] = /* PIIX3 MWDMA only */ 462 [piix_pata_mwdma] = /* PIIX3 MWDMA only */
459 { 463 {
460 .flags = PIIX_PATA_FLAGS, 464 .flags = PIIX_PATA_FLAGS,
461 .pio_mask = ATA_PIO4, 465 .pio_mask = ATA_PIO4,
462 .mwdma_mask = ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK: mwdma0 should be OK but slow */ 466 .mwdma_mask = ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK: mwdma0 should be OK but slow */
463 .port_ops = &piix_pata_ops, 467 .port_ops = &piix_pata_ops,
464 }, 468 },
465 469
466 [piix_pata_33] = /* PIIX4 at 33MHz */ 470 [piix_pata_33] = /* PIIX4 at 33MHz */
467 { 471 {
468 .flags = PIIX_PATA_FLAGS, 472 .flags = PIIX_PATA_FLAGS,
469 .pio_mask = ATA_PIO4, 473 .pio_mask = ATA_PIO4,
470 .mwdma_mask = ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK: mwdma0 should be OK but slow */ 474 .mwdma_mask = ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK: mwdma0 should be OK but slow */
471 .udma_mask = ATA_UDMA2, 475 .udma_mask = ATA_UDMA2,
472 .port_ops = &piix_pata_ops, 476 .port_ops = &piix_pata_ops,
473 }, 477 },
474 478
475 [ich_pata_33] = /* ICH0 - ICH at 33MHz */ 479 [ich_pata_33] = /* ICH0 - ICH at 33MHz */
476 { 480 {
477 .flags = PIIX_PATA_FLAGS, 481 .flags = PIIX_PATA_FLAGS,
478 .pio_mask = ATA_PIO4, 482 .pio_mask = ATA_PIO4,
479 .mwdma_mask = ATA_MWDMA12_ONLY, /* Check: maybe MWDMA0 is ok */ 483 .mwdma_mask = ATA_MWDMA12_ONLY, /* Check: maybe MWDMA0 is ok */
480 .udma_mask = ATA_UDMA2, 484 .udma_mask = ATA_UDMA2,
481 .port_ops = &ich_pata_ops, 485 .port_ops = &ich_pata_ops,
482 }, 486 },
483 487
484 [ich_pata_66] = /* ICH controllers up to 66MHz */ 488 [ich_pata_66] = /* ICH controllers up to 66MHz */
485 { 489 {
486 .flags = PIIX_PATA_FLAGS, 490 .flags = PIIX_PATA_FLAGS,
487 .pio_mask = ATA_PIO4, 491 .pio_mask = ATA_PIO4,
488 .mwdma_mask = ATA_MWDMA12_ONLY, /* MWDMA0 is broken on chip */ 492 .mwdma_mask = ATA_MWDMA12_ONLY, /* MWDMA0 is broken on chip */
489 .udma_mask = ATA_UDMA4, 493 .udma_mask = ATA_UDMA4,
490 .port_ops = &ich_pata_ops, 494 .port_ops = &ich_pata_ops,
491 }, 495 },
492 496
493 [ich_pata_100] = 497 [ich_pata_100] =
494 { 498 {
495 .flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR, 499 .flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
496 .pio_mask = ATA_PIO4, 500 .pio_mask = ATA_PIO4,
497 .mwdma_mask = ATA_MWDMA12_ONLY, 501 .mwdma_mask = ATA_MWDMA12_ONLY,
498 .udma_mask = ATA_UDMA5, 502 .udma_mask = ATA_UDMA5,
499 .port_ops = &ich_pata_ops, 503 .port_ops = &ich_pata_ops,
500 }, 504 },
501 505
502 [ich_pata_100_nomwdma1] = 506 [ich_pata_100_nomwdma1] =
503 { 507 {
504 .flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR, 508 .flags = PIIX_PATA_FLAGS | PIIX_FLAG_CHECKINTR,
505 .pio_mask = ATA_PIO4, 509 .pio_mask = ATA_PIO4,
506 .mwdma_mask = ATA_MWDMA2_ONLY, 510 .mwdma_mask = ATA_MWDMA2_ONLY,
507 .udma_mask = ATA_UDMA5, 511 .udma_mask = ATA_UDMA5,
508 .port_ops = &ich_pata_ops, 512 .port_ops = &ich_pata_ops,
509 }, 513 },
510 514
511 [ich5_sata] = 515 [ich5_sata] =
512 { 516 {
513 .flags = PIIX_SATA_FLAGS, 517 .flags = PIIX_SATA_FLAGS,
514 .pio_mask = ATA_PIO4, 518 .pio_mask = ATA_PIO4,
515 .mwdma_mask = ATA_MWDMA2, 519 .mwdma_mask = ATA_MWDMA2,
516 .udma_mask = ATA_UDMA6, 520 .udma_mask = ATA_UDMA6,
517 .port_ops = &piix_sata_ops, 521 .port_ops = &piix_sata_ops,
518 }, 522 },
519 523
520 [ich6_sata] = 524 [ich6_sata] =
521 { 525 {
522 .flags = PIIX_SATA_FLAGS, 526 .flags = PIIX_SATA_FLAGS,
523 .pio_mask = ATA_PIO4, 527 .pio_mask = ATA_PIO4,
524 .mwdma_mask = ATA_MWDMA2, 528 .mwdma_mask = ATA_MWDMA2,
525 .udma_mask = ATA_UDMA6, 529 .udma_mask = ATA_UDMA6,
526 .port_ops = &piix_sata_ops, 530 .port_ops = &piix_sata_ops,
527 }, 531 },
528 532
529 [ich6m_sata] = 533 [ich6m_sata] =
530 { 534 {
531 .flags = PIIX_SATA_FLAGS, 535 .flags = PIIX_SATA_FLAGS,
532 .pio_mask = ATA_PIO4, 536 .pio_mask = ATA_PIO4,
533 .mwdma_mask = ATA_MWDMA2, 537 .mwdma_mask = ATA_MWDMA2,
534 .udma_mask = ATA_UDMA6, 538 .udma_mask = ATA_UDMA6,
535 .port_ops = &piix_sata_ops, 539 .port_ops = &piix_sata_ops,
536 }, 540 },
537 541
538 [ich8_sata] = 542 [ich8_sata] =
539 { 543 {
540 .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR, 544 .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
541 .pio_mask = ATA_PIO4, 545 .pio_mask = ATA_PIO4,
542 .mwdma_mask = ATA_MWDMA2, 546 .mwdma_mask = ATA_MWDMA2,
543 .udma_mask = ATA_UDMA6, 547 .udma_mask = ATA_UDMA6,
544 .port_ops = &piix_sata_ops, 548 .port_ops = &piix_sata_ops,
545 }, 549 },
546 550
547 [ich8_2port_sata] = 551 [ich8_2port_sata] =
548 { 552 {
549 .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR, 553 .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
550 .pio_mask = ATA_PIO4, 554 .pio_mask = ATA_PIO4,
551 .mwdma_mask = ATA_MWDMA2, 555 .mwdma_mask = ATA_MWDMA2,
552 .udma_mask = ATA_UDMA6, 556 .udma_mask = ATA_UDMA6,
553 .port_ops = &piix_sata_ops, 557 .port_ops = &piix_sata_ops,
554 }, 558 },
555 559
556 [tolapai_sata] = 560 [tolapai_sata] =
557 { 561 {
558 .flags = PIIX_SATA_FLAGS, 562 .flags = PIIX_SATA_FLAGS,
559 .pio_mask = ATA_PIO4, 563 .pio_mask = ATA_PIO4,
560 .mwdma_mask = ATA_MWDMA2, 564 .mwdma_mask = ATA_MWDMA2,
561 .udma_mask = ATA_UDMA6, 565 .udma_mask = ATA_UDMA6,
562 .port_ops = &piix_sata_ops, 566 .port_ops = &piix_sata_ops,
563 }, 567 },
564 568
565 [ich8m_apple_sata] = 569 [ich8m_apple_sata] =
566 { 570 {
567 .flags = PIIX_SATA_FLAGS, 571 .flags = PIIX_SATA_FLAGS,
568 .pio_mask = ATA_PIO4, 572 .pio_mask = ATA_PIO4,
569 .mwdma_mask = ATA_MWDMA2, 573 .mwdma_mask = ATA_MWDMA2,
570 .udma_mask = ATA_UDMA6, 574 .udma_mask = ATA_UDMA6,
571 .port_ops = &piix_sata_ops, 575 .port_ops = &piix_sata_ops,
572 }, 576 },
573 577
574 [piix_pata_vmw] = 578 [piix_pata_vmw] =
575 { 579 {
576 .flags = PIIX_PATA_FLAGS, 580 .flags = PIIX_PATA_FLAGS,
577 .pio_mask = ATA_PIO4, 581 .pio_mask = ATA_PIO4,
578 .mwdma_mask = ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK: mwdma0 should be OK but slow */ 582 .mwdma_mask = ATA_MWDMA12_ONLY, /* mwdma1-2 ?? CHECK: mwdma0 should be OK but slow */
579 .udma_mask = ATA_UDMA2, 583 .udma_mask = ATA_UDMA2,
580 .port_ops = &piix_vmw_ops, 584 .port_ops = &piix_vmw_ops,
581 }, 585 },
582 586
583 }; 587 };
584 588
585 static struct pci_bits piix_enable_bits[] = { 589 static struct pci_bits piix_enable_bits[] = {
586 { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */ 590 { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
587 { 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */ 591 { 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
588 }; 592 };
589 593
590 MODULE_AUTHOR("Andre Hedrick, Alan Cox, Andrzej Krzysztofowicz, Jeff Garzik"); 594 MODULE_AUTHOR("Andre Hedrick, Alan Cox, Andrzej Krzysztofowicz, Jeff Garzik");
591 MODULE_DESCRIPTION("SCSI low-level driver for Intel PIIX/ICH ATA controllers"); 595 MODULE_DESCRIPTION("SCSI low-level driver for Intel PIIX/ICH ATA controllers");
592 MODULE_LICENSE("GPL"); 596 MODULE_LICENSE("GPL");
593 MODULE_DEVICE_TABLE(pci, piix_pci_tbl); 597 MODULE_DEVICE_TABLE(pci, piix_pci_tbl);
594 MODULE_VERSION(DRV_VERSION); 598 MODULE_VERSION(DRV_VERSION);
595 599
596 struct ich_laptop { 600 struct ich_laptop {
597 u16 device; 601 u16 device;
598 u16 subvendor; 602 u16 subvendor;
599 u16 subdevice; 603 u16 subdevice;
600 }; 604 };
601 605
602 /* 606 /*
603 * List of laptops that use short cables rather than 80-wire cables 607 * List of laptops that use short cables rather than 80-wire cables
604 */ 608 */
605 609
606 static const struct ich_laptop ich_laptop[] = { 610 static const struct ich_laptop ich_laptop[] = {
607 /* devid, subvendor, subdev */ 611 /* devid, subvendor, subdev */
608 { 0x27DF, 0x0005, 0x0280 }, /* ICH7 on Acer 5602WLMi */ 612 { 0x27DF, 0x0005, 0x0280 }, /* ICH7 on Acer 5602WLMi */
609 { 0x27DF, 0x1025, 0x0102 }, /* ICH7 on Acer 5602aWLMi */ 613 { 0x27DF, 0x1025, 0x0102 }, /* ICH7 on Acer 5602aWLMi */
610 { 0x27DF, 0x1025, 0x0110 }, /* ICH7 on Acer 3682WLMi */ 614 { 0x27DF, 0x1025, 0x0110 }, /* ICH7 on Acer 3682WLMi */
611 { 0x27DF, 0x1028, 0x02b0 }, /* ICH7 on unknown Dell */ 615 { 0x27DF, 0x1028, 0x02b0 }, /* ICH7 on unknown Dell */
612 { 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */ 616 { 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */
613 { 0x27DF, 0x103C, 0x30A1 }, /* ICH7 on HP Compaq nc2400 */ 617 { 0x27DF, 0x103C, 0x30A1 }, /* ICH7 on HP Compaq nc2400 */
614 { 0x27DF, 0x103C, 0x361a }, /* ICH7 on unknown HP */ 618 { 0x27DF, 0x103C, 0x361a }, /* ICH7 on unknown HP */
615 { 0x27DF, 0x1071, 0xD221 }, /* ICH7 on Hercules EC-900 */ 619 { 0x27DF, 0x1071, 0xD221 }, /* ICH7 on Hercules EC-900 */
616 { 0x27DF, 0x152D, 0x0778 }, /* ICH7 on unknown Intel */ 620 { 0x27DF, 0x152D, 0x0778 }, /* ICH7 on unknown Intel */
617 { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */ 621 { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */
618 { 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */ 622 { 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */
619 { 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */ 623 { 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */
620 { 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */ 624 { 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */
621 { 0x27df, 0x104d, 0x900e }, /* ICH7 on Sony TZ-90 */ 625 { 0x27df, 0x104d, 0x900e }, /* ICH7 on Sony TZ-90 */
622 /* end marker */ 626 /* end marker */
623 { 0, } 627 { 0, }
624 }; 628 };
625 629
626 /** 630 /**
627 * ich_pata_cable_detect - Probe host controller cable detect info 631 * ich_pata_cable_detect - Probe host controller cable detect info
628 * @ap: Port for which cable detect info is desired 632 * @ap: Port for which cable detect info is desired
629 * 633 *
630 * Read 80c cable indicator from ATA PCI device's PCI config 634 * Read 80c cable indicator from ATA PCI device's PCI config
631 * register. This register is normally set by firmware (BIOS). 635 * register. This register is normally set by firmware (BIOS).
632 * 636 *
633 * LOCKING: 637 * LOCKING:
634 * None (inherited from caller). 638 * None (inherited from caller).
635 */ 639 */
636 640
637 static int ich_pata_cable_detect(struct ata_port *ap) 641 static int ich_pata_cable_detect(struct ata_port *ap)
638 { 642 {
639 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 643 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
640 struct piix_host_priv *hpriv = ap->host->private_data; 644 struct piix_host_priv *hpriv = ap->host->private_data;
641 const struct ich_laptop *lap = &ich_laptop[0]; 645 const struct ich_laptop *lap = &ich_laptop[0];
642 u8 mask; 646 u8 mask;
643 647
644 /* Check for specials - Acer Aspire 5602WLMi */ 648 /* Check for specials - Acer Aspire 5602WLMi */
645 while (lap->device) { 649 while (lap->device) {
646 if (lap->device == pdev->device && 650 if (lap->device == pdev->device &&
647 lap->subvendor == pdev->subsystem_vendor && 651 lap->subvendor == pdev->subsystem_vendor &&
648 lap->subdevice == pdev->subsystem_device) 652 lap->subdevice == pdev->subsystem_device)
649 return ATA_CBL_PATA40_SHORT; 653 return ATA_CBL_PATA40_SHORT;
650 654
651 lap++; 655 lap++;
652 } 656 }
653 657
654 /* check BIOS cable detect results */ 658 /* check BIOS cable detect results */
655 mask = ap->port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC; 659 mask = ap->port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC;
656 if ((hpriv->saved_iocfg & mask) == 0) 660 if ((hpriv->saved_iocfg & mask) == 0)
657 return ATA_CBL_PATA40; 661 return ATA_CBL_PATA40;
658 return ATA_CBL_PATA80; 662 return ATA_CBL_PATA80;
659 } 663 }
660 664
661 /** 665 /**
662 * piix_pata_prereset - prereset for PATA host controller 666 * piix_pata_prereset - prereset for PATA host controller
663 * @link: Target link 667 * @link: Target link
664 * @deadline: deadline jiffies for the operation 668 * @deadline: deadline jiffies for the operation
665 * 669 *
666 * LOCKING: 670 * LOCKING:
667 * None (inherited from caller). 671 * None (inherited from caller).
668 */ 672 */
669 static int piix_pata_prereset(struct ata_link *link, unsigned long deadline) 673 static int piix_pata_prereset(struct ata_link *link, unsigned long deadline)
670 { 674 {
671 struct ata_port *ap = link->ap; 675 struct ata_port *ap = link->ap;
672 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 676 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
673 677
674 if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no])) 678 if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no]))
675 return -ENOENT; 679 return -ENOENT;
676 return ata_sff_prereset(link, deadline); 680 return ata_sff_prereset(link, deadline);
677 } 681 }
678 682
679 static DEFINE_SPINLOCK(piix_lock); 683 static DEFINE_SPINLOCK(piix_lock);
680 684
681 /** 685 /**
682 * piix_set_piomode - Initialize host controller PATA PIO timings 686 * piix_set_piomode - Initialize host controller PATA PIO timings
683 * @ap: Port whose timings we are configuring 687 * @ap: Port whose timings we are configuring
684 * @adev: Device whose timings we are configuring 688 * @adev: Device whose timings we are configuring
685 * 689 *
686 * Set PIO mode for device, in host controller PCI config space. 690 * Set PIO mode for device, in host controller PCI config space.
687 * 691 *
688 * LOCKING: 692 * LOCKING:
689 * None (inherited from caller). 693 * None (inherited from caller).
690 */ 694 */
691 695
692 static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev) 696 static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
693 { 697 {
694 struct pci_dev *dev = to_pci_dev(ap->host->dev); 698 struct pci_dev *dev = to_pci_dev(ap->host->dev);
695 unsigned long flags; 699 unsigned long flags;
696 unsigned int pio = adev->pio_mode - XFER_PIO_0; 700 unsigned int pio = adev->pio_mode - XFER_PIO_0;
697 unsigned int is_slave = (adev->devno != 0); 701 unsigned int is_slave = (adev->devno != 0);
698 unsigned int master_port = ap->port_no ? 0x42 : 0x40; 702 unsigned int master_port = ap->port_no ? 0x42 : 0x40;
699 unsigned int slave_port = 0x44; 703 unsigned int slave_port = 0x44;
700 u16 master_data; 704 u16 master_data;
701 u8 slave_data; 705 u8 slave_data;
702 u8 udma_enable; 706 u8 udma_enable;
703 int control = 0; 707 int control = 0;
704 708
705 /* 709 /*
706 * See Intel Document 298600-004 for the timing programming rules 710 * See Intel Document 298600-004 for the timing programming rules
707 * for ICH controllers. 711 * for ICH controllers.
708 */ 712 */
709 713
710 static const /* ISP RCT */ 714 static const /* ISP RCT */
711 u8 timings[][2] = { { 0, 0 }, 715 u8 timings[][2] = { { 0, 0 },
712 { 0, 0 }, 716 { 0, 0 },
713 { 1, 0 }, 717 { 1, 0 },
714 { 2, 1 }, 718 { 2, 1 },
715 { 2, 3 }, }; 719 { 2, 3 }, };
716 720
717 if (pio >= 2) 721 if (pio >= 2)
718 control |= 1; /* TIME1 enable */ 722 control |= 1; /* TIME1 enable */
719 if (ata_pio_need_iordy(adev)) 723 if (ata_pio_need_iordy(adev))
720 control |= 2; /* IE enable */ 724 control |= 2; /* IE enable */
721 725
722 /* Intel specifies that the PPE functionality is for disk only */ 726 /* Intel specifies that the PPE functionality is for disk only */
723 if (adev->class == ATA_DEV_ATA) 727 if (adev->class == ATA_DEV_ATA)
724 control |= 4; /* PPE enable */ 728 control |= 4; /* PPE enable */
725 729
726 spin_lock_irqsave(&piix_lock, flags); 730 spin_lock_irqsave(&piix_lock, flags);
727 731
728 /* PIO configuration clears DTE unconditionally. It will be 732 /* PIO configuration clears DTE unconditionally. It will be
729 * programmed in set_dmamode which is guaranteed to be called 733 * programmed in set_dmamode which is guaranteed to be called
730 * after set_piomode if any DMA mode is available. 734 * after set_piomode if any DMA mode is available.
731 */ 735 */
732 pci_read_config_word(dev, master_port, &master_data); 736 pci_read_config_word(dev, master_port, &master_data);
733 if (is_slave) { 737 if (is_slave) {
734 /* clear TIME1|IE1|PPE1|DTE1 */ 738 /* clear TIME1|IE1|PPE1|DTE1 */
735 master_data &= 0xff0f; 739 master_data &= 0xff0f;
736 /* Enable SITRE (separate slave timing register) */ 740 /* Enable SITRE (separate slave timing register) */
737 master_data |= 0x4000; 741 master_data |= 0x4000;
738 /* enable PPE1, IE1 and TIME1 as needed */ 742 /* enable PPE1, IE1 and TIME1 as needed */
739 master_data |= (control << 4); 743 master_data |= (control << 4);
740 pci_read_config_byte(dev, slave_port, &slave_data); 744 pci_read_config_byte(dev, slave_port, &slave_data);
741 slave_data &= (ap->port_no ? 0x0f : 0xf0); 745 slave_data &= (ap->port_no ? 0x0f : 0xf0);
742 /* Load the timing nibble for this slave */ 746 /* Load the timing nibble for this slave */
743 slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) 747 slave_data |= ((timings[pio][0] << 2) | timings[pio][1])
744 << (ap->port_no ? 4 : 0); 748 << (ap->port_no ? 4 : 0);
745 } else { 749 } else {
746 /* clear ISP|RCT|TIME0|IE0|PPE0|DTE0 */ 750 /* clear ISP|RCT|TIME0|IE0|PPE0|DTE0 */
747 master_data &= 0xccf0; 751 master_data &= 0xccf0;
748 /* Enable PPE, IE and TIME as appropriate */ 752 /* Enable PPE, IE and TIME as appropriate */
749 master_data |= control; 753 master_data |= control;
750 /* load ISP and RCT */ 754 /* load ISP and RCT */
751 master_data |= 755 master_data |=
752 (timings[pio][0] << 12) | 756 (timings[pio][0] << 12) |
753 (timings[pio][1] << 8); 757 (timings[pio][1] << 8);
754 } 758 }
755 pci_write_config_word(dev, master_port, master_data); 759 pci_write_config_word(dev, master_port, master_data);
756 if (is_slave) 760 if (is_slave)
757 pci_write_config_byte(dev, slave_port, slave_data); 761 pci_write_config_byte(dev, slave_port, slave_data);
758 762
759 /* Ensure the UDMA bit is off - it will be turned back on if 763 /* Ensure the UDMA bit is off - it will be turned back on if
760 UDMA is selected */ 764 UDMA is selected */
761 765
762 if (ap->udma_mask) { 766 if (ap->udma_mask) {
763 pci_read_config_byte(dev, 0x48, &udma_enable); 767 pci_read_config_byte(dev, 0x48, &udma_enable);
764 udma_enable &= ~(1 << (2 * ap->port_no + adev->devno)); 768 udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
765 pci_write_config_byte(dev, 0x48, udma_enable); 769 pci_write_config_byte(dev, 0x48, udma_enable);
766 } 770 }
767 771
768 spin_unlock_irqrestore(&piix_lock, flags); 772 spin_unlock_irqrestore(&piix_lock, flags);
769 } 773 }
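
For a concrete feel for the field packing above, here is a standalone sketch that computes the master-channel timing word for the primary drive. The device properties (IORDY needed, disk class) and the initial register value are assumed purely for illustration; this is not driver code:

    #include <stdio.h>

    /* ISP/RCT pairs per PIO mode, mirroring the driver's table */
    static const unsigned char timings[5][2] = {
    	{ 0, 0 }, { 0, 0 }, { 1, 0 }, { 2, 1 }, { 2, 3 },
    };

    int main(void)
    {
    	unsigned int pio = 4;			/* XFER_PIO_4 - XFER_PIO_0 */
    	int needs_iordy = 1, is_disk = 1;	/* assumed device properties */
    	unsigned int control = 0;
    	unsigned int master_data = 0xffff;	/* assumed config-space read */

    	if (pio >= 2)
    		control |= 1;			/* TIME1 */
    	if (needs_iordy)
    		control |= 2;			/* IE */
    	if (is_disk)
    		control |= 4;			/* PPE */

    	master_data &= 0xccf0;		/* clear ISP|RCT|TIME0|IE0|PPE0|DTE0 */
    	master_data |= control;
    	master_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8);

    	printf("PIO%u master timing word: 0x%04x\n", pio, master_data);
    	return 0;
    }
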
770 774
771 /** 775 /**
772 * do_pata_set_dmamode - Initialize host controller PATA DMA timings 776 * do_pata_set_dmamode - Initialize host controller PATA DMA timings
773 * @ap: Port whose timings we are configuring 777 * @ap: Port whose timings we are configuring
774 * @adev: Drive in question 778 * @adev: Drive in question
775 * @isich: set if the chip is an ICH device 779 * @isich: set if the chip is an ICH device
776 * 780 *
777 * Set MW/UDMA mode for device, in host controller PCI config space. 781 * Set MW/UDMA mode for device, in host controller PCI config space.
778 * 782 *
779 * LOCKING: 783 * LOCKING:
780 * None (inherited from caller). 784 * None (inherited from caller).
781 */ 785 */
782 786
783 static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, int isich) 787 static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, int isich)
784 { 788 {
785 struct pci_dev *dev = to_pci_dev(ap->host->dev); 789 struct pci_dev *dev = to_pci_dev(ap->host->dev);
786 unsigned long flags; 790 unsigned long flags;
787 u8 master_port = ap->port_no ? 0x42 : 0x40; 791 u8 master_port = ap->port_no ? 0x42 : 0x40;
788 u16 master_data; 792 u16 master_data;
789 u8 speed = adev->dma_mode; 793 u8 speed = adev->dma_mode;
790 int devid = adev->devno + 2 * ap->port_no; 794 int devid = adev->devno + 2 * ap->port_no;
791 u8 udma_enable = 0; 795 u8 udma_enable = 0;
792 796
793 static const /* ISP RCT */ 797 static const /* ISP RCT */
794 u8 timings[][2] = { { 0, 0 }, 798 u8 timings[][2] = { { 0, 0 },
795 { 0, 0 }, 799 { 0, 0 },
796 { 1, 0 }, 800 { 1, 0 },
797 { 2, 1 }, 801 { 2, 1 },
798 { 2, 3 }, }; 802 { 2, 3 }, };
799 803
800 spin_lock_irqsave(&piix_lock, flags); 804 spin_lock_irqsave(&piix_lock, flags);
801 805
802 pci_read_config_word(dev, master_port, &master_data); 806 pci_read_config_word(dev, master_port, &master_data);
803 if (ap->udma_mask) 807 if (ap->udma_mask)
804 pci_read_config_byte(dev, 0x48, &udma_enable); 808 pci_read_config_byte(dev, 0x48, &udma_enable);
805 809
806 if (speed >= XFER_UDMA_0) { 810 if (speed >= XFER_UDMA_0) {
807 unsigned int udma = adev->dma_mode - XFER_UDMA_0; 811 unsigned int udma = adev->dma_mode - XFER_UDMA_0;
808 u16 udma_timing; 812 u16 udma_timing;
809 u16 ideconf; 813 u16 ideconf;
810 int u_clock, u_speed; 814 int u_clock, u_speed;
811 815
812 /* 816 /*
813 * UDMA is handled by a combination of clock switching and 817 * UDMA is handled by a combination of clock switching and
814 * selection of dividers 818 * selection of dividers
815 * 819 *
816 * Handy rule: Odd modes are UDMATIMx 01, even are 02 820 * Handy rule: Odd modes are UDMATIMx 01, even are 02
817 * except UDMA0 which is 00 821 * except UDMA0 which is 00
818 */ 822 */
819 u_speed = min(2 - (udma & 1), udma); 823 u_speed = min(2 - (udma & 1), udma);
820 if (udma == 5) 824 if (udma == 5)
821 u_clock = 0x1000; /* 100MHz */ 825 u_clock = 0x1000; /* 100MHz */
822 else if (udma > 2) 826 else if (udma > 2)
823 u_clock = 1; /* 66MHz */ 827 u_clock = 1; /* 66MHz */
824 else 828 else
825 u_clock = 0; /* 33MHz */ 829 u_clock = 0; /* 33MHz */
826 830
827 udma_enable |= (1 << devid); 831 udma_enable |= (1 << devid);
828 832
829 /* Load the CT/RP selection */ 833 /* Load the CT/RP selection */
830 pci_read_config_word(dev, 0x4A, &udma_timing); 834 pci_read_config_word(dev, 0x4A, &udma_timing);
831 udma_timing &= ~(3 << (4 * devid)); 835 udma_timing &= ~(3 << (4 * devid));
832 udma_timing |= u_speed << (4 * devid); 836 udma_timing |= u_speed << (4 * devid);
833 pci_write_config_word(dev, 0x4A, udma_timing); 837 pci_write_config_word(dev, 0x4A, udma_timing);
834 838
835 if (isich) { 839 if (isich) {
836 /* Select a 33/66/100MHz clock */ 840 /* Select a 33/66/100MHz clock */
837 pci_read_config_word(dev, 0x54, &ideconf); 841 pci_read_config_word(dev, 0x54, &ideconf);
838 ideconf &= ~(0x1001 << devid); 842 ideconf &= ~(0x1001 << devid);
839 ideconf |= u_clock << devid; 843 ideconf |= u_clock << devid;
840 /* For ICH or later we should set bit 10 for better 844 /* For ICH or later we should set bit 10 for better
841 performance (WR_PingPong_En) */ 845 performance (WR_PingPong_En) */
842 pci_write_config_word(dev, 0x54, ideconf); 846 pci_write_config_word(dev, 0x54, ideconf);
843 } 847 }
844 } else { 848 } else {
845 /* 849 /*
846 * MWDMA is driven by the PIO timings. We must also enable 850 * MWDMA is driven by the PIO timings. We must also enable
847 * IORDY unconditionally along with TIME1. PPE has already 851 * IORDY unconditionally along with TIME1. PPE has already
848 * been set when the PIO timing was set. 852 * been set when the PIO timing was set.
849 */ 853 */
850 unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0; 854 unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
851 unsigned int control; 855 unsigned int control;
852 u8 slave_data; 856 u8 slave_data;
853 const unsigned int needed_pio[3] = { 857 const unsigned int needed_pio[3] = {
854 XFER_PIO_0, XFER_PIO_3, XFER_PIO_4 858 XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
855 }; 859 };
856 int pio = needed_pio[mwdma] - XFER_PIO_0; 860 int pio = needed_pio[mwdma] - XFER_PIO_0;
857 861
858 control = 3; /* IORDY|TIME1 */ 862 control = 3; /* IORDY|TIME1 */
859 863
860 /* If the drive's MWDMA mode is faster than its best PIO mode 864 /* If the drive's MWDMA mode is faster than its best PIO mode
861 then we must force the PIO timing into PIO0 */ 865 then we must force the PIO timing into PIO0 */
862 866
863 if (adev->pio_mode < needed_pio[mwdma]) 867 if (adev->pio_mode < needed_pio[mwdma])
864 /* Enable DMA timing only */ 868 /* Enable DMA timing only */
865 control |= 8; /* PIO cycles in PIO0 */ 869 control |= 8; /* PIO cycles in PIO0 */
866 870
867 if (adev->devno) { /* Slave */ 871 if (adev->devno) { /* Slave */
868 master_data &= 0xFF4F; /* Mask out IORDY|TIME1|DMAONLY */ 872 master_data &= 0xFF4F; /* Mask out IORDY|TIME1|DMAONLY */
869 master_data |= control << 4; 873 master_data |= control << 4;
870 pci_read_config_byte(dev, 0x44, &slave_data); 874 pci_read_config_byte(dev, 0x44, &slave_data);
871 slave_data &= (ap->port_no ? 0x0f : 0xf0); 875 slave_data &= (ap->port_no ? 0x0f : 0xf0);
872 /* Load the matching timing */ 876 /* Load the matching timing */
873 slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0); 877 slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
874 pci_write_config_byte(dev, 0x44, slave_data); 878 pci_write_config_byte(dev, 0x44, slave_data);
875 } else { /* Master */ 879 } else { /* Master */
876 master_data &= 0xCCF4; /* Mask out IORDY|TIME1|DMAONLY 880 master_data &= 0xCCF4; /* Mask out IORDY|TIME1|DMAONLY
877 and master timing bits */ 881 and master timing bits */
878 master_data |= control; 882 master_data |= control;
879 master_data |= 883 master_data |=
880 (timings[pio][0] << 12) | 884 (timings[pio][0] << 12) |
881 (timings[pio][1] << 8); 885 (timings[pio][1] << 8);
882 } 886 }
883 887
884 if (ap->udma_mask) 888 if (ap->udma_mask)
885 udma_enable &= ~(1 << devid); 889 udma_enable &= ~(1 << devid);
886 890
887 pci_write_config_word(dev, master_port, master_data); 891 pci_write_config_word(dev, master_port, master_data);
888 } 892 }
889 /* Don't scribble on 0x48 if the controller does not support UDMA */ 893 /* Don't scribble on 0x48 if the controller does not support UDMA */
890 if (ap->udma_mask) 894 if (ap->udma_mask)
891 pci_write_config_byte(dev, 0x48, udma_enable); 895 pci_write_config_byte(dev, 0x48, udma_enable);
892 896
893 spin_unlock_irqrestore(&piix_lock, flags); 897 spin_unlock_irqrestore(&piix_lock, flags);
894 } 898 }
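
The "handy rule" in the comment above falls out of u_speed = min(2 - (udma & 1), udma): odd modes select UDMATIMx value 01, even modes 02, and UDMA0 alone 00. A small standalone sketch that prints the selection and clock choice per mode, mirroring the driver's arithmetic (the program itself is illustrative):

    #include <stdio.h>

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
    	return a < b ? a : b;
    }

    int main(void)
    {
    	for (unsigned int udma = 0; udma <= 5; udma++) {
    		unsigned int u_speed = min_u(2 - (udma & 1), udma);
    		unsigned int u_clock = udma == 5 ? 0x1000 : udma > 2 ? 1 : 0;

    		printf("UDMA%u: UDMATIMx=%u u_clock=0x%x (%s)\n",
    		       udma, u_speed, u_clock,
    		       udma == 5 ? "100MHz" : udma > 2 ? "66MHz" : "33MHz");
    	}
    	return 0;
    }

Running it confirms the rule: UDMA0 yields 0, UDMA1/3/5 yield 1, and UDMA2/4 yield 2.
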
895 899
896 /** 900 /**
897 * piix_set_dmamode - Initialize host controller PATA DMA timings 901 * piix_set_dmamode - Initialize host controller PATA DMA timings
898 * @ap: Port whose timings we are configuring 902 * @ap: Port whose timings we are configuring
899 * @adev: Device whose timings we are configuring 903 * @adev: Device whose timings we are configuring
900 * 904 *
901 * Set MW/UDMA mode for device, in host controller PCI config space. 905 * Set MW/UDMA mode for device, in host controller PCI config space.
902 * 906 *
903 * LOCKING: 907 * LOCKING:
904 * None (inherited from caller). 908 * None (inherited from caller).
905 */ 909 */
906 910
907 static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev) 911 static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev)
908 { 912 {
909 do_pata_set_dmamode(ap, adev, 0); 913 do_pata_set_dmamode(ap, adev, 0);
910 } 914 }
911 915
912 /** 916 /**
913 * ich_set_dmamode - Initialize host controller PATA DMA timings 917 * ich_set_dmamode - Initialize host controller PATA DMA timings
914 * @ap: Port whose timings we are configuring 918 * @ap: Port whose timings we are configuring
915 * @adev: Device whose timings we are configuring 919 * @adev: Device whose timings we are configuring
916 * 920 *
917 * Set MW/UDMA mode for device, in host controller PCI config space. 921 * Set MW/UDMA mode for device, in host controller PCI config space.
918 * 922 *
919 * LOCKING: 923 * LOCKING:
920 * None (inherited from caller). 924 * None (inherited from caller).
921 */ 925 */
922 926
923 static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev) 927 static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev)
924 { 928 {
925 do_pata_set_dmamode(ap, adev, 1); 929 do_pata_set_dmamode(ap, adev, 1);
926 } 930 }
927 931
928 /* 932 /*
929 * Serial ATA Index/Data Pair Superset Registers access 933 * Serial ATA Index/Data Pair Superset Registers access
930 * 934 *
931 * Beginning with ICH8, there's a sane way to access SCRs: an index 935 * Beginning with ICH8, there's a sane way to access SCRs: an index
932 * and data register pair located at BAR5, which means that we have 936 * and data register pair located at BAR5, which means that we have
933 * separate SCRs for master and slave. This is handled using the libata 937 * separate SCRs for master and slave. This is handled using the libata
934 * slave_link facility. 938 * slave_link facility.
935 */ 939 */
936 static const int piix_sidx_map[] = { 940 static const int piix_sidx_map[] = {
937 [SCR_STATUS] = 0, 941 [SCR_STATUS] = 0,
938 [SCR_ERROR] = 2, 942 [SCR_ERROR] = 2,
939 [SCR_CONTROL] = 1, 943 [SCR_CONTROL] = 1,
940 }; 944 };
941 945
942 static void piix_sidpr_sel(struct ata_link *link, unsigned int reg) 946 static void piix_sidpr_sel(struct ata_link *link, unsigned int reg)
943 { 947 {
944 struct ata_port *ap = link->ap; 948 struct ata_port *ap = link->ap;
945 struct piix_host_priv *hpriv = ap->host->private_data; 949 struct piix_host_priv *hpriv = ap->host->private_data;
946 950
947 iowrite32(((ap->port_no * 2 + link->pmp) << 8) | piix_sidx_map[reg], 951 iowrite32(((ap->port_no * 2 + link->pmp) << 8) | piix_sidx_map[reg],
948 hpriv->sidpr + PIIX_SIDPR_IDX); 952 hpriv->sidpr + PIIX_SIDPR_IDX);
949 } 953 }
950 954
951 static int piix_sidpr_scr_read(struct ata_link *link, 955 static int piix_sidpr_scr_read(struct ata_link *link,
952 unsigned int reg, u32 *val) 956 unsigned int reg, u32 *val)
953 { 957 {
954 struct piix_host_priv *hpriv = link->ap->host->private_data; 958 struct piix_host_priv *hpriv = link->ap->host->private_data;
955 unsigned long flags; 959 unsigned long flags;
956 960
957 if (reg >= ARRAY_SIZE(piix_sidx_map)) 961 if (reg >= ARRAY_SIZE(piix_sidx_map))
958 return -EINVAL; 962 return -EINVAL;
959 963
960 spin_lock_irqsave(&hpriv->sidpr_lock, flags); 964 spin_lock_irqsave(&hpriv->sidpr_lock, flags);
961 piix_sidpr_sel(link, reg); 965 piix_sidpr_sel(link, reg);
962 *val = ioread32(hpriv->sidpr + PIIX_SIDPR_DATA); 966 *val = ioread32(hpriv->sidpr + PIIX_SIDPR_DATA);
963 spin_unlock_irqrestore(&hpriv->sidpr_lock, flags); 967 spin_unlock_irqrestore(&hpriv->sidpr_lock, flags);
964 return 0; 968 return 0;
965 } 969 }
966 970
967 static int piix_sidpr_scr_write(struct ata_link *link, 971 static int piix_sidpr_scr_write(struct ata_link *link,
968 unsigned int reg, u32 val) 972 unsigned int reg, u32 val)
969 { 973 {
970 struct piix_host_priv *hpriv = link->ap->host->private_data; 974 struct piix_host_priv *hpriv = link->ap->host->private_data;
971 unsigned long flags; 975 unsigned long flags;
972 976
973 if (reg >= ARRAY_SIZE(piix_sidx_map)) 977 if (reg >= ARRAY_SIZE(piix_sidx_map))
974 return -EINVAL; 978 return -EINVAL;
975 979
976 spin_lock_irqsave(&hpriv->sidpr_lock, flags); 980 spin_lock_irqsave(&hpriv->sidpr_lock, flags);
977 piix_sidpr_sel(link, reg); 981 piix_sidpr_sel(link, reg);
978 iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA); 982 iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA);
979 spin_unlock_irqrestore(&hpriv->sidpr_lock, flags); 983 spin_unlock_irqrestore(&hpriv->sidpr_lock, flags);
980 return 0; 984 return 0;
981 } 985 }
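
Both accessors follow the classic index/data pair pattern: write the register selector to the index port, then read or write the data port. Because that takes two operations, concurrent callers could interleave selections, which is why the driver serializes both halves under sidpr_lock. A standalone simulation of the pattern; the fake registers here merely stand in for the real ioread32()/iowrite32() MMIO accesses:

    #include <stdio.h>

    /* Fake MMIO: an index register selecting which backing register
     * the data window currently exposes. */
    static unsigned int fake_idx;
    static unsigned int fake_regs[4];

    static void write_idx(unsigned int v)  { fake_idx = v & 3; }
    static void write_data(unsigned int v) { fake_regs[fake_idx] = v; }
    static unsigned int read_data(void)    { return fake_regs[fake_idx]; }

    int main(void)
    {
    	/* select register 1 (SControl in the driver's mapping), write it,
    	 * then read it back through the same two-step sequence */
    	write_idx(1);
    	write_data(0x300);

    	write_idx(1);
    	printf("reg1 = 0x%x\n", read_data());
    	return 0;
    }
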
982 986
983 static bool piix_irq_check(struct ata_port *ap) 987 static bool piix_irq_check(struct ata_port *ap)
984 { 988 {
985 if (unlikely(!ap->ioaddr.bmdma_addr)) 989 if (unlikely(!ap->ioaddr.bmdma_addr))
986 return false; 990 return false;
987 991
988 return ap->ops->bmdma_status(ap) & ATA_DMA_INTR; 992 return ap->ops->bmdma_status(ap) & ATA_DMA_INTR;
989 } 993 }
990 994
991 #ifdef CONFIG_PM 995 #ifdef CONFIG_PM
992 static int piix_broken_suspend(void) 996 static int piix_broken_suspend(void)
993 { 997 {
994 static const struct dmi_system_id sysids[] = { 998 static const struct dmi_system_id sysids[] = {
995 { 999 {
996 .ident = "TECRA M3", 1000 .ident = "TECRA M3",
997 .matches = { 1001 .matches = {
998 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 1002 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
999 DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M3"), 1003 DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M3"),
1000 }, 1004 },
1001 }, 1005 },
1002 { 1006 {
1003 .ident = "TECRA M3", 1007 .ident = "TECRA M3",
1004 .matches = { 1008 .matches = {
1005 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 1009 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
1006 DMI_MATCH(DMI_PRODUCT_NAME, "Tecra M3"), 1010 DMI_MATCH(DMI_PRODUCT_NAME, "Tecra M3"),
1007 }, 1011 },
1008 }, 1012 },
1009 { 1013 {
1010 .ident = "TECRA M4", 1014 .ident = "TECRA M4",
1011 .matches = { 1015 .matches = {
1012 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 1016 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
1013 DMI_MATCH(DMI_PRODUCT_NAME, "Tecra M4"), 1017 DMI_MATCH(DMI_PRODUCT_NAME, "Tecra M4"),
1014 }, 1018 },
1015 }, 1019 },
1016 { 1020 {
1017 .ident = "TECRA M4", 1021 .ident = "TECRA M4",
1018 .matches = { 1022 .matches = {
1019 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 1023 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
1020 DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M4"), 1024 DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M4"),
1021 }, 1025 },
1022 }, 1026 },
1023 { 1027 {
1024 .ident = "TECRA M5", 1028 .ident = "TECRA M5",
1025 .matches = { 1029 .matches = {
1026 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 1030 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
1027 DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M5"), 1031 DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M5"),
1028 }, 1032 },
1029 }, 1033 },
1030 { 1034 {
1031 .ident = "TECRA M6", 1035 .ident = "TECRA M6",
1032 .matches = { 1036 .matches = {
1033 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 1037 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
1034 DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M6"), 1038 DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M6"),
1035 }, 1039 },
1036 }, 1040 },
1037 { 1041 {
1038 .ident = "TECRA M7", 1042 .ident = "TECRA M7",
1039 .matches = { 1043 .matches = {
1040 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 1044 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
1041 DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M7"), 1045 DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M7"),
1042 }, 1046 },
1043 }, 1047 },
1044 { 1048 {
1045 .ident = "TECRA A8", 1049 .ident = "TECRA A8",
1046 .matches = { 1050 .matches = {
1047 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 1051 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
1048 DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A8"), 1052 DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A8"),
1049 }, 1053 },
1050 }, 1054 },
1051 { 1055 {
1052 .ident = "Satellite R20", 1056 .ident = "Satellite R20",
1053 .matches = { 1057 .matches = {
1054 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 1058 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
1055 DMI_MATCH(DMI_PRODUCT_NAME, "Satellite R20"), 1059 DMI_MATCH(DMI_PRODUCT_NAME, "Satellite R20"),
1056 }, 1060 },
1057 }, 1061 },
1058 { 1062 {
1059 .ident = "Satellite R25", 1063 .ident = "Satellite R25",
1060 .matches = { 1064 .matches = {
1061 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 1065 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
1062 DMI_MATCH(DMI_PRODUCT_NAME, "Satellite R25"), 1066 DMI_MATCH(DMI_PRODUCT_NAME, "Satellite R25"),
1063 }, 1067 },
1064 }, 1068 },
1065 { 1069 {
1066 .ident = "Satellite U200", 1070 .ident = "Satellite U200",
1067 .matches = { 1071 .matches = {
1068 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 1072 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
1069 DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U200"), 1073 DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U200"),
1070 }, 1074 },
1071 }, 1075 },
1072 { 1076 {
1073 .ident = "Satellite U200", 1077 .ident = "Satellite U200",
1074 .matches = { 1078 .matches = {
1075 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 1079 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
1076 DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE U200"), 1080 DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE U200"),
1077 }, 1081 },
1078 }, 1082 },
1079 { 1083 {
1080 .ident = "Satellite Pro U200", 1084 .ident = "Satellite Pro U200",
1081 .matches = { 1085 .matches = {
1082 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 1086 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
1083 DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE PRO U200"), 1087 DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE PRO U200"),
1084 }, 1088 },
1085 }, 1089 },
1086 { 1090 {
1087 .ident = "Satellite U205", 1091 .ident = "Satellite U205",
1088 .matches = { 1092 .matches = {
1089 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 1093 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
1090 DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U205"), 1094 DMI_MATCH(DMI_PRODUCT_NAME, "Satellite U205"),
1091 }, 1095 },
1092 }, 1096 },
1093 { 1097 {
1094 .ident = "SATELLITE U205", 1098 .ident = "SATELLITE U205",
1095 .matches = { 1099 .matches = {
1096 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 1100 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
1097 DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE U205"), 1101 DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE U205"),
1098 }, 1102 },
1099 }, 1103 },
1100 { 1104 {
1101 .ident = "Portege M500", 1105 .ident = "Portege M500",
1102 .matches = { 1106 .matches = {
1103 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 1107 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
1104 DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M500"), 1108 DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M500"),
1105 }, 1109 },
1106 }, 1110 },
1107 { 1111 {
1108 .ident = "VGN-BX297XP", 1112 .ident = "VGN-BX297XP",
1109 .matches = { 1113 .matches = {
1110 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), 1114 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
1111 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-BX297XP"), 1115 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-BX297XP"),
1112 }, 1116 },
1113 }, 1117 },
1114 1118
1115 { } /* terminate list */ 1119 { } /* terminate list */
1116 }; 1120 };
1117 static const char *oemstrs[] = { 1121 static const char *oemstrs[] = {
1118 "Tecra M3,", 1122 "Tecra M3,",
1119 }; 1123 };
1120 int i; 1124 int i;
1121 1125
1122 if (dmi_check_system(sysids)) 1126 if (dmi_check_system(sysids))
1123 return 1; 1127 return 1;
1124 1128
1125 for (i = 0; i < ARRAY_SIZE(oemstrs); i++) 1129 for (i = 0; i < ARRAY_SIZE(oemstrs); i++)
1126 if (dmi_find_device(DMI_DEV_TYPE_OEM_STRING, oemstrs[i], NULL)) 1130 if (dmi_find_device(DMI_DEV_TYPE_OEM_STRING, oemstrs[i], NULL))
1127 return 1; 1131 return 1;
1128 1132
1129 /* TECRA M4 sometimes forgets its identity and reports bogus 1133 /* TECRA M4 sometimes forgets its identity and reports bogus
1130 * DMI information. As the bogus information is a bit 1134 * DMI information. As the bogus information is a bit
1131 * generic, match as many entries as possible. This manual 1135 * generic, match as many entries as possible. This manual
1132 * matching is necessary because dmi_system_id.matches is 1136 * matching is necessary because dmi_system_id.matches is
1133 * limited to four entries. 1137 * limited to four entries.
1134 */ 1138 */
1135 if (dmi_match(DMI_SYS_VENDOR, "TOSHIBA") && 1139 if (dmi_match(DMI_SYS_VENDOR, "TOSHIBA") &&
1136 dmi_match(DMI_PRODUCT_NAME, "000000") && 1140 dmi_match(DMI_PRODUCT_NAME, "000000") &&
1137 dmi_match(DMI_PRODUCT_VERSION, "000000") && 1141 dmi_match(DMI_PRODUCT_VERSION, "000000") &&
1138 dmi_match(DMI_PRODUCT_SERIAL, "000000") && 1142 dmi_match(DMI_PRODUCT_SERIAL, "000000") &&
1139 dmi_match(DMI_BOARD_VENDOR, "TOSHIBA") && 1143 dmi_match(DMI_BOARD_VENDOR, "TOSHIBA") &&
1140 dmi_match(DMI_BOARD_NAME, "Portable PC") && 1144 dmi_match(DMI_BOARD_NAME, "Portable PC") &&
1141 dmi_match(DMI_BOARD_VERSION, "Version A0")) 1145 dmi_match(DMI_BOARD_VERSION, "Version A0"))
1142 return 1; 1146 return 1;
1143 1147
1144 return 0; 1148 return 0;
1145 } 1149 }
1146 1150
1147 static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) 1151 static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
1148 { 1152 {
1149 struct ata_host *host = dev_get_drvdata(&pdev->dev); 1153 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1150 unsigned long flags; 1154 unsigned long flags;
1151 int rc = 0; 1155 int rc = 0;
1152 1156
1153 rc = ata_host_suspend(host, mesg); 1157 rc = ata_host_suspend(host, mesg);
1154 if (rc) 1158 if (rc)
1155 return rc; 1159 return rc;
1156 1160
1157 /* Some braindamaged ACPI suspend implementations expect the 1161 /* Some braindamaged ACPI suspend implementations expect the
1158 * controller to be awake on entry; otherwise, it burns cpu 1162 * controller to be awake on entry; otherwise, it burns cpu
1159 * cycles and power trying to do something to the sleeping 1163 * cycles and power trying to do something to the sleeping
1160 * beauty. 1164 * beauty.
1161 */ 1165 */
1162 if (piix_broken_suspend() && (mesg.event & PM_EVENT_SLEEP)) { 1166 if (piix_broken_suspend() && (mesg.event & PM_EVENT_SLEEP)) {
1163 pci_save_state(pdev); 1167 pci_save_state(pdev);
1164 1168
1165 /* mark its power state as "unknown", since we don't 1169 /* mark its power state as "unknown", since we don't
1166 * know if e.g. the BIOS will change its device state 1170 * know if e.g. the BIOS will change its device state
1167 * when we suspend. 1171 * when we suspend.
1168 */ 1172 */
1169 if (pdev->current_state == PCI_D0) 1173 if (pdev->current_state == PCI_D0)
1170 pdev->current_state = PCI_UNKNOWN; 1174 pdev->current_state = PCI_UNKNOWN;
1171 1175
1172 /* tell resume that it's waking up from broken suspend */ 1176 /* tell resume that it's waking up from broken suspend */
1173 spin_lock_irqsave(&host->lock, flags); 1177 spin_lock_irqsave(&host->lock, flags);
1174 host->flags |= PIIX_HOST_BROKEN_SUSPEND; 1178 host->flags |= PIIX_HOST_BROKEN_SUSPEND;
1175 spin_unlock_irqrestore(&host->lock, flags); 1179 spin_unlock_irqrestore(&host->lock, flags);
1176 } else 1180 } else
1177 ata_pci_device_do_suspend(pdev, mesg); 1181 ata_pci_device_do_suspend(pdev, mesg);
1178 1182
1179 return 0; 1183 return 0;
1180 } 1184 }
1181 1185
1182 static int piix_pci_device_resume(struct pci_dev *pdev) 1186 static int piix_pci_device_resume(struct pci_dev *pdev)
1183 { 1187 {
1184 struct ata_host *host = dev_get_drvdata(&pdev->dev); 1188 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1185 unsigned long flags; 1189 unsigned long flags;
1186 int rc; 1190 int rc;
1187 1191
1188 if (host->flags & PIIX_HOST_BROKEN_SUSPEND) { 1192 if (host->flags & PIIX_HOST_BROKEN_SUSPEND) {
1189 spin_lock_irqsave(&host->lock, flags); 1193 spin_lock_irqsave(&host->lock, flags);
1190 host->flags &= ~PIIX_HOST_BROKEN_SUSPEND; 1194 host->flags &= ~PIIX_HOST_BROKEN_SUSPEND;
1191 spin_unlock_irqrestore(&host->lock, flags); 1195 spin_unlock_irqrestore(&host->lock, flags);
1192 1196
1193 pci_set_power_state(pdev, PCI_D0); 1197 pci_set_power_state(pdev, PCI_D0);
1194 pci_restore_state(pdev); 1198 pci_restore_state(pdev);
1195 1199
1196 /* PCI device wasn't disabled during suspend. Use 1200 /* PCI device wasn't disabled during suspend. Use
1197 * pci_reenable_device() to avoid affecting the enable 1201 * pci_reenable_device() to avoid affecting the enable
1198 * count. 1202 * count.
1199 */ 1203 */
1200 rc = pci_reenable_device(pdev); 1204 rc = pci_reenable_device(pdev);
1201 if (rc) 1205 if (rc)
1202 dev_printk(KERN_ERR, &pdev->dev, "failed to enable " 1206 dev_printk(KERN_ERR, &pdev->dev, "failed to enable "
1203 "device after resume (%d)\n", rc); 1207 "device after resume (%d)\n", rc);
1204 } else 1208 } else
1205 rc = ata_pci_device_do_resume(pdev); 1209 rc = ata_pci_device_do_resume(pdev);
1206 1210
1207 if (rc == 0) 1211 if (rc == 0)
1208 ata_host_resume(host); 1212 ata_host_resume(host);
1209 1213
1210 return rc; 1214 return rc;
1211 } 1215 }
1212 #endif 1216 #endif
1213 1217
1214 static u8 piix_vmw_bmdma_status(struct ata_port *ap) 1218 static u8 piix_vmw_bmdma_status(struct ata_port *ap)
1215 { 1219 {
1216 return ata_bmdma_status(ap) & ~ATA_DMA_ERR; 1220 return ata_bmdma_status(ap) & ~ATA_DMA_ERR;
1217 } 1221 }
1218 1222
1219 #define AHCI_PCI_BAR 5 1223 #define AHCI_PCI_BAR 5
1220 #define AHCI_GLOBAL_CTL 0x04 1224 #define AHCI_GLOBAL_CTL 0x04
1221 #define AHCI_ENABLE (1 << 31) 1225 #define AHCI_ENABLE (1 << 31)
1222 static int piix_disable_ahci(struct pci_dev *pdev) 1226 static int piix_disable_ahci(struct pci_dev *pdev)
1223 { 1227 {
1224 void __iomem *mmio; 1228 void __iomem *mmio;
1225 u32 tmp; 1229 u32 tmp;
1226 int rc = 0; 1230 int rc = 0;
1227 1231
1228 /* BUG: pci_enable_device has not yet been called. This 1232 /* BUG: pci_enable_device has not yet been called. This
1229 * works because this device is usually set up by BIOS. 1233 * works because this device is usually set up by BIOS.
1230 */ 1234 */
1231 1235
1232 if (!pci_resource_start(pdev, AHCI_PCI_BAR) || 1236 if (!pci_resource_start(pdev, AHCI_PCI_BAR) ||
1233 !pci_resource_len(pdev, AHCI_PCI_BAR)) 1237 !pci_resource_len(pdev, AHCI_PCI_BAR))
1234 return 0; 1238 return 0;
1235 1239
1236 mmio = pci_iomap(pdev, AHCI_PCI_BAR, 64); 1240 mmio = pci_iomap(pdev, AHCI_PCI_BAR, 64);
1237 if (!mmio) 1241 if (!mmio)
1238 return -ENOMEM; 1242 return -ENOMEM;
1239 1243
1240 tmp = ioread32(mmio + AHCI_GLOBAL_CTL); 1244 tmp = ioread32(mmio + AHCI_GLOBAL_CTL);
1241 if (tmp & AHCI_ENABLE) { 1245 if (tmp & AHCI_ENABLE) {
1242 tmp &= ~AHCI_ENABLE; 1246 tmp &= ~AHCI_ENABLE;
1243 iowrite32(tmp, mmio + AHCI_GLOBAL_CTL); 1247 iowrite32(tmp, mmio + AHCI_GLOBAL_CTL);
1244 1248
1245 tmp = ioread32(mmio + AHCI_GLOBAL_CTL); 1249 tmp = ioread32(mmio + AHCI_GLOBAL_CTL);
1246 if (tmp & AHCI_ENABLE) 1250 if (tmp & AHCI_ENABLE)
1247 rc = -EIO; 1251 rc = -EIO;
1248 } 1252 }
1249 1253
1250 pci_iounmap(pdev, mmio); 1254 pci_iounmap(pdev, mmio);
1251 return rc; 1255 return rc;
1252 } 1256 }
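
piix_disable_ahci() is a read-modify-write with verification: read the global control register, clear the enable bit, then read it back to confirm the write stuck and fail with -EIO if it did not. A standalone simulation of that pattern; the fake control register stands in for the MMIO word at AHCI_GLOBAL_CTL:

    #include <stdio.h>

    #define AHCI_ENABLE (1u << 31)

    static unsigned int fake_ctl = AHCI_ENABLE;	/* BIOS left AHCI on */

    int main(void)
    {
    	unsigned int tmp = fake_ctl;		/* ioread32() stand-in */

    	if (tmp & AHCI_ENABLE) {
    		tmp &= ~AHCI_ENABLE;
    		fake_ctl = tmp;			/* iowrite32() stand-in */

    		tmp = fake_ctl;			/* read back to verify */
    		if (tmp & AHCI_ENABLE)
    			printf("enable bit stuck, would return -EIO\n");
    		else
    			printf("AHCI disabled\n");
    	}
    	return 0;
    }
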
1253 1257
1254 /** 1258 /**
1255 * piix_check_450nx_errata - Check for problem 450NX setup 1259 * piix_check_450nx_errata - Check for problem 450NX setup
1256 * @ata_dev: the PCI device to check 1260 * @ata_dev: the PCI device to check
1257 * 1261 *
1258 * Check for the presence of 450NX errata #19 and errata #25. If 1262 * Check for the presence of 450NX errata #19 and errata #25. If
1259 * they are found, return an error code so we can turn off DMA 1263 * they are found, return an error code so we can turn off DMA
1260 */ 1264 */
1261 1265
1262 static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev) 1266 static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
1263 { 1267 {
1264 struct pci_dev *pdev = NULL; 1268 struct pci_dev *pdev = NULL;
1265 u16 cfg; 1269 u16 cfg;
1266 int no_piix_dma = 0; 1270 int no_piix_dma = 0;
1267 1271
1268 while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev)) != NULL) { 1272 while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev)) != NULL) {
1269 /* Look for the 450NX PXB and check for problem configurations; 1273 /* Look for the 450NX PXB and check for problem configurations;
1270 a PCI quirk already checks bit 6 */ 1274 a PCI quirk already checks bit 6 */
1271 pci_read_config_word(pdev, 0x41, &cfg); 1275 pci_read_config_word(pdev, 0x41, &cfg);
1272 /* Only on the original revision: IDE DMA can hang */ 1276 /* Only on the original revision: IDE DMA can hang */
1273 if (pdev->revision == 0x00) 1277 if (pdev->revision == 0x00)
1274 no_piix_dma = 1; 1278 no_piix_dma = 1;
1275 /* On all revisions below 5 PXB bus lock must be disabled for IDE */ 1279 /* On all revisions below 5 PXB bus lock must be disabled for IDE */
1276 else if (cfg & (1<<14) && pdev->revision < 5) 1280 else if (cfg & (1<<14) && pdev->revision < 5)
1277 no_piix_dma = 2; 1281 no_piix_dma = 2;
1278 } 1282 }
1279 if (no_piix_dma) 1283 if (no_piix_dma)
1280 dev_printk(KERN_WARNING, &ata_dev->dev, "450NX errata present, disabling IDE DMA.\n"); 1284 dev_printk(KERN_WARNING, &ata_dev->dev, "450NX errata present, disabling IDE DMA.\n");
1281 if (no_piix_dma == 2) 1285 if (no_piix_dma == 2)
1282 dev_printk(KERN_WARNING, &ata_dev->dev, "A BIOS update may resolve this.\n"); 1286 dev_printk(KERN_WARNING, &ata_dev->dev, "A BIOS update may resolve this.\n");
1283 return no_piix_dma; 1287 return no_piix_dma;
1284 } 1288 }
1285 1289
1286 static void __devinit piix_init_pcs(struct ata_host *host, 1290 static void __devinit piix_init_pcs(struct ata_host *host,
1287 const struct piix_map_db *map_db) 1291 const struct piix_map_db *map_db)
1288 { 1292 {
1289 struct pci_dev *pdev = to_pci_dev(host->dev); 1293 struct pci_dev *pdev = to_pci_dev(host->dev);
1290 u16 pcs, new_pcs; 1294 u16 pcs, new_pcs;
1291 1295
1292 pci_read_config_word(pdev, ICH5_PCS, &pcs); 1296 pci_read_config_word(pdev, ICH5_PCS, &pcs);
1293 1297
1294 new_pcs = pcs | map_db->port_enable; 1298 new_pcs = pcs | map_db->port_enable;
1295 1299
1296 if (new_pcs != pcs) { 1300 if (new_pcs != pcs) {
1297 DPRINTK("updating PCS from 0x%x to 0x%x\n", pcs, new_pcs); 1301 DPRINTK("updating PCS from 0x%x to 0x%x\n", pcs, new_pcs);
1298 pci_write_config_word(pdev, ICH5_PCS, new_pcs); 1302 pci_write_config_word(pdev, ICH5_PCS, new_pcs);
1299 msleep(150); 1303 msleep(150);
1300 } 1304 }
1301 } 1305 }
1302 1306
1303 static const int *__devinit piix_init_sata_map(struct pci_dev *pdev, 1307 static const int *__devinit piix_init_sata_map(struct pci_dev *pdev,
1304 struct ata_port_info *pinfo, 1308 struct ata_port_info *pinfo,
1305 const struct piix_map_db *map_db) 1309 const struct piix_map_db *map_db)
1306 { 1310 {
1307 const int *map; 1311 const int *map;
1308 int i, invalid_map = 0; 1312 int i, invalid_map = 0;
1309 u8 map_value; 1313 u8 map_value;
1310 1314
1311 pci_read_config_byte(pdev, ICH5_PMR, &map_value); 1315 pci_read_config_byte(pdev, ICH5_PMR, &map_value);
1312 1316
1313 map = map_db->map[map_value & map_db->mask]; 1317 map = map_db->map[map_value & map_db->mask];
1314 1318
1315 dev_printk(KERN_INFO, &pdev->dev, "MAP ["); 1319 dev_printk(KERN_INFO, &pdev->dev, "MAP [");
1316 for (i = 0; i < 4; i++) { 1320 for (i = 0; i < 4; i++) {
1317 switch (map[i]) { 1321 switch (map[i]) {
1318 case RV: 1322 case RV:
1319 invalid_map = 1; 1323 invalid_map = 1;
1320 printk(" XX"); 1324 printk(" XX");
1321 break; 1325 break;
1322 1326
1323 case NA: 1327 case NA:
1324 printk(" --"); 1328 printk(" --");
1325 break; 1329 break;
1326 1330
1327 case IDE: 1331 case IDE:
1328 WARN_ON((i & 1) || map[i + 1] != IDE); 1332 WARN_ON((i & 1) || map[i + 1] != IDE);
1329 pinfo[i / 2] = piix_port_info[ich_pata_100]; 1333 pinfo[i / 2] = piix_port_info[ich_pata_100];
1330 i++; 1334 i++;
1331 printk(" IDE IDE"); 1335 printk(" IDE IDE");
1332 break; 1336 break;
1333 1337
1334 default: 1338 default:
1335 printk(" P%d", map[i]); 1339 printk(" P%d", map[i]);
1336 if (i & 1) 1340 if (i & 1)
1337 pinfo[i / 2].flags |= ATA_FLAG_SLAVE_POSS; 1341 pinfo[i / 2].flags |= ATA_FLAG_SLAVE_POSS;
1338 break; 1342 break;
1339 } 1343 }
1340 } 1344 }
1341 printk(" ]\n"); 1345 printk(" ]\n");
1342 1346
1343 if (invalid_map) 1347 if (invalid_map)
1344 dev_printk(KERN_ERR, &pdev->dev, 1348 dev_printk(KERN_ERR, &pdev->dev,
1345 "invalid MAP value %u\n", map_value); 1349 "invalid MAP value %u\n", map_value);
1346 1350
1347 return map; 1351 return map;
1348 } 1352 }
1349 1353
1350 static bool piix_no_sidpr(struct ata_host *host) 1354 static bool piix_no_sidpr(struct ata_host *host)
1351 { 1355 {
1352 struct pci_dev *pdev = to_pci_dev(host->dev); 1356 struct pci_dev *pdev = to_pci_dev(host->dev);
1353 1357
1354 /* 1358 /*
1355 * The Samsung DB-P70 only has three ATA ports exposed and, 1359 * The Samsung DB-P70 only has three ATA ports exposed and,
1356 * curiously, the unconnected first port reports link online 1360 * curiously, the unconnected first port reports link online
1357 * while not responding to the SRST protocol, causing excessive 1361 * while not responding to the SRST protocol, causing excessive
1358 * detection delay. 1362 * detection delay.
1359 * 1363 *
1360 * Unfortunately, the system doesn't carry enough DMI 1364 * Unfortunately, the system doesn't carry enough DMI
1361 * information to identify the machine but does have subsystem 1365 * information to identify the machine but does have subsystem
1362 * vendor and device set. As it's unclear whether the 1366 * vendor and device set. As it's unclear whether the
1363 * subsystem vendor/device is used only for this specific 1367 * subsystem vendor/device is used only for this specific
1364 * board, the port can't be disabled solely with the 1368 * board, the port can't be disabled solely with the
1365 * information; however, turning off SIDPR access works around 1369 * information; however, turning off SIDPR access works around
1366 * the problem. Turn it off. 1370 * the problem. Turn it off.
1367 * 1371 *
1368 * This problem is reported in bnc#441420. 1372 * This problem is reported in bnc#441420.
1369 * 1373 *
1370 * https://bugzilla.novell.com/show_bug.cgi?id=441420 1374 * https://bugzilla.novell.com/show_bug.cgi?id=441420
1371 */ 1375 */
1372 if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x2920 && 1376 if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x2920 &&
1373 pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG && 1377 pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG &&
1374 pdev->subsystem_device == 0xb049) { 1378 pdev->subsystem_device == 0xb049) {
1375 dev_printk(KERN_WARNING, host->dev, 1379 dev_printk(KERN_WARNING, host->dev,
1376 "Samsung DB-P70 detected, disabling SIDPR\n"); 1380 "Samsung DB-P70 detected, disabling SIDPR\n");
1377 return true; 1381 return true;
1378 } 1382 }
1379 1383
1380 return false; 1384 return false;
1381 } 1385 }
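The match itself is nothing more than comparing the four PCI identifiers. A self-contained sketch, where 0x8086 and 0x144d are the numeric values behind PCI_VENDOR_ID_INTEL and PCI_VENDOR_ID_SAMSUNG, and the struct is a stand-in for the relevant struct pci_dev fields:

    #include <stdio.h>
    #include <stdint.h>

    struct pci_ids { uint16_t vendor, device, sub_vendor, sub_device; };

    /* Device/subsystem-device values taken from the check above. */
    static const struct pci_ids db_p70 = { 0x8086, 0x2920, 0x144d, 0xb049 };

    static int is_db_p70(const struct pci_ids *id)
    {
            return id->vendor == db_p70.vendor &&
                   id->device == db_p70.device &&
                   id->sub_vendor == db_p70.sub_vendor &&
                   id->sub_device == db_p70.sub_device;
    }

    int main(void)
    {
            struct pci_ids id = { 0x8086, 0x2920, 0x144d, 0xb049 };

            printf("SIDPR %s\n", is_db_p70(&id) ? "disabled (quirk)" : "enabled");
            return 0;
    }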
1382 1386
1383 static int __devinit piix_init_sidpr(struct ata_host *host) 1387 static int __devinit piix_init_sidpr(struct ata_host *host)
1384 { 1388 {
1385 struct pci_dev *pdev = to_pci_dev(host->dev); 1389 struct pci_dev *pdev = to_pci_dev(host->dev);
1386 struct piix_host_priv *hpriv = host->private_data; 1390 struct piix_host_priv *hpriv = host->private_data;
1387 struct ata_link *link0 = &host->ports[0]->link; 1391 struct ata_link *link0 = &host->ports[0]->link;
1388 u32 scontrol; 1392 u32 scontrol;
1389 int i, rc; 1393 int i, rc;
1390 1394
1391 /* check for availability */ 1395 /* check for availability */
1392 for (i = 0; i < 4; i++) 1396 for (i = 0; i < 4; i++)
1393 if (hpriv->map[i] == IDE) 1397 if (hpriv->map[i] == IDE)
1394 return 0; 1398 return 0;
1395 1399
1396 /* is it blacklisted? */ 1400 /* is it blacklisted? */
1397 if (piix_no_sidpr(host)) 1401 if (piix_no_sidpr(host))
1398 return 0; 1402 return 0;
1399 1403
1400 if (!(host->ports[0]->flags & PIIX_FLAG_SIDPR)) 1404 if (!(host->ports[0]->flags & PIIX_FLAG_SIDPR))
1401 return 0; 1405 return 0;
1402 1406
1403 if (pci_resource_start(pdev, PIIX_SIDPR_BAR) == 0 || 1407 if (pci_resource_start(pdev, PIIX_SIDPR_BAR) == 0 ||
1404 pci_resource_len(pdev, PIIX_SIDPR_BAR) != PIIX_SIDPR_LEN) 1408 pci_resource_len(pdev, PIIX_SIDPR_BAR) != PIIX_SIDPR_LEN)
1405 return 0; 1409 return 0;
1406 1410
1407 if (pcim_iomap_regions(pdev, 1 << PIIX_SIDPR_BAR, DRV_NAME)) 1411 if (pcim_iomap_regions(pdev, 1 << PIIX_SIDPR_BAR, DRV_NAME))
1408 return 0; 1412 return 0;
1409 1413
1410 hpriv->sidpr = pcim_iomap_table(pdev)[PIIX_SIDPR_BAR]; 1414 hpriv->sidpr = pcim_iomap_table(pdev)[PIIX_SIDPR_BAR];
1411 1415
1412 /* SCR access via SIDPR doesn't work on some configurations. 1416 /* SCR access via SIDPR doesn't work on some configurations.
1413 * Give it a test drive by inhibiting power save modes, which 1417 * Give it a test drive by inhibiting power save modes, which
1414 * we'll do anyway. 1418 * we'll do anyway.
1415 */ 1419 */
1416 piix_sidpr_scr_read(link0, SCR_CONTROL, &scontrol); 1420 piix_sidpr_scr_read(link0, SCR_CONTROL, &scontrol);
1417 1421
1418 /* if IPM is already 3, SCR access is probably working. Don't 1422 /* if IPM is already 3, SCR access is probably working. Don't
1419 * un-inhibit power save modes as BIOS might have inhibited 1423 * un-inhibit power save modes as BIOS might have inhibited
1420 * them for a reason. 1424 * them for a reason.
1421 */ 1425 */
1422 if ((scontrol & 0xf00) != 0x300) { 1426 if ((scontrol & 0xf00) != 0x300) {
1423 scontrol |= 0x300; 1427 scontrol |= 0x300;
1424 piix_sidpr_scr_write(link0, SCR_CONTROL, scontrol); 1428 piix_sidpr_scr_write(link0, SCR_CONTROL, scontrol);
1425 piix_sidpr_scr_read(link0, SCR_CONTROL, &scontrol); 1429 piix_sidpr_scr_read(link0, SCR_CONTROL, &scontrol);
1426 1430
1427 if ((scontrol & 0xf00) != 0x300) { 1431 if ((scontrol & 0xf00) != 0x300) {
1428 dev_printk(KERN_INFO, host->dev, "SCR access via " 1432 dev_printk(KERN_INFO, host->dev, "SCR access via "
1429 "SIDPR is available but doesn't work\n"); 1433 "SIDPR is available but doesn't work\n");
1430 return 0; 1434 return 0;
1431 } 1435 }
1432 } 1436 }
1433 1437
1434 /* okay, SCRs available, set ops and ask libata for slave_link */ 1438 /* okay, SCRs available, set ops and ask libata for slave_link */
1435 for (i = 0; i < 2; i++) { 1439 for (i = 0; i < 2; i++) {
1436 struct ata_port *ap = host->ports[i]; 1440 struct ata_port *ap = host->ports[i];
1437 1441
1438 ap->ops = &piix_sidpr_sata_ops; 1442 ap->ops = &piix_sidpr_sata_ops;
1439 1443
1440 if (ap->flags & ATA_FLAG_SLAVE_POSS) { 1444 if (ap->flags & ATA_FLAG_SLAVE_POSS) {
1441 rc = ata_slave_link_init(ap); 1445 rc = ata_slave_link_init(ap);
1442 if (rc) 1446 if (rc)
1443 return rc; 1447 return rc;
1444 } 1448 }
1445 } 1449 }
1446 1450
1447 return 0; 1451 return 0;
1448 } 1452 }
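The probe above works on the IPM field of SControl, bits 11:8 (the 0xf00 mask); per the SATA specification a value of 3 there forbids transitions to both the Partial and Slumber power states. Writing 0x300 therefore inhibits power saving, and if the field does not read back as 3 the driver concludes SIDPR-based SCR access is broken. A standalone sketch of the field handling:

    #include <stdio.h>
    #include <stdint.h>

    #define SCTL_IPM_MASK   0xf00u  /* SControl bits 11:8 */
    #define SCTL_IPM_OFF    0x300u  /* 3 = no Partial, no Slumber transitions */

    int main(void)
    {
            uint32_t scontrol = 0x000;      /* pretend SCR read result */

            if ((scontrol & SCTL_IPM_MASK) != SCTL_IPM_OFF) {
                    scontrol |= SCTL_IPM_OFF;
                    /* the driver writes this back and re-reads; if the
                     * field still isn't 3, SIDPR is declared non-working */
            }
            printf("SControl.IPM = %u\n", (scontrol & SCTL_IPM_MASK) >> 8);
            return 0;
    }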
1449 1453
1450 static void piix_iocfg_bit18_quirk(struct ata_host *host) 1454 static void piix_iocfg_bit18_quirk(struct ata_host *host)
1451 { 1455 {
1452 static const struct dmi_system_id sysids[] = { 1456 static const struct dmi_system_id sysids[] = {
1453 { 1457 {
1454 /* Clevo M570U sets IOCFG bit 18 if the cdrom 1458 /* Clevo M570U sets IOCFG bit 18 if the cdrom
1455 * isn't used to boot the system, which 1459 * isn't used to boot the system, which
1456 * disables the channel. 1460 * disables the channel.
1457 */ 1461 */
1458 .ident = "M570U", 1462 .ident = "M570U",
1459 .matches = { 1463 .matches = {
1460 DMI_MATCH(DMI_SYS_VENDOR, "Clevo Co."), 1464 DMI_MATCH(DMI_SYS_VENDOR, "Clevo Co."),
1461 DMI_MATCH(DMI_PRODUCT_NAME, "M570U"), 1465 DMI_MATCH(DMI_PRODUCT_NAME, "M570U"),
1462 }, 1466 },
1463 }, 1467 },
1464 1468
1465 { } /* terminate list */ 1469 { } /* terminate list */
1466 }; 1470 };
1467 struct pci_dev *pdev = to_pci_dev(host->dev); 1471 struct pci_dev *pdev = to_pci_dev(host->dev);
1468 struct piix_host_priv *hpriv = host->private_data; 1472 struct piix_host_priv *hpriv = host->private_data;
1469 1473
1470 if (!dmi_check_system(sysids)) 1474 if (!dmi_check_system(sysids))
1471 return; 1475 return;
1472 1476
1473 /* The datasheet says that bit 18 is NOOP but certain systems 1477 /* The datasheet says that bit 18 is NOOP but certain systems
1474 * seem to use it to disable a channel. Clear the bit on the 1478 * seem to use it to disable a channel. Clear the bit on the
1475 * affected systems. 1479 * affected systems.
1476 */ 1480 */
1477 if (hpriv->saved_iocfg & (1 << 18)) { 1481 if (hpriv->saved_iocfg & (1 << 18)) {
1478 dev_printk(KERN_INFO, &pdev->dev, 1482 dev_printk(KERN_INFO, &pdev->dev,
1479 "applying IOCFG bit18 quirk\n"); 1483 "applying IOCFG bit18 quirk\n");
1480 pci_write_config_dword(pdev, PIIX_IOCFG, 1484 pci_write_config_dword(pdev, PIIX_IOCFG,
1481 hpriv->saved_iocfg & ~(1 << 18)); 1485 hpriv->saved_iocfg & ~(1 << 18));
1482 } 1486 }
1483 } 1487 }
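The quirk itself is a plain read-modify-write of the 32-bit IOCFG register, touching only bit 18. A self-contained sketch with an invented register value:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t iocfg = 0x00040000;    /* invented value, bit 18 set */

            if (iocfg & (1u << 18)) {
                    iocfg &= ~(1u << 18);   /* re-enable the disabled channel */
                    /* pci_write_config_dword() pushes this back in the driver */
            }
            printf("IOCFG = 0x%08x\n", iocfg);
            return 0;
    }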
1484 1488
1485 static bool piix_broken_system_poweroff(struct pci_dev *pdev) 1489 static bool piix_broken_system_poweroff(struct pci_dev *pdev)
1486 { 1490 {
1487 static const struct dmi_system_id broken_systems[] = { 1491 static const struct dmi_system_id broken_systems[] = {
1488 { 1492 {
1489 .ident = "HP Compaq 2510p", 1493 .ident = "HP Compaq 2510p",
1490 .matches = { 1494 .matches = {
1491 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 1495 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1492 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 2510p"), 1496 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 2510p"),
1493 }, 1497 },
1494 /* PCI slot number of the controller */ 1498 /* PCI slot number of the controller */
1495 .driver_data = (void *)0x1FUL, 1499 .driver_data = (void *)0x1FUL,
1496 }, 1500 },
1497 { 1501 {
1498 .ident = "HP Compaq nc6000", 1502 .ident = "HP Compaq nc6000",
1499 .matches = { 1503 .matches = {
1500 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 1504 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1501 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nc6000"), 1505 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nc6000"),
1502 }, 1506 },
1503 /* PCI slot number of the controller */ 1507 /* PCI slot number of the controller */
1504 .driver_data = (void *)0x1FUL, 1508 .driver_data = (void *)0x1FUL,
1505 }, 1509 },
1506 1510
1507 { } /* terminate list */ 1511 { } /* terminate list */
1508 }; 1512 };
1509 const struct dmi_system_id *dmi = dmi_first_match(broken_systems); 1513 const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
1510 1514
1511 if (dmi) { 1515 if (dmi) {
1512 unsigned long slot = (unsigned long)dmi->driver_data; 1516 unsigned long slot = (unsigned long)dmi->driver_data;
1513 /* apply the quirk only to on-board controllers */ 1517 /* apply the quirk only to on-board controllers */
1514 return slot == PCI_SLOT(pdev->devfn); 1518 return slot == PCI_SLOT(pdev->devfn);
1515 } 1519 }
1516 1520
1517 return false; 1521 return false;
1518 } 1522 }
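The quirk is keyed on the PCI slot because the affected controllers sit on-board at a fixed devfn. devfn packs the slot into its upper five bits and the function into the lower three, which is exactly what the kernel's PCI_SLOT() and PCI_FUNC() macros extract:

    #include <stdio.h>

    /* Same definitions as the kernel's <linux/pci.h> macros. */
    #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
    #define PCI_FUNC(devfn) ((devfn) & 0x07)

    int main(void)
    {
            unsigned int devfn = 0xf9;      /* 00:1f.1, a typical on-board IDE function */

            printf("slot 0x%02x, function %u\n",
                   PCI_SLOT(devfn), PCI_FUNC(devfn));
            return 0;
    }

For the entries above, driver_data of 0x1F thus matches only the ICH function in slot 1f, leaving any plug-in controller of the same type alone.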
1519 1523
1520 /** 1524 /**
1521 * piix_init_one - Register PIIX ATA PCI device with kernel services 1525 * piix_init_one - Register PIIX ATA PCI device with kernel services
1522 * @pdev: PCI device to register 1526 * @pdev: PCI device to register
1523 * @ent: Entry in piix_pci_tbl matching with @pdev 1527 * @ent: Entry in piix_pci_tbl matching with @pdev
1524 * 1528 *
1525 * Called from kernel PCI layer. We probe for combined mode (sigh), 1529 * Called from kernel PCI layer. We probe for combined mode (sigh),
1526 * and then hand over control to libata, for it to do the rest. 1530 * and then hand over control to libata, for it to do the rest.
1527 * 1531 *
1528 * LOCKING: 1532 * LOCKING:
1529 * Inherited from PCI layer (may sleep). 1533 * Inherited from PCI layer (may sleep).
1530 * 1534 *
1531 * RETURNS: 1535 * RETURNS:
1532 * Zero on success, or -ERRNO value. 1536 * Zero on success, or -ERRNO value.
1533 */ 1537 */
1534 1538
1535 static int __devinit piix_init_one(struct pci_dev *pdev, 1539 static int __devinit piix_init_one(struct pci_dev *pdev,
1536 const struct pci_device_id *ent) 1540 const struct pci_device_id *ent)
1537 { 1541 {
1538 static int printed_version; 1542 static int printed_version;
1539 struct device *dev = &pdev->dev; 1543 struct device *dev = &pdev->dev;
1540 struct ata_port_info port_info[2]; 1544 struct ata_port_info port_info[2];
1541 const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] }; 1545 const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
1542 unsigned long port_flags; 1546 unsigned long port_flags;
1543 struct ata_host *host; 1547 struct ata_host *host;
1544 struct piix_host_priv *hpriv; 1548 struct piix_host_priv *hpriv;
1545 int rc; 1549 int rc;
1546 1550
1547 if (!printed_version++) 1551 if (!printed_version++)
1548 dev_printk(KERN_DEBUG, &pdev->dev, 1552 dev_printk(KERN_DEBUG, &pdev->dev,
1549 "version " DRV_VERSION "\n"); 1553 "version " DRV_VERSION "\n");
1550 1554
1551 /* no hotplugging support for later devices (FIXME) */ 1555 /* no hotplugging support for later devices (FIXME) */
1552 if (!in_module_init && ent->driver_data >= ich5_sata) 1556 if (!in_module_init && ent->driver_data >= ich5_sata)
1553 return -ENODEV; 1557 return -ENODEV;
1554 1558
1555 if (piix_broken_system_poweroff(pdev)) { 1559 if (piix_broken_system_poweroff(pdev)) {
1556 piix_port_info[ent->driver_data].flags |= 1560 piix_port_info[ent->driver_data].flags |=
1557 ATA_FLAG_NO_POWEROFF_SPINDOWN | 1561 ATA_FLAG_NO_POWEROFF_SPINDOWN |
1558 ATA_FLAG_NO_HIBERNATE_SPINDOWN; 1562 ATA_FLAG_NO_HIBERNATE_SPINDOWN;
1559 dev_info(&pdev->dev, "quirky BIOS, skipping spindown " 1563 dev_info(&pdev->dev, "quirky BIOS, skipping spindown "
1560 "on poweroff and hibernation\n"); 1564 "on poweroff and hibernation\n");
1561 } 1565 }
1562 1566
1563 port_info[0] = piix_port_info[ent->driver_data]; 1567 port_info[0] = piix_port_info[ent->driver_data];
1564 port_info[1] = piix_port_info[ent->driver_data]; 1568 port_info[1] = piix_port_info[ent->driver_data];
1565 1569
1566 port_flags = port_info[0].flags; 1570 port_flags = port_info[0].flags;
1567 1571
1568 /* enable device and prepare host */ 1572 /* enable device and prepare host */
1569 rc = pcim_enable_device(pdev); 1573 rc = pcim_enable_device(pdev);
1570 if (rc) 1574 if (rc)
1571 return rc; 1575 return rc;
1572 1576
1573 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); 1577 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
1574 if (!hpriv) 1578 if (!hpriv)
1575 return -ENOMEM; 1579 return -ENOMEM;
1576 spin_lock_init(&hpriv->sidpr_lock); 1580 spin_lock_init(&hpriv->sidpr_lock);
1577 1581
1578 /* Save IOCFG; this will be used for cable detection, quirk 1582 /* Save IOCFG; this will be used for cable detection, quirk
1579 * detection and restoration on detach. This is necessary 1583 * detection and restoration on detach. This is necessary
1580 * because some ACPI implementations mess up cable related 1584 * because some ACPI implementations mess up cable related
1581 * bits on _STM. Reported on kernel bz#11879. 1585 * bits on _STM. Reported on kernel bz#11879.
1582 */ 1586 */
1583 pci_read_config_dword(pdev, PIIX_IOCFG, &hpriv->saved_iocfg); 1587 pci_read_config_dword(pdev, PIIX_IOCFG, &hpriv->saved_iocfg);
1584 1588
1585 /* ICH6R may be driven by either ata_piix or ahci driver 1589 /* ICH6R may be driven by either ata_piix or ahci driver
1586 * regardless of BIOS configuration. Make sure AHCI mode is 1590 * regardless of BIOS configuration. Make sure AHCI mode is
1587 * off. 1591 * off.
1588 */ 1592 */
1589 if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x2652) { 1593 if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x2652) {
1590 rc = piix_disable_ahci(pdev); 1594 rc = piix_disable_ahci(pdev);
1591 if (rc) 1595 if (rc)
1592 return rc; 1596 return rc;
1593 } 1597 }
1594 1598
1595 /* SATA map init can change port_info, do it before prepping host */ 1599 /* SATA map init can change port_info, do it before prepping host */
1596 if (port_flags & ATA_FLAG_SATA) 1600 if (port_flags & ATA_FLAG_SATA)
1597 hpriv->map = piix_init_sata_map(pdev, port_info, 1601 hpriv->map = piix_init_sata_map(pdev, port_info,
1598 piix_map_db_table[ent->driver_data]); 1602 piix_map_db_table[ent->driver_data]);
1599 1603
1600 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); 1604 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
1601 if (rc) 1605 if (rc)
1602 return rc; 1606 return rc;
1603 host->private_data = hpriv; 1607 host->private_data = hpriv;
1604 1608
1605 /* initialize controller */ 1609 /* initialize controller */
1606 if (port_flags & ATA_FLAG_SATA) { 1610 if (port_flags & ATA_FLAG_SATA) {
1607 piix_init_pcs(host, piix_map_db_table[ent->driver_data]); 1611 piix_init_pcs(host, piix_map_db_table[ent->driver_data]);
1608 rc = piix_init_sidpr(host); 1612 rc = piix_init_sidpr(host);
1609 if (rc) 1613 if (rc)
1610 return rc; 1614 return rc;
1611 } 1615 }
1612 1616
1613 /* apply IOCFG bit18 quirk */ 1617 /* apply IOCFG bit18 quirk */
1614 piix_iocfg_bit18_quirk(host); 1618 piix_iocfg_bit18_quirk(host);
1615 1619
1616 /* On ICH5, some BIOSen disable the interrupt using the 1620 /* On ICH5, some BIOSen disable the interrupt using the
1617 * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3. 1621 * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3.
1618 * On ICH6, this bit has the same effect, but only when 1622 * On ICH6, this bit has the same effect, but only when
1619 * MSI is disabled (and it is disabled, as we don't use 1623 * MSI is disabled (and it is disabled, as we don't use
1620 * message-signalled interrupts currently). 1624 * message-signalled interrupts currently).
1621 */ 1625 */
1622 if (port_flags & PIIX_FLAG_CHECKINTR) 1626 if (port_flags & PIIX_FLAG_CHECKINTR)
1623 pci_intx(pdev, 1); 1627 pci_intx(pdev, 1);
1624 1628
1625 if (piix_check_450nx_errata(pdev)) { 1629 if (piix_check_450nx_errata(pdev)) {
1626 /* This writes into the master table, but it does not 1630 /* This writes into the master table, but it does not
1627 really matter for this errata as we will apply it to 1631 really matter for this errata as we will apply it to
1628 all the PIIX devices on the board */ 1632 all the PIIX devices on the board */
1629 host->ports[0]->mwdma_mask = 0; 1633 host->ports[0]->mwdma_mask = 0;
1630 host->ports[0]->udma_mask = 0; 1634 host->ports[0]->udma_mask = 0;
1631 host->ports[1]->mwdma_mask = 0; 1635 host->ports[1]->mwdma_mask = 0;
1632 host->ports[1]->udma_mask = 0; 1636 host->ports[1]->udma_mask = 0;
1633 } 1637 }
1634 host->flags |= ATA_HOST_PARALLEL_SCAN; 1638 host->flags |= ATA_HOST_PARALLEL_SCAN;
1635 1639
1636 pci_set_master(pdev); 1640 pci_set_master(pdev);
1637 return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &piix_sht); 1641 return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &piix_sht);
1638 } 1642 }
1639 1643
1640 static void piix_remove_one(struct pci_dev *pdev) 1644 static void piix_remove_one(struct pci_dev *pdev)
1641 { 1645 {
1642 struct ata_host *host = dev_get_drvdata(&pdev->dev); 1646 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1643 struct piix_host_priv *hpriv = host->private_data; 1647 struct piix_host_priv *hpriv = host->private_data;
1644 1648
1645 pci_write_config_dword(pdev, PIIX_IOCFG, hpriv->saved_iocfg); 1649 pci_write_config_dword(pdev, PIIX_IOCFG, hpriv->saved_iocfg);
1646 1650
1647 ata_pci_remove_one(pdev); 1651 ata_pci_remove_one(pdev);
1648 } 1652 }
1649 1653
1650 static int __init piix_init(void) 1654 static int __init piix_init(void)
1651 { 1655 {
1652 int rc; 1656 int rc;
1653 1657
1654 DPRINTK("pci_register_driver\n"); 1658 DPRINTK("pci_register_driver\n");
1655 rc = pci_register_driver(&piix_pci_driver); 1659 rc = pci_register_driver(&piix_pci_driver);
1656 if (rc) 1660 if (rc)
1657 return rc; 1661 return rc;
1658 1662
1659 in_module_init = 0; 1663 in_module_init = 0;
1660 1664
1661 DPRINTK("done\n"); 1665 DPRINTK("done\n");
1662 return 0; 1666 return 0;
1663 } 1667 }
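Note how piix_init() clears in_module_init only after pci_register_driver() returns: the boot-time bus scan sees the flag set, while any later hotplug probe of an ICH5+ SATA variant is refused by the check at the top of piix_init_one(). A toy model of that guard, with -19 standing in for the kernel's -ENODEV:

    #include <stdio.h>

    static int in_module_init = 1;

    /* Probe stub: SATA variants are only accepted while the initial
     * pci_register_driver() scan runs. */
    static int probe(int is_later_sata)
    {
            if (!in_module_init && is_later_sata)
                    return -19;             /* -ENODEV */
            return 0;
    }

    int main(void)
    {
            printf("boot-time probe -> %d\n", probe(1));    /* 0 */
            in_module_init = 0;             /* piix_init() finished */
            printf("hotplug probe  -> %d\n", probe(1));     /* -19 */
            return 0;
    }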
1664 1668
1665 static void __exit piix_exit(void) 1669 static void __exit piix_exit(void)
1666 { 1670 {
1667 pci_unregister_driver(&piix_pci_driver); 1671 pci_unregister_driver(&piix_pci_driver);
1668 } 1672 }
1669 1673
1670 module_init(piix_init); 1674 module_init(piix_init);
1671 module_exit(piix_exit); 1675 module_exit(piix_exit);
1672 1676
drivers/ata/libahci.c
1 /* 1 /*
2 * libahci.c - Common AHCI SATA low-level routines 2 * libahci.c - Common AHCI SATA low-level routines
3 * 3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org 5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails. 6 * on emails.
7 * 7 *
8 * Copyright 2004-2005 Red Hat, Inc. 8 * Copyright 2004-2005 Red Hat, Inc.
9 * 9 *
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option) 13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version. 14 * any later version.
15 * 15 *
16 * This program is distributed in the hope that it will be useful, 16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details. 19 * GNU General Public License for more details.
20 * 20 *
21 * You should have received a copy of the GNU General Public License 21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to 22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 * 24 *
25 * 25 *
26 * libata documentation is available via 'make {ps|pdf}docs', 26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.* 27 * as Documentation/DocBook/libata.*
28 * 28 *
29 * AHCI hardware documentation: 29 * AHCI hardware documentation:
30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf 30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf 31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32 * 32 *
33 */ 33 */
34 34
35 #include <linux/kernel.h> 35 #include <linux/kernel.h>
36 #include <linux/gfp.h> 36 #include <linux/gfp.h>
37 #include <linux/module.h> 37 #include <linux/module.h>
38 #include <linux/init.h> 38 #include <linux/init.h>
39 #include <linux/blkdev.h> 39 #include <linux/blkdev.h>
40 #include <linux/delay.h> 40 #include <linux/delay.h>
41 #include <linux/interrupt.h> 41 #include <linux/interrupt.h>
42 #include <linux/dma-mapping.h> 42 #include <linux/dma-mapping.h>
43 #include <linux/device.h> 43 #include <linux/device.h>
44 #include <scsi/scsi_host.h> 44 #include <scsi/scsi_host.h>
45 #include <scsi/scsi_cmnd.h> 45 #include <scsi/scsi_cmnd.h>
46 #include <linux/libata.h> 46 #include <linux/libata.h>
47 #include "ahci.h" 47 #include "ahci.h"
48 48
49 static int ahci_skip_host_reset; 49 static int ahci_skip_host_reset;
50 int ahci_ignore_sss; 50 int ahci_ignore_sss;
51 EXPORT_SYMBOL_GPL(ahci_ignore_sss); 51 EXPORT_SYMBOL_GPL(ahci_ignore_sss);
52 52
53 module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444); 53 module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
54 MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)"); 54 MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
55 55
56 module_param_named(ignore_sss, ahci_ignore_sss, int, 0444); 56 module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
57 MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)"); 57 MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
58 58
59 static int ahci_enable_alpm(struct ata_port *ap, 59 static int ahci_enable_alpm(struct ata_port *ap,
60 enum link_pm policy); 60 enum link_pm policy);
61 static void ahci_disable_alpm(struct ata_port *ap); 61 static void ahci_disable_alpm(struct ata_port *ap);
62 static ssize_t ahci_led_show(struct ata_port *ap, char *buf); 62 static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
63 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf, 63 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
64 size_t size); 64 size_t size);
65 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state, 65 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
66 ssize_t size); 66 ssize_t size);
67 67
68 68
69 69
70 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); 70 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
71 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); 71 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
72 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc); 72 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
73 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc); 73 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
74 static int ahci_port_start(struct ata_port *ap); 74 static int ahci_port_start(struct ata_port *ap);
75 static void ahci_port_stop(struct ata_port *ap); 75 static void ahci_port_stop(struct ata_port *ap);
76 static void ahci_qc_prep(struct ata_queued_cmd *qc); 76 static void ahci_qc_prep(struct ata_queued_cmd *qc);
77 static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc); 77 static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
78 static void ahci_freeze(struct ata_port *ap); 78 static void ahci_freeze(struct ata_port *ap);
79 static void ahci_thaw(struct ata_port *ap); 79 static void ahci_thaw(struct ata_port *ap);
80 static void ahci_enable_fbs(struct ata_port *ap); 80 static void ahci_enable_fbs(struct ata_port *ap);
81 static void ahci_disable_fbs(struct ata_port *ap); 81 static void ahci_disable_fbs(struct ata_port *ap);
82 static void ahci_pmp_attach(struct ata_port *ap); 82 static void ahci_pmp_attach(struct ata_port *ap);
83 static void ahci_pmp_detach(struct ata_port *ap); 83 static void ahci_pmp_detach(struct ata_port *ap);
84 static int ahci_softreset(struct ata_link *link, unsigned int *class, 84 static int ahci_softreset(struct ata_link *link, unsigned int *class,
85 unsigned long deadline); 85 unsigned long deadline);
86 static int ahci_hardreset(struct ata_link *link, unsigned int *class, 86 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
87 unsigned long deadline); 87 unsigned long deadline);
88 static void ahci_postreset(struct ata_link *link, unsigned int *class); 88 static void ahci_postreset(struct ata_link *link, unsigned int *class);
89 static void ahci_error_handler(struct ata_port *ap); 89 static void ahci_error_handler(struct ata_port *ap);
90 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc); 90 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
91 static int ahci_port_resume(struct ata_port *ap); 91 static int ahci_port_resume(struct ata_port *ap);
92 static void ahci_dev_config(struct ata_device *dev); 92 static void ahci_dev_config(struct ata_device *dev);
93 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, 93 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
94 u32 opts); 94 u32 opts);
95 #ifdef CONFIG_PM 95 #ifdef CONFIG_PM
96 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg); 96 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
97 #endif 97 #endif
98 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf); 98 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
99 static ssize_t ahci_activity_store(struct ata_device *dev, 99 static ssize_t ahci_activity_store(struct ata_device *dev,
100 enum sw_activity val); 100 enum sw_activity val);
101 static void ahci_init_sw_activity(struct ata_link *link); 101 static void ahci_init_sw_activity(struct ata_link *link);
102 102
103 static ssize_t ahci_show_host_caps(struct device *dev, 103 static ssize_t ahci_show_host_caps(struct device *dev,
104 struct device_attribute *attr, char *buf); 104 struct device_attribute *attr, char *buf);
105 static ssize_t ahci_show_host_cap2(struct device *dev, 105 static ssize_t ahci_show_host_cap2(struct device *dev,
106 struct device_attribute *attr, char *buf); 106 struct device_attribute *attr, char *buf);
107 static ssize_t ahci_show_host_version(struct device *dev, 107 static ssize_t ahci_show_host_version(struct device *dev,
108 struct device_attribute *attr, char *buf); 108 struct device_attribute *attr, char *buf);
109 static ssize_t ahci_show_port_cmd(struct device *dev, 109 static ssize_t ahci_show_port_cmd(struct device *dev,
110 struct device_attribute *attr, char *buf); 110 struct device_attribute *attr, char *buf);
111 static ssize_t ahci_read_em_buffer(struct device *dev, 111 static ssize_t ahci_read_em_buffer(struct device *dev,
112 struct device_attribute *attr, char *buf); 112 struct device_attribute *attr, char *buf);
113 static ssize_t ahci_store_em_buffer(struct device *dev, 113 static ssize_t ahci_store_em_buffer(struct device *dev,
114 struct device_attribute *attr, 114 struct device_attribute *attr,
115 const char *buf, size_t size); 115 const char *buf, size_t size);
116 116
117 static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL); 117 static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
118 static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL); 118 static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
119 static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL); 119 static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
120 static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL); 120 static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
121 static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO, 121 static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
122 ahci_read_em_buffer, ahci_store_em_buffer); 122 ahci_read_em_buffer, ahci_store_em_buffer);
123 123
124 static struct device_attribute *ahci_shost_attrs[] = { 124 static struct device_attribute *ahci_shost_attrs[] = {
125 &dev_attr_link_power_management_policy, 125 &dev_attr_link_power_management_policy,
126 &dev_attr_em_message_type, 126 &dev_attr_em_message_type,
127 &dev_attr_em_message, 127 &dev_attr_em_message,
128 &dev_attr_ahci_host_caps, 128 &dev_attr_ahci_host_caps,
129 &dev_attr_ahci_host_cap2, 129 &dev_attr_ahci_host_cap2,
130 &dev_attr_ahci_host_version, 130 &dev_attr_ahci_host_version,
131 &dev_attr_ahci_port_cmd, 131 &dev_attr_ahci_port_cmd,
132 &dev_attr_em_buffer, 132 &dev_attr_em_buffer,
133 NULL 133 NULL
134 }; 134 };
135 135
136 static struct device_attribute *ahci_sdev_attrs[] = { 136 static struct device_attribute *ahci_sdev_attrs[] = {
137 &dev_attr_sw_activity, 137 &dev_attr_sw_activity,
138 &dev_attr_unload_heads, 138 &dev_attr_unload_heads,
139 NULL 139 NULL
140 }; 140 };
141 141
142 struct scsi_host_template ahci_sht = { 142 struct scsi_host_template ahci_sht = {
143 ATA_NCQ_SHT("ahci"), 143 ATA_NCQ_SHT("ahci"),
144 .can_queue = AHCI_MAX_CMDS - 1, 144 .can_queue = AHCI_MAX_CMDS - 1,
145 .sg_tablesize = AHCI_MAX_SG, 145 .sg_tablesize = AHCI_MAX_SG,
146 .dma_boundary = AHCI_DMA_BOUNDARY, 146 .dma_boundary = AHCI_DMA_BOUNDARY,
147 .shost_attrs = ahci_shost_attrs, 147 .shost_attrs = ahci_shost_attrs,
148 .sdev_attrs = ahci_sdev_attrs, 148 .sdev_attrs = ahci_sdev_attrs,
149 }; 149 };
150 EXPORT_SYMBOL_GPL(ahci_sht); 150 EXPORT_SYMBOL_GPL(ahci_sht);
151 151
152 struct ata_port_operations ahci_ops = { 152 struct ata_port_operations ahci_ops = {
153 .inherits = &sata_pmp_port_ops, 153 .inherits = &sata_pmp_port_ops,
154 154
155 .qc_defer = ahci_pmp_qc_defer, 155 .qc_defer = ahci_pmp_qc_defer,
156 .qc_prep = ahci_qc_prep, 156 .qc_prep = ahci_qc_prep,
157 .qc_issue = ahci_qc_issue, 157 .qc_issue = ahci_qc_issue,
158 .qc_fill_rtf = ahci_qc_fill_rtf, 158 .qc_fill_rtf = ahci_qc_fill_rtf,
159 159
160 .freeze = ahci_freeze, 160 .freeze = ahci_freeze,
161 .thaw = ahci_thaw, 161 .thaw = ahci_thaw,
162 .softreset = ahci_softreset, 162 .softreset = ahci_softreset,
163 .hardreset = ahci_hardreset, 163 .hardreset = ahci_hardreset,
164 .postreset = ahci_postreset, 164 .postreset = ahci_postreset,
165 .pmp_softreset = ahci_softreset, 165 .pmp_softreset = ahci_softreset,
166 .error_handler = ahci_error_handler, 166 .error_handler = ahci_error_handler,
167 .post_internal_cmd = ahci_post_internal_cmd, 167 .post_internal_cmd = ahci_post_internal_cmd,
168 .dev_config = ahci_dev_config, 168 .dev_config = ahci_dev_config,
169 169
170 .scr_read = ahci_scr_read, 170 .scr_read = ahci_scr_read,
171 .scr_write = ahci_scr_write, 171 .scr_write = ahci_scr_write,
172 .pmp_attach = ahci_pmp_attach, 172 .pmp_attach = ahci_pmp_attach,
173 .pmp_detach = ahci_pmp_detach, 173 .pmp_detach = ahci_pmp_detach,
174 174
175 .enable_pm = ahci_enable_alpm, 175 .enable_pm = ahci_enable_alpm,
176 .disable_pm = ahci_disable_alpm, 176 .disable_pm = ahci_disable_alpm,
177 .em_show = ahci_led_show, 177 .em_show = ahci_led_show,
178 .em_store = ahci_led_store, 178 .em_store = ahci_led_store,
179 .sw_activity_show = ahci_activity_show, 179 .sw_activity_show = ahci_activity_show,
180 .sw_activity_store = ahci_activity_store, 180 .sw_activity_store = ahci_activity_store,
181 #ifdef CONFIG_PM 181 #ifdef CONFIG_PM
182 .port_suspend = ahci_port_suspend, 182 .port_suspend = ahci_port_suspend,
183 .port_resume = ahci_port_resume, 183 .port_resume = ahci_port_resume,
184 #endif 184 #endif
185 .port_start = ahci_port_start, 185 .port_start = ahci_port_start,
186 .port_stop = ahci_port_stop, 186 .port_stop = ahci_port_stop,
187 }; 187 };
188 EXPORT_SYMBOL_GPL(ahci_ops); 188 EXPORT_SYMBOL_GPL(ahci_ops);
189 189
190 int ahci_em_messages = 1; 190 int ahci_em_messages = 1;
191 EXPORT_SYMBOL_GPL(ahci_em_messages); 191 EXPORT_SYMBOL_GPL(ahci_em_messages);
192 module_param(ahci_em_messages, int, 0444); 192 module_param(ahci_em_messages, int, 0444);
193 /* add other LED protocol types when they become supported */ 193 /* add other LED protocol types when they become supported */
194 MODULE_PARM_DESC(ahci_em_messages, 194 MODULE_PARM_DESC(ahci_em_messages,
195 "AHCI Enclosure Management Message control (0 = off, 1 = on)"); 195 "AHCI Enclosure Management Message control (0 = off, 1 = on)");
196 196
197 static void ahci_enable_ahci(void __iomem *mmio) 197 static void ahci_enable_ahci(void __iomem *mmio)
198 { 198 {
199 int i; 199 int i;
200 u32 tmp; 200 u32 tmp;
201 201
202 /* turn on AHCI_EN */ 202 /* turn on AHCI_EN */
203 tmp = readl(mmio + HOST_CTL); 203 tmp = readl(mmio + HOST_CTL);
204 if (tmp & HOST_AHCI_EN) 204 if (tmp & HOST_AHCI_EN)
205 return; 205 return;
206 206
207 /* Some controllers need AHCI_EN to be written multiple times. 207 /* Some controllers need AHCI_EN to be written multiple times.
208 * Try a few times before giving up. 208 * Try a few times before giving up.
209 */ 209 */
210 for (i = 0; i < 5; i++) { 210 for (i = 0; i < 5; i++) {
211 tmp |= HOST_AHCI_EN; 211 tmp |= HOST_AHCI_EN;
212 writel(tmp, mmio + HOST_CTL); 212 writel(tmp, mmio + HOST_CTL);
213 tmp = readl(mmio + HOST_CTL); /* flush && sanity check */ 213 tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
214 if (tmp & HOST_AHCI_EN) 214 if (tmp & HOST_AHCI_EN)
215 return; 215 return;
216 msleep(10); 216 msleep(10);
217 } 217 }
218 218
219 WARN_ON(1); 219 WARN_ON(1);
220 } 220 }
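The retry loop matters because some HBAs drop the first writes to AHCI_EN. A self-contained model of the same write-read-verify pattern, with a simulated controller that only latches on the third write (the real driver sleeps 10 ms between attempts):

    #include <stdio.h>
    #include <stdint.h>

    #define HOST_AHCI_EN    (1u << 31)

    /* Simulated HBA that latches the enable bit only on the third write. */
    static uint32_t host_ctl;
    static int writes;

    static void reg_write(uint32_t v) { if (++writes >= 3) host_ctl = v; }
    static uint32_t reg_read(void) { return host_ctl; }

    int main(void)
    {
            int i;

            for (i = 0; i < 5; i++) {
                    reg_write(reg_read() | HOST_AHCI_EN);
                    if (reg_read() & HOST_AHCI_EN) {
                            printf("enabled after %d write(s)\n", writes);
                            return 0;
                    }
                    /* the driver msleep(10)s here between attempts */
            }
            printf("giving up (WARN_ON)\n");
            return 1;
    }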
221 221
222 static ssize_t ahci_show_host_caps(struct device *dev, 222 static ssize_t ahci_show_host_caps(struct device *dev,
223 struct device_attribute *attr, char *buf) 223 struct device_attribute *attr, char *buf)
224 { 224 {
225 struct Scsi_Host *shost = class_to_shost(dev); 225 struct Scsi_Host *shost = class_to_shost(dev);
226 struct ata_port *ap = ata_shost_to_port(shost); 226 struct ata_port *ap = ata_shost_to_port(shost);
227 struct ahci_host_priv *hpriv = ap->host->private_data; 227 struct ahci_host_priv *hpriv = ap->host->private_data;
228 228
229 return sprintf(buf, "%x\n", hpriv->cap); 229 return sprintf(buf, "%x\n", hpriv->cap);
230 } 230 }
231 231
232 static ssize_t ahci_show_host_cap2(struct device *dev, 232 static ssize_t ahci_show_host_cap2(struct device *dev,
233 struct device_attribute *attr, char *buf) 233 struct device_attribute *attr, char *buf)
234 { 234 {
235 struct Scsi_Host *shost = class_to_shost(dev); 235 struct Scsi_Host *shost = class_to_shost(dev);
236 struct ata_port *ap = ata_shost_to_port(shost); 236 struct ata_port *ap = ata_shost_to_port(shost);
237 struct ahci_host_priv *hpriv = ap->host->private_data; 237 struct ahci_host_priv *hpriv = ap->host->private_data;
238 238
239 return sprintf(buf, "%x\n", hpriv->cap2); 239 return sprintf(buf, "%x\n", hpriv->cap2);
240 } 240 }
241 241
242 static ssize_t ahci_show_host_version(struct device *dev, 242 static ssize_t ahci_show_host_version(struct device *dev,
243 struct device_attribute *attr, char *buf) 243 struct device_attribute *attr, char *buf)
244 { 244 {
245 struct Scsi_Host *shost = class_to_shost(dev); 245 struct Scsi_Host *shost = class_to_shost(dev);
246 struct ata_port *ap = ata_shost_to_port(shost); 246 struct ata_port *ap = ata_shost_to_port(shost);
247 struct ahci_host_priv *hpriv = ap->host->private_data; 247 struct ahci_host_priv *hpriv = ap->host->private_data;
248 void __iomem *mmio = hpriv->mmio; 248 void __iomem *mmio = hpriv->mmio;
249 249
250 return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION)); 250 return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
251 } 251 }
252 252
253 static ssize_t ahci_show_port_cmd(struct device *dev, 253 static ssize_t ahci_show_port_cmd(struct device *dev,
254 struct device_attribute *attr, char *buf) 254 struct device_attribute *attr, char *buf)
255 { 255 {
256 struct Scsi_Host *shost = class_to_shost(dev); 256 struct Scsi_Host *shost = class_to_shost(dev);
257 struct ata_port *ap = ata_shost_to_port(shost); 257 struct ata_port *ap = ata_shost_to_port(shost);
258 void __iomem *port_mmio = ahci_port_base(ap); 258 void __iomem *port_mmio = ahci_port_base(ap);
259 259
260 return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD)); 260 return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
261 } 261 }
262 262
263 static ssize_t ahci_read_em_buffer(struct device *dev, 263 static ssize_t ahci_read_em_buffer(struct device *dev,
264 struct device_attribute *attr, char *buf) 264 struct device_attribute *attr, char *buf)
265 { 265 {
266 struct Scsi_Host *shost = class_to_shost(dev); 266 struct Scsi_Host *shost = class_to_shost(dev);
267 struct ata_port *ap = ata_shost_to_port(shost); 267 struct ata_port *ap = ata_shost_to_port(shost);
268 struct ahci_host_priv *hpriv = ap->host->private_data; 268 struct ahci_host_priv *hpriv = ap->host->private_data;
269 void __iomem *mmio = hpriv->mmio; 269 void __iomem *mmio = hpriv->mmio;
270 void __iomem *em_mmio = mmio + hpriv->em_loc; 270 void __iomem *em_mmio = mmio + hpriv->em_loc;
271 u32 em_ctl, msg; 271 u32 em_ctl, msg;
272 unsigned long flags; 272 unsigned long flags;
273 size_t count; 273 size_t count;
274 int i; 274 int i;
275 275
276 spin_lock_irqsave(ap->lock, flags); 276 spin_lock_irqsave(ap->lock, flags);
277 277
278 em_ctl = readl(mmio + HOST_EM_CTL); 278 em_ctl = readl(mmio + HOST_EM_CTL);
279 if (!(ap->flags & ATA_FLAG_EM) || em_ctl & EM_CTL_XMT || 279 if (!(ap->flags & ATA_FLAG_EM) || em_ctl & EM_CTL_XMT ||
280 !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO)) { 280 !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO)) {
281 spin_unlock_irqrestore(ap->lock, flags); 281 spin_unlock_irqrestore(ap->lock, flags);
282 return -EINVAL; 282 return -EINVAL;
283 } 283 }
284 284
285 if (!(em_ctl & EM_CTL_MR)) { 285 if (!(em_ctl & EM_CTL_MR)) {
286 spin_unlock_irqrestore(ap->lock, flags); 286 spin_unlock_irqrestore(ap->lock, flags);
287 return -EAGAIN; 287 return -EAGAIN;
288 } 288 }
289 289
290 if (!(em_ctl & EM_CTL_SMB)) 290 if (!(em_ctl & EM_CTL_SMB))
291 em_mmio += hpriv->em_buf_sz; 291 em_mmio += hpriv->em_buf_sz;
292 292
293 count = hpriv->em_buf_sz; 293 count = hpriv->em_buf_sz;
294 294
295 /* the count should not be larger than PAGE_SIZE */ 295 /* the count should not be larger than PAGE_SIZE */
296 if (count > PAGE_SIZE) { 296 if (count > PAGE_SIZE) {
297 if (printk_ratelimit()) 297 if (printk_ratelimit())
298 ata_port_printk(ap, KERN_WARNING, 298 ata_port_printk(ap, KERN_WARNING,
299 "EM read buffer size too large: " 299 "EM read buffer size too large: "
300 "buffer size %u, page size %lu\n", 300 "buffer size %u, page size %lu\n",
301 hpriv->em_buf_sz, PAGE_SIZE); 301 hpriv->em_buf_sz, PAGE_SIZE);
302 count = PAGE_SIZE; 302 count = PAGE_SIZE;
303 } 303 }
304 304
305 for (i = 0; i < count; i += 4) { 305 for (i = 0; i < count; i += 4) {
306 msg = readl(em_mmio + i); 306 msg = readl(em_mmio + i);
307 buf[i] = msg & 0xff; 307 buf[i] = msg & 0xff;
308 buf[i + 1] = (msg >> 8) & 0xff; 308 buf[i + 1] = (msg >> 8) & 0xff;
309 buf[i + 2] = (msg >> 16) & 0xff; 309 buf[i + 2] = (msg >> 16) & 0xff;
310 buf[i + 3] = (msg >> 24) & 0xff; 310 buf[i + 3] = (msg >> 24) & 0xff;
311 } 311 }
312 312
313 spin_unlock_irqrestore(ap->lock, flags); 313 spin_unlock_irqrestore(ap->lock, flags);
314 314
315 return i; 315 return i;
316 } 316 }
317 317
318 static ssize_t ahci_store_em_buffer(struct device *dev, 318 static ssize_t ahci_store_em_buffer(struct device *dev,
319 struct device_attribute *attr, 319 struct device_attribute *attr,
320 const char *buf, size_t size) 320 const char *buf, size_t size)
321 { 321 {
322 struct Scsi_Host *shost = class_to_shost(dev); 322 struct Scsi_Host *shost = class_to_shost(dev);
323 struct ata_port *ap = ata_shost_to_port(shost); 323 struct ata_port *ap = ata_shost_to_port(shost);
324 struct ahci_host_priv *hpriv = ap->host->private_data; 324 struct ahci_host_priv *hpriv = ap->host->private_data;
325 void __iomem *mmio = hpriv->mmio; 325 void __iomem *mmio = hpriv->mmio;
326 void __iomem *em_mmio = mmio + hpriv->em_loc; 326 void __iomem *em_mmio = mmio + hpriv->em_loc;
327 const unsigned char *msg_buf = buf; 327 const unsigned char *msg_buf = buf;
328 u32 em_ctl, msg; 328 u32 em_ctl, msg;
329 unsigned long flags; 329 unsigned long flags;
330 int i; 330 int i;
331 331
332 /* check size validity */ 332 /* check size validity */
333 if (!(ap->flags & ATA_FLAG_EM) || 333 if (!(ap->flags & ATA_FLAG_EM) ||
334 !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO) || 334 !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO) ||
335 size % 4 || size > hpriv->em_buf_sz) 335 size % 4 || size > hpriv->em_buf_sz)
336 return -EINVAL; 336 return -EINVAL;
337 337
338 spin_lock_irqsave(ap->lock, flags); 338 spin_lock_irqsave(ap->lock, flags);
339 339
340 em_ctl = readl(mmio + HOST_EM_CTL); 340 em_ctl = readl(mmio + HOST_EM_CTL);
341 if (em_ctl & EM_CTL_TM) { 341 if (em_ctl & EM_CTL_TM) {
342 spin_unlock_irqrestore(ap->lock, flags); 342 spin_unlock_irqrestore(ap->lock, flags);
343 return -EBUSY; 343 return -EBUSY;
344 } 344 }
345 345
346 for (i = 0; i < size; i += 4) { 346 for (i = 0; i < size; i += 4) {
347 msg = msg_buf[i] | msg_buf[i + 1] << 8 | 347 msg = msg_buf[i] | msg_buf[i + 1] << 8 |
348 msg_buf[i + 2] << 16 | msg_buf[i + 3] << 24; 348 msg_buf[i + 2] << 16 | msg_buf[i + 3] << 24;
349 writel(msg, em_mmio + i); 349 writel(msg, em_mmio + i);
350 } 350 }
351 351
352 writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL); 352 writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
353 353
354 spin_unlock_irqrestore(ap->lock, flags); 354 spin_unlock_irqrestore(ap->lock, flags);
355 355
356 return size; 356 return size;
357 } 357 }
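Both EM buffer paths shuttle bytes through 32-bit MMIO words in little-endian byte order, and the store path enforces a length that is a multiple of four. A userspace sketch of the pack/unpack round trip using the same shift arithmetic:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
            const unsigned char msg[8] = "SGPIO!!";
            unsigned char out[8];
            uint32_t words[2];
            size_t i;

            /* pack: lowest-addressed byte into the least significant byte */
            for (i = 0; i < sizeof(msg); i += 4)
                    words[i / 4] = msg[i] | msg[i + 1] << 8 |
                                   msg[i + 2] << 16 | (uint32_t)msg[i + 3] << 24;

            /* unpack: the mirror image of the loop above */
            for (i = 0; i < sizeof(out); i += 4) {
                    uint32_t w = words[i / 4];
                    out[i] = w & 0xff;
                    out[i + 1] = (w >> 8) & 0xff;
                    out[i + 2] = (w >> 16) & 0xff;
                    out[i + 3] = (w >> 24) & 0xff;
            }

            printf("round trip %s\n",
                   memcmp(msg, out, sizeof(msg)) ? "failed" : "ok");
            return 0;
    }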
358 358
359 /** 359 /**
360 * ahci_save_initial_config - Save and fixup initial config values 360 * ahci_save_initial_config - Save and fixup initial config values
361 * @dev: target AHCI device 361 * @dev: target AHCI device
362 * @hpriv: host private area to store config values 362 * @hpriv: host private area to store config values
363 * @force_port_map: force port map to a specified value 363 * @force_port_map: force port map to a specified value
364 * @mask_port_map: mask out particular bits from port map 364 * @mask_port_map: mask out particular bits from port map
365 * 365 *
366 * Some registers containing configuration info might be set up by 366 * Some registers containing configuration info might be set up by
367 * BIOS and might be cleared on reset. This function saves the 367 * BIOS and might be cleared on reset. This function saves the
368 * initial values of those registers into @hpriv such that they 368 * initial values of those registers into @hpriv such that they
369 * can be restored after controller reset. 369 * can be restored after controller reset.
370 * 370 *
371 * If inconsistent, config values are fixed up by this function. 371 * If inconsistent, config values are fixed up by this function.
372 * 372 *
373 * LOCKING: 373 * LOCKING:
374 * None. 374 * None.
375 */ 375 */
376 void ahci_save_initial_config(struct device *dev, 376 void ahci_save_initial_config(struct device *dev,
377 struct ahci_host_priv *hpriv, 377 struct ahci_host_priv *hpriv,
378 unsigned int force_port_map, 378 unsigned int force_port_map,
379 unsigned int mask_port_map) 379 unsigned int mask_port_map)
380 { 380 {
381 void __iomem *mmio = hpriv->mmio; 381 void __iomem *mmio = hpriv->mmio;
382 u32 cap, cap2, vers, port_map; 382 u32 cap, cap2, vers, port_map;
383 int i; 383 int i;
384 384
385 /* make sure AHCI mode is enabled before accessing CAP */ 385 /* make sure AHCI mode is enabled before accessing CAP */
386 ahci_enable_ahci(mmio); 386 ahci_enable_ahci(mmio);
387 387
388 /* Values prefixed with saved_ are written back to host after 388 /* Values prefixed with saved_ are written back to host after
389 * reset. Values without are used for driver operation. 389 * reset. Values without are used for driver operation.
390 */ 390 */
391 hpriv->saved_cap = cap = readl(mmio + HOST_CAP); 391 hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
392 hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL); 392 hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
393 393
394 /* CAP2 register is only defined for AHCI 1.2 and later */ 394 /* CAP2 register is only defined for AHCI 1.2 and later */
395 vers = readl(mmio + HOST_VERSION); 395 vers = readl(mmio + HOST_VERSION);
396 if ((vers >> 16) > 1 || 396 if ((vers >> 16) > 1 ||
397 ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200)) 397 ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
398 hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2); 398 hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
399 else 399 else
400 hpriv->saved_cap2 = cap2 = 0; 400 hpriv->saved_cap2 = cap2 = 0;
401 401
402 /* some chips have errata preventing 64bit use */ 402 /* some chips have errata preventing 64bit use */
403 if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) { 403 if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
404 dev_printk(KERN_INFO, dev, 404 dev_printk(KERN_INFO, dev,
405 "controller can't do 64bit DMA, forcing 32bit\n"); 405 "controller can't do 64bit DMA, forcing 32bit\n");
406 cap &= ~HOST_CAP_64; 406 cap &= ~HOST_CAP_64;
407 } 407 }
408 408
409 if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) { 409 if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
410 dev_printk(KERN_INFO, dev, 410 dev_printk(KERN_INFO, dev,
411 "controller can't do NCQ, turning off CAP_NCQ\n"); 411 "controller can't do NCQ, turning off CAP_NCQ\n");
412 cap &= ~HOST_CAP_NCQ; 412 cap &= ~HOST_CAP_NCQ;
413 } 413 }
414 414
415 if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) { 415 if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
416 dev_printk(KERN_INFO, dev, 416 dev_printk(KERN_INFO, dev,
417 "controller can do NCQ, turning on CAP_NCQ\n"); 417 "controller can do NCQ, turning on CAP_NCQ\n");
418 cap |= HOST_CAP_NCQ; 418 cap |= HOST_CAP_NCQ;
419 } 419 }
420 420
421 if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) { 421 if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
422 dev_printk(KERN_INFO, dev, 422 dev_printk(KERN_INFO, dev,
423 "controller can't do PMP, turning off CAP_PMP\n"); 423 "controller can't do PMP, turning off CAP_PMP\n");
424 cap &= ~HOST_CAP_PMP; 424 cap &= ~HOST_CAP_PMP;
425 } 425 }
426 426
427 if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) { 427 if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
428 dev_printk(KERN_INFO, dev, 428 dev_printk(KERN_INFO, dev,
429 "controller can't do SNTF, turning off CAP_SNTF\n"); 429 "controller can't do SNTF, turning off CAP_SNTF\n");
430 cap &= ~HOST_CAP_SNTF; 430 cap &= ~HOST_CAP_SNTF;
431 } 431 }
432 432
433 if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) { 433 if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) {
434 dev_printk(KERN_INFO, dev, 434 dev_printk(KERN_INFO, dev,
435 "controller can do FBS, turning on CAP_FBS\n"); 435 "controller can do FBS, turning on CAP_FBS\n");
436 cap |= HOST_CAP_FBS; 436 cap |= HOST_CAP_FBS;
437 } 437 }
438 438
439 if (force_port_map && port_map != force_port_map) { 439 if (force_port_map && port_map != force_port_map) {
440 dev_printk(KERN_INFO, dev, "forcing port_map 0x%x -> 0x%x\n", 440 dev_printk(KERN_INFO, dev, "forcing port_map 0x%x -> 0x%x\n",
441 port_map, force_port_map); 441 port_map, force_port_map);
442 port_map = force_port_map; 442 port_map = force_port_map;
443 } 443 }
444 444
445 if (mask_port_map) { 445 if (mask_port_map) {
446 dev_printk(KERN_ERR, dev, "masking port_map 0x%x -> 0x%x\n", 446 dev_printk(KERN_ERR, dev, "masking port_map 0x%x -> 0x%x\n",
447 port_map, 447 port_map,
448 port_map & mask_port_map); 448 port_map & mask_port_map);
449 port_map &= mask_port_map; 449 port_map &= mask_port_map;
450 } 450 }
451 451
452 /* cross check port_map and cap.n_ports */ 452 /* cross check port_map and cap.n_ports */
453 if (port_map) { 453 if (port_map) {
454 int map_ports = 0; 454 int map_ports = 0;
455 455
456 for (i = 0; i < AHCI_MAX_PORTS; i++) 456 for (i = 0; i < AHCI_MAX_PORTS; i++)
457 if (port_map & (1 << i)) 457 if (port_map & (1 << i))
458 map_ports++; 458 map_ports++;
459 459
460 /* If PI has more ports than n_ports, whine, clear 460 /* If PI has more ports than n_ports, whine, clear
461 * port_map and let it be generated from n_ports. 461 * port_map and let it be generated from n_ports.
462 */ 462 */
463 if (map_ports > ahci_nr_ports(cap)) { 463 if (map_ports > ahci_nr_ports(cap)) {
464 dev_printk(KERN_WARNING, dev, 464 dev_printk(KERN_WARNING, dev,
465 "implemented port map (0x%x) contains more " 465 "implemented port map (0x%x) contains more "
466 "ports than nr_ports (%u), using nr_ports\n", 466 "ports than nr_ports (%u), using nr_ports\n",
467 port_map, ahci_nr_ports(cap)); 467 port_map, ahci_nr_ports(cap));
468 port_map = 0; 468 port_map = 0;
469 } 469 }
470 } 470 }
471 471
472 /* fabricate port_map from cap.nr_ports */ 472 /* fabricate port_map from cap.nr_ports */
473 if (!port_map) { 473 if (!port_map) {
474 port_map = (1 << ahci_nr_ports(cap)) - 1; 474 port_map = (1 << ahci_nr_ports(cap)) - 1;
475 dev_printk(KERN_WARNING, dev, 475 dev_printk(KERN_WARNING, dev,
476 "forcing PORTS_IMPL to 0x%x\n", port_map); 476 "forcing PORTS_IMPL to 0x%x\n", port_map);
477 477
478 /* write the fixed up value to the PI register */ 478 /* write the fixed up value to the PI register */
479 hpriv->saved_port_map = port_map; 479 hpriv->saved_port_map = port_map;
480 } 480 }
481 481
482 /* record values to use during operation */ 482 /* record values to use during operation */
483 hpriv->cap = cap; 483 hpriv->cap = cap;
484 hpriv->cap2 = cap2; 484 hpriv->cap2 = cap2;
485 hpriv->port_map = port_map; 485 hpriv->port_map = port_map;
486 } 486 }
487 EXPORT_SYMBOL_GPL(ahci_save_initial_config); 487 EXPORT_SYMBOL_GPL(ahci_save_initial_config);
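Two details of the function above are worth pinning down: the VS register carries the major version in bits 31:16 and the minor in bits 15:0 (0x00010200 is AHCI 1.2, the first revision to define HOST_CAP2), and the PI cross-check is a plain population count of the implemented-ports bitmap. A compact sketch of both:

    #include <stdio.h>
    #include <stdint.h>

    static int ahci_has_cap2(uint32_t vers)
    {
            /* HOST_CAP2 exists from AHCI 1.2 (0x00010200) onwards */
            return (vers >> 16) > 1 ||
                   ((vers >> 16) == 1 && (vers & 0xffff) >= 0x200);
    }

    static int map_ports(uint32_t port_map)
    {
            int i, n = 0;

            for (i = 0; i < 32; i++)        /* AHCI_MAX_PORTS is 32 */
                    if (port_map & (1u << i))
                            n++;
            return n;
    }

    int main(void)
    {
            printf("1.1 has CAP2: %d\n", ahci_has_cap2(0x00010100)); /* 0 */
            printf("1.3 has CAP2: %d\n", ahci_has_cap2(0x00010300)); /* 1 */
            printf("ports in 0x33: %d\n", map_ports(0x33));          /* 4 */
            return 0;
    }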
488 488
489 /** 489 /**
490 * ahci_restore_initial_config - Restore initial config 490 * ahci_restore_initial_config - Restore initial config
491 * @host: target ATA host 491 * @host: target ATA host
492 * 492 *
493 * Restore initial config stored by ahci_save_initial_config(). 493 * Restore initial config stored by ahci_save_initial_config().
494 * 494 *
495 * LOCKING: 495 * LOCKING:
496 * None. 496 * None.
497 */ 497 */
498 static void ahci_restore_initial_config(struct ata_host *host) 498 static void ahci_restore_initial_config(struct ata_host *host)
499 { 499 {
500 struct ahci_host_priv *hpriv = host->private_data; 500 struct ahci_host_priv *hpriv = host->private_data;
501 void __iomem *mmio = hpriv->mmio; 501 void __iomem *mmio = hpriv->mmio;
502 502
503 writel(hpriv->saved_cap, mmio + HOST_CAP); 503 writel(hpriv->saved_cap, mmio + HOST_CAP);
504 if (hpriv->saved_cap2) 504 if (hpriv->saved_cap2)
505 writel(hpriv->saved_cap2, mmio + HOST_CAP2); 505 writel(hpriv->saved_cap2, mmio + HOST_CAP2);
506 writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL); 506 writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
507 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */ 507 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
508 } 508 }
509 509
510 static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg) 510 static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
511 { 511 {
512 static const int offset[] = { 512 static const int offset[] = {
513 [SCR_STATUS] = PORT_SCR_STAT, 513 [SCR_STATUS] = PORT_SCR_STAT,
514 [SCR_CONTROL] = PORT_SCR_CTL, 514 [SCR_CONTROL] = PORT_SCR_CTL,
515 [SCR_ERROR] = PORT_SCR_ERR, 515 [SCR_ERROR] = PORT_SCR_ERR,
516 [SCR_ACTIVE] = PORT_SCR_ACT, 516 [SCR_ACTIVE] = PORT_SCR_ACT,
517 [SCR_NOTIFICATION] = PORT_SCR_NTF, 517 [SCR_NOTIFICATION] = PORT_SCR_NTF,
518 }; 518 };
519 struct ahci_host_priv *hpriv = ap->host->private_data; 519 struct ahci_host_priv *hpriv = ap->host->private_data;
520 520
521 if (sc_reg < ARRAY_SIZE(offset) && 521 if (sc_reg < ARRAY_SIZE(offset) &&
522 (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF))) 522 (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
523 return offset[sc_reg]; 523 return offset[sc_reg];
524 return 0; 524 return 0;
525 } 525 }
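The offset table relies on two C details: designated initializers leave unnamed slots zeroed, and no valid SCR lives at port offset 0, so 0 doubles as the "unsupported" return that the callers below turn into -EINVAL. A minimal illustration with invented register names and offsets:

    #include <stdio.h>

    enum { REG_A, REG_B, REG_C, NR_REGS };

    int main(void)
    {
            /* Unnamed slots in a designated initializer default to 0,
             * which doubles here as the "not supported" sentinel. */
            static const int offset[] = {
                    [REG_A] = 0x28,
                    [REG_C] = 0x2c,
            };
            int r;

            for (r = 0; r < NR_REGS; r++)
                    printf("reg %d: %s (0x%02x)\n", r,
                           offset[r] ? "mapped" : "unsupported", offset[r]);
            return 0;
    }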
526 526
527 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val) 527 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
528 { 528 {
529 void __iomem *port_mmio = ahci_port_base(link->ap); 529 void __iomem *port_mmio = ahci_port_base(link->ap);
530 int offset = ahci_scr_offset(link->ap, sc_reg); 530 int offset = ahci_scr_offset(link->ap, sc_reg);
531 531
532 if (offset) { 532 if (offset) {
533 *val = readl(port_mmio + offset); 533 *val = readl(port_mmio + offset);
534 return 0; 534 return 0;
535 } 535 }
536 return -EINVAL; 536 return -EINVAL;
537 } 537 }
538 538
539 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val) 539 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
540 { 540 {
541 void __iomem *port_mmio = ahci_port_base(link->ap); 541 void __iomem *port_mmio = ahci_port_base(link->ap);
542 int offset = ahci_scr_offset(link->ap, sc_reg); 542 int offset = ahci_scr_offset(link->ap, sc_reg);
543 543
544 if (offset) { 544 if (offset) {
545 writel(val, port_mmio + offset); 545 writel(val, port_mmio + offset);
546 return 0; 546 return 0;
547 } 547 }
548 return -EINVAL; 548 return -EINVAL;
549 } 549 }
550 550
551 void ahci_start_engine(struct ata_port *ap) 551 void ahci_start_engine(struct ata_port *ap)
552 { 552 {
553 void __iomem *port_mmio = ahci_port_base(ap); 553 void __iomem *port_mmio = ahci_port_base(ap);
554 u32 tmp; 554 u32 tmp;
555 555
556 /* start DMA */ 556 /* start DMA */
557 tmp = readl(port_mmio + PORT_CMD); 557 tmp = readl(port_mmio + PORT_CMD);
558 tmp |= PORT_CMD_START; 558 tmp |= PORT_CMD_START;
559 writel(tmp, port_mmio + PORT_CMD); 559 writel(tmp, port_mmio + PORT_CMD);
560 readl(port_mmio + PORT_CMD); /* flush */ 560 readl(port_mmio + PORT_CMD); /* flush */
561 } 561 }
562 EXPORT_SYMBOL_GPL(ahci_start_engine); 562 EXPORT_SYMBOL_GPL(ahci_start_engine);
563 563
564 int ahci_stop_engine(struct ata_port *ap) 564 int ahci_stop_engine(struct ata_port *ap)
565 { 565 {
566 void __iomem *port_mmio = ahci_port_base(ap); 566 void __iomem *port_mmio = ahci_port_base(ap);
567 u32 tmp; 567 u32 tmp;
568 568
569 tmp = readl(port_mmio + PORT_CMD); 569 tmp = readl(port_mmio + PORT_CMD);
570 570
571 /* check if the HBA is idle */ 571 /* check if the HBA is idle */
572 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0) 572 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
573 return 0; 573 return 0;
574 574
575 /* setting HBA to idle */ 575 /* setting HBA to idle */
576 tmp &= ~PORT_CMD_START; 576 tmp &= ~PORT_CMD_START;
577 writel(tmp, port_mmio + PORT_CMD); 577 writel(tmp, port_mmio + PORT_CMD);
578 578
579 /* wait for engine to stop. This could be as long as 500 msec */ 579 /* wait for engine to stop. This could be as long as 500 msec */
580 tmp = ata_wait_register(port_mmio + PORT_CMD, 580 tmp = ata_wait_register(port_mmio + PORT_CMD,
581 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500); 581 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
582 if (tmp & PORT_CMD_LIST_ON) 582 if (tmp & PORT_CMD_LIST_ON)
583 return -EIO; 583 return -EIO;
584 584
585 return 0; 585 return 0;
586 } 586 }
587 EXPORT_SYMBOL_GPL(ahci_stop_engine); 587 EXPORT_SYMBOL_GPL(ahci_stop_engine);
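ata_wait_register() polls until the masked register value stops matching the given pattern or the timeout expires, so the call above returns as soon as PORT_CMD_LIST_ON drops, or after 500 ms with the bit still set, which becomes -EIO. A userspace analogue under those semantics, with a simulated register standing in for PORT_CMD:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t reg_sim;

    /* Poll while (value & mask) == val, up to @timeout iterations;
     * the kernel version sleeps between reads instead of spinning. */
    static uint32_t wait_register(uint32_t mask, uint32_t val, int timeout)
    {
            uint32_t tmp = reg_sim;

            while ((tmp & mask) == val && timeout-- > 0) {
                    reg_sim >>= 1;          /* stand-in for hardware progress */
                    tmp = reg_sim;
            }
            return tmp;
    }

    int main(void)
    {
            uint32_t tmp;

            reg_sim = 0x8000;               /* pretend PORT_CMD_LIST_ON is set */
            tmp = wait_register(0x8000, 0x8000, 500);
            printf("%s\n", (tmp & 0x8000) ? "-EIO: engine stuck"
                                          : "engine stopped");
            return 0;
    }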

static void ahci_start_fis_rx(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	u32 tmp;

	/* set FIS registers */
	if (hpriv->cap & HOST_CAP_64)
		writel((pp->cmd_slot_dma >> 16) >> 16,
		       port_mmio + PORT_LST_ADDR_HI);
	writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);

	if (hpriv->cap & HOST_CAP_64)
		writel((pp->rx_fis_dma >> 16) >> 16,
		       port_mmio + PORT_FIS_ADDR_HI);
	writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);

	/* enable FIS reception */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_FIS_RX;
	writel(tmp, port_mmio + PORT_CMD);

	/* flush */
	readl(port_mmio + PORT_CMD);
}
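/*
 * Editorial note on the "(x >> 16) >> 16" idiom above: a plain "x >> 32"
 * would be undefined behaviour when dma_addr_t is only 32 bits wide, so
 * the high dword is extracted with two 16-bit shifts instead -- yielding
 * 0 on 32-bit configurations and the upper dword on 64-bit ones, e.g.:
 *
 *	u32 hi = (pp->rx_fis_dma >> 16) >> 16;
 *	u32 lo = pp->rx_fis_dma & 0xffffffff;
 */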

static int ahci_stop_fis_rx(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;

	/* disable FIS reception */
	tmp = readl(port_mmio + PORT_CMD);
	tmp &= ~PORT_CMD_FIS_RX;
	writel(tmp, port_mmio + PORT_CMD);

	/* wait for completion, spec says 500ms, give it 1000 */
	tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
				PORT_CMD_FIS_ON, 10, 1000);
	if (tmp & PORT_CMD_FIS_ON)
		return -EBUSY;

	return 0;
}
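/*
 * Editorial note: ata_wait_register(reg, mask, val, interval, timeout)
 * polls @reg every @interval ms for up to @timeout ms while
 * (readl(reg) & mask) == val, and returns the last value read.  The call
 * above therefore spins while PORT_CMD_FIS_ON is still set, and the
 * caller inspects the returned value to see whether the bit ever cleared.
 */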

static void ahci_power_up(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd;

	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;

	/* spin up device */
	if (hpriv->cap & HOST_CAP_SSS) {
		cmd |= PORT_CMD_SPIN_UP;
		writel(cmd, port_mmio + PORT_CMD);
	}

	/* wake up link */
	writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
}

static void ahci_disable_alpm(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd;
	struct ahci_port_priv *pp = ap->private_data;

	/* IPM bits should be disabled by libata-core */
	/* get the existing command bits */
	cmd = readl(port_mmio + PORT_CMD);

	/* disable ALPM and ASP */
	cmd &= ~PORT_CMD_ASP;
	cmd &= ~PORT_CMD_ALPE;

	/* force the interface back to active */
	cmd |= PORT_CMD_ICC_ACTIVE;

	/* write out new cmd value */
	writel(cmd, port_mmio + PORT_CMD);
	cmd = readl(port_mmio + PORT_CMD);

	/* wait 10ms to be sure we've come out of any low power state */
	msleep(10);

	/* clear out any PhyRdy stuff from interrupt status */
	writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);

	/* go ahead and clean out PhyRdy Change from SError too */
	ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));

	/*
	 * Clear flag to indicate that we should ignore all PhyRdy
	 * state changes
	 */
	hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;

	/*
	 * Enable interrupts on Phy Ready.
	 */
	pp->intr_mask |= PORT_IRQ_PHYRDY;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);

	/*
	 * don't change the link pm policy - we can be called
	 * just to turn off link pm temporarily
	 */
}

static int ahci_enable_alpm(struct ata_port *ap,
	enum link_pm policy)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd;
	struct ahci_port_priv *pp = ap->private_data;
	u32 asp;

	/* Make sure the host is capable of link power management */
	if (!(hpriv->cap & HOST_CAP_ALPM))
		return -EINVAL;

	switch (policy) {
	case MAX_PERFORMANCE:
	case NOT_AVAILABLE:
		/*
		 * if we came here with NOT_AVAILABLE,
		 * it just means this is the first time we
		 * have tried to enable - default to max performance,
		 * and let the user go to lower power modes on request.
		 */
		ahci_disable_alpm(ap);
		return 0;
	case MIN_POWER:
		/* configure HBA to enter SLUMBER */
		asp = PORT_CMD_ASP;
		break;
	case MEDIUM_POWER:
		/* configure HBA to enter PARTIAL */
		asp = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Disable interrupts on Phy Ready. This keeps us from
	 * getting woken up due to spurious phy ready interrupts.
	 * TBD - Hot plug should be done via polling now, is
	 * that even supported?
	 */
	pp->intr_mask &= ~PORT_IRQ_PHYRDY;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);

	/*
	 * Set a flag to indicate that we should ignore all PhyRdy
	 * state changes since these can happen now whenever we
	 * change link state
	 */
	hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;

	/* get the existing command bits */
	cmd = readl(port_mmio + PORT_CMD);

	/*
	 * Set ASP based on Policy
	 */
	cmd |= asp;

	/*
	 * Setting this bit will instruct the HBA to aggressively
	 * enter a lower power link state when it's appropriate and
	 * based on the value set above for ASP
	 */
	cmd |= PORT_CMD_ALPE;

	/* write out new cmd value */
	writel(cmd, port_mmio + PORT_CMD);
	cmd = readl(port_mmio + PORT_CMD);

	/* IPM bits should be set by libata-core */
	return 0;
}
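/*
 * Editorial summary of the PxCMD bits used above: ALPE enables aggressive
 * link power management, and ASP selects which state the HBA requests --
 * set for SLUMBER (MIN_POWER), clear for PARTIAL (MEDIUM_POWER).  A
 * MAX_PERFORMANCE (or first-time NOT_AVAILABLE) policy simply clears both
 * through ahci_disable_alpm() above.
 */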

#ifdef CONFIG_PM
static void ahci_power_down(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd, scontrol;

	if (!(hpriv->cap & HOST_CAP_SSS))
		return;

	/* put device into listen mode, first set PxSCTL.DET to 0 */
	scontrol = readl(port_mmio + PORT_SCR_CTL);
	scontrol &= ~0xf;
	writel(scontrol, port_mmio + PORT_SCR_CTL);

	/* then set PxCMD.SUD to 0 */
	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
	cmd &= ~PORT_CMD_SPIN_UP;
	writel(cmd, port_mmio + PORT_CMD);
}
#endif

static void ahci_start_port(struct ata_port *ap)
{
	struct ahci_port_priv *pp = ap->private_data;
	struct ata_link *link;
	struct ahci_em_priv *emp;
	ssize_t rc;
	int i;

	/* enable FIS reception */
	ahci_start_fis_rx(ap);

	/* enable DMA */
	ahci_start_engine(ap);

	/* turn on LEDs */
	if (ap->flags & ATA_FLAG_EM) {
		ata_for_each_link(link, ap, EDGE) {
			emp = &pp->em_priv[link->pmp];

			/* EM Transmit bit may be busy during init */
			for (i = 0; i < EM_MAX_RETRY; i++) {
				rc = ahci_transmit_led_message(ap,
							       emp->led_state,
							       4);
				if (rc == -EBUSY)
					msleep(1);
				else
					break;
			}
		}
	}

	if (ap->flags & ATA_FLAG_SW_ACTIVITY)
		ata_for_each_link(link, ap, EDGE)
			ahci_init_sw_activity(link);
}

static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
{
	int rc;

	/* disable DMA */
	rc = ahci_stop_engine(ap);
	if (rc) {
		*emsg = "failed to stop engine";
		return rc;
	}

	/* disable FIS reception */
	rc = ahci_stop_fis_rx(ap);
	if (rc) {
		*emsg = "failed to stop FIS RX";
		return rc;
	}

	return 0;
}

int ahci_reset_controller(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 tmp;

	/* we must be in AHCI mode, before using anything
	 * AHCI-specific, such as HOST_RESET.
	 */
	ahci_enable_ahci(mmio);

	/* global controller reset */
	if (!ahci_skip_host_reset) {
		tmp = readl(mmio + HOST_CTL);
		if ((tmp & HOST_RESET) == 0) {
			writel(tmp | HOST_RESET, mmio + HOST_CTL);
			readl(mmio + HOST_CTL); /* flush */
		}

		/*
		 * to perform host reset, OS should set HOST_RESET
		 * and poll until this bit is read to be "0".
		 * reset must complete within 1 second, or
		 * the hardware should be considered fried.
		 */
		tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
					HOST_RESET, 10, 1000);

		if (tmp & HOST_RESET) {
			dev_printk(KERN_ERR, host->dev,
				   "controller reset failed (0x%x)\n", tmp);
			return -EIO;
		}

		/* turn on AHCI mode */
		ahci_enable_ahci(mmio);

		/* Some registers might be cleared on reset. Restore
		 * initial values.
		 */
		ahci_restore_initial_config(host);
	} else
		dev_printk(KERN_INFO, host->dev,
			   "skipping global host reset\n");

	return 0;
}
EXPORT_SYMBOL_GPL(ahci_reset_controller);
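/*
 * Editorial note: ahci_enable_ahci() appears both before and after the
 * reset on purpose -- HOST_RESET is only meaningful while the controller
 * is in AHCI mode (see the comment above), and on controllers where the
 * AHCI-enable bit is read/write the reset clears it again, so AHCI mode
 * has to be re-enabled before the saved configuration is restored.
 */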

static void ahci_sw_activity(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];

	if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
		return;

	emp->activity++;
	if (!timer_pending(&emp->timer))
		mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
}

static void ahci_sw_activity_blink(unsigned long arg)
{
	struct ata_link *link = (struct ata_link *)arg;
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
	unsigned long led_message = emp->led_state;
	u32 activity_led_state;
	unsigned long flags;

	led_message &= EM_MSG_LED_VALUE;
	led_message |= ap->port_no | (link->pmp << 8);

	/* check to see if we've had activity.  If so,
	 * toggle state of LED and reset timer.  If not,
	 * turn LED to desired idle state.
	 */
	spin_lock_irqsave(ap->lock, flags);
	if (emp->saved_activity != emp->activity) {
		emp->saved_activity = emp->activity;
		/* get the current LED state */
		activity_led_state = led_message & EM_MSG_LED_VALUE_ON;

		if (activity_led_state)
			activity_led_state = 0;
		else
			activity_led_state = 1;

		/* clear old state */
		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;

		/* toggle state */
		led_message |= (activity_led_state << 16);
		mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
	} else {
		/* switch to idle */
		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
		if (emp->blink_policy == BLINK_OFF)
			led_message |= (1 << 16);
	}
	spin_unlock_irqrestore(ap->lock, flags);
	ahci_transmit_led_message(ap, led_message, 4);
}

static void ahci_init_sw_activity(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];

	/* init activity stats, setup timer */
	emp->saved_activity = emp->activity = 0;
	setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);

	/* check our blink policy and set flag for link if it's enabled */
	if (emp->blink_policy)
		link->flags |= ATA_LFLAG_SW_ACTIVITY;
}

int ahci_reset_em(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 em_ctl;

	em_ctl = readl(mmio + HOST_EM_CTL);
	if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
		return -EINVAL;

	writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
	return 0;
}
EXPORT_SYMBOL_GPL(ahci_reset_em);

static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
					 ssize_t size)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 em_ctl;
	u32 message[] = {0, 0};
	unsigned long flags;
	int pmp;
	struct ahci_em_priv *emp;

	/* get the slot number from the message */
	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
	if (pmp < EM_MAX_SLOTS)
		emp = &pp->em_priv[pmp];
	else
		return -EINVAL;

	spin_lock_irqsave(ap->lock, flags);

	/*
	 * if we are still busy transmitting a previous message,
	 * do not allow
	 */
	em_ctl = readl(mmio + HOST_EM_CTL);
	if (em_ctl & EM_CTL_TM) {
		spin_unlock_irqrestore(ap->lock, flags);
		return -EBUSY;
	}

	if (hpriv->em_msg_type & EM_MSG_TYPE_LED) {
		/*
		 * create message header - this is all zero except for
		 * the message size, which is 4 bytes.
		 */
		message[0] |= (4 << 8);

		/* ignore 0:4 of byte zero, fill in port info yourself */
		message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);

		/* write message to EM_LOC */
		writel(message[0], mmio + hpriv->em_loc);
		writel(message[1], mmio + hpriv->em_loc + 4);

		/*
		 * tell hardware to transmit the message
		 */
		writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
	}

	/* save off new led state for port/slot */
	emp->led_state = state;

	spin_unlock_irqrestore(ap->lock, flags);
	return size;
}
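/*
 * Editorial sketch of the LED message format as used above (field layout
 * inferred from this file rather than quoted from the spec): message[0]
 * is the header and only carries the payload size in bits 8-15, while
 * message[1] is the state word -- HBA port number in the low bits
 * (EM_MSG_LED_HBA_PORT), PMP slot in bits 8-15 (EM_MSG_LED_PMP_SLOT),
 * and the LED on/off values from bit 16 up (EM_MSG_LED_VALUE); bit 16 is
 * the one ahci_sw_activity_blink() toggles for the activity LED.
 */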

static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
{
	struct ahci_port_priv *pp = ap->private_data;
	struct ata_link *link;
	struct ahci_em_priv *emp;
	int rc = 0;

	ata_for_each_link(link, ap, EDGE) {
		emp = &pp->em_priv[link->pmp];
		rc += sprintf(buf, "%lx\n", emp->led_state);
	}
	return rc;
}

static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
			      size_t size)
{
	int state;
	int pmp;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp;

	state = simple_strtoul(buf, NULL, 0);

	/* get the slot number from the message */
	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
	if (pmp < EM_MAX_SLOTS)
		emp = &pp->em_priv[pmp];
	else
		return -EINVAL;

	/* mask off the activity bits if we are in sw_activity
	 * mode, user should turn off sw_activity before setting
	 * activity led through em_message
	 */
	if (emp->blink_policy)
		state &= ~EM_MSG_LED_VALUE_ACTIVITY;

	return ahci_transmit_led_message(ap, state, size);
}

static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
	u32 port_led_state = emp->led_state;

	/* save the desired Activity LED behavior */
	if (val == OFF) {
		/* clear LFLAG */
		link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);

		/* set the LED to OFF */
		port_led_state &= EM_MSG_LED_VALUE_OFF;
		port_led_state |= (ap->port_no | (link->pmp << 8));
		ahci_transmit_led_message(ap, port_led_state, 4);
	} else {
		link->flags |= ATA_LFLAG_SW_ACTIVITY;
		if (val == BLINK_OFF) {
			/* set LED to ON for idle */
			port_led_state &= EM_MSG_LED_VALUE_OFF;
			port_led_state |= (ap->port_no | (link->pmp << 8));
			port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
			ahci_transmit_led_message(ap, port_led_state, 4);
		}
	}
	emp->blink_policy = val;
	return 0;
}

static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];

	/* display the saved value of activity behavior for this
	 * disk.
	 */
	return sprintf(buf, "%d\n", emp->blink_policy);
}

static void ahci_port_init(struct device *dev, struct ata_port *ap,
			   int port_no, void __iomem *mmio,
			   void __iomem *port_mmio)
{
	const char *emsg = NULL;
	int rc;
	u32 tmp;

	/* make sure port is not active */
	rc = ahci_deinit_port(ap, &emsg);
	if (rc)
		dev_warn(dev, "%s (%d)\n", emsg, rc);

	/* clear SError */
	tmp = readl(port_mmio + PORT_SCR_ERR);
	VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
	writel(tmp, port_mmio + PORT_SCR_ERR);

	/* clear port IRQ */
	tmp = readl(port_mmio + PORT_IRQ_STAT);
	VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
	if (tmp)
		writel(tmp, port_mmio + PORT_IRQ_STAT);

	writel(1 << port_no, mmio + HOST_IRQ_STAT);
}

void ahci_init_controller(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;
	int i;
	void __iomem *port_mmio;
	u32 tmp;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		port_mmio = ahci_port_base(ap);
		if (ata_port_is_dummy(ap))
			continue;

		ahci_port_init(host->dev, ap, i, mmio, port_mmio);
	}

	tmp = readl(mmio + HOST_CTL);
	VPRINTK("HOST_CTL 0x%x\n", tmp);
	writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
	tmp = readl(mmio + HOST_CTL);
	VPRINTK("HOST_CTL 0x%x\n", tmp);
}
EXPORT_SYMBOL_GPL(ahci_init_controller);

static void ahci_dev_config(struct ata_device *dev)
{
	struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;

	if (hpriv->flags & AHCI_HFLAG_SECT255) {
		dev->max_sectors = 255;
		ata_dev_printk(dev, KERN_INFO,
			       "SB600 AHCI: limiting to 255 sectors per cmd\n");
	}
}

static unsigned int ahci_dev_classify(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ata_taskfile tf;
	u32 tmp;

	tmp = readl(port_mmio + PORT_SIG);
	tf.lbah = (tmp >> 24) & 0xff;
	tf.lbam = (tmp >> 16) & 0xff;
	tf.lbal = (tmp >> 8) & 0xff;
	tf.nsect = (tmp) & 0xff;

	return ata_dev_classify(&tf);
}
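/*
 * Editorial note: PORT_SIG holds the device signature from the initial
 * D2H Register FIS.  Unpacking it into a taskfile lets ata_dev_classify()
 * tell the device types apart by their well-known lbam/lbah values:
 * 0x00/0x00 for ATA, 0x14/0xEB for ATAPI, 0x3C/0xC3 for SEMB and
 * 0x69/0x96 for a port multiplier.
 */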

static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
			       u32 opts)
{
	dma_addr_t cmd_tbl_dma;

	cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;

	pp->cmd_slot[tag].opts = cpu_to_le32(opts);
	pp->cmd_slot[tag].status = 0;
	pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
	pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
}

int ahci_kick_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
	u32 tmp;
	int busy, rc;

	/* stop engine */
	rc = ahci_stop_engine(ap);
	if (rc)
		goto out_restart;

	/* need to do CLO?
	 * always do CLO if PMP is attached (AHCI-1.3 9.2)
	 */
	busy = status & (ATA_BUSY | ATA_DRQ);
	if (!busy && !sata_pmp_attached(ap)) {
		rc = 0;
		goto out_restart;
	}

	if (!(hpriv->cap & HOST_CAP_CLO)) {
		rc = -EOPNOTSUPP;
		goto out_restart;
	}

	/* perform CLO */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_CLO;
	writel(tmp, port_mmio + PORT_CMD);

	rc = 0;
	tmp = ata_wait_register(port_mmio + PORT_CMD,
				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
	if (tmp & PORT_CMD_CLO)
		rc = -EIO;

	/* restart engine */
 out_restart:
	ahci_start_engine(ap);
	return rc;
}
EXPORT_SYMBOL_GPL(ahci_kick_engine);
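/*
 * Editorial note: PORT_CMD_CLO is the "command list override" bit -- it
 * makes the HBA clear the BSY and DRQ it mirrors in PORT_TFDATA so a new
 * command (such as the SRST FIS issued by ahci_do_softreset() below) can
 * be delivered to a device that still looks busy.  The bit self-clears
 * when the override completes, which is what the ata_wait_register()
 * poll above waits for.
 */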

static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
				struct ata_taskfile *tf, int is_cmd, u16 flags,
				unsigned long timeout_msec)
{
	const u32 cmd_fis_len = 5; /* five dwords */
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u8 *fis = pp->cmd_tbl;
	u32 tmp;

	/* prep the command */
	ata_tf_to_fis(tf, pmp, is_cmd, fis);
	ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));

	/* issue & wait */
	writel(1, port_mmio + PORT_CMD_ISSUE);

	if (timeout_msec) {
		tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
					1, timeout_msec);
		if (tmp & 0x1) {
			ahci_kick_engine(ap);
			return -EBUSY;
		}
	} else
		readl(port_mmio + PORT_CMD_ISSUE); /* flush */

	return 0;
}

int ahci_do_softreset(struct ata_link *link, unsigned int *class,
		      int pmp, unsigned long deadline,
		      int (*check_ready)(struct ata_link *link))
{
	struct ata_port *ap = link->ap;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	const char *reason = NULL;
	unsigned long now, msecs;
	struct ata_taskfile tf;
	int rc;

	DPRINTK("ENTER\n");

	/* prepare for SRST (AHCI-1.1 10.4.1) */
	rc = ahci_kick_engine(ap);
	if (rc && rc != -EOPNOTSUPP)
		ata_link_printk(link, KERN_WARNING,
				"failed to reset engine (errno=%d)\n", rc);

	ata_tf_init(link->device, &tf);

	/* issue the first H2D Register FIS */
	msecs = 0;
	now = jiffies;
	if (time_after(deadline, now))
		msecs = jiffies_to_msecs(deadline - now);

	tf.ctl |= ATA_SRST;
	if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
				 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
		rc = -EIO;
		reason = "1st FIS failed";
		goto fail;
	}

	/* spec says at least 5us, but be generous and sleep for 1ms */
	msleep(1);

	/* issue the second H2D Register FIS */
	tf.ctl &= ~ATA_SRST;
	ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);

	/* wait for link to become ready */
	rc = ata_wait_after_reset(link, deadline, check_ready);
	if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
		/*
		 * Workaround for cases where link online status can't
		 * be trusted.  Treat device readiness timeout as link
		 * offline.
		 */
		ata_link_printk(link, KERN_INFO,
				"device not ready, treating as offline\n");
		*class = ATA_DEV_NONE;
	} else if (rc) {
		/* link occupied, -ENODEV too is an error */
		reason = "device not ready";
		goto fail;
	} else
		*class = ahci_dev_classify(ap);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;

 fail:
	ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
	return rc;
}
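/*
 * Editorial note: AHCI software reset is two polled H2D Register FISes --
 * the first with ATA_SRST set in the device control field (issued with
 * AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY in the command header), the second
 * with ATA_SRST cleared.  The device then reports its signature, which
 * ahci_dev_classify() reads back from PORT_SIG.
 */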

int ahci_check_ready(struct ata_link *link)
{
	void __iomem *port_mmio = ahci_port_base(link->ap);
	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;

	return ata_check_ready(status);
}
EXPORT_SYMBOL_GPL(ahci_check_ready);

static int ahci_softreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	int pmp = sata_srst_pmp(link);

	DPRINTK("ENTER\n");

	return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
}
EXPORT_SYMBOL_GPL(ahci_do_softreset);

static int ahci_hardreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	bool online;
	int rc;

	DPRINTK("ENTER\n");

	ahci_stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.command = 0x80;
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	rc = sata_link_hardreset(link, timing, deadline, &online,
				 ahci_check_ready);

	ahci_start_engine(ap);

	if (online)
		*class = ahci_dev_classify(ap);

	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
	return rc;
}

static void ahci_postreset(struct ata_link *link, unsigned int *class)
{
	struct ata_port *ap = link->ap;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 new_tmp, tmp;

	ata_std_postreset(link, class);

	/* Make sure port's ATAPI bit is set appropriately */
	new_tmp = tmp = readl(port_mmio + PORT_CMD);
	if (*class == ATA_DEV_ATAPI)
		new_tmp |= PORT_CMD_ATAPI;
	else
		new_tmp &= ~PORT_CMD_ATAPI;
	if (new_tmp != tmp) {
		writel(new_tmp, port_mmio + PORT_CMD);
		readl(port_mmio + PORT_CMD); /* flush */
	}
}

static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
{
	struct scatterlist *sg;
	struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
	unsigned int si;

	VPRINTK("ENTER\n");

	/*
	 * Next, the S/G list.
	 */
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
		ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
		ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
	}

	return si;
}
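/*
 * Editorial note: "sg_len - 1" above is not an off-by-one -- the byte
 * count field of an AHCI PRD entry is zero-based, so a 4 KiB segment is
 * stored as 0xFFF.  The spec also expects bit 0 of the stored value to
 * be set (even transfer sizes), which the minus-one encoding satisfies
 * for any even segment length.
 */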

static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ahci_port_priv *pp = ap->private_data;

	if (!sata_pmp_attached(ap) || pp->fbs_enabled)
		return ata_std_qc_defer(qc);
	else
		return sata_pmp_qc_defer_cmd_switch(qc);
}

static void ahci_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ahci_port_priv *pp = ap->private_data;
	int is_atapi = ata_is_atapi(qc->tf.protocol);
	void *cmd_tbl;
	u32 opts;
	const u32 cmd_fis_len = 5; /* five dwords */
	unsigned int n_elem;

	/*
	 * Fill in command table information.  First, the header,
	 * a SATA Register - Host to Device command FIS.
	 */
	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;

	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
	if (is_atapi) {
		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
	}

	n_elem = 0;
	if (qc->flags & ATA_QCFLAG_DMAMAP)
		n_elem = ahci_fill_sg(qc, cmd_tbl);

	/*
	 * Fill in command slot information.
	 */
	opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		opts |= AHCI_CMD_WRITE;
	if (is_atapi)
		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;

	ahci_fill_cmd_slot(pp, qc->tag, opts);
}
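/*
 * Editorial sketch of the command-slot "opts" word built above (bit
 * positions taken from how this file assembles it): the low bits carry
 * the command FIS length in dwords (5 here), AHCI_CMD_WRITE /
 * AHCI_CMD_ATAPI / AHCI_CMD_PREFETCH set the direction and ATAPI flags,
 * bits 12-15 hold the PMP port number, and bits 16 and up the PRDT entry
 * count returned by ahci_fill_sg().
 */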

static void ahci_fbs_dec_intr(struct ata_port *ap)
{
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 fbs = readl(port_mmio + PORT_FBS);
	int retries = 3;

	DPRINTK("ENTER\n");
	BUG_ON(!pp->fbs_enabled);

	/* time to wait for DEC is not specified by AHCI spec,
	 * add a retry loop for safety.
	 */
	writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS);
	fbs = readl(port_mmio + PORT_FBS);
	while ((fbs & PORT_FBS_DEC) && retries--) {
		udelay(1);
		fbs = readl(port_mmio + PORT_FBS);
	}

	if (fbs & PORT_FBS_DEC)
		dev_printk(KERN_ERR, ap->host->dev,
			   "failed to clear device error\n");
}
1541 1541
1542 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat) 1542 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
1543 { 1543 {
1544 struct ahci_host_priv *hpriv = ap->host->private_data; 1544 struct ahci_host_priv *hpriv = ap->host->private_data;
1545 struct ahci_port_priv *pp = ap->private_data; 1545 struct ahci_port_priv *pp = ap->private_data;
1546 struct ata_eh_info *host_ehi = &ap->link.eh_info; 1546 struct ata_eh_info *host_ehi = &ap->link.eh_info;
1547 struct ata_link *link = NULL; 1547 struct ata_link *link = NULL;
1548 struct ata_queued_cmd *active_qc; 1548 struct ata_queued_cmd *active_qc;
1549 struct ata_eh_info *active_ehi; 1549 struct ata_eh_info *active_ehi;
1550 bool fbs_need_dec = false; 1550 bool fbs_need_dec = false;
1551 u32 serror; 1551 u32 serror;
1552 1552
1553 /* determine active link with error */ 1553 /* determine active link with error */
1554 if (pp->fbs_enabled) { 1554 if (pp->fbs_enabled) {
1555 void __iomem *port_mmio = ahci_port_base(ap); 1555 void __iomem *port_mmio = ahci_port_base(ap);
1556 u32 fbs = readl(port_mmio + PORT_FBS); 1556 u32 fbs = readl(port_mmio + PORT_FBS);
1557 int pmp = fbs >> PORT_FBS_DWE_OFFSET; 1557 int pmp = fbs >> PORT_FBS_DWE_OFFSET;
1558 1558
1559 if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) && 1559 if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) &&
1560 ata_link_online(&ap->pmp_link[pmp])) { 1560 ata_link_online(&ap->pmp_link[pmp])) {
1561 link = &ap->pmp_link[pmp]; 1561 link = &ap->pmp_link[pmp];
1562 fbs_need_dec = true; 1562 fbs_need_dec = true;
1563 } 1563 }
1564 1564
1565 } else 1565 } else
1566 ata_for_each_link(link, ap, EDGE) 1566 ata_for_each_link(link, ap, EDGE)
1567 if (ata_link_active(link)) 1567 if (ata_link_active(link))
1568 break; 1568 break;
1569 1569
1570 if (!link) 1570 if (!link)
1571 link = &ap->link; 1571 link = &ap->link;
1572 1572
1573 active_qc = ata_qc_from_tag(ap, link->active_tag); 1573 active_qc = ata_qc_from_tag(ap, link->active_tag);
1574 active_ehi = &link->eh_info; 1574 active_ehi = &link->eh_info;
1575 1575
1576 /* record irq stat */ 1576 /* record irq stat */
1577 ata_ehi_clear_desc(host_ehi); 1577 ata_ehi_clear_desc(host_ehi);
1578 ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat); 1578 ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
1579 1579
1580 /* AHCI needs SError cleared; otherwise, it might lock up */ 1580 /* AHCI needs SError cleared; otherwise, it might lock up */
1581 ahci_scr_read(&ap->link, SCR_ERROR, &serror); 1581 ahci_scr_read(&ap->link, SCR_ERROR, &serror);
1582 ahci_scr_write(&ap->link, SCR_ERROR, serror); 1582 ahci_scr_write(&ap->link, SCR_ERROR, serror);
1583 host_ehi->serror |= serror; 1583 host_ehi->serror |= serror;
1584 1584
1585 /* some controllers set IRQ_IF_ERR on device errors, ignore it */ 1585 /* some controllers set IRQ_IF_ERR on device errors, ignore it */
1586 if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR) 1586 if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
1587 irq_stat &= ~PORT_IRQ_IF_ERR; 1587 irq_stat &= ~PORT_IRQ_IF_ERR;
1588 1588
1589 if (irq_stat & PORT_IRQ_TF_ERR) { 1589 if (irq_stat & PORT_IRQ_TF_ERR) {
1590 /* If qc is active, charge it; otherwise, the active 1590 /* If qc is active, charge it; otherwise, the active
1591 * link. There's no active qc on NCQ errors. It will 1591 * link. There's no active qc on NCQ errors. It will
1592 * be determined by EH by reading log page 10h. 1592 * be determined by EH by reading log page 10h.
1593 */ 1593 */
1594 if (active_qc) 1594 if (active_qc)
1595 active_qc->err_mask |= AC_ERR_DEV; 1595 active_qc->err_mask |= AC_ERR_DEV;
1596 else 1596 else
1597 active_ehi->err_mask |= AC_ERR_DEV; 1597 active_ehi->err_mask |= AC_ERR_DEV;
1598 1598
1599 if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL) 1599 if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
1600 host_ehi->serror &= ~SERR_INTERNAL; 1600 host_ehi->serror &= ~SERR_INTERNAL;
1601 } 1601 }
1602 1602
1603 if (irq_stat & PORT_IRQ_UNK_FIS) { 1603 if (irq_stat & PORT_IRQ_UNK_FIS) {
1604 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK); 1604 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
1605 1605
1606 active_ehi->err_mask |= AC_ERR_HSM; 1606 active_ehi->err_mask |= AC_ERR_HSM;
1607 active_ehi->action |= ATA_EH_RESET; 1607 active_ehi->action |= ATA_EH_RESET;
1608 ata_ehi_push_desc(active_ehi, 1608 ata_ehi_push_desc(active_ehi,
1609 "unknown FIS %08x %08x %08x %08x" , 1609 "unknown FIS %08x %08x %08x %08x" ,
1610 unk[0], unk[1], unk[2], unk[3]); 1610 unk[0], unk[1], unk[2], unk[3]);
1611 } 1611 }
1612 1612
1613 if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) { 1613 if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
1614 active_ehi->err_mask |= AC_ERR_HSM; 1614 active_ehi->err_mask |= AC_ERR_HSM;
1615 active_ehi->action |= ATA_EH_RESET; 1615 active_ehi->action |= ATA_EH_RESET;
1616 ata_ehi_push_desc(active_ehi, "incorrect PMP"); 1616 ata_ehi_push_desc(active_ehi, "incorrect PMP");
1617 } 1617 }
1618 1618
1619 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) { 1619 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
1620 host_ehi->err_mask |= AC_ERR_HOST_BUS; 1620 host_ehi->err_mask |= AC_ERR_HOST_BUS;
1621 host_ehi->action |= ATA_EH_RESET; 1621 host_ehi->action |= ATA_EH_RESET;
1622 ata_ehi_push_desc(host_ehi, "host bus error"); 1622 ata_ehi_push_desc(host_ehi, "host bus error");
1623 } 1623 }
1624 1624
1625 if (irq_stat & PORT_IRQ_IF_ERR) { 1625 if (irq_stat & PORT_IRQ_IF_ERR) {
1626 if (fbs_need_dec) 1626 if (fbs_need_dec)
1627 active_ehi->err_mask |= AC_ERR_DEV; 1627 active_ehi->err_mask |= AC_ERR_DEV;
1628 else { 1628 else {
1629 host_ehi->err_mask |= AC_ERR_ATA_BUS; 1629 host_ehi->err_mask |= AC_ERR_ATA_BUS;
1630 host_ehi->action |= ATA_EH_RESET; 1630 host_ehi->action |= ATA_EH_RESET;
1631 } 1631 }
1632 1632
1633 ata_ehi_push_desc(host_ehi, "interface fatal error"); 1633 ata_ehi_push_desc(host_ehi, "interface fatal error");
1634 } 1634 }
1635 1635
1636 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) { 1636 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
1637 ata_ehi_hotplugged(host_ehi); 1637 ata_ehi_hotplugged(host_ehi);
1638 ata_ehi_push_desc(host_ehi, "%s", 1638 ata_ehi_push_desc(host_ehi, "%s",
1639 irq_stat & PORT_IRQ_CONNECT ? 1639 irq_stat & PORT_IRQ_CONNECT ?
1640 "connection status changed" : "PHY RDY changed"); 1640 "connection status changed" : "PHY RDY changed");
1641 } 1641 }
1642 1642
1643 /* okay, let's hand over to EH */ 1643 /* okay, let's hand over to EH */
1644 1644
1645 if (irq_stat & PORT_IRQ_FREEZE) 1645 if (irq_stat & PORT_IRQ_FREEZE)
1646 ata_port_freeze(ap); 1646 ata_port_freeze(ap);
1647 else if (fbs_need_dec) { 1647 else if (fbs_need_dec) {
1648 ata_link_abort(link); 1648 ata_link_abort(link);
1649 ahci_fbs_dec_intr(ap); 1649 ahci_fbs_dec_intr(ap);
1650 } else 1650 } else
1651 ata_port_abort(ap); 1651 ata_port_abort(ap);
1652 } 1652 }
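
The FBS branch at the top of this handler singles out the erring PMP link from the PORT_FBS register. A minimal sketch of that decode, assuming the PORT_FBS_* constants from ahci.h; the helper name is hypothetical, not part of the driver:

/* SDE flags a single-device error; DWE names the offending PMP port */
static int ahci_fbs_err_pmp(u32 fbs, int nr_pmp_links)
{
	int pmp = fbs >> PORT_FBS_DWE_OFFSET;

	if ((fbs & PORT_FBS_SDE) && pmp < nr_pmp_links)
		return pmp;

	return -1;	/* unknown; fall back to scanning active links */
}
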
1653 1653
1654 static void ahci_port_intr(struct ata_port *ap) 1654 static void ahci_port_intr(struct ata_port *ap)
1655 { 1655 {
1656 void __iomem *port_mmio = ahci_port_base(ap); 1656 void __iomem *port_mmio = ahci_port_base(ap);
1657 struct ata_eh_info *ehi = &ap->link.eh_info; 1657 struct ata_eh_info *ehi = &ap->link.eh_info;
1658 struct ahci_port_priv *pp = ap->private_data; 1658 struct ahci_port_priv *pp = ap->private_data;
1659 struct ahci_host_priv *hpriv = ap->host->private_data; 1659 struct ahci_host_priv *hpriv = ap->host->private_data;
1660 int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING); 1660 int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
1661 u32 status, qc_active = 0; 1661 u32 status, qc_active = 0;
1662 int rc; 1662 int rc;
1663 1663
1664 status = readl(port_mmio + PORT_IRQ_STAT); 1664 status = readl(port_mmio + PORT_IRQ_STAT);
1665 writel(status, port_mmio + PORT_IRQ_STAT); 1665 writel(status, port_mmio + PORT_IRQ_STAT);
1666 1666
1667 /* ignore BAD_PMP while resetting */ 1667 /* ignore BAD_PMP while resetting */
1668 if (unlikely(resetting)) 1668 if (unlikely(resetting))
1669 status &= ~PORT_IRQ_BAD_PMP; 1669 status &= ~PORT_IRQ_BAD_PMP;
1670 1670
1671 /* If we are getting PhyRdy, this is 1671 /* If we are getting PhyRdy, this is
1672 * just a power state change; clear 1672 * just a power state change; clear
1673 * the interrupt here, plus the PhyRdy/ 1673 * the interrupt here, plus the PhyRdy/
1674 * Comm Wake bits from SError 1674 * Comm Wake bits from SError
1675 */ 1675 */
1676 if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) && 1676 if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
1677 (status & PORT_IRQ_PHYRDY)) { 1677 (status & PORT_IRQ_PHYRDY)) {
1678 status &= ~PORT_IRQ_PHYRDY; 1678 status &= ~PORT_IRQ_PHYRDY;
1679 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18))); 1679 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
1680 } 1680 }
1681 1681
1682 if (unlikely(status & PORT_IRQ_ERROR)) { 1682 if (unlikely(status & PORT_IRQ_ERROR)) {
1683 ahci_error_intr(ap, status); 1683 ahci_error_intr(ap, status);
1684 return; 1684 return;
1685 } 1685 }
1686 1686
1687 if (status & PORT_IRQ_SDB_FIS) { 1687 if (status & PORT_IRQ_SDB_FIS) {
1688 /* If SNotification is available, leave notification 1688 /* If SNotification is available, leave notification
1689 * handling to sata_async_notification(). If not, 1689 * handling to sata_async_notification(). If not,
1690 * emulate it by snooping SDB FIS RX area. 1690 * emulate it by snooping SDB FIS RX area.
1691 * 1691 *
1692 * Snooping FIS RX area is probably cheaper than 1692 * Snooping FIS RX area is probably cheaper than
1693 * poking SNotification but some controllers which 1693 * poking SNotification but some controllers which
1694 * implement SNotification, ICH9 for example, don't 1694 * implement SNotification, ICH9 for example, don't
1695 * store AN SDB FIS into receive area. 1695 * store AN SDB FIS into receive area.
1696 */ 1696 */
1697 if (hpriv->cap & HOST_CAP_SNTF) 1697 if (hpriv->cap & HOST_CAP_SNTF)
1698 sata_async_notification(ap); 1698 sata_async_notification(ap);
1699 else { 1699 else {
1700 /* If the 'N' bit in word 0 of the FIS is set, 1700 /* If the 'N' bit in word 0 of the FIS is set,
1701 * we just received asynchronous notification. 1701 * we just received asynchronous notification.
1702 * Tell libata about it. 1702 * Tell libata about it.
1703 * 1703 *
1704 * Lack of SNotification should not appear in 1704 * Lack of SNotification should not appear in
1705 * ahci 1.2, so the workaround is unnecessary 1705 * ahci 1.2, so the workaround is unnecessary
1706 * when FBS is enabled. 1706 * when FBS is enabled.
1707 */ 1707 */
1708 if (pp->fbs_enabled) 1708 if (pp->fbs_enabled)
1709 WARN_ON_ONCE(1); 1709 WARN_ON_ONCE(1);
1710 else { 1710 else {
1711 const __le32 *f = pp->rx_fis + RX_FIS_SDB; 1711 const __le32 *f = pp->rx_fis + RX_FIS_SDB;
1712 u32 f0 = le32_to_cpu(f[0]); 1712 u32 f0 = le32_to_cpu(f[0]);
1713 if (f0 & (1 << 15)) 1713 if (f0 & (1 << 15))
1714 sata_async_notification(ap); 1714 sata_async_notification(ap);
1715 } 1715 }
1716 } 1716 }
1717 } 1717 }
1718 1718
1719 /* pp->active_link is not reliable once FBS is enabled, both 1719 /* pp->active_link is not reliable once FBS is enabled, both
1720 * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because 1720 * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
1721 * NCQ and non-NCQ commands may be in flight at the same time. 1721 * NCQ and non-NCQ commands may be in flight at the same time.
1722 */ 1722 */
1723 if (pp->fbs_enabled) { 1723 if (pp->fbs_enabled) {
1724 if (ap->qc_active) { 1724 if (ap->qc_active) {
1725 qc_active = readl(port_mmio + PORT_SCR_ACT); 1725 qc_active = readl(port_mmio + PORT_SCR_ACT);
1726 qc_active |= readl(port_mmio + PORT_CMD_ISSUE); 1726 qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
1727 } 1727 }
1728 } else { 1728 } else {
1729 /* pp->active_link is valid iff any command is in flight */ 1729 /* pp->active_link is valid iff any command is in flight */
1730 if (ap->qc_active && pp->active_link->sactive) 1730 if (ap->qc_active && pp->active_link->sactive)
1731 qc_active = readl(port_mmio + PORT_SCR_ACT); 1731 qc_active = readl(port_mmio + PORT_SCR_ACT);
1732 else 1732 else
1733 qc_active = readl(port_mmio + PORT_CMD_ISSUE); 1733 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
1734 } 1734 }
1735 1735
1736 1736
1737 rc = ata_qc_complete_multiple(ap, qc_active); 1737 rc = ata_qc_complete_multiple(ap, qc_active);
1738 1738
1739 /* while resetting, invalid completions are expected */ 1739 /* while resetting, invalid completions are expected */
1740 if (unlikely(rc < 0 && !resetting)) { 1740 if (unlikely(rc < 0 && !resetting)) {
1741 ehi->err_mask |= AC_ERR_HSM; 1741 ehi->err_mask |= AC_ERR_HSM;
1742 ehi->action |= ATA_EH_RESET; 1742 ehi->action |= ATA_EH_RESET;
1743 ata_port_freeze(ap); 1743 ata_port_freeze(ap);
1744 } 1744 }
1745 } 1745 }
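
The completion-mask logic above condenses to the following hedged sketch (the helper is illustrative, not part of the driver): once FBS is on, NCQ and non-NCQ commands can be in flight at once, so PORT_SCR_ACT and PORT_CMD_ISSUE must be merged.

static u32 ahci_port_qc_active(void __iomem *port_mmio, bool fbs_enabled,
			       bool ncq_phase)
{
	if (fbs_enabled)	/* NCQ and non-NCQ may overlap under FBS */
		return readl(port_mmio + PORT_SCR_ACT) |
		       readl(port_mmio + PORT_CMD_ISSUE);

	return ncq_phase ? readl(port_mmio + PORT_SCR_ACT)
			 : readl(port_mmio + PORT_CMD_ISSUE);
}
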
1746 1746
1747 irqreturn_t ahci_interrupt(int irq, void *dev_instance) 1747 irqreturn_t ahci_interrupt(int irq, void *dev_instance)
1748 { 1748 {
1749 struct ata_host *host = dev_instance; 1749 struct ata_host *host = dev_instance;
1750 struct ahci_host_priv *hpriv; 1750 struct ahci_host_priv *hpriv;
1751 unsigned int i, handled = 0; 1751 unsigned int i, handled = 0;
1752 void __iomem *mmio; 1752 void __iomem *mmio;
1753 u32 irq_stat, irq_masked; 1753 u32 irq_stat, irq_masked;
1754 1754
1755 VPRINTK("ENTER\n"); 1755 VPRINTK("ENTER\n");
1756 1756
1757 hpriv = host->private_data; 1757 hpriv = host->private_data;
1758 mmio = hpriv->mmio; 1758 mmio = hpriv->mmio;
1759 1759
1760 /* sigh. 0xffffffff is a valid return from h/w */ 1760 /* sigh. 0xffffffff is a valid return from h/w */
1761 irq_stat = readl(mmio + HOST_IRQ_STAT); 1761 irq_stat = readl(mmio + HOST_IRQ_STAT);
1762 if (!irq_stat) 1762 if (!irq_stat)
1763 return IRQ_NONE; 1763 return IRQ_NONE;
1764 1764
1765 irq_masked = irq_stat & hpriv->port_map; 1765 irq_masked = irq_stat & hpriv->port_map;
1766 1766
1767 spin_lock(&host->lock); 1767 spin_lock(&host->lock);
1768 1768
1769 for (i = 0; i < host->n_ports; i++) { 1769 for (i = 0; i < host->n_ports; i++) {
1770 struct ata_port *ap; 1770 struct ata_port *ap;
1771 1771
1772 if (!(irq_masked & (1 << i))) 1772 if (!(irq_masked & (1 << i)))
1773 continue; 1773 continue;
1774 1774
1775 ap = host->ports[i]; 1775 ap = host->ports[i];
1776 if (ap) { 1776 if (ap) {
1777 ahci_port_intr(ap); 1777 ahci_port_intr(ap);
1778 VPRINTK("port %u\n", i); 1778 VPRINTK("port %u\n", i);
1779 } else { 1779 } else {
1780 VPRINTK("port %u (no irq)\n", i); 1780 VPRINTK("port %u (no irq)\n", i);
1781 if (ata_ratelimit()) 1781 if (ata_ratelimit())
1782 dev_printk(KERN_WARNING, host->dev, 1782 dev_printk(KERN_WARNING, host->dev,
1783 "interrupt on disabled port %u\n", i); 1783 "interrupt on disabled port %u\n", i);
1784 } 1784 }
1785 1785
1786 handled = 1; 1786 handled = 1;
1787 } 1787 }
1788 1788
1789 /* HOST_IRQ_STAT behaves as a level-triggered latch, meaning that 1789 /* HOST_IRQ_STAT behaves as a level-triggered latch, meaning that
1790 * it should be cleared after all the port events are cleared; 1790 * it should be cleared after all the port events are cleared;
1791 * otherwise, it will raise a spurious interrupt after each 1791 * otherwise, it will raise a spurious interrupt after each
1792 * valid one. Please read section 10.6.2 of ahci 1.1 for more 1792 * valid one. Please read section 10.6.2 of ahci 1.1 for more
1793 * information. 1793 * information.
1794 * 1794 *
1795 * Also, use the unmasked value to clear the interrupt; a spurious 1795 * Also, use the unmasked value to clear the interrupt; a spurious
1796 * pending event on a dummy port might cause a screaming IRQ. 1796 * pending event on a dummy port might cause a screaming IRQ.
1797 */ 1797 */
1798 writel(irq_stat, mmio + HOST_IRQ_STAT); 1798 writel(irq_stat, mmio + HOST_IRQ_STAT);
1799 1799
1800 spin_unlock(&host->lock); 1800 spin_unlock(&host->lock);
1801 1801
1802 VPRINTK("EXIT\n"); 1802 VPRINTK("EXIT\n");
1803 1803
1804 return IRQ_RETVAL(handled); 1804 return IRQ_RETVAL(handled);
1805 } 1805 }
1806 EXPORT_SYMBOL_GPL(ahci_interrupt); 1806 EXPORT_SYMBOL_GPL(ahci_interrupt);
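
ahci_interrupt() is exported so the glue drivers layered on libahci can register it as their shared interrupt handler. A hedged usage sketch; the surrounding variables are assumptions, not taken from this file:

/* e.g. in a PCI or platform glue driver's probe path */
rc = devm_request_irq(host->dev, irq, ahci_interrupt,
		      IRQF_SHARED, "ahci", host);
if (rc)
	return rc;
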
1807 1807
1808 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc) 1808 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
1809 { 1809 {
1810 struct ata_port *ap = qc->ap; 1810 struct ata_port *ap = qc->ap;
1811 void __iomem *port_mmio = ahci_port_base(ap); 1811 void __iomem *port_mmio = ahci_port_base(ap);
1812 struct ahci_port_priv *pp = ap->private_data; 1812 struct ahci_port_priv *pp = ap->private_data;
1813 1813
1814 /* Keep track of the currently active link. It will be used 1814 /* Keep track of the currently active link. It will be used
1815 * in completion path to determine whether NCQ phase is in 1815 * in completion path to determine whether NCQ phase is in
1816 * progress. 1816 * progress.
1817 */ 1817 */
1818 pp->active_link = qc->dev->link; 1818 pp->active_link = qc->dev->link;
1819 1819
1820 if (qc->tf.protocol == ATA_PROT_NCQ) 1820 if (qc->tf.protocol == ATA_PROT_NCQ)
1821 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT); 1821 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
1822 1822
1823 if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) { 1823 if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
1824 u32 fbs = readl(port_mmio + PORT_FBS); 1824 u32 fbs = readl(port_mmio + PORT_FBS);
1825 fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC); 1825 fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
1826 fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET; 1826 fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
1827 writel(fbs, port_mmio + PORT_FBS); 1827 writel(fbs, port_mmio + PORT_FBS);
1828 pp->fbs_last_dev = qc->dev->link->pmp; 1828 pp->fbs_last_dev = qc->dev->link->pmp;
1829 } 1829 }
1830 1830
1831 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE); 1831 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
1832 1832
1833 ahci_sw_activity(qc->dev->link); 1833 ahci_sw_activity(qc->dev->link);
1834 1834
1835 return 0; 1835 return 0;
1836 } 1836 }
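
The tag-to-bit mapping used above is one-to-one: command slot n is started by setting bit n. For example, issuing an NCQ command in slot 5 performs:

writel(1 << 5, port_mmio + PORT_SCR_ACT);	/* mark tag 5 outstanding */
writel(1 << 5, port_mmio + PORT_CMD_ISSUE);	/* start DMA for slot 5 */
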
1837 1837
1838 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc) 1838 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
1839 { 1839 {
1840 struct ahci_port_priv *pp = qc->ap->private_data; 1840 struct ahci_port_priv *pp = qc->ap->private_data;
1841 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; 1841 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1842 1842
1843 if (pp->fbs_enabled) 1843 if (pp->fbs_enabled)
1844 d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ; 1844 d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
1845 1845
1846 ata_tf_from_fis(d2h_fis, &qc->result_tf); 1846 ata_tf_from_fis(d2h_fis, &qc->result_tf);
1847 return true; 1847 return true;
1848 } 1848 }
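
Under FBS the receive-FIS area is replicated per PMP device, so the adjustment above lands on that device's private slice. A worked example, assuming the usual AHCI_RX_FIS_SZ of 256 bytes:

/* D2H register FIS for fan-out port 2 under FBS: 512 bytes into rx_fis */
u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG + 2 * AHCI_RX_FIS_SZ;
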
1849 1849
1850 static void ahci_freeze(struct ata_port *ap) 1850 static void ahci_freeze(struct ata_port *ap)
1851 { 1851 {
1852 void __iomem *port_mmio = ahci_port_base(ap); 1852 void __iomem *port_mmio = ahci_port_base(ap);
1853 1853
1854 /* turn IRQ off */ 1854 /* turn IRQ off */
1855 writel(0, port_mmio + PORT_IRQ_MASK); 1855 writel(0, port_mmio + PORT_IRQ_MASK);
1856 } 1856 }
1857 1857
1858 static void ahci_thaw(struct ata_port *ap) 1858 static void ahci_thaw(struct ata_port *ap)
1859 { 1859 {
1860 struct ahci_host_priv *hpriv = ap->host->private_data; 1860 struct ahci_host_priv *hpriv = ap->host->private_data;
1861 void __iomem *mmio = hpriv->mmio; 1861 void __iomem *mmio = hpriv->mmio;
1862 void __iomem *port_mmio = ahci_port_base(ap); 1862 void __iomem *port_mmio = ahci_port_base(ap);
1863 u32 tmp; 1863 u32 tmp;
1864 struct ahci_port_priv *pp = ap->private_data; 1864 struct ahci_port_priv *pp = ap->private_data;
1865 1865
1866 /* clear IRQ */ 1866 /* clear IRQ */
1867 tmp = readl(port_mmio + PORT_IRQ_STAT); 1867 tmp = readl(port_mmio + PORT_IRQ_STAT);
1868 writel(tmp, port_mmio + PORT_IRQ_STAT); 1868 writel(tmp, port_mmio + PORT_IRQ_STAT);
1869 writel(1 << ap->port_no, mmio + HOST_IRQ_STAT); 1869 writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
1870 1870
1871 /* turn IRQ back on */ 1871 /* turn IRQ back on */
1872 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); 1872 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1873 } 1873 }
1874 1874
1875 static void ahci_error_handler(struct ata_port *ap) 1875 static void ahci_error_handler(struct ata_port *ap)
1876 { 1876 {
1877 if (!(ap->pflags & ATA_PFLAG_FROZEN)) { 1877 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
1878 /* restart engine */ 1878 /* restart engine */
1879 ahci_stop_engine(ap); 1879 ahci_stop_engine(ap);
1880 ahci_start_engine(ap); 1880 ahci_start_engine(ap);
1881 } 1881 }
1882 1882
1883 sata_pmp_error_handler(ap); 1883 sata_pmp_error_handler(ap);
1884 1884
1885 if (!ata_dev_enabled(ap->link.device)) 1885 if (!ata_dev_enabled(ap->link.device))
1886 ahci_stop_engine(ap); 1886 ahci_stop_engine(ap);
1887 } 1887 }
1888 1888
1889 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc) 1889 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
1890 { 1890 {
1891 struct ata_port *ap = qc->ap; 1891 struct ata_port *ap = qc->ap;
1892 1892
1893 /* make DMA engine forget about the failed command */ 1893 /* make DMA engine forget about the failed command */
1894 if (qc->flags & ATA_QCFLAG_FAILED) 1894 if (qc->flags & ATA_QCFLAG_FAILED)
1895 ahci_kick_engine(ap); 1895 ahci_kick_engine(ap);
1896 } 1896 }
1897 1897
1898 static void ahci_enable_fbs(struct ata_port *ap) 1898 static void ahci_enable_fbs(struct ata_port *ap)
1899 { 1899 {
1900 struct ahci_port_priv *pp = ap->private_data; 1900 struct ahci_port_priv *pp = ap->private_data;
1901 void __iomem *port_mmio = ahci_port_base(ap); 1901 void __iomem *port_mmio = ahci_port_base(ap);
1902 u32 fbs; 1902 u32 fbs;
1903 int rc; 1903 int rc;
1904 1904
1905 if (!pp->fbs_supported) 1905 if (!pp->fbs_supported)
1906 return; 1906 return;
1907 1907
1908 fbs = readl(port_mmio + PORT_FBS); 1908 fbs = readl(port_mmio + PORT_FBS);
1909 if (fbs & PORT_FBS_EN) { 1909 if (fbs & PORT_FBS_EN) {
1910 pp->fbs_enabled = true; 1910 pp->fbs_enabled = true;
1911 pp->fbs_last_dev = -1; /* initialization */ 1911 pp->fbs_last_dev = -1; /* initialization */
1912 return; 1912 return;
1913 } 1913 }
1914 1914
1915 rc = ahci_stop_engine(ap); 1915 rc = ahci_stop_engine(ap);
1916 if (rc) 1916 if (rc)
1917 return; 1917 return;
1918 1918
1919 writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS); 1919 writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
1920 fbs = readl(port_mmio + PORT_FBS); 1920 fbs = readl(port_mmio + PORT_FBS);
1921 if (fbs & PORT_FBS_EN) { 1921 if (fbs & PORT_FBS_EN) {
1922 dev_printk(KERN_INFO, ap->host->dev, "FBS is enabled.\n"); 1922 dev_printk(KERN_INFO, ap->host->dev, "FBS is enabled.\n");
1923 pp->fbs_enabled = true; 1923 pp->fbs_enabled = true;
1924 pp->fbs_last_dev = -1; /* initialization */ 1924 pp->fbs_last_dev = -1; /* initialization */
1925 } else 1925 } else
1926 dev_printk(KERN_ERR, ap->host->dev, "Failed to enable FBS\n"); 1926 dev_printk(KERN_ERR, ap->host->dev, "Failed to enable FBS\n");
1927 1927
1928 ahci_start_engine(ap); 1928 ahci_start_engine(ap);
1929 } 1929 }
1930 1930
1931 static void ahci_disable_fbs(struct ata_port *ap) 1931 static void ahci_disable_fbs(struct ata_port *ap)
1932 { 1932 {
1933 struct ahci_port_priv *pp = ap->private_data; 1933 struct ahci_port_priv *pp = ap->private_data;
1934 void __iomem *port_mmio = ahci_port_base(ap); 1934 void __iomem *port_mmio = ahci_port_base(ap);
1935 u32 fbs; 1935 u32 fbs;
1936 int rc; 1936 int rc;
1937 1937
1938 if (!pp->fbs_supported) 1938 if (!pp->fbs_supported)
1939 return; 1939 return;
1940 1940
1941 fbs = readl(port_mmio + PORT_FBS); 1941 fbs = readl(port_mmio + PORT_FBS);
1942 if ((fbs & PORT_FBS_EN) == 0) { 1942 if ((fbs & PORT_FBS_EN) == 0) {
1943 pp->fbs_enabled = false; 1943 pp->fbs_enabled = false;
1944 return; 1944 return;
1945 } 1945 }
1946 1946
1947 rc = ahci_stop_engine(ap); 1947 rc = ahci_stop_engine(ap);
1948 if (rc) 1948 if (rc)
1949 return; 1949 return;
1950 1950
1951 writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS); 1951 writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS);
1952 fbs = readl(port_mmio + PORT_FBS); 1952 fbs = readl(port_mmio + PORT_FBS);
1953 if (fbs & PORT_FBS_EN) 1953 if (fbs & PORT_FBS_EN)
1954 dev_printk(KERN_ERR, ap->host->dev, "Failed to disable FBS\n"); 1954 dev_printk(KERN_ERR, ap->host->dev, "Failed to disable FBS\n");
1955 else { 1955 else {
1956 dev_printk(KERN_INFO, ap->host->dev, "FBS is disabled.\n"); 1956 dev_printk(KERN_INFO, ap->host->dev, "FBS is disabled.\n");
1957 pp->fbs_enabled = false; 1957 pp->fbs_enabled = false;
1958 } 1958 }
1959 1959
1960 ahci_start_engine(ap); 1960 ahci_start_engine(ap);
1961 } 1961 }
1962 1962
1963 static void ahci_pmp_attach(struct ata_port *ap) 1963 static void ahci_pmp_attach(struct ata_port *ap)
1964 { 1964 {
1965 void __iomem *port_mmio = ahci_port_base(ap); 1965 void __iomem *port_mmio = ahci_port_base(ap);
1966 struct ahci_port_priv *pp = ap->private_data; 1966 struct ahci_port_priv *pp = ap->private_data;
1967 u32 cmd; 1967 u32 cmd;
1968 1968
1969 cmd = readl(port_mmio + PORT_CMD); 1969 cmd = readl(port_mmio + PORT_CMD);
1970 cmd |= PORT_CMD_PMP; 1970 cmd |= PORT_CMD_PMP;
1971 writel(cmd, port_mmio + PORT_CMD); 1971 writel(cmd, port_mmio + PORT_CMD);
1972 1972
1973 ahci_enable_fbs(ap); 1973 ahci_enable_fbs(ap);
1974 1974
1975 pp->intr_mask |= PORT_IRQ_BAD_PMP; 1975 pp->intr_mask |= PORT_IRQ_BAD_PMP;
1976 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); 1976 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1977 } 1977 }
1978 1978
1979 static void ahci_pmp_detach(struct ata_port *ap) 1979 static void ahci_pmp_detach(struct ata_port *ap)
1980 { 1980 {
1981 void __iomem *port_mmio = ahci_port_base(ap); 1981 void __iomem *port_mmio = ahci_port_base(ap);
1982 struct ahci_port_priv *pp = ap->private_data; 1982 struct ahci_port_priv *pp = ap->private_data;
1983 u32 cmd; 1983 u32 cmd;
1984 1984
1985 ahci_disable_fbs(ap); 1985 ahci_disable_fbs(ap);
1986 1986
1987 cmd = readl(port_mmio + PORT_CMD); 1987 cmd = readl(port_mmio + PORT_CMD);
1988 cmd &= ~PORT_CMD_PMP; 1988 cmd &= ~PORT_CMD_PMP;
1989 writel(cmd, port_mmio + PORT_CMD); 1989 writel(cmd, port_mmio + PORT_CMD);
1990 1990
1991 pp->intr_mask &= ~PORT_IRQ_BAD_PMP; 1991 pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
1992 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); 1992 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1993 } 1993 }
1994 1994
1995 static int ahci_port_resume(struct ata_port *ap) 1995 static int ahci_port_resume(struct ata_port *ap)
1996 { 1996 {
1997 ahci_power_up(ap); 1997 ahci_power_up(ap);
1998 ahci_start_port(ap); 1998 ahci_start_port(ap);
1999 1999
2000 if (sata_pmp_attached(ap)) 2000 if (sata_pmp_attached(ap))
2001 ahci_pmp_attach(ap); 2001 ahci_pmp_attach(ap);
2002 else 2002 else
2003 ahci_pmp_detach(ap); 2003 ahci_pmp_detach(ap);
2004 2004
2005 return 0; 2005 return 0;
2006 } 2006 }
2007 2007
2008 #ifdef CONFIG_PM 2008 #ifdef CONFIG_PM
2009 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg) 2009 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
2010 { 2010 {
2011 const char *emsg = NULL; 2011 const char *emsg = NULL;
2012 int rc; 2012 int rc;
2013 2013
2014 rc = ahci_deinit_port(ap, &emsg); 2014 rc = ahci_deinit_port(ap, &emsg);
2015 if (rc == 0) 2015 if (rc == 0)
2016 ahci_power_down(ap); 2016 ahci_power_down(ap);
2017 else { 2017 else {
2018 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc); 2018 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
2019 ahci_start_port(ap); 2019 ahci_start_port(ap);
2020 } 2020 }
2021 2021
2022 return rc; 2022 return rc;
2023 } 2023 }
2024 #endif 2024 #endif
2025 2025
2026 static int ahci_port_start(struct ata_port *ap) 2026 static int ahci_port_start(struct ata_port *ap)
2027 { 2027 {
2028 struct ahci_host_priv *hpriv = ap->host->private_data; 2028 struct ahci_host_priv *hpriv = ap->host->private_data;
2029 struct device *dev = ap->host->dev; 2029 struct device *dev = ap->host->dev;
2030 struct ahci_port_priv *pp; 2030 struct ahci_port_priv *pp;
2031 void *mem; 2031 void *mem;
2032 dma_addr_t mem_dma; 2032 dma_addr_t mem_dma;
2033 size_t dma_sz, rx_fis_sz; 2033 size_t dma_sz, rx_fis_sz;
2034 2034
2035 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); 2035 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
2036 if (!pp) 2036 if (!pp)
2037 return -ENOMEM; 2037 return -ENOMEM;
2038 2038
2039 /* check FBS capability */ 2039 /* check FBS capability */
2040 if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) { 2040 if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
2041 void __iomem *port_mmio = ahci_port_base(ap); 2041 void __iomem *port_mmio = ahci_port_base(ap);
2042 u32 cmd = readl(port_mmio + PORT_CMD); 2042 u32 cmd = readl(port_mmio + PORT_CMD);
2043 if (cmd & PORT_CMD_FBSCP) 2043 if (cmd & PORT_CMD_FBSCP)
2044 pp->fbs_supported = true; 2044 pp->fbs_supported = true;
2045 else if (hpriv->flags & AHCI_HFLAG_YES_FBS) { 2045 else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
2046 dev_printk(KERN_INFO, dev, 2046 dev_printk(KERN_INFO, dev,
2047 "port %d can do FBS, forcing FBSCP\n", 2047 "port %d can do FBS, forcing FBSCP\n",
2048 ap->port_no); 2048 ap->port_no);
2049 pp->fbs_supported = true; 2049 pp->fbs_supported = true;
2050 } else 2050 } else
2051 dev_printk(KERN_WARNING, dev, 2051 dev_printk(KERN_WARNING, dev,
2052 "port %d is not capable of FBS\n", 2052 "port %d is not capable of FBS\n",
2053 ap->port_no); 2053 ap->port_no);
2054 } 2054 }
2055 2055
2056 if (pp->fbs_supported) { 2056 if (pp->fbs_supported) {
2057 dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ; 2057 dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
2058 rx_fis_sz = AHCI_RX_FIS_SZ * 16; 2058 rx_fis_sz = AHCI_RX_FIS_SZ * 16;
2059 } else { 2059 } else {
2060 dma_sz = AHCI_PORT_PRIV_DMA_SZ; 2060 dma_sz = AHCI_PORT_PRIV_DMA_SZ;
2061 rx_fis_sz = AHCI_RX_FIS_SZ; 2061 rx_fis_sz = AHCI_RX_FIS_SZ;
2062 } 2062 }
2063 2063
2064 mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL); 2064 mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
2065 if (!mem) 2065 if (!mem)
2066 return -ENOMEM; 2066 return -ENOMEM;
2067 memset(mem, 0, dma_sz); 2067 memset(mem, 0, dma_sz);
2068 2068
2069 /* 2069 /*
2070 * First item in chunk of DMA memory: 32-slot command table, 2070 * First item in chunk of DMA memory: 32-slot command table,
2071 * 32 bytes each in size 2071 * 32 bytes each in size
2072 */ 2072 */
2073 pp->cmd_slot = mem; 2073 pp->cmd_slot = mem;
2074 pp->cmd_slot_dma = mem_dma; 2074 pp->cmd_slot_dma = mem_dma;
2075 2075
2076 mem += AHCI_CMD_SLOT_SZ; 2076 mem += AHCI_CMD_SLOT_SZ;
2077 mem_dma += AHCI_CMD_SLOT_SZ; 2077 mem_dma += AHCI_CMD_SLOT_SZ;
2078 2078
2079 /* 2079 /*
2080 * Second item: Received-FIS area 2080 * Second item: Received-FIS area
2081 */ 2081 */
2082 pp->rx_fis = mem; 2082 pp->rx_fis = mem;
2083 pp->rx_fis_dma = mem_dma; 2083 pp->rx_fis_dma = mem_dma;
2084 2084
2085 mem += rx_fis_sz; 2085 mem += rx_fis_sz;
2086 mem_dma += rx_fis_sz; 2086 mem_dma += rx_fis_sz;
2087 2087
2088 /* 2088 /*
2089 * Third item: data area for storing a single command 2089 * Third item: data area for storing a single command
2090 * and its scatter-gather table 2090 * and its scatter-gather table
2091 */ 2091 */
2092 pp->cmd_tbl = mem; 2092 pp->cmd_tbl = mem;
2093 pp->cmd_tbl_dma = mem_dma; 2093 pp->cmd_tbl_dma = mem_dma;
2094 2094
2095 /* 2095 /*
2096 * Save off initial list of interrupts to be enabled. 2096 * Save off initial list of interrupts to be enabled.
2097 * This could be changed later 2097 * This could be changed later
2098 */ 2098 */
2099 pp->intr_mask = DEF_PORT_IRQ; 2099 pp->intr_mask = DEF_PORT_IRQ;
2100 2100
2101 ap->private_data = pp; 2101 ap->private_data = pp;
2102 2102
2103 /* engage engines, captain */ 2103 /* engage engines, captain */
2104 return ahci_port_resume(ap); 2104 return ahci_port_resume(ap);
2105 } 2105 }
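
The single coherent allocation above is carved into three regions; a sketch of the layout (sizes are the AHCI_* constants, and FBS-capable ports get 16 RX FIS slices, one per possible PMP device):

/*
 * mem_dma + 0                                cmd_slot: 32 command headers
 * mem_dma + AHCI_CMD_SLOT_SZ                 rx_fis:   received-FIS area(s)
 * mem_dma + AHCI_CMD_SLOT_SZ + rx_fis_sz     cmd_tbl:  command table + sg list
 */
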
2106 2106
2107 static void ahci_port_stop(struct ata_port *ap) 2107 static void ahci_port_stop(struct ata_port *ap)
2108 { 2108 {
2109 const char *emsg = NULL; 2109 const char *emsg = NULL;
2110 int rc; 2110 int rc;
2111 2111
2112 /* de-initialize port */ 2112 /* de-initialize port */
2113 rc = ahci_deinit_port(ap, &emsg); 2113 rc = ahci_deinit_port(ap, &emsg);
2114 if (rc) 2114 if (rc)
2115 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc); 2115 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
2116 } 2116 }
2117 2117
2118 void ahci_print_info(struct ata_host *host, const char *scc_s) 2118 void ahci_print_info(struct ata_host *host, const char *scc_s)
2119 { 2119 {
2120 struct ahci_host_priv *hpriv = host->private_data; 2120 struct ahci_host_priv *hpriv = host->private_data;
2121 void __iomem *mmio = hpriv->mmio; 2121 void __iomem *mmio = hpriv->mmio;
2122 u32 vers, cap, cap2, impl, speed; 2122 u32 vers, cap, cap2, impl, speed;
2123 const char *speed_s; 2123 const char *speed_s;
2124 2124
2125 vers = readl(mmio + HOST_VERSION); 2125 vers = readl(mmio + HOST_VERSION);
2126 cap = hpriv->cap; 2126 cap = hpriv->cap;
2127 cap2 = hpriv->cap2; 2127 cap2 = hpriv->cap2;
2128 impl = hpriv->port_map; 2128 impl = hpriv->port_map;
2129 2129
2130 speed = (cap >> 20) & 0xf; 2130 speed = (cap >> 20) & 0xf;
2131 if (speed == 1) 2131 if (speed == 1)
2132 speed_s = "1.5"; 2132 speed_s = "1.5";
2133 else if (speed == 2) 2133 else if (speed == 2)
2134 speed_s = "3"; 2134 speed_s = "3";
2135 else if (speed == 3) 2135 else if (speed == 3)
2136 speed_s = "6"; 2136 speed_s = "6";
2137 else 2137 else
2138 speed_s = "?"; 2138 speed_s = "?";
2139 2139
2140 dev_info(host->dev, 2140 dev_info(host->dev,
2141 "AHCI %02x%02x.%02x%02x " 2141 "AHCI %02x%02x.%02x%02x "
2142 "%u slots %u ports %s Gbps 0x%x impl %s mode\n" 2142 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
2143 , 2143 ,
2144 2144
2145 (vers >> 24) & 0xff, 2145 (vers >> 24) & 0xff,
2146 (vers >> 16) & 0xff, 2146 (vers >> 16) & 0xff,
2147 (vers >> 8) & 0xff, 2147 (vers >> 8) & 0xff,
2148 vers & 0xff, 2148 vers & 0xff,
2149 2149
2150 ((cap >> 8) & 0x1f) + 1, 2150 ((cap >> 8) & 0x1f) + 1,
2151 (cap & 0x1f) + 1, 2151 (cap & 0x1f) + 1,
2152 speed_s, 2152 speed_s,
2153 impl, 2153 impl,
2154 scc_s); 2154 scc_s);
2155 2155
2156 dev_info(host->dev, 2156 dev_info(host->dev,
2157 "flags: " 2157 "flags: "
2158 "%s%s%s%s%s%s%s" 2158 "%s%s%s%s%s%s%s"
2159 "%s%s%s%s%s%s%s" 2159 "%s%s%s%s%s%s%s"
2160 "%s%s%s%s%s%s\n" 2160 "%s%s%s%s%s%s\n"
2161 , 2161 ,
2162 2162
2163 cap & HOST_CAP_64 ? "64bit " : "", 2163 cap & HOST_CAP_64 ? "64bit " : "",
2164 cap & HOST_CAP_NCQ ? "ncq " : "", 2164 cap & HOST_CAP_NCQ ? "ncq " : "",
2165 cap & HOST_CAP_SNTF ? "sntf " : "", 2165 cap & HOST_CAP_SNTF ? "sntf " : "",
2166 cap & HOST_CAP_MPS ? "ilck " : "", 2166 cap & HOST_CAP_MPS ? "ilck " : "",
2167 cap & HOST_CAP_SSS ? "stag " : "", 2167 cap & HOST_CAP_SSS ? "stag " : "",
2168 cap & HOST_CAP_ALPM ? "pm " : "", 2168 cap & HOST_CAP_ALPM ? "pm " : "",
2169 cap & HOST_CAP_LED ? "led " : "", 2169 cap & HOST_CAP_LED ? "led " : "",
2170 cap & HOST_CAP_CLO ? "clo " : "", 2170 cap & HOST_CAP_CLO ? "clo " : "",
2171 cap & HOST_CAP_ONLY ? "only " : "", 2171 cap & HOST_CAP_ONLY ? "only " : "",
2172 cap & HOST_CAP_PMP ? "pmp " : "", 2172 cap & HOST_CAP_PMP ? "pmp " : "",
2173 cap & HOST_CAP_FBS ? "fbs " : "", 2173 cap & HOST_CAP_FBS ? "fbs " : "",
2174 cap & HOST_CAP_PIO_MULTI ? "pio " : "", 2174 cap & HOST_CAP_PIO_MULTI ? "pio " : "",
2175 cap & HOST_CAP_SSC ? "slum " : "", 2175 cap & HOST_CAP_SSC ? "slum " : "",
2176 cap & HOST_CAP_PART ? "part " : "", 2176 cap & HOST_CAP_PART ? "part " : "",
2177 cap & HOST_CAP_CCC ? "ccc " : "", 2177 cap & HOST_CAP_CCC ? "ccc " : "",
2178 cap & HOST_CAP_EMS ? "ems " : "", 2178 cap & HOST_CAP_EMS ? "ems " : "",
2179 cap & HOST_CAP_SXS ? "sxs " : "", 2179 cap & HOST_CAP_SXS ? "sxs " : "",
2180 cap2 & HOST_CAP2_APST ? "apst " : "", 2180 cap2 & HOST_CAP2_APST ? "apst " : "",
2181 cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "", 2181 cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
2182 cap2 & HOST_CAP2_BOH ? "boh " : "" 2182 cap2 & HOST_CAP2_BOH ? "boh " : ""
2183 ); 2183 );
2184 } 2184 }
2185 EXPORT_SYMBOL_GPL(ahci_print_info); 2185 EXPORT_SYMBOL_GPL(ahci_print_info);
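
A worked example of the version decode above, with a hypothetical register value:

u32 vers = 0x00010300;	/* HOST_VERSION readout */
/* bytes 00 01 . 03 00 -> printed as "AHCI 0001.0300", i.e. AHCI 1.3 */
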
2186 2186
2187 void ahci_set_em_messages(struct ahci_host_priv *hpriv, 2187 void ahci_set_em_messages(struct ahci_host_priv *hpriv,
2188 struct ata_port_info *pi) 2188 struct ata_port_info *pi)
2189 { 2189 {
2190 u8 messages; 2190 u8 messages;
2191 void __iomem *mmio = hpriv->mmio; 2191 void __iomem *mmio = hpriv->mmio;
2192 u32 em_loc = readl(mmio + HOST_EM_LOC); 2192 u32 em_loc = readl(mmio + HOST_EM_LOC);
2193 u32 em_ctl = readl(mmio + HOST_EM_CTL); 2193 u32 em_ctl = readl(mmio + HOST_EM_CTL);
2194 2194
2195 if (!ahci_em_messages || !(hpriv->cap & HOST_CAP_EMS)) 2195 if (!ahci_em_messages || !(hpriv->cap & HOST_CAP_EMS))
2196 return; 2196 return;
2197 2197
2198 messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16; 2198 messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
2199 2199
2200 if (messages) { 2200 if (messages) {
2201 /* store em_loc */ 2201 /* store em_loc */
2202 hpriv->em_loc = ((em_loc >> 16) * 4); 2202 hpriv->em_loc = ((em_loc >> 16) * 4);
2203 hpriv->em_buf_sz = ((em_loc & 0xff) * 4); 2203 hpriv->em_buf_sz = ((em_loc & 0xff) * 4);
2204 hpriv->em_msg_type = messages; 2204 hpriv->em_msg_type = messages;
2205 pi->flags |= ATA_FLAG_EM; 2205 pi->flags |= ATA_FLAG_EM;
2206 if (!(em_ctl & EM_CTL_ALHD)) 2206 if (!(em_ctl & EM_CTL_ALHD))
2207 pi->flags |= ATA_FLAG_SW_ACTIVITY; 2207 pi->flags |= ATA_FLAG_SW_ACTIVITY;
2208 } 2208 }
2209 } 2209 }
2210 EXPORT_SYMBOL_GPL(ahci_set_em_messages); 2210 EXPORT_SYMBOL_GPL(ahci_set_em_messages);
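
Both HOST_EM_LOC fields are expressed in dwords, hence the * 4 scaling above. A worked example with a hypothetical value:

u32 em_loc = 0x01000004;
/* hpriv->em_loc    = 0x0100 * 4 = 0x400: buffer starts 1 KB into ABAR */
/* hpriv->em_buf_sz = 0x04   * 4 = 16 bytes of message buffer          */
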
2211 2211
2212 MODULE_AUTHOR("Jeff Garzik"); 2212 MODULE_AUTHOR("Jeff Garzik");
2213 MODULE_DESCRIPTION("Common AHCI SATA low-level routines"); 2213 MODULE_DESCRIPTION("Common AHCI SATA low-level routines");
2214 MODULE_LICENSE("GPL"); 2214 MODULE_LICENSE("GPL");
2215 2215
drivers/ata/libata-core.c
1 /* 1 /*
2 * libata-core.c - helper library for ATA 2 * libata-core.c - helper library for ATA
3 * 3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org 5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails. 6 * on emails.
7 * 7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved. 8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik 9 * Copyright 2003-2004 Jeff Garzik
10 * 10 *
11 * 11 *
12 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by 13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option) 14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version. 15 * any later version.
16 * 16 *
17 * This program is distributed in the hope that it will be useful, 17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details. 20 * GNU General Public License for more details.
21 * 21 *
22 * You should have received a copy of the GNU General Public License 22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to 23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 * 25 *
26 * 26 *
27 * libata documentation is available via 'make {ps|pdf}docs', 27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.* 28 * as Documentation/DocBook/libata.*
29 * 29 *
30 * Hardware documentation available from http://www.t13.org/ and 30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/ 31 * http://www.sata-io.org/
32 * 32 *
33 * Standards documents from: 33 * Standards documents from:
34 * http://www.t13.org (ATA standards, PCI DMA IDE spec) 34 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 * http://www.t10.org (SCSI MMC - for ATAPI MMC) 35 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 * http://www.sata-io.org (SATA) 36 * http://www.sata-io.org (SATA)
37 * http://www.compactflash.org (CF) 37 * http://www.compactflash.org (CF)
38 * http://www.qic.org (QIC157 - Tape and DSC) 38 * http://www.qic.org (QIC157 - Tape and DSC)
39 * http://www.ce-ata.org (CE-ATA: not supported) 39 * http://www.ce-ata.org (CE-ATA: not supported)
40 * 40 *
41 */ 41 */
42 42
43 #include <linux/kernel.h> 43 #include <linux/kernel.h>
44 #include <linux/module.h> 44 #include <linux/module.h>
45 #include <linux/pci.h> 45 #include <linux/pci.h>
46 #include <linux/init.h> 46 #include <linux/init.h>
47 #include <linux/list.h> 47 #include <linux/list.h>
48 #include <linux/mm.h> 48 #include <linux/mm.h>
49 #include <linux/spinlock.h> 49 #include <linux/spinlock.h>
50 #include <linux/blkdev.h> 50 #include <linux/blkdev.h>
51 #include <linux/delay.h> 51 #include <linux/delay.h>
52 #include <linux/timer.h> 52 #include <linux/timer.h>
53 #include <linux/interrupt.h> 53 #include <linux/interrupt.h>
54 #include <linux/completion.h> 54 #include <linux/completion.h>
55 #include <linux/suspend.h> 55 #include <linux/suspend.h>
56 #include <linux/workqueue.h> 56 #include <linux/workqueue.h>
57 #include <linux/scatterlist.h> 57 #include <linux/scatterlist.h>
58 #include <linux/io.h> 58 #include <linux/io.h>
59 #include <linux/async.h> 59 #include <linux/async.h>
60 #include <linux/log2.h> 60 #include <linux/log2.h>
61 #include <linux/slab.h> 61 #include <linux/slab.h>
62 #include <scsi/scsi.h> 62 #include <scsi/scsi.h>
63 #include <scsi/scsi_cmnd.h> 63 #include <scsi/scsi_cmnd.h>
64 #include <scsi/scsi_host.h> 64 #include <scsi/scsi_host.h>
65 #include <linux/libata.h> 65 #include <linux/libata.h>
66 #include <asm/byteorder.h> 66 #include <asm/byteorder.h>
67 #include <linux/cdrom.h> 67 #include <linux/cdrom.h>
68 #include <linux/ratelimit.h> 68 #include <linux/ratelimit.h>
69 69
70 #include "libata.h" 70 #include "libata.h"
71 71
72 72
73 /* debounce timing parameters in msecs { interval, duration, timeout } */ 73 /* debounce timing parameters in msecs { interval, duration, timeout } */
74 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 }; 74 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
75 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 }; 75 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
76 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 }; 76 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
77 77
78 const struct ata_port_operations ata_base_port_ops = { 78 const struct ata_port_operations ata_base_port_ops = {
79 .prereset = ata_std_prereset, 79 .prereset = ata_std_prereset,
80 .postreset = ata_std_postreset, 80 .postreset = ata_std_postreset,
81 .error_handler = ata_std_error_handler, 81 .error_handler = ata_std_error_handler,
82 }; 82 };
83 83
84 const struct ata_port_operations sata_port_ops = { 84 const struct ata_port_operations sata_port_ops = {
85 .inherits = &ata_base_port_ops, 85 .inherits = &ata_base_port_ops,
86 86
87 .qc_defer = ata_std_qc_defer, 87 .qc_defer = ata_std_qc_defer,
88 .hardreset = sata_std_hardreset, 88 .hardreset = sata_std_hardreset,
89 }; 89 };
90 90
91 static unsigned int ata_dev_init_params(struct ata_device *dev, 91 static unsigned int ata_dev_init_params(struct ata_device *dev,
92 u16 heads, u16 sectors); 92 u16 heads, u16 sectors);
93 static unsigned int ata_dev_set_xfermode(struct ata_device *dev); 93 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
94 static unsigned int ata_dev_set_feature(struct ata_device *dev, 94 static unsigned int ata_dev_set_feature(struct ata_device *dev,
95 u8 enable, u8 feature); 95 u8 enable, u8 feature);
96 static void ata_dev_xfermask(struct ata_device *dev); 96 static void ata_dev_xfermask(struct ata_device *dev);
97 static unsigned long ata_dev_blacklisted(const struct ata_device *dev); 97 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
98 98
99 unsigned int ata_print_id = 1; 99 unsigned int ata_print_id = 1;
100 100
101 struct ata_force_param { 101 struct ata_force_param {
102 const char *name; 102 const char *name;
103 unsigned int cbl; 103 unsigned int cbl;
104 int spd_limit; 104 int spd_limit;
105 unsigned long xfer_mask; 105 unsigned long xfer_mask;
106 unsigned int horkage_on; 106 unsigned int horkage_on;
107 unsigned int horkage_off; 107 unsigned int horkage_off;
108 unsigned int lflags; 108 unsigned int lflags;
109 }; 109 };
110 110
111 struct ata_force_ent { 111 struct ata_force_ent {
112 int port; 112 int port;
113 int device; 113 int device;
114 struct ata_force_param param; 114 struct ata_force_param param;
115 }; 115 };
116 116
117 static struct ata_force_ent *ata_force_tbl; 117 static struct ata_force_ent *ata_force_tbl;
118 static int ata_force_tbl_size; 118 static int ata_force_tbl_size;
119 119
120 static char ata_force_param_buf[PAGE_SIZE] __initdata; 120 static char ata_force_param_buf[PAGE_SIZE] __initdata;
121 /* param_buf is thrown away after initialization, disallow read */ 121 /* param_buf is thrown away after initialization, disallow read */
122 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0); 122 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
123 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)"); 123 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
124 124
125 static int atapi_enabled = 1; 125 static int atapi_enabled = 1;
126 module_param(atapi_enabled, int, 0444); 126 module_param(atapi_enabled, int, 0444);
127 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])"); 127 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
128 128
129 static int atapi_dmadir = 0; 129 static int atapi_dmadir = 0;
130 module_param(atapi_dmadir, int, 0444); 130 module_param(atapi_dmadir, int, 0444);
131 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)"); 131 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
132 132
133 int atapi_passthru16 = 1; 133 int atapi_passthru16 = 1;
134 module_param(atapi_passthru16, int, 0444); 134 module_param(atapi_passthru16, int, 0444);
135 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])"); 135 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
136 136
137 int libata_fua = 0; 137 int libata_fua = 0;
138 module_param_named(fua, libata_fua, int, 0444); 138 module_param_named(fua, libata_fua, int, 0444);
139 MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)"); 139 MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
140 140
141 static int ata_ignore_hpa; 141 static int ata_ignore_hpa;
142 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644); 142 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
143 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)"); 143 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
144 144
145 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA; 145 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
146 module_param_named(dma, libata_dma_mask, int, 0444); 146 module_param_named(dma, libata_dma_mask, int, 0444);
147 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)"); 147 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
148 148
149 static int ata_probe_timeout; 149 static int ata_probe_timeout;
150 module_param(ata_probe_timeout, int, 0444); 150 module_param(ata_probe_timeout, int, 0444);
151 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)"); 151 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
152 152
153 int libata_noacpi = 0; 153 int libata_noacpi = 0;
154 module_param_named(noacpi, libata_noacpi, int, 0444); 154 module_param_named(noacpi, libata_noacpi, int, 0444);
155 MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)"); 155 MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
156 156
157 int libata_allow_tpm = 0; 157 int libata_allow_tpm = 0;
158 module_param_named(allow_tpm, libata_allow_tpm, int, 0444); 158 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
159 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)"); 159 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
160 160
161 static int atapi_an; 161 static int atapi_an;
162 module_param(atapi_an, int, 0444); 162 module_param(atapi_an, int, 0444);
163 MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)"); 163 MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
164 164
165 MODULE_AUTHOR("Jeff Garzik"); 165 MODULE_AUTHOR("Jeff Garzik");
166 MODULE_DESCRIPTION("Library module for ATA devices"); 166 MODULE_DESCRIPTION("Library module for ATA devices");
167 MODULE_LICENSE("GPL"); 167 MODULE_LICENSE("GPL");
168 MODULE_VERSION(DRV_VERSION); 168 MODULE_VERSION(DRV_VERSION);
169 169
170 170
171 static bool ata_sstatus_online(u32 sstatus) 171 static bool ata_sstatus_online(u32 sstatus)
172 { 172 {
173 return (sstatus & 0xf) == 0x3; 173 return (sstatus & 0xf) == 0x3;
174 } 174 }
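
The low nibble of SStatus is the DET field; 0x3 means a device is present and PHY communication is established. For example:

/* sstatus == 0x123 -> DET == 0x3: device present, PHY up  -> online     */
/* sstatus == 0x004 -> DET == 0x4: PHY in offline mode     -> not online */
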
175 175
176 /** 176 /**
177 * ata_link_next - link iteration helper 177 * ata_link_next - link iteration helper
178 * @link: the previous link, NULL to start 178 * @link: the previous link, NULL to start
179 * @ap: ATA port containing links to iterate 179 * @ap: ATA port containing links to iterate
180 * @mode: iteration mode, one of ATA_LITER_* 180 * @mode: iteration mode, one of ATA_LITER_*
181 * 181 *
182 * LOCKING: 182 * LOCKING:
183 * Host lock or EH context. 183 * Host lock or EH context.
184 * 184 *
185 * RETURNS: 185 * RETURNS:
186 * Pointer to the next link. 186 * Pointer to the next link.
187 */ 187 */
188 struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap, 188 struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
189 enum ata_link_iter_mode mode) 189 enum ata_link_iter_mode mode)
190 { 190 {
191 BUG_ON(mode != ATA_LITER_EDGE && 191 BUG_ON(mode != ATA_LITER_EDGE &&
192 mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST); 192 mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
193 193
194 /* NULL link indicates start of iteration */ 194 /* NULL link indicates start of iteration */
195 if (!link) 195 if (!link)
196 switch (mode) { 196 switch (mode) {
197 case ATA_LITER_EDGE: 197 case ATA_LITER_EDGE:
198 case ATA_LITER_PMP_FIRST: 198 case ATA_LITER_PMP_FIRST:
199 if (sata_pmp_attached(ap)) 199 if (sata_pmp_attached(ap))
200 return ap->pmp_link; 200 return ap->pmp_link;
201 /* fall through */ 201 /* fall through */
202 case ATA_LITER_HOST_FIRST: 202 case ATA_LITER_HOST_FIRST:
203 return &ap->link; 203 return &ap->link;
204 } 204 }
205 205
206 /* we just iterated over the host link, what's next? */ 206 /* we just iterated over the host link, what's next? */
207 if (link == &ap->link) 207 if (link == &ap->link)
208 switch (mode) { 208 switch (mode) {
209 case ATA_LITER_HOST_FIRST: 209 case ATA_LITER_HOST_FIRST:
210 if (sata_pmp_attached(ap)) 210 if (sata_pmp_attached(ap))
211 return ap->pmp_link; 211 return ap->pmp_link;
212 /* fall through */ 212 /* fall through */
213 case ATA_LITER_PMP_FIRST: 213 case ATA_LITER_PMP_FIRST:
214 if (unlikely(ap->slave_link)) 214 if (unlikely(ap->slave_link))
215 return ap->slave_link; 215 return ap->slave_link;
216 /* fall through */ 216 /* fall through */
217 case ATA_LITER_EDGE: 217 case ATA_LITER_EDGE:
218 return NULL; 218 return NULL;
219 } 219 }
220 220
221 /* slave_link excludes PMP */ 221 /* slave_link excludes PMP */
222 if (unlikely(link == ap->slave_link)) 222 if (unlikely(link == ap->slave_link))
223 return NULL; 223 return NULL;
224 224
225 /* we were over a PMP link */ 225 /* we were over a PMP link */
226 if (++link < ap->pmp_link + ap->nr_pmp_links) 226 if (++link < ap->pmp_link + ap->nr_pmp_links)
227 return link; 227 return link;
228 228
229 if (mode == ATA_LITER_PMP_FIRST) 229 if (mode == ATA_LITER_PMP_FIRST)
230 return &ap->link; 230 return &ap->link;
231 231
232 return NULL; 232 return NULL;
233 } 233 }
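
Callers rarely use ata_link_next() directly; it normally sits behind the ata_for_each_link() wrapper, exactly as the AHCI error path earlier in this diff does:

struct ata_link *link;

ata_for_each_link(link, ap, EDGE)
	if (ata_link_active(link))
		break;
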
234 234
235 /** 235 /**
236 * ata_dev_next - device iteration helper 236 * ata_dev_next - device iteration helper
237 * @dev: the previous device, NULL to start 237 * @dev: the previous device, NULL to start
238 * @link: ATA link containing devices to iterate 238 * @link: ATA link containing devices to iterate
239 * @mode: iteration mode, one of ATA_DITER_* 239 * @mode: iteration mode, one of ATA_DITER_*
240 * 240 *
241 * LOCKING: 241 * LOCKING:
242 * Host lock or EH context. 242 * Host lock or EH context.
243 * 243 *
244 * RETURNS: 244 * RETURNS:
245 * Pointer to the next device. 245 * Pointer to the next device.
246 */ 246 */
247 struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link, 247 struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
248 enum ata_dev_iter_mode mode) 248 enum ata_dev_iter_mode mode)
249 { 249 {
250 BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE && 250 BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
251 mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE); 251 mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
252 252
253 /* NULL dev indicates start of iteration */ 253 /* NULL dev indicates start of iteration */
254 if (!dev) 254 if (!dev)
255 switch (mode) { 255 switch (mode) {
256 case ATA_DITER_ENABLED: 256 case ATA_DITER_ENABLED:
257 case ATA_DITER_ALL: 257 case ATA_DITER_ALL:
258 dev = link->device; 258 dev = link->device;
259 goto check; 259 goto check;
260 case ATA_DITER_ENABLED_REVERSE: 260 case ATA_DITER_ENABLED_REVERSE:
261 case ATA_DITER_ALL_REVERSE: 261 case ATA_DITER_ALL_REVERSE:
262 dev = link->device + ata_link_max_devices(link) - 1; 262 dev = link->device + ata_link_max_devices(link) - 1;
263 goto check; 263 goto check;
264 } 264 }
265 265
266 next: 266 next:
267 /* move to the next one */ 267 /* move to the next one */
268 switch (mode) { 268 switch (mode) {
269 case ATA_DITER_ENABLED: 269 case ATA_DITER_ENABLED:
270 case ATA_DITER_ALL: 270 case ATA_DITER_ALL:
271 if (++dev < link->device + ata_link_max_devices(link)) 271 if (++dev < link->device + ata_link_max_devices(link))
272 goto check; 272 goto check;
273 return NULL; 273 return NULL;
274 case ATA_DITER_ENABLED_REVERSE: 274 case ATA_DITER_ENABLED_REVERSE:
275 case ATA_DITER_ALL_REVERSE: 275 case ATA_DITER_ALL_REVERSE:
276 if (--dev >= link->device) 276 if (--dev >= link->device)
277 goto check; 277 goto check;
278 return NULL; 278 return NULL;
279 } 279 }
280 280
281 check: 281 check:
282 if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) && 282 if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
283 !ata_dev_enabled(dev)) 283 !ata_dev_enabled(dev))
284 goto next; 284 goto next;
285 return dev; 285 return dev;
286 } 286 }
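
Likewise, ata_dev_next() backs the ata_for_each_dev() wrapper. A minimal usage sketch (the loop body is illustrative):

struct ata_device *dev;

ata_for_each_dev(dev, link, ENABLED)
	ata_dev_printk(dev, KERN_DEBUG, "device is enabled\n");
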
287 287
288 /** 288 /**
289 * ata_dev_phys_link - find physical link for a device 289 * ata_dev_phys_link - find physical link for a device
290 * @dev: ATA device to look up physical link for 290 * @dev: ATA device to look up physical link for
291 * 291 *
292 * Look up physical link which @dev is attached to. Note that 292 * Look up physical link which @dev is attached to. Note that
293 * this is different from @dev->link only when @dev is on slave 293 * this is different from @dev->link only when @dev is on slave
294 * link. For all other cases, it's the same as @dev->link. 294 * link. For all other cases, it's the same as @dev->link.
295 * 295 *
296 * LOCKING: 296 * LOCKING:
297 * Don't care. 297 * Don't care.
298 * 298 *
299 * RETURNS: 299 * RETURNS:
300 * Pointer to the found physical link. 300 * Pointer to the found physical link.
301 */ 301 */
302 struct ata_link *ata_dev_phys_link(struct ata_device *dev) 302 struct ata_link *ata_dev_phys_link(struct ata_device *dev)
303 { 303 {
304 struct ata_port *ap = dev->link->ap; 304 struct ata_port *ap = dev->link->ap;
305 305
306 if (!ap->slave_link) 306 if (!ap->slave_link)
307 return dev->link; 307 return dev->link;
308 if (!dev->devno) 308 if (!dev->devno)
309 return &ap->link; 309 return &ap->link;
310 return ap->slave_link; 310 return ap->slave_link;
311 } 311 }
312 312
313 /** 313 /**
314 * ata_force_cbl - force cable type according to libata.force 314 * ata_force_cbl - force cable type according to libata.force
315 * @ap: ATA port of interest 315 * @ap: ATA port of interest
316 * 316 *
317 * Force cable type according to libata.force and whine about it. 317 * Force cable type according to libata.force and whine about it.
318 * The last entry which has matching port number is used, so it 318 * The last entry which has matching port number is used, so it
319 * can be specified as part of device force parameters. For 319 * can be specified as part of device force parameters. For
320 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the 320 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
321 * same effect. 321 * same effect.
322 * 322 *
323 * LOCKING: 323 * LOCKING:
324 * EH context. 324 * EH context.
325 */ 325 */
326 void ata_force_cbl(struct ata_port *ap) 326 void ata_force_cbl(struct ata_port *ap)
327 { 327 {
328 int i; 328 int i;
329 329
330 for (i = ata_force_tbl_size - 1; i >= 0; i--) { 330 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
331 const struct ata_force_ent *fe = &ata_force_tbl[i]; 331 const struct ata_force_ent *fe = &ata_force_tbl[i];
332 332
333 if (fe->port != -1 && fe->port != ap->print_id) 333 if (fe->port != -1 && fe->port != ap->print_id)
334 continue; 334 continue;
335 335
336 if (fe->param.cbl == ATA_CBL_NONE) 336 if (fe->param.cbl == ATA_CBL_NONE)
337 continue; 337 continue;
338 338
339 ap->cbl = fe->param.cbl; 339 ap->cbl = fe->param.cbl;
340 ata_port_printk(ap, KERN_NOTICE, 340 ata_port_printk(ap, KERN_NOTICE,
341 "FORCE: cable set to %s\n", fe->param.name); 341 "FORCE: cable set to %s\n", fe->param.name);
342 return; 342 return;
343 } 343 }
344 } 344 }
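
The force table walked here is parsed from the libata.force= boot parameter. A usage example matching the docstring above: booting with

	libata.force=1.00:40c,udma4

forces a 40-conductor cable type on port 1 and caps device 1.00 at UDMA4.
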
345 345
346 /** 346 /**
347 * ata_force_link_limits - force link limits according to libata.force 347 * ata_force_link_limits - force link limits according to libata.force
348 * @link: ATA link of interest 348 * @link: ATA link of interest
349 * 349 *
350 * Force link flags and SATA spd limit according to libata.force 350 * Force link flags and SATA spd limit according to libata.force
351 * and whine about it. When only the port part is specified 351 * and whine about it. When only the port part is specified
352 * (e.g. 1:), the limit applies to all links connected to both 352 * (e.g. 1:), the limit applies to all links connected to both
353 * the host link and all fan-out ports connected via PMP. If the 353 * the host link and all fan-out ports connected via PMP. If the
354 * device part is specified as 0 (e.g. 1.00:), it specifies the 354 * device part is specified as 0 (e.g. 1.00:), it specifies the
355 * first fan-out link not the host link. Device number 15 always 355 * first fan-out link not the host link. Device number 15 always
356 * points to the host link whether PMP is attached or not. If the 356 * points to the host link whether PMP is attached or not. If the
357 * controller has a slave link, device number 16 points to it. 357 * controller has a slave link, device number 16 points to it.
358 * 358 *
359 * LOCKING: 359 * LOCKING:
360 * EH context. 360 * EH context.
361 */ 361 */
362 static void ata_force_link_limits(struct ata_link *link) 362 static void ata_force_link_limits(struct ata_link *link)
363 { 363 {
364 bool did_spd = false; 364 bool did_spd = false;
365 int linkno = link->pmp; 365 int linkno = link->pmp;
366 int i; 366 int i;
367 367
368 if (ata_is_host_link(link)) 368 if (ata_is_host_link(link))
369 linkno += 15; 369 linkno += 15;
370 370
371 for (i = ata_force_tbl_size - 1; i >= 0; i--) { 371 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
372 const struct ata_force_ent *fe = &ata_force_tbl[i]; 372 const struct ata_force_ent *fe = &ata_force_tbl[i];
373 373
374 if (fe->port != -1 && fe->port != link->ap->print_id) 374 if (fe->port != -1 && fe->port != link->ap->print_id)
375 continue; 375 continue;
376 376
377 if (fe->device != -1 && fe->device != linkno) 377 if (fe->device != -1 && fe->device != linkno)
378 continue; 378 continue;
379 379
380 /* only honor the first spd limit */ 380 /* only honor the first spd limit */
381 if (!did_spd && fe->param.spd_limit) { 381 if (!did_spd && fe->param.spd_limit) {
382 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1; 382 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
383 ata_link_printk(link, KERN_NOTICE, 383 ata_link_printk(link, KERN_NOTICE,
384 "FORCE: PHY spd limit set to %s\n", 384 "FORCE: PHY spd limit set to %s\n",
385 fe->param.name); 385 fe->param.name);
386 did_spd = true; 386 did_spd = true;
387 } 387 }
388 388
389 /* let lflags stack */ 389 /* let lflags stack */
390 if (fe->param.lflags) { 390 if (fe->param.lflags) {
391 link->flags |= fe->param.lflags; 391 link->flags |= fe->param.lflags;
392 ata_link_printk(link, KERN_NOTICE, 392 ata_link_printk(link, KERN_NOTICE,
393 "FORCE: link flag 0x%x forced -> 0x%x\n", 393 "FORCE: link flag 0x%x forced -> 0x%x\n",
394 fe->param.lflags, link->flags); 394 fe->param.lflags, link->flags);
395 } 395 }
396 } 396 }
397 } 397 }
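
The hw_sata_spd_limit assignment above is just generation-mask arithmetic: (1 << spd_limit) - 1 sets one bit per allowed generation. A minimal userspace sketch of the same math (plain C, not kernel code):

#include <stdio.h>

int main(void)
{
    /* spd_limit 1 = 1.5 Gbps, 2 = 3.0 Gbps, 3 = 6.0 Gbps */
    for (int spd_limit = 1; spd_limit <= 3; spd_limit++) {
        unsigned int mask = (1u << spd_limit) - 1;
        printf("spd_limit %d -> allowed-generation mask 0x%x\n",
               spd_limit, mask);
    }
    return 0;
}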
398 398
399 /** 399 /**
400 * ata_force_xfermask - force xfermask according to libata.force 400 * ata_force_xfermask - force xfermask according to libata.force
401 * @dev: ATA device of interest 401 * @dev: ATA device of interest
402 * 402 *
403 * Force xfer_mask according to libata.force and whine about it. 403 * Force xfer_mask according to libata.force and whine about it.
404 * For consistency with link selection, device number 15 selects 404 * For consistency with link selection, device number 15 selects
405 * the first device connected to the host link. 405 * the first device connected to the host link.
406 * 406 *
407 * LOCKING: 407 * LOCKING:
408 * EH context. 408 * EH context.
409 */ 409 */
410 static void ata_force_xfermask(struct ata_device *dev) 410 static void ata_force_xfermask(struct ata_device *dev)
411 { 411 {
412 int devno = dev->link->pmp + dev->devno; 412 int devno = dev->link->pmp + dev->devno;
413 int alt_devno = devno; 413 int alt_devno = devno;
414 int i; 414 int i;
415 415
416 /* allow n.15/16 for devices attached to host port */ 416 /* allow n.15/16 for devices attached to host port */
417 if (ata_is_host_link(dev->link)) 417 if (ata_is_host_link(dev->link))
418 alt_devno += 15; 418 alt_devno += 15;
419 419
420 for (i = ata_force_tbl_size - 1; i >= 0; i--) { 420 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
421 const struct ata_force_ent *fe = &ata_force_tbl[i]; 421 const struct ata_force_ent *fe = &ata_force_tbl[i];
422 unsigned long pio_mask, mwdma_mask, udma_mask; 422 unsigned long pio_mask, mwdma_mask, udma_mask;
423 423
424 if (fe->port != -1 && fe->port != dev->link->ap->print_id) 424 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
425 continue; 425 continue;
426 426
427 if (fe->device != -1 && fe->device != devno && 427 if (fe->device != -1 && fe->device != devno &&
428 fe->device != alt_devno) 428 fe->device != alt_devno)
429 continue; 429 continue;
430 430
431 if (!fe->param.xfer_mask) 431 if (!fe->param.xfer_mask)
432 continue; 432 continue;
433 433
434 ata_unpack_xfermask(fe->param.xfer_mask, 434 ata_unpack_xfermask(fe->param.xfer_mask,
435 &pio_mask, &mwdma_mask, &udma_mask); 435 &pio_mask, &mwdma_mask, &udma_mask);
436 if (udma_mask) 436 if (udma_mask)
437 dev->udma_mask = udma_mask; 437 dev->udma_mask = udma_mask;
438 else if (mwdma_mask) { 438 else if (mwdma_mask) {
439 dev->udma_mask = 0; 439 dev->udma_mask = 0;
440 dev->mwdma_mask = mwdma_mask; 440 dev->mwdma_mask = mwdma_mask;
441 } else { 441 } else {
442 dev->udma_mask = 0; 442 dev->udma_mask = 0;
443 dev->mwdma_mask = 0; 443 dev->mwdma_mask = 0;
444 dev->pio_mask = pio_mask; 444 dev->pio_mask = pio_mask;
445 } 445 }
446 446
447 ata_dev_printk(dev, KERN_NOTICE, 447 ata_dev_printk(dev, KERN_NOTICE,
448 "FORCE: xfer_mask set to %s\n", fe->param.name); 448 "FORCE: xfer_mask set to %s\n", fe->param.name);
449 return; 449 return;
450 } 450 }
451 } 451 }
452 452
453 /** 453 /**
454 * ata_force_horkage - force horkage according to libata.force 454 * ata_force_horkage - force horkage according to libata.force
455 * @dev: ATA device of interest 455 * @dev: ATA device of interest
456 * 456 *
457 * Force horkage according to libata.force and whine about it. 457 * Force horkage according to libata.force and whine about it.
458 * For consistency with link selection, device number 15 selects 458 * For consistency with link selection, device number 15 selects
459 * the first device connected to the host link. 459 * the first device connected to the host link.
460 * 460 *
461 * LOCKING: 461 * LOCKING:
462 * EH context. 462 * EH context.
463 */ 463 */
464 static void ata_force_horkage(struct ata_device *dev) 464 static void ata_force_horkage(struct ata_device *dev)
465 { 465 {
466 int devno = dev->link->pmp + dev->devno; 466 int devno = dev->link->pmp + dev->devno;
467 int alt_devno = devno; 467 int alt_devno = devno;
468 int i; 468 int i;
469 469
470 /* allow n.15/16 for devices attached to host port */ 470 /* allow n.15/16 for devices attached to host port */
471 if (ata_is_host_link(dev->link)) 471 if (ata_is_host_link(dev->link))
472 alt_devno += 15; 472 alt_devno += 15;
473 473
474 for (i = 0; i < ata_force_tbl_size; i++) { 474 for (i = 0; i < ata_force_tbl_size; i++) {
475 const struct ata_force_ent *fe = &ata_force_tbl[i]; 475 const struct ata_force_ent *fe = &ata_force_tbl[i];
476 476
477 if (fe->port != -1 && fe->port != dev->link->ap->print_id) 477 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
478 continue; 478 continue;
479 479
480 if (fe->device != -1 && fe->device != devno && 480 if (fe->device != -1 && fe->device != devno &&
481 fe->device != alt_devno) 481 fe->device != alt_devno)
482 continue; 482 continue;
483 483
484 if (!(~dev->horkage & fe->param.horkage_on) && 484 if (!(~dev->horkage & fe->param.horkage_on) &&
485 !(dev->horkage & fe->param.horkage_off)) 485 !(dev->horkage & fe->param.horkage_off))
486 continue; 486 continue;
487 487
488 dev->horkage |= fe->param.horkage_on; 488 dev->horkage |= fe->param.horkage_on;
489 dev->horkage &= ~fe->param.horkage_off; 489 dev->horkage &= ~fe->param.horkage_off;
490 490
491 ata_dev_printk(dev, KERN_NOTICE, 491 ata_dev_printk(dev, KERN_NOTICE,
492 "FORCE: horkage modified (%s)\n", fe->param.name); 492 "FORCE: horkage modified (%s)\n", fe->param.name);
493 } 493 }
494 } 494 }
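
The skip test in the loop above is a pure no-op check: an entry is ignored only when every horkage_on bit is already set and no horkage_off bit is currently set. A standalone sketch of that bit logic (FLAG_A/FLAG_B are made-up stand-ins for ATA_HORKAGE_* flags):

#include <stdio.h>

#define FLAG_A 0x1
#define FLAG_B 0x2

/* mirrors the driver's test: returns nonzero iff applying the
 * entry would actually change the current horkage bits */
static int would_change(unsigned int cur, unsigned int on, unsigned int off)
{
    return (~cur & on) || (cur & off);
}

int main(void)
{
    unsigned int horkage = FLAG_A;

    printf("set A again: %d\n", would_change(horkage, FLAG_A, 0)); /* 0 */
    printf("set B:       %d\n", would_change(horkage, FLAG_B, 0)); /* 1 */
    printf("clear A:     %d\n", would_change(horkage, 0, FLAG_A)); /* 1 */
    return 0;
}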
495 495
496 /** 496 /**
497 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode 497 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
498 * @opcode: SCSI opcode 498 * @opcode: SCSI opcode
499 * 499 *
500 * Determine ATAPI command type from @opcode. 500 * Determine ATAPI command type from @opcode.
501 * 501 *
502 * LOCKING: 502 * LOCKING:
503 * None. 503 * None.
504 * 504 *
505 * RETURNS: 505 * RETURNS:
506 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC} 506 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
507 */ 507 */
508 int atapi_cmd_type(u8 opcode) 508 int atapi_cmd_type(u8 opcode)
509 { 509 {
510 switch (opcode) { 510 switch (opcode) {
511 case GPCMD_READ_10: 511 case GPCMD_READ_10:
512 case GPCMD_READ_12: 512 case GPCMD_READ_12:
513 return ATAPI_READ; 513 return ATAPI_READ;
514 514
515 case GPCMD_WRITE_10: 515 case GPCMD_WRITE_10:
516 case GPCMD_WRITE_12: 516 case GPCMD_WRITE_12:
517 case GPCMD_WRITE_AND_VERIFY_10: 517 case GPCMD_WRITE_AND_VERIFY_10:
518 return ATAPI_WRITE; 518 return ATAPI_WRITE;
519 519
520 case GPCMD_READ_CD: 520 case GPCMD_READ_CD:
521 case GPCMD_READ_CD_MSF: 521 case GPCMD_READ_CD_MSF:
522 return ATAPI_READ_CD; 522 return ATAPI_READ_CD;
523 523
524 case ATA_16: 524 case ATA_16:
525 case ATA_12: 525 case ATA_12:
526 if (atapi_passthru16) 526 if (atapi_passthru16)
527 return ATAPI_PASS_THRU; 527 return ATAPI_PASS_THRU;
528 /* fall thru */ 528 /* fall thru */
529 default: 529 default:
530 return ATAPI_MISC; 530 return ATAPI_MISC;
531 } 531 }
532 } 532 }
533 533
534 /** 534 /**
535 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure 535 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
536 * @tf: Taskfile to convert 536 * @tf: Taskfile to convert
537 * @pmp: Port multiplier port 537 * @pmp: Port multiplier port
538 * @is_cmd: This FIS is for command 538 * @is_cmd: This FIS is for command
539 * @fis: Buffer into which data will be output 539 * @fis: Buffer into which data will be output
540 * 540 *
541 * Converts a standard ATA taskfile to a Serial ATA 541 * Converts a standard ATA taskfile to a Serial ATA
542 * FIS structure (Register - Host to Device). 542 * FIS structure (Register - Host to Device).
543 * 543 *
544 * LOCKING: 544 * LOCKING:
545 * Inherited from caller. 545 * Inherited from caller.
546 */ 546 */
547 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis) 547 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
548 { 548 {
549 fis[0] = 0x27; /* Register - Host to Device FIS */ 549 fis[0] = 0x27; /* Register - Host to Device FIS */
550 fis[1] = pmp & 0xf; /* Port multiplier number*/ 550 fis[1] = pmp & 0xf; /* Port multiplier number*/
551 if (is_cmd) 551 if (is_cmd)
552 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */ 552 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
553 553
554 fis[2] = tf->command; 554 fis[2] = tf->command;
555 fis[3] = tf->feature; 555 fis[3] = tf->feature;
556 556
557 fis[4] = tf->lbal; 557 fis[4] = tf->lbal;
558 fis[5] = tf->lbam; 558 fis[5] = tf->lbam;
559 fis[6] = tf->lbah; 559 fis[6] = tf->lbah;
560 fis[7] = tf->device; 560 fis[7] = tf->device;
561 561
562 fis[8] = tf->hob_lbal; 562 fis[8] = tf->hob_lbal;
563 fis[9] = tf->hob_lbam; 563 fis[9] = tf->hob_lbam;
564 fis[10] = tf->hob_lbah; 564 fis[10] = tf->hob_lbah;
565 fis[11] = tf->hob_feature; 565 fis[11] = tf->hob_feature;
566 566
567 fis[12] = tf->nsect; 567 fis[12] = tf->nsect;
568 fis[13] = tf->hob_nsect; 568 fis[13] = tf->hob_nsect;
569 fis[14] = 0; 569 fis[14] = 0;
570 fis[15] = tf->ctl; 570 fis[15] = tf->ctl;
571 571
572 fis[16] = 0; 572 fis[16] = 0;
573 fis[17] = 0; 573 fis[17] = 0;
574 fis[18] = 0; 574 fis[18] = 0;
575 fis[19] = 0; 575 fis[19] = 0;
576 } 576 }
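
The first three bytes are the interesting part of the header: FIS type, then the PMP field sharing a byte with the Command bit, then the command opcode. A minimal standalone illustration (hypothetical values; 0x25 is READ DMA EXT):

#include <stdio.h>
#include <string.h>

int main(void)
{
    unsigned char fis[20];
    unsigned char pmp = 3;              /* fan-out port 3, made up */

    memset(fis, 0, sizeof(fis));
    fis[0] = 0x27;                      /* Register - Host to Device */
    fis[1] = (pmp & 0xf) | (1 << 7);    /* PMP field + Command bit */
    fis[2] = 0x25;                      /* command: READ DMA EXT */

    for (int i = 0; i < 20; i++)
        printf("%02x%c", fis[i], (i % 8 == 7) ? '\n' : ' ');
    printf("\n");
    return 0;
}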
577 577
578 /** 578 /**
579 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile 579 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
580 * @fis: Buffer from which data will be input 580 * @fis: Buffer from which data will be input
581 * @tf: Taskfile to output 581 * @tf: Taskfile to output
582 * 582 *
583 * Converts a serial ATA FIS structure to a standard ATA taskfile. 583 * Converts a serial ATA FIS structure to a standard ATA taskfile.
584 * 584 *
585 * LOCKING: 585 * LOCKING:
586 * Inherited from caller. 586 * Inherited from caller.
587 */ 587 */
588 588
589 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf) 589 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
590 { 590 {
591 tf->command = fis[2]; /* status */ 591 tf->command = fis[2]; /* status */
592 tf->feature = fis[3]; /* error */ 592 tf->feature = fis[3]; /* error */
593 593
594 tf->lbal = fis[4]; 594 tf->lbal = fis[4];
595 tf->lbam = fis[5]; 595 tf->lbam = fis[5];
596 tf->lbah = fis[6]; 596 tf->lbah = fis[6];
597 tf->device = fis[7]; 597 tf->device = fis[7];
598 598
599 tf->hob_lbal = fis[8]; 599 tf->hob_lbal = fis[8];
600 tf->hob_lbam = fis[9]; 600 tf->hob_lbam = fis[9];
601 tf->hob_lbah = fis[10]; 601 tf->hob_lbah = fis[10];
602 602
603 tf->nsect = fis[12]; 603 tf->nsect = fis[12];
604 tf->hob_nsect = fis[13]; 604 tf->hob_nsect = fis[13];
605 } 605 }
606 606
607 static const u8 ata_rw_cmds[] = { 607 static const u8 ata_rw_cmds[] = {
608 /* pio multi */ 608 /* pio multi */
609 ATA_CMD_READ_MULTI, 609 ATA_CMD_READ_MULTI,
610 ATA_CMD_WRITE_MULTI, 610 ATA_CMD_WRITE_MULTI,
611 ATA_CMD_READ_MULTI_EXT, 611 ATA_CMD_READ_MULTI_EXT,
612 ATA_CMD_WRITE_MULTI_EXT, 612 ATA_CMD_WRITE_MULTI_EXT,
613 0, 613 0,
614 0, 614 0,
615 0, 615 0,
616 ATA_CMD_WRITE_MULTI_FUA_EXT, 616 ATA_CMD_WRITE_MULTI_FUA_EXT,
617 /* pio */ 617 /* pio */
618 ATA_CMD_PIO_READ, 618 ATA_CMD_PIO_READ,
619 ATA_CMD_PIO_WRITE, 619 ATA_CMD_PIO_WRITE,
620 ATA_CMD_PIO_READ_EXT, 620 ATA_CMD_PIO_READ_EXT,
621 ATA_CMD_PIO_WRITE_EXT, 621 ATA_CMD_PIO_WRITE_EXT,
622 0, 622 0,
623 0, 623 0,
624 0, 624 0,
625 0, 625 0,
626 /* dma */ 626 /* dma */
627 ATA_CMD_READ, 627 ATA_CMD_READ,
628 ATA_CMD_WRITE, 628 ATA_CMD_WRITE,
629 ATA_CMD_READ_EXT, 629 ATA_CMD_READ_EXT,
630 ATA_CMD_WRITE_EXT, 630 ATA_CMD_WRITE_EXT,
631 0, 631 0,
632 0, 632 0,
633 0, 633 0,
634 ATA_CMD_WRITE_FUA_EXT 634 ATA_CMD_WRITE_FUA_EXT
635 }; 635 };
636 636
637 /** 637 /**
638 * ata_rwcmd_protocol - set taskfile r/w commands and protocol 638 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
639 * @tf: command to examine and configure 639 * @tf: command to examine and configure
640 * @dev: device tf belongs to 640 * @dev: device tf belongs to
641 * 641 *
642 * Examine the device configuration and tf->flags to calculate 642 * Examine the device configuration and tf->flags to calculate
643 * the proper read/write commands and protocol to use. 643 * the proper read/write commands and protocol to use.
644 * 644 *
645 * LOCKING: 645 * LOCKING:
646 * caller. 646 * caller.
647 */ 647 */
648 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev) 648 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
649 { 649 {
650 u8 cmd; 650 u8 cmd;
651 651
652 int index, fua, lba48, write; 652 int index, fua, lba48, write;
653 653
654 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0; 654 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
655 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0; 655 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
656 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0; 656 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
657 657
658 if (dev->flags & ATA_DFLAG_PIO) { 658 if (dev->flags & ATA_DFLAG_PIO) {
659 tf->protocol = ATA_PROT_PIO; 659 tf->protocol = ATA_PROT_PIO;
660 index = dev->multi_count ? 0 : 8; 660 index = dev->multi_count ? 0 : 8;
661 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) { 661 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
662 /* Unable to use DMA due to host limitation */ 662 /* Unable to use DMA due to host limitation */
663 tf->protocol = ATA_PROT_PIO; 663 tf->protocol = ATA_PROT_PIO;
664 index = dev->multi_count ? 0 : 8; 664 index = dev->multi_count ? 0 : 8;
665 } else { 665 } else {
666 tf->protocol = ATA_PROT_DMA; 666 tf->protocol = ATA_PROT_DMA;
667 index = 16; 667 index = 16;
668 } 668 }
669 669
670 cmd = ata_rw_cmds[index + fua + lba48 + write]; 670 cmd = ata_rw_cmds[index + fua + lba48 + write];
671 if (cmd) { 671 if (cmd) {
672 tf->command = cmd; 672 tf->command = cmd;
673 return 0; 673 return 0;
674 } 674 }
675 return -1; 675 return -1;
676 } 676 }
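
The lookup above linearizes three booleans into an offset: fua contributes 4, lba48 contributes 2 and write contributes 1, added to a base of 0 (PIO multi), 8 (PIO) or 16 (DMA). A sketch of that indexing (table trimmed to the DMA rows, names instead of opcodes):

#include <stdio.h>

static const char *rw_cmds[24] = {
    [16] = "READ DMA",      [17] = "WRITE DMA",
    [18] = "READ DMA EXT",  [19] = "WRITE DMA EXT",
    [23] = "WRITE DMA FUA EXT",
};

int main(void)
{
    /* all three flags set, pre-scaled the way the driver does it */
    int fua = 4, lba48 = 2, write = 1;
    int slot = 16 + fua + lba48 + write;    /* -> 23 */

    printf("slot %d -> %s\n", slot, rw_cmds[slot]);
    return 0;
}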
677 677
678 /** 678 /**
679 * ata_tf_read_block - Read block address from ATA taskfile 679 * ata_tf_read_block - Read block address from ATA taskfile
680 * @tf: ATA taskfile of interest 680 * @tf: ATA taskfile of interest
681 * @dev: ATA device @tf belongs to 681 * @dev: ATA device @tf belongs to
682 * 682 *
683 * LOCKING: 683 * LOCKING:
684 * None. 684 * None.
685 * 685 *
686 * Read block address from @tf. This function can handle all 686 * Read block address from @tf. This function can handle all
687 * three address formats - LBA, LBA48 and CHS. tf->protocol and 687 * three address formats - LBA, LBA48 and CHS. tf->protocol and
688 * flags select the address format to use. 688 * flags select the address format to use.
689 * 689 *
690 * RETURNS: 690 * RETURNS:
691 * Block address read from @tf. 691 * Block address read from @tf.
692 */ 692 */
693 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev) 693 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
694 { 694 {
695 u64 block = 0; 695 u64 block = 0;
696 696
697 if (tf->flags & ATA_TFLAG_LBA) { 697 if (tf->flags & ATA_TFLAG_LBA) {
698 if (tf->flags & ATA_TFLAG_LBA48) { 698 if (tf->flags & ATA_TFLAG_LBA48) {
699 block |= (u64)tf->hob_lbah << 40; 699 block |= (u64)tf->hob_lbah << 40;
700 block |= (u64)tf->hob_lbam << 32; 700 block |= (u64)tf->hob_lbam << 32;
701 block |= (u64)tf->hob_lbal << 24; 701 block |= (u64)tf->hob_lbal << 24;
702 } else 702 } else
703 block |= (tf->device & 0xf) << 24; 703 block |= (tf->device & 0xf) << 24;
704 704
705 block |= tf->lbah << 16; 705 block |= tf->lbah << 16;
706 block |= tf->lbam << 8; 706 block |= tf->lbam << 8;
707 block |= tf->lbal; 707 block |= tf->lbal;
708 } else { 708 } else {
709 u32 cyl, head, sect; 709 u32 cyl, head, sect;
710 710
711 cyl = tf->lbam | (tf->lbah << 8); 711 cyl = tf->lbam | (tf->lbah << 8);
712 head = tf->device & 0xf; 712 head = tf->device & 0xf;
713 sect = tf->lbal; 713 sect = tf->lbal;
714 714
715 if (!sect) { 715 if (!sect) {
716 ata_dev_printk(dev, KERN_WARNING, "device reported " 716 ata_dev_printk(dev, KERN_WARNING, "device reported "
717 "invalid CHS sector 0\n"); 717 "invalid CHS sector 0\n");
718 sect = 1; /* oh well */ 718 sect = 1; /* oh well */
719 } 719 }
720 720
721 block = (cyl * dev->heads + head) * dev->sectors + sect - 1; 721 block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
722 } 722 }
723 723
724 return block; 724 return block;
725 } 725 }
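
The CHS branch is the classic linearization block = (cyl * heads + head) * sectors_per_track + sect - 1. A standalone round-trip sketch using a made-up geometry, inverting the address the same way ata_build_rw_tf() does further down:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t heads = 16, sectors = 63;       /* hypothetical geometry */
    uint32_t cyl = 2, head = 5, sect = 7;

    uint64_t block = ((uint64_t)cyl * heads + head) * sectors + sect - 1;

    uint32_t track = (uint32_t)(block / sectors);
    printf("block %llu -> C/H/S %u/%u/%u\n",
           (unsigned long long)block,
           track / heads, track % heads,
           (uint32_t)(block % sectors) + 1);
    return 0;
}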
726 726
727 /** 727 /**
728 * ata_build_rw_tf - Build ATA taskfile for given read/write request 728 * ata_build_rw_tf - Build ATA taskfile for given read/write request
729 * @tf: Target ATA taskfile 729 * @tf: Target ATA taskfile
730 * @dev: ATA device @tf belongs to 730 * @dev: ATA device @tf belongs to
731 * @block: Block address 731 * @block: Block address
732 * @n_block: Number of blocks 732 * @n_block: Number of blocks
733 * @tf_flags: RW/FUA etc... 733 * @tf_flags: RW/FUA etc...
734 * @tag: tag 734 * @tag: tag
735 * 735 *
736 * LOCKING: 736 * LOCKING:
737 * None. 737 * None.
738 * 738 *
739 * Build ATA taskfile @tf for read/write request described by 739 * Build ATA taskfile @tf for read/write request described by
740 * @block, @n_block, @tf_flags and @tag on @dev. 740 * @block, @n_block, @tf_flags and @tag on @dev.
741 * 741 *
742 * RETURNS: 742 * RETURNS:
743 * 743 *
744 * 0 on success, -ERANGE if the request is too large for @dev, 744 * 0 on success, -ERANGE if the request is too large for @dev,
745 * -EINVAL if the request is invalid. 745 * -EINVAL if the request is invalid.
746 */ 746 */
747 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev, 747 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
748 u64 block, u32 n_block, unsigned int tf_flags, 748 u64 block, u32 n_block, unsigned int tf_flags,
749 unsigned int tag) 749 unsigned int tag)
750 { 750 {
751 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 751 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
752 tf->flags |= tf_flags; 752 tf->flags |= tf_flags;
753 753
754 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) { 754 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
755 /* yay, NCQ */ 755 /* yay, NCQ */
756 if (!lba_48_ok(block, n_block)) 756 if (!lba_48_ok(block, n_block))
757 return -ERANGE; 757 return -ERANGE;
758 758
759 tf->protocol = ATA_PROT_NCQ; 759 tf->protocol = ATA_PROT_NCQ;
760 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48; 760 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
761 761
762 if (tf->flags & ATA_TFLAG_WRITE) 762 if (tf->flags & ATA_TFLAG_WRITE)
763 tf->command = ATA_CMD_FPDMA_WRITE; 763 tf->command = ATA_CMD_FPDMA_WRITE;
764 else 764 else
765 tf->command = ATA_CMD_FPDMA_READ; 765 tf->command = ATA_CMD_FPDMA_READ;
766 766
767 tf->nsect = tag << 3; 767 tf->nsect = tag << 3;
768 tf->hob_feature = (n_block >> 8) & 0xff; 768 tf->hob_feature = (n_block >> 8) & 0xff;
769 tf->feature = n_block & 0xff; 769 tf->feature = n_block & 0xff;
770 770
771 tf->hob_lbah = (block >> 40) & 0xff; 771 tf->hob_lbah = (block >> 40) & 0xff;
772 tf->hob_lbam = (block >> 32) & 0xff; 772 tf->hob_lbam = (block >> 32) & 0xff;
773 tf->hob_lbal = (block >> 24) & 0xff; 773 tf->hob_lbal = (block >> 24) & 0xff;
774 tf->lbah = (block >> 16) & 0xff; 774 tf->lbah = (block >> 16) & 0xff;
775 tf->lbam = (block >> 8) & 0xff; 775 tf->lbam = (block >> 8) & 0xff;
776 tf->lbal = block & 0xff; 776 tf->lbal = block & 0xff;
777 777
778 tf->device = 1 << 6; 778 tf->device = 1 << 6;
779 if (tf->flags & ATA_TFLAG_FUA) 779 if (tf->flags & ATA_TFLAG_FUA)
780 tf->device |= 1 << 7; 780 tf->device |= 1 << 7;
781 } else if (dev->flags & ATA_DFLAG_LBA) { 781 } else if (dev->flags & ATA_DFLAG_LBA) {
782 tf->flags |= ATA_TFLAG_LBA; 782 tf->flags |= ATA_TFLAG_LBA;
783 783
784 if (lba_28_ok(block, n_block)) { 784 if (lba_28_ok(block, n_block)) {
785 /* use LBA28 */ 785 /* use LBA28 */
786 tf->device |= (block >> 24) & 0xf; 786 tf->device |= (block >> 24) & 0xf;
787 } else if (lba_48_ok(block, n_block)) { 787 } else if (lba_48_ok(block, n_block)) {
788 if (!(dev->flags & ATA_DFLAG_LBA48)) 788 if (!(dev->flags & ATA_DFLAG_LBA48))
789 return -ERANGE; 789 return -ERANGE;
790 790
791 /* use LBA48 */ 791 /* use LBA48 */
792 tf->flags |= ATA_TFLAG_LBA48; 792 tf->flags |= ATA_TFLAG_LBA48;
793 793
794 tf->hob_nsect = (n_block >> 8) & 0xff; 794 tf->hob_nsect = (n_block >> 8) & 0xff;
795 795
796 tf->hob_lbah = (block >> 40) & 0xff; 796 tf->hob_lbah = (block >> 40) & 0xff;
797 tf->hob_lbam = (block >> 32) & 0xff; 797 tf->hob_lbam = (block >> 32) & 0xff;
798 tf->hob_lbal = (block >> 24) & 0xff; 798 tf->hob_lbal = (block >> 24) & 0xff;
799 } else 799 } else
800 /* request too large even for LBA48 */ 800 /* request too large even for LBA48 */
801 return -ERANGE; 801 return -ERANGE;
802 802
803 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0)) 803 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
804 return -EINVAL; 804 return -EINVAL;
805 805
806 tf->nsect = n_block & 0xff; 806 tf->nsect = n_block & 0xff;
807 807
808 tf->lbah = (block >> 16) & 0xff; 808 tf->lbah = (block >> 16) & 0xff;
809 tf->lbam = (block >> 8) & 0xff; 809 tf->lbam = (block >> 8) & 0xff;
810 tf->lbal = block & 0xff; 810 tf->lbal = block & 0xff;
811 811
812 tf->device |= ATA_LBA; 812 tf->device |= ATA_LBA;
813 } else { 813 } else {
814 /* CHS */ 814 /* CHS */
815 u32 sect, head, cyl, track; 815 u32 sect, head, cyl, track;
816 816
817 /* The request -may- be too large for CHS addressing. */ 817 /* The request -may- be too large for CHS addressing. */
818 if (!lba_28_ok(block, n_block)) 818 if (!lba_28_ok(block, n_block))
819 return -ERANGE; 819 return -ERANGE;
820 820
821 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0)) 821 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
822 return -EINVAL; 822 return -EINVAL;
823 823
824 /* Convert LBA to CHS */ 824 /* Convert LBA to CHS */
825 track = (u32)block / dev->sectors; 825 track = (u32)block / dev->sectors;
826 cyl = track / dev->heads; 826 cyl = track / dev->heads;
827 head = track % dev->heads; 827 head = track % dev->heads;
828 sect = (u32)block % dev->sectors + 1; 828 sect = (u32)block % dev->sectors + 1;
829 829
830 DPRINTK("block %u track %u cyl %u head %u sect %u\n", 830 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
831 (u32)block, track, cyl, head, sect); 831 (u32)block, track, cyl, head, sect);
832 832
833 /* Check whether the converted CHS can fit. 833 /* Check whether the converted CHS can fit.
834 Cylinder: 0-65535 834 Cylinder: 0-65535
835 Head: 0-15 835 Head: 0-15
836 Sector: 1-255 */ 836 Sector: 1-255 */
837 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect)) 837 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
838 return -ERANGE; 838 return -ERANGE;
839 839
840 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */ 840 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
841 tf->lbal = sect; 841 tf->lbal = sect;
842 tf->lbam = cyl; 842 tf->lbam = cyl;
843 tf->lbah = cyl >> 8; 843 tf->lbah = cyl >> 8;
844 tf->device |= head; 844 tf->device |= head;
845 } 845 }
846 846
847 return 0; 847 return 0;
848 } 848 }
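
Worth noting in the NCQ branch above: the register roles swap relative to ordinary reads and writes. The sector count is carried in feature/hob_feature and the tag sits in bits 7:3 of nsect. A small sketch of that packing (tag and count values are arbitrary):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    unsigned int tag = 5;        /* NCQ tag, 0..31 */
    uint32_t n_block = 0x120;    /* 288 sectors */

    uint8_t nsect       = tag << 3;
    uint8_t feature     = n_block & 0xff;
    uint8_t hob_feature = (n_block >> 8) & 0xff;

    printf("nsect=0x%02x feature=0x%02x hob_feature=0x%02x\n",
           nsect, feature, hob_feature);
    return 0;
}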
849 849
850 /** 850 /**
851 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask 851 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
852 * @pio_mask: pio_mask 852 * @pio_mask: pio_mask
853 * @mwdma_mask: mwdma_mask 853 * @mwdma_mask: mwdma_mask
854 * @udma_mask: udma_mask 854 * @udma_mask: udma_mask
855 * 855 *
856 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single 856 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
857 * unsigned int xfer_mask. 857 * unsigned int xfer_mask.
858 * 858 *
859 * LOCKING: 859 * LOCKING:
860 * None. 860 * None.
861 * 861 *
862 * RETURNS: 862 * RETURNS:
863 * Packed xfer_mask. 863 * Packed xfer_mask.
864 */ 864 */
865 unsigned long ata_pack_xfermask(unsigned long pio_mask, 865 unsigned long ata_pack_xfermask(unsigned long pio_mask,
866 unsigned long mwdma_mask, 866 unsigned long mwdma_mask,
867 unsigned long udma_mask) 867 unsigned long udma_mask)
868 { 868 {
869 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) | 869 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
870 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) | 870 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
871 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA); 871 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
872 } 872 }
873 873
874 /** 874 /**
875 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks 875 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
876 * @xfer_mask: xfer_mask to unpack 876 * @xfer_mask: xfer_mask to unpack
877 * @pio_mask: resulting pio_mask 877 * @pio_mask: resulting pio_mask
878 * @mwdma_mask: resulting mwdma_mask 878 * @mwdma_mask: resulting mwdma_mask
879 * @udma_mask: resulting udma_mask 879 * @udma_mask: resulting udma_mask
880 * 880 *
881 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask. 881 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
882 * Any NULL destination masks will be ignored. 882 * Any NULL destination masks will be ignored.
883 */ 883 */
884 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask, 884 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
885 unsigned long *mwdma_mask, unsigned long *udma_mask) 885 unsigned long *mwdma_mask, unsigned long *udma_mask)
886 { 886 {
887 if (pio_mask) 887 if (pio_mask)
888 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO; 888 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
889 if (mwdma_mask) 889 if (mwdma_mask)
890 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA; 890 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
891 if (udma_mask) 891 if (udma_mask)
892 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA; 892 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
893 } 893 }
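
A standalone round trip through the same shift-and-mask packing. The shift/mask constants below are illustrative stand-ins, not the real ATA_SHIFT_*/ATA_MASK_* values from <linux/ata.h>:

#include <stdio.h>

#define SHIFT_PIO    0
#define SHIFT_MWDMA  8
#define SHIFT_UDMA   16
#define MASK_PIO     (0xfful << SHIFT_PIO)
#define MASK_MWDMA   (0xfful << SHIFT_MWDMA)
#define MASK_UDMA    (0xfful << SHIFT_UDMA)

int main(void)
{
    unsigned long pio = 0x1f, mwdma = 0x07, udma = 0x3f;

    unsigned long xfer_mask =
        ((pio << SHIFT_PIO) & MASK_PIO) |
        ((mwdma << SHIFT_MWDMA) & MASK_MWDMA) |
        ((udma << SHIFT_UDMA) & MASK_UDMA);

    printf("packed 0x%lx, udma unpacked 0x%lx\n",
           xfer_mask, (xfer_mask & MASK_UDMA) >> SHIFT_UDMA);
    return 0;
}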
894 894
895 static const struct ata_xfer_ent { 895 static const struct ata_xfer_ent {
896 int shift, bits; 896 int shift, bits;
897 u8 base; 897 u8 base;
898 } ata_xfer_tbl[] = { 898 } ata_xfer_tbl[] = {
899 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 }, 899 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
900 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 }, 900 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
901 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 }, 901 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
902 { -1, }, 902 { -1, },
903 }; 903 };
904 904
905 /** 905 /**
906 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask 906 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
907 * @xfer_mask: xfer_mask of interest 907 * @xfer_mask: xfer_mask of interest
908 * 908 *
909 * Return matching XFER_* value for @xfer_mask. Only the highest 909 * Return matching XFER_* value for @xfer_mask. Only the highest
910 * bit of @xfer_mask is considered. 910 * bit of @xfer_mask is considered.
911 * 911 *
912 * LOCKING: 912 * LOCKING:
913 * None. 913 * None.
914 * 914 *
915 * RETURNS: 915 * RETURNS:
916 * Matching XFER_* value, 0xff if no match found. 916 * Matching XFER_* value, 0xff if no match found.
917 */ 917 */
918 u8 ata_xfer_mask2mode(unsigned long xfer_mask) 918 u8 ata_xfer_mask2mode(unsigned long xfer_mask)
919 { 919 {
920 int highbit = fls(xfer_mask) - 1; 920 int highbit = fls(xfer_mask) - 1;
921 const struct ata_xfer_ent *ent; 921 const struct ata_xfer_ent *ent;
922 922
923 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 923 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
924 if (highbit >= ent->shift && highbit < ent->shift + ent->bits) 924 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
925 return ent->base + highbit - ent->shift; 925 return ent->base + highbit - ent->shift;
926 return 0xff; 926 return 0xff;
927 } 927 }
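
Only the highest set bit matters here. A sketch with a hand-rolled fls() (the bit position 16 is a stand-in for the UDMA shift; 0x40 really is XFER_UDMA_0, so the result lands on XFER_UDMA_2 = 0x42):

#include <stdio.h>

/* userspace stand-in for the kernel's fls(): 1-based index of the
 * highest set bit, 0 for no bits */
static int fls_(unsigned long x)
{
    int n = 0;
    while (x) { n++; x >>= 1; }
    return n;
}

int main(void)
{
    /* assume the UDMA range starts at bit 16 with base XFER_UDMA_0 */
    unsigned long xfer_mask = (1ul << 16) | (1ul << 18); /* udma0+udma2 */
    int highbit = fls_(xfer_mask) - 1;

    printf("highbit %d -> XFER mode 0x%x\n", highbit, 0x40 + highbit - 16);
    return 0;
}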
928 928
929 /** 929 /**
930 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_* 930 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
931 * @xfer_mode: XFER_* of interest 931 * @xfer_mode: XFER_* of interest
932 * 932 *
933 * Return matching xfer_mask for @xfer_mode. 933 * Return matching xfer_mask for @xfer_mode.
934 * 934 *
935 * LOCKING: 935 * LOCKING:
936 * None. 936 * None.
937 * 937 *
938 * RETURNS: 938 * RETURNS:
939 * Matching xfer_mask, 0 if no match found. 939 * Matching xfer_mask, 0 if no match found.
940 */ 940 */
941 unsigned long ata_xfer_mode2mask(u8 xfer_mode) 941 unsigned long ata_xfer_mode2mask(u8 xfer_mode)
942 { 942 {
943 const struct ata_xfer_ent *ent; 943 const struct ata_xfer_ent *ent;
944 944
945 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 945 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
946 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits) 946 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
947 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1) 947 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
948 & ~((1 << ent->shift) - 1); 948 & ~((1 << ent->shift) - 1);
949 return 0; 949 return 0;
950 } 950 }
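
The return expression builds a mask of the given mode plus every slower mode in the same class: (2 << top_bit) - 1 fills everything up to and including the mode's bit, and & ~((1 << shift) - 1) clears the classes below. A sketch with the same illustrative constants as above:

#include <stdio.h>

int main(void)
{
    int shift = 16, base = 0x40;     /* stand-in UDMA range */
    int xfer_mode = 0x42;            /* XFER_UDMA_2 */

    unsigned long mask = ((2ul << (shift + xfer_mode - base)) - 1)
                         & ~((1ul << shift) - 1);

    printf("mode 0x%x -> mask 0x%lx\n", xfer_mode, mask);  /* 0x70000 */
    return 0;
}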
951 951
952 /** 952 /**
953 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_* 953 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
954 * @xfer_mode: XFER_* of interest 954 * @xfer_mode: XFER_* of interest
955 * 955 *
956 * Return matching xfer_shift for @xfer_mode. 956 * Return matching xfer_shift for @xfer_mode.
957 * 957 *
958 * LOCKING: 958 * LOCKING:
959 * None. 959 * None.
960 * 960 *
961 * RETURNS: 961 * RETURNS:
962 * Matching xfer_shift, -1 if no match found. 962 * Matching xfer_shift, -1 if no match found.
963 */ 963 */
964 int ata_xfer_mode2shift(unsigned long xfer_mode) 964 int ata_xfer_mode2shift(unsigned long xfer_mode)
965 { 965 {
966 const struct ata_xfer_ent *ent; 966 const struct ata_xfer_ent *ent;
967 967
968 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 968 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
969 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits) 969 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
970 return ent->shift; 970 return ent->shift;
971 return -1; 971 return -1;
972 } 972 }
973 973
974 /** 974 /**
975 * ata_mode_string - convert xfer_mask to string 975 * ata_mode_string - convert xfer_mask to string
976 * @xfer_mask: mask of bits supported; only highest bit counts. 976 * @xfer_mask: mask of bits supported; only highest bit counts.
977 * 977 *
978 * Determine string which represents the highest speed 978 * Determine string which represents the highest speed
979 * (highest bit in @xfer_mask). 979 * (highest bit in @xfer_mask).
980 * 980 *
981 * LOCKING: 981 * LOCKING:
982 * None. 982 * None.
983 * 983 *
984 * RETURNS: 984 * RETURNS:
985 * Constant C string representing highest speed listed in 985 * Constant C string representing highest speed listed in
986 * @xfer_mask, or the constant C string "<n/a>". 986 * @xfer_mask, or the constant C string "<n/a>".
987 */ 987 */
988 const char *ata_mode_string(unsigned long xfer_mask) 988 const char *ata_mode_string(unsigned long xfer_mask)
989 { 989 {
990 static const char * const xfer_mode_str[] = { 990 static const char * const xfer_mode_str[] = {
991 "PIO0", 991 "PIO0",
992 "PIO1", 992 "PIO1",
993 "PIO2", 993 "PIO2",
994 "PIO3", 994 "PIO3",
995 "PIO4", 995 "PIO4",
996 "PIO5", 996 "PIO5",
997 "PIO6", 997 "PIO6",
998 "MWDMA0", 998 "MWDMA0",
999 "MWDMA1", 999 "MWDMA1",
1000 "MWDMA2", 1000 "MWDMA2",
1001 "MWDMA3", 1001 "MWDMA3",
1002 "MWDMA4", 1002 "MWDMA4",
1003 "UDMA/16", 1003 "UDMA/16",
1004 "UDMA/25", 1004 "UDMA/25",
1005 "UDMA/33", 1005 "UDMA/33",
1006 "UDMA/44", 1006 "UDMA/44",
1007 "UDMA/66", 1007 "UDMA/66",
1008 "UDMA/100", 1008 "UDMA/100",
1009 "UDMA/133", 1009 "UDMA/133",
1010 "UDMA7", 1010 "UDMA7",
1011 }; 1011 };
1012 int highbit; 1012 int highbit;
1013 1013
1014 highbit = fls(xfer_mask) - 1; 1014 highbit = fls(xfer_mask) - 1;
1015 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str)) 1015 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
1016 return xfer_mode_str[highbit]; 1016 return xfer_mode_str[highbit];
1017 return "<n/a>"; 1017 return "<n/a>";
1018 } 1018 }
1019 1019
1020 static const char *sata_spd_string(unsigned int spd) 1020 static const char *sata_spd_string(unsigned int spd)
1021 { 1021 {
1022 static const char * const spd_str[] = { 1022 static const char * const spd_str[] = {
1023 "1.5 Gbps", 1023 "1.5 Gbps",
1024 "3.0 Gbps", 1024 "3.0 Gbps",
1025 "6.0 Gbps", 1025 "6.0 Gbps",
1026 }; 1026 };
1027 1027
1028 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str)) 1028 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
1029 return "<unknown>"; 1029 return "<unknown>";
1030 return spd_str[spd - 1]; 1030 return spd_str[spd - 1];
1031 } 1031 }
1032 1032
1033 static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy) 1033 static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
1034 { 1034 {
1035 struct ata_link *link = dev->link; 1035 struct ata_link *link = dev->link;
1036 struct ata_port *ap = link->ap; 1036 struct ata_port *ap = link->ap;
1037 u32 scontrol; 1037 u32 scontrol;
1038 unsigned int err_mask; 1038 unsigned int err_mask;
1039 int rc; 1039 int rc;
1040 1040
1041 /* 1041 /*
1042 * disallow DIPM for drivers which haven't set 1042 * disallow DIPM for drivers which haven't set
1043 * ATA_FLAG_IPM. This is because when DIPM is enabled, 1043 * ATA_FLAG_IPM. This is because when DIPM is enabled,
1044 * phy ready will be set in the interrupt status on 1044 * phy ready will be set in the interrupt status on
1045 * state changes, which will cause some drivers to 1045 * state changes, which will cause some drivers to
1046 * think there are errors - additionally drivers will 1046 * think there are errors - additionally drivers will
1047 * need to disable hot plug. 1047 * need to disable hot plug.
1048 */ 1048 */
1049 if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) { 1049 if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
1050 ap->pm_policy = NOT_AVAILABLE; 1050 ap->pm_policy = NOT_AVAILABLE;
1051 return -EINVAL; 1051 return -EINVAL;
1052 } 1052 }
1053 1053
1054 /* 1054 /*
1055 * For DIPM, we will only enable it for the 1055 * For DIPM, we will only enable it for the
1056 * min_power setting. 1056 * min_power setting.
1057 * 1057 *
1058 * Why? Because disks are too stupid to know that 1058 * Why? Because disks are too stupid to know that
1059 * if the host rejects a request to go to SLUMBER 1059 * if the host rejects a request to go to SLUMBER
1060 * they should retry at PARTIAL, and instead they 1060 * they should retry at PARTIAL, and instead they
1061 * just give up. So, for medium_power to 1061 * just give up. So, for medium_power to
1062 * work at all, we need to only allow HIPM. 1062 * work at all, we need to only allow HIPM.
1063 */ 1063 */
1064 rc = sata_scr_read(link, SCR_CONTROL, &scontrol); 1064 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
1065 if (rc) 1065 if (rc)
1066 return rc; 1066 return rc;
1067 1067
1068 switch (policy) { 1068 switch (policy) {
1069 case MIN_POWER: 1069 case MIN_POWER:
1070 /* no restrictions on IPM transitions */ 1070 /* no restrictions on IPM transitions */
1071 scontrol &= ~(0x3 << 8); 1071 scontrol &= ~(0x3 << 8);
1072 rc = sata_scr_write(link, SCR_CONTROL, scontrol); 1072 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
1073 if (rc) 1073 if (rc)
1074 return rc; 1074 return rc;
1075 1075
1076 /* enable DIPM */ 1076 /* enable DIPM */
1077 if (dev->flags & ATA_DFLAG_DIPM) 1077 if (dev->flags & ATA_DFLAG_DIPM)
1078 err_mask = ata_dev_set_feature(dev, 1078 err_mask = ata_dev_set_feature(dev,
1079 SETFEATURES_SATA_ENABLE, SATA_DIPM); 1079 SETFEATURES_SATA_ENABLE, SATA_DIPM);
1080 break; 1080 break;
1081 case MEDIUM_POWER: 1081 case MEDIUM_POWER:
1082 /* allow IPM to PARTIAL */ 1082 /* allow IPM to PARTIAL */
1083 scontrol &= ~(0x1 << 8); 1083 scontrol &= ~(0x1 << 8);
1084 scontrol |= (0x2 << 8); 1084 scontrol |= (0x2 << 8);
1085 rc = sata_scr_write(link, SCR_CONTROL, scontrol); 1085 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
1086 if (rc) 1086 if (rc)
1087 return rc; 1087 return rc;
1088 1088
1089 /* 1089 /*
1090 * we don't have to disable DIPM since IPM flags 1090 * we don't have to disable DIPM since IPM flags
1091 * disallow transitions to SLUMBER, which effectively 1091 * disallow transitions to SLUMBER, which effectively
1092 * disables DIPM if the device does not support PARTIAL 1092 * disables DIPM if the device does not support PARTIAL
1093 */ 1093 */
1094 break; 1094 break;
1095 case NOT_AVAILABLE: 1095 case NOT_AVAILABLE:
1096 case MAX_PERFORMANCE: 1096 case MAX_PERFORMANCE:
1097 /* disable all IPM transitions */ 1097 /* disable all IPM transitions */
1098 scontrol |= (0x3 << 8); 1098 scontrol |= (0x3 << 8);
1099 rc = sata_scr_write(link, SCR_CONTROL, scontrol); 1099 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
1100 if (rc) 1100 if (rc)
1101 return rc; 1101 return rc;
1102 1102
1103 /* 1103 /*
1104 * we don't have to disable DIPM since IPM flags 1104 * we don't have to disable DIPM since IPM flags
1105 * disallow all transitions, which effectively 1105 * disallow all transitions, which effectively
1106 * disables DIPM anyway. 1106 * disables DIPM anyway.
1107 */ 1107 */
1108 break; 1108 break;
1109 } 1109 }
1110 1110
1111 /* FIXME: handle SET FEATURES failure */ 1111 /* FIXME: handle SET FEATURES failure */
1112 (void) err_mask; 1112 (void) err_mask;
1113 1113
1114 return 0; 1114 return 0;
1115 } 1115 }
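
All three policy cases manipulate the low two bits of SControl's IPM field (bits 11:8): 0x1 blocks transitions to PARTIAL, 0x2 blocks transitions to SLUMBER. A standalone sketch of the three settings:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t scontrol = 0;

    scontrol &= ~(0x3u << 8);                 /* MIN_POWER: allow both */
    printf("min_power    IPM 0x%x\n", (scontrol >> 8) & 0x3);

    scontrol &= ~(0x1u << 8);                 /* MEDIUM_POWER: allow */
    scontrol |= (0x2u << 8);                  /* PARTIAL, block SLUMBER */
    printf("medium_power IPM 0x%x\n", (scontrol >> 8) & 0x3);

    scontrol |= (0x3u << 8);                  /* MAX_PERFORMANCE: block both */
    printf("max_perf     IPM 0x%x\n", (scontrol >> 8) & 0x3);
    return 0;
}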
1116 1116
1117 /** 1117 /**
1118 * ata_dev_enable_pm - enable SATA interface power management 1118 * ata_dev_enable_pm - enable SATA interface power management
1119 * @dev: device to enable power management 1119 * @dev: device to enable power management
1120 * @policy: the link power management policy 1120 * @policy: the link power management policy
1121 * 1121 *
1122 * Enable SATA Interface power management. This will enable 1122 * Enable SATA Interface power management. This will enable
1123 * Device Interface Power Management (DIPM) for min_power 1123 * Device Interface Power Management (DIPM) for min_power
1124 * policy, and then call driver specific callbacks for 1124 * policy, and then call driver specific callbacks for
1125 * enabling Host Initiated Power management. 1125 * enabling Host Initiated Power management.
1126 * 1126 *
1127 * Locking: Caller. 1127 * Locking: Caller.
1128 * Returns: void; on failure @ap->pm_policy is reset to MAX_PERFORMANCE. 1128 * Returns: void; on failure @ap->pm_policy is reset to MAX_PERFORMANCE.
1129 */ 1129 */
1130 void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy) 1130 void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
1131 { 1131 {
1132 int rc = 0; 1132 int rc = 0;
1133 struct ata_port *ap = dev->link->ap; 1133 struct ata_port *ap = dev->link->ap;
1134 1134
1135 /* set HIPM first, then DIPM */ 1135 /* set HIPM first, then DIPM */
1136 if (ap->ops->enable_pm) 1136 if (ap->ops->enable_pm)
1137 rc = ap->ops->enable_pm(ap, policy); 1137 rc = ap->ops->enable_pm(ap, policy);
1138 if (rc) 1138 if (rc)
1139 goto enable_pm_out; 1139 goto enable_pm_out;
1140 rc = ata_dev_set_dipm(dev, policy); 1140 rc = ata_dev_set_dipm(dev, policy);
1141 1141
1142 enable_pm_out: 1142 enable_pm_out:
1143 if (rc) 1143 if (rc)
1144 ap->pm_policy = MAX_PERFORMANCE; 1144 ap->pm_policy = MAX_PERFORMANCE;
1145 else 1145 else
1146 ap->pm_policy = policy; 1146 ap->pm_policy = policy;
1147 return /* rc */; /* hopefully we can use 'rc' eventually */ 1147 return /* rc */; /* hopefully we can use 'rc' eventually */
1148 } 1148 }
1149 1149
1150 #ifdef CONFIG_PM 1150 #ifdef CONFIG_PM
1151 /** 1151 /**
1152 * ata_dev_disable_pm - disable SATA interface power management 1152 * ata_dev_disable_pm - disable SATA interface power management
1153 * @dev: device to disable power management 1153 * @dev: device to disable power management
1154 * 1154 *
1155 * Disable SATA Interface power management. This will disable 1155 * Disable SATA Interface power management. This will disable
1156 * Device Interface Power Management (DIPM) without changing 1156 * Device Interface Power Management (DIPM) without changing
1157 * policy, call driver specific callbacks for disabling Host 1157 * policy, call driver specific callbacks for disabling Host
1158 * Initiated Power management. 1158 * Initiated Power management.
1159 * 1159 *
1160 * Locking: Caller. 1160 * Locking: Caller.
1161 * Returns: void 1161 * Returns: void
1162 */ 1162 */
1163 static void ata_dev_disable_pm(struct ata_device *dev) 1163 static void ata_dev_disable_pm(struct ata_device *dev)
1164 { 1164 {
1165 struct ata_port *ap = dev->link->ap; 1165 struct ata_port *ap = dev->link->ap;
1166 1166
1167 ata_dev_set_dipm(dev, MAX_PERFORMANCE); 1167 ata_dev_set_dipm(dev, MAX_PERFORMANCE);
1168 if (ap->ops->disable_pm) 1168 if (ap->ops->disable_pm)
1169 ap->ops->disable_pm(ap); 1169 ap->ops->disable_pm(ap);
1170 } 1170 }
1171 #endif /* CONFIG_PM */ 1171 #endif /* CONFIG_PM */
1172 1172
1173 void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy) 1173 void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
1174 { 1174 {
1175 ap->pm_policy = policy; 1175 ap->pm_policy = policy;
1176 ap->link.eh_info.action |= ATA_EH_LPM; 1176 ap->link.eh_info.action |= ATA_EH_LPM;
1177 ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY; 1177 ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
1178 ata_port_schedule_eh(ap); 1178 ata_port_schedule_eh(ap);
1179 } 1179 }
1180 1180
1181 #ifdef CONFIG_PM 1181 #ifdef CONFIG_PM
1182 static void ata_lpm_enable(struct ata_host *host) 1182 static void ata_lpm_enable(struct ata_host *host)
1183 { 1183 {
1184 struct ata_link *link; 1184 struct ata_link *link;
1185 struct ata_port *ap; 1185 struct ata_port *ap;
1186 struct ata_device *dev; 1186 struct ata_device *dev;
1187 int i; 1187 int i;
1188 1188
1189 for (i = 0; i < host->n_ports; i++) { 1189 for (i = 0; i < host->n_ports; i++) {
1190 ap = host->ports[i]; 1190 ap = host->ports[i];
1191 ata_for_each_link(link, ap, EDGE) { 1191 ata_for_each_link(link, ap, EDGE) {
1192 ata_for_each_dev(dev, link, ALL) 1192 ata_for_each_dev(dev, link, ALL)
1193 ata_dev_disable_pm(dev); 1193 ata_dev_disable_pm(dev);
1194 } 1194 }
1195 } 1195 }
1196 } 1196 }
1197 1197
1198 static void ata_lpm_disable(struct ata_host *host) 1198 static void ata_lpm_disable(struct ata_host *host)
1199 { 1199 {
1200 int i; 1200 int i;
1201 1201
1202 for (i = 0; i < host->n_ports; i++) { 1202 for (i = 0; i < host->n_ports; i++) {
1203 struct ata_port *ap = host->ports[i]; 1203 struct ata_port *ap = host->ports[i];
1204 ata_lpm_schedule(ap, ap->pm_policy); 1204 ata_lpm_schedule(ap, ap->pm_policy);
1205 } 1205 }
1206 } 1206 }
1207 #endif /* CONFIG_PM */ 1207 #endif /* CONFIG_PM */
1208 1208
1209 /** 1209 /**
1210 * ata_dev_classify - determine device type based on ATA-spec signature 1210 * ata_dev_classify - determine device type based on ATA-spec signature
1211 * @tf: ATA taskfile register set for device to be identified 1211 * @tf: ATA taskfile register set for device to be identified
1212 * 1212 *
1213 * Determine from taskfile register contents whether a device is 1213 * Determine from taskfile register contents whether a device is
1214 * ATA or ATAPI, as per "Signature and persistence" section 1214 * ATA or ATAPI, as per "Signature and persistence" section
1215 * of ATA/PI spec (volume 1, sect 5.14). 1215 * of ATA/PI spec (volume 1, sect 5.14).
1216 * 1216 *
1217 * LOCKING: 1217 * LOCKING:
1218 * None. 1218 * None.
1219 * 1219 *
1220 * RETURNS: 1220 * RETURNS:
1221 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP, 1221 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
1222 * %ATA_DEV_SEMB or %ATA_DEV_UNKNOWN in the event of failure. 1222 * %ATA_DEV_SEMB or %ATA_DEV_UNKNOWN in the event of failure.
1223 */ 1223 */
1224 unsigned int ata_dev_classify(const struct ata_taskfile *tf) 1224 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1225 { 1225 {
1226 /* Apple's open source Darwin code hints that some devices only 1226 /* Apple's open source Darwin code hints that some devices only
1227 * put a proper signature into the LBA mid/high registers, 1227 * put a proper signature into the LBA mid/high registers,
1228 * so we only check those. It's sufficient for uniqueness. 1228 * so we only check those. It's sufficient for uniqueness.
1229 * 1229 *
1230 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate 1230 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1231 * signatures for ATA and ATAPI devices attached on SerialATA, 1231 * signatures for ATA and ATAPI devices attached on SerialATA,
1232 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA 1232 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
1233 * spec has never mentioned using different signatures 1233 * spec has never mentioned using different signatures
1234 * for ATA/ATAPI devices. Then, Serial ATA II: Port 1234 * for ATA/ATAPI devices. Then, Serial ATA II: Port
1235 * Multiplier specification began to use 0x69/0x96 to identify 1235 * Multiplier specification began to use 0x69/0x96 to identify
1236 * port multipliers and 0x3c/0xc3 to identify SEMB devices. 1236 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
1237 * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and 1237 * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and
1238 * 0x69/0x96 and described them as reserved for 1238 * 0x69/0x96 and described them as reserved for
1239 * SerialATA. 1239 * SerialATA.
1240 * 1240 *
1241 * We follow the current spec and consider that 0x69/0x96 1241 * We follow the current spec and consider that 0x69/0x96
1242 * identifies a port multiplier and 0x3c/0xc3 a SEMB device. 1242 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1243 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports 1243 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1244 * SEMB signature. This is worked around in 1244 * SEMB signature. This is worked around in
1245 * ata_dev_read_id(). 1245 * ata_dev_read_id().
1246 */ 1246 */
1247 if ((tf->lbam == 0) && (tf->lbah == 0)) { 1247 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1248 DPRINTK("found ATA device by sig\n"); 1248 DPRINTK("found ATA device by sig\n");
1249 return ATA_DEV_ATA; 1249 return ATA_DEV_ATA;
1250 } 1250 }
1251 1251
1252 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) { 1252 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1253 DPRINTK("found ATAPI device by sig\n"); 1253 DPRINTK("found ATAPI device by sig\n");
1254 return ATA_DEV_ATAPI; 1254 return ATA_DEV_ATAPI;
1255 } 1255 }
1256 1256
1257 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) { 1257 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1258 DPRINTK("found PMP device by sig\n"); 1258 DPRINTK("found PMP device by sig\n");
1259 return ATA_DEV_PMP; 1259 return ATA_DEV_PMP;
1260 } 1260 }
1261 1261
1262 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) { 1262 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1263 DPRINTK("found SEMB device by sig (could be ATA device)\n"); 1263 DPRINTK("found SEMB device by sig (could be ATA device)\n");
1264 return ATA_DEV_SEMB; 1264 return ATA_DEV_SEMB;
1265 } 1265 }
1266 1266
1267 DPRINTK("unknown device\n"); 1267 DPRINTK("unknown device\n");
1268 return ATA_DEV_UNKNOWN; 1268 return ATA_DEV_UNKNOWN;
1269 } 1269 }
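
The whole decision reduces to the (lbam, lbah) pair. A standalone sketch of the same classification (the WD1600JS SEMB quirk mentioned above is deliberately omitted):

#include <stdio.h>

static const char *classify(unsigned char lbam, unsigned char lbah)
{
    if (lbam == 0x00 && lbah == 0x00) return "ATA";
    if (lbam == 0x14 && lbah == 0xeb) return "ATAPI";
    if (lbam == 0x69 && lbah == 0x96) return "PMP";
    if (lbam == 0x3c && lbah == 0xc3) return "SEMB";
    return "unknown";
}

int main(void)
{
    printf("%s\n", classify(0x14, 0xeb));   /* -> ATAPI */
    printf("%s\n", classify(0x69, 0x96));   /* -> PMP */
    return 0;
}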
1270 1270
1271 /** 1271 /**
1272 * ata_id_string - Convert IDENTIFY DEVICE page into string 1272 * ata_id_string - Convert IDENTIFY DEVICE page into string
1273 * @id: IDENTIFY DEVICE results we will examine 1273 * @id: IDENTIFY DEVICE results we will examine
1274 * @s: string into which data is output 1274 * @s: string into which data is output
1275 * @ofs: offset into identify device page 1275 * @ofs: offset into identify device page
1276 * @len: length of string to return. must be an even number. 1276 * @len: length of string to return. must be an even number.
1277 * 1277 *
1278 * The strings in the IDENTIFY DEVICE page are broken up into 1278 * The strings in the IDENTIFY DEVICE page are broken up into
1279 * 16-bit chunks. Run through the string, and output each 1279 * 16-bit chunks. Run through the string, and output each
1280 * 8-bit chunk linearly, regardless of platform. 1280 * 8-bit chunk linearly, regardless of platform.
1281 * 1281 *
1282 * LOCKING: 1282 * LOCKING:
1283 * caller. 1283 * caller.
1284 */ 1284 */
1285 1285
1286 void ata_id_string(const u16 *id, unsigned char *s, 1286 void ata_id_string(const u16 *id, unsigned char *s,
1287 unsigned int ofs, unsigned int len) 1287 unsigned int ofs, unsigned int len)
1288 { 1288 {
1289 unsigned int c; 1289 unsigned int c;
1290 1290
1291 BUG_ON(len & 1); 1291 BUG_ON(len & 1);
1292 1292
1293 while (len > 0) { 1293 while (len > 0) {
1294 c = id[ofs] >> 8; 1294 c = id[ofs] >> 8;
1295 *s = c; 1295 *s = c;
1296 s++; 1296 s++;
1297 1297
1298 c = id[ofs] & 0xff; 1298 c = id[ofs] & 0xff;
1299 *s = c; 1299 *s = c;
1300 s++; 1300 s++;
1301 1301
1302 ofs++; 1302 ofs++;
1303 len -= 2; 1303 len -= 2;
1304 } 1304 }
1305 } 1305 }
1306 1306
1307 /** 1307 /**
1308 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string 1308 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1309 * @id: IDENTIFY DEVICE results we will examine 1309 * @id: IDENTIFY DEVICE results we will examine
1310 * @s: string into which data is output 1310 * @s: string into which data is output
1311 * @ofs: offset into identify device page 1311 * @ofs: offset into identify device page
1312 * @len: length of string to return. must be an odd number. 1312 * @len: length of string to return. must be an odd number.
1313 * 1313 *
1314 * This function is identical to ata_id_string except that it 1314 * This function is identical to ata_id_string except that it
1315 * trims trailing spaces and terminates the resulting string with 1315 * trims trailing spaces and terminates the resulting string with
1316 * null. @len must be the actual maximum length (even number) + 1. 1316 * null. @len must be the actual maximum length (even number) + 1.
1317 * 1317 *
1318 * LOCKING: 1318 * LOCKING:
1319 * caller. 1319 * caller.
1320 */ 1320 */
1321 void ata_id_c_string(const u16 *id, unsigned char *s, 1321 void ata_id_c_string(const u16 *id, unsigned char *s,
1322 unsigned int ofs, unsigned int len) 1322 unsigned int ofs, unsigned int len)
1323 { 1323 {
1324 unsigned char *p; 1324 unsigned char *p;
1325 1325
1326 ata_id_string(id, s, ofs, len - 1); 1326 ata_id_string(id, s, ofs, len - 1);
1327 1327
1328 p = s + strnlen(s, len - 1); 1328 p = s + strnlen(s, len - 1);
1329 while (p > s && p[-1] == ' ') 1329 while (p > s && p[-1] == ' ')
1330 p--; 1330 p--;
1331 *p = '\0'; 1331 *p = '\0';
1332 } 1332 }
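
IDENTIFY strings store the characters big-endian within each 16-bit word, which is why the high byte is emitted first. A standalone sketch of the swab-and-trim (hypothetical two-word field containing "OK  "):

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* two IDENTIFY words encoding 'O','K',' ',' ', high byte first */
    unsigned short id[2] = { ('O' << 8) | 'K', (' ' << 8) | ' ' };
    char s[5];
    char *p;

    for (int i = 0; i < 2; i++) {
        s[2 * i]     = id[i] >> 8;
        s[2 * i + 1] = id[i] & 0xff;
    }
    s[4] = '\0';

    /* trim trailing spaces, as ata_id_c_string() does */
    p = s + strlen(s);
    while (p > s && p[-1] == ' ')
        p--;
    *p = '\0';

    printf("\"%s\"\n", s);   /* -> "OK" */
    return 0;
}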
1333 1333
1334 static u64 ata_id_n_sectors(const u16 *id) 1334 static u64 ata_id_n_sectors(const u16 *id)
1335 { 1335 {
1336 if (ata_id_has_lba(id)) { 1336 if (ata_id_has_lba(id)) {
1337 if (ata_id_has_lba48(id)) 1337 if (ata_id_has_lba48(id))
1338 return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2); 1338 return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1339 else 1339 else
1340 return ata_id_u32(id, ATA_ID_LBA_CAPACITY); 1340 return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1341 } else { 1341 } else {
1342 if (ata_id_current_chs_valid(id)) 1342 if (ata_id_current_chs_valid(id))
1343 return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] * 1343 return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1344 id[ATA_ID_CUR_SECTORS]; 1344 id[ATA_ID_CUR_SECTORS];
1345 else 1345 else
1346 return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] * 1346 return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1347 id[ATA_ID_SECTORS]; 1347 id[ATA_ID_SECTORS];
1348 } 1348 }
1349 } 1349 }
1350 1350
1351 u64 ata_tf_to_lba48(const struct ata_taskfile *tf) 1351 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1352 { 1352 {
1353 u64 sectors = 0; 1353 u64 sectors = 0;
1354 1354
1355 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40; 1355 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1356 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32; 1356 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1357 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24; 1357 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1358 sectors |= (tf->lbah & 0xff) << 16; 1358 sectors |= (tf->lbah & 0xff) << 16;
1359 sectors |= (tf->lbam & 0xff) << 8; 1359 sectors |= (tf->lbam & 0xff) << 8;
1360 sectors |= (tf->lbal & 0xff); 1360 sectors |= (tf->lbal & 0xff);
1361 1361
1362 return sectors; 1362 return sectors;
1363 } 1363 }
1364 1364
1365 u64 ata_tf_to_lba(const struct ata_taskfile *tf) 1365 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1366 { 1366 {
1367 u64 sectors = 0; 1367 u64 sectors = 0;
1368 1368
1369 sectors |= (tf->device & 0x0f) << 24; 1369 sectors |= (tf->device & 0x0f) << 24;
1370 sectors |= (tf->lbah & 0xff) << 16; 1370 sectors |= (tf->lbah & 0xff) << 16;
1371 sectors |= (tf->lbam & 0xff) << 8; 1371 sectors |= (tf->lbam & 0xff) << 8;
1372 sectors |= (tf->lbal & 0xff); 1372 sectors |= (tf->lbal & 0xff);
1373 1373
1374 return sectors; 1374 return sectors;
1375 } 1375 }
1376 1376
1377 /** 1377 /**
1378 * ata_read_native_max_address - Read native max address 1378 * ata_read_native_max_address - Read native max address
1379 * @dev: target device 1379 * @dev: target device
1380 * @max_sectors: out parameter for the result native max address 1380 * @max_sectors: out parameter for the result native max address
1381 * 1381 *
1382 * Perform an LBA48 or LBA28 native size query upon the device in 1382 * Perform an LBA48 or LBA28 native size query upon the device in
1383 * question. 1383 * question.
1384 * 1384 *
1385 * RETURNS: 1385 * RETURNS:
1386 * 0 on success, -EACCES if command is aborted by the drive. 1386 * 0 on success, -EACCES if command is aborted by the drive.
1387 * -EIO on other errors. 1387 * -EIO on other errors.
1388 */ 1388 */
1389 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors) 1389 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1390 { 1390 {
1391 unsigned int err_mask; 1391 unsigned int err_mask;
1392 struct ata_taskfile tf; 1392 struct ata_taskfile tf;
1393 int lba48 = ata_id_has_lba48(dev->id); 1393 int lba48 = ata_id_has_lba48(dev->id);
1394 1394
1395 ata_tf_init(dev, &tf); 1395 ata_tf_init(dev, &tf);
1396 1396
1397 /* always clear all address registers */ 1397 /* always clear all address registers */
1398 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 1398 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1399 1399
1400 if (lba48) { 1400 if (lba48) {
1401 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT; 1401 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1402 tf.flags |= ATA_TFLAG_LBA48; 1402 tf.flags |= ATA_TFLAG_LBA48;
1403 } else 1403 } else
1404 tf.command = ATA_CMD_READ_NATIVE_MAX; 1404 tf.command = ATA_CMD_READ_NATIVE_MAX;
1405 1405
1406 tf.protocol |= ATA_PROT_NODATA; 1406 tf.protocol |= ATA_PROT_NODATA;
1407 tf.device |= ATA_LBA; 1407 tf.device |= ATA_LBA;
1408 1408
1409 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 1409 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1410 if (err_mask) { 1410 if (err_mask) {
1411 ata_dev_printk(dev, KERN_WARNING, "failed to read native " 1411 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1412 "max address (err_mask=0x%x)\n", err_mask); 1412 "max address (err_mask=0x%x)\n", err_mask);
1413 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) 1413 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1414 return -EACCES; 1414 return -EACCES;
1415 return -EIO; 1415 return -EIO;
1416 } 1416 }
1417 1417
1418 if (lba48) 1418 if (lba48)
1419 *max_sectors = ata_tf_to_lba48(&tf) + 1; 1419 *max_sectors = ata_tf_to_lba48(&tf) + 1;
1420 else 1420 else
1421 *max_sectors = ata_tf_to_lba(&tf) + 1; 1421 *max_sectors = ata_tf_to_lba(&tf) + 1;
1422 if (dev->horkage & ATA_HORKAGE_HPA_SIZE) 1422 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1423 (*max_sectors)--; 1423 (*max_sectors)--;
1424 return 0; 1424 return 0;
1425 } 1425 }

/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive. -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
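
/*
 * Worked example of the register scatter above: for new_sectors ==
 * 0x0123456789ab (after the decrement), the 48-bit value lands as
 *	lbal = 0xab, lbam = 0x89, lbah = 0x67,
 *	hob_lbal = 0x45, hob_lbam = 0x23, hob_lbah = 0x01
 * i.e. low-to-high across the current and hob (high order byte)
 * taskfile registers.
 */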

/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !unlock_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to do? */
	if (native_sectors <= sectors || !unlock_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
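
/*
 * Note: the unlock_hpa decision above is normally driven system-wide by
 * the libata.ignore_hpa=1 module parameter (the ata_ignore_hpa global),
 * or per device via ATA_DFLAG_UNLOCK_HPA.
 */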

/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x "
		"53==0x%04x "
		"63==0x%04x "
		"64==0x%04x "
		"75==0x%04x \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x "
		"81==0x%04x "
		"82==0x%04x "
		"83==0x%04x "
		"84==0x%04x \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
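
/*
 * For reference, the IDENTIFY words dumped above are: 49 capabilities,
 * 53 field validity, 63 MWDMA modes, 64 advanced PIO modes, 75 queue
 * depth, 80/81 ATA major/minor version, 82-84 command sets supported,
 * 88 UDMA modes, 93 hardware reset result.
 */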

/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device. This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However, it's the speeds, not the modes, that
		 * are supported... Note that drivers using the timing API
		 * will get this right anyway.
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
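
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): splitting the packed mask back into its components with
 * ata_unpack_xfermask(), the inverse of ata_pack_xfermask() defined
 * elsewhere in libata-core.c.
 */
static inline void example_show_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	ata_unpack_xfermask(ata_id_xfermask(id),
			    &pio_mask, &mwdma_mask, &udma_mask);
	DPRINTK("pio=0x%lx mwdma=0x%lx udma=0x%lx\n",
		pio_mask, mwdma_mask, udma_mask);
}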

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout. @tf contains
 *	command on entry and result on return. Timeout and error
 *	conditions are reported via return value. No recovery action
 *	is taken after a command times out. It's the caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None. Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given. This breaks
	 * ata_tag_internal() test for those drivers. Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	ata_sff_flush_pio_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here. If we lose, the
		 * following test prevents us from completing the qc
		 * twice. If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}

/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Wrapper around ata_exec_internal_sg() which takes a simple
 *	buffer instead of an sg list.
 *
 *	LOCKING:
 *	None. Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen,
			   unsigned long timeout)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
				    timeout);
}

/**
 *	ata_do_simple_cmd - execute simple internal command
 *	@dev: Device to which the command is sent
 *	@cmd: Opcode to execute
 *
 *	Execute a 'simple' command that consists only of the opcode
 *	'cmd' itself, without filling any other registers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
}
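
/*
 * Usage sketch (hypothetical helper, not part of libata): issuing a
 * parameterless command such as STANDBY IMMEDIATE through the wrapper
 * above. ATA_CMD_STANDBYNOW1 comes from <linux/ata.h>.
 */
static inline void example_standby_drive(struct ata_device *dev)
{
	unsigned int err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);

	if (err_mask)
		ata_dev_printk(dev, KERN_WARNING,
			       "STANDBY IMMEDIATE failed (err_mask=0x%x)\n",
			       err_mask);
}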

/**
 *	ata_pio_need_iordy - check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY. Used
 *	by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Don't set IORDY if we're preparing for reset. IORDY may
	 * lead to controller lock up on certain controllers if the
	 * port is not occupied. See bko#11703 for details.
	 */
	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
		return 0;
	/* Controller doesn't support IORDY. Probably a pointless
	 * check as the caller should know this.
	 */
	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
	if (ata_id_is_cfa(adev->id)
	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
		return 0;
	/* For PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}
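
/*
 * Usage sketch (hypothetical controller code): SFF drivers typically
 * gate an IORDY-enable bit in their timing registers on this helper,
 * roughly:
 *
 *	if (ata_pio_need_iordy(adev))
 *		timing |= CTRL_IORDY_EN;	// CTRL_IORDY_EN is made up
 */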

/**
 *	ata_pio_mask_no_iordy - Return the non IORDY mask
 *	@adev: ATA device
 *
 *	Compute the highest mode possible if we are not using iordy.
 *	Returns the PIO mask of modes usable without IORDY.
 */
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}
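
/*
 * For reference: 3 << ATA_SHIFT_PIO selects PIO0-1 and 7 << ATA_SHIFT_PIO
 * selects PIO0-2 in the packed xfermask, so drives whose fastest
 * non-IORDY cycle time is slower than PIO2's 240ns are capped at PIO1.
 */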

/**
 *	ata_do_dev_read_id - default ID read method
 *	@dev: device
 *	@tf: proposed taskfile
 *	@id: data buffer
 *
 *	Issue the identify taskfile and hand back the buffer containing
 *	identify data. For some RAID controllers and for pre-ATA devices
 *	this function is wrapped or replaced by the driver.
 */
unsigned int ata_do_dev_read_id(struct ata_device *dev,
				struct ata_taskfile *tf, u16 *id)
{
	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
				 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
}

/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device. ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 *	now we abort if we hit that case.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	bool is_semb = class == ATA_DEV_SEMB;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);

retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_SEMB:
		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
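		/* fall through */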
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage. Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers. Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	if (ap->ops->read_id)
		err_mask = ap->ops->read_id(dev, &tf, id);
	else
		err_mask = ata_do_dev_read_id(dev, &tf, id);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_printk(dev, KERN_DEBUG,
				       "NODEV after polling detection\n");
			return -ENOENT;
		}

		if (is_semb) {
			ata_dev_printk(dev, KERN_INFO, "IDENTIFY failed on "
				       "device w/ SEMB sig, disabled\n");
			/* SEMB is not supported yet */
			*p_class = ATA_DEV_SEMB_UNSUP;
			return 0;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class. Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_printk(dev, KERN_DEBUG,
				       "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
		ata_dev_printk(dev, KERN_DEBUG, "dumping IDENTIFY data, "
			       "class=%d may_fallback=%d tried_spinup=%d\n",
			       class, may_fallback, tried_spinup);
		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}

static int ata_do_link_spd_horkage(struct ata_device *dev)
{
	struct ata_link *plink = ata_dev_phys_link(dev);
	u32 target, target_limit;

	if (!sata_scr_valid(plink))
		return 0;

	if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
		target = 1;
	else
		return 0;

	target_limit = (1 << target) - 1;

	/* if already on stricter limit, no need to push further */
	if (plink->sata_spd_limit <= target_limit)
		return 0;

	plink->sata_spd_limit = target_limit;

	/* Request another EH round by returning -EAGAIN if link is
	 * going faster than the target speed. Forward progress is
	 * guaranteed by setting sata_spd_limit to target_limit above.
	 */
	if (plink->sata_spd > target) {
		ata_dev_printk(dev, KERN_INFO,
			       "applying link speed limit horkage to %s\n",
			       sata_spd_string(target));
		return -EAGAIN;
	}
	return 0;
}

static inline u8 ata_dev_knobble(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
		return 0;

	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

static int ata_dev_config_ncq(struct ata_device *dev,
			      char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->link->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
	unsigned int err_mask;
	char *aa_desc = "";

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return 0;
	}
	if (dev->horkage & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return 0;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
	    (ap->flags & ATA_FLAG_FPDMA_AA) &&
	    ata_id_has_fpdma_aa(dev->id)) {
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
					       SATA_FPDMA_AA);
		if (err_mask) {
			ata_dev_printk(dev, KERN_ERR, "failed to enable AA "
				       "(error_mask=0x%x)\n", err_mask);
			if (err_mask != AC_ERR_DEV) {
				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
				return -EIO;
			}
		} else
			aa_desc = ", AA";
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
			 ddepth, aa_desc);
	return 0;
}
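
/*
 * Example of the resulting description strings: with a 31-slot host
 * queue (ATA_MAX_QUEUE - 1) and a drive reporting queue depth 32, desc
 * becomes "NCQ (depth 31/32)", or "NCQ (depth 31/32), AA" once FPDMA
 * auto-activation has been negotiated above.
 */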

/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id. Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned long xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __func__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);
	ata_force_horkage(dev);

	if (dev->horkage & ATA_HORKAGE_DISABLE) {
		ata_dev_printk(dev, KERN_INFO,
			       "unsupported device, disabling\n");
		ata_dev_disable(dev);
		return 0;
	}

	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
	    dev->class == ATA_DEV_ATAPI) {
		ata_dev_printk(dev, KERN_WARNING,
			       "WARNING: ATAPI is %s, device ignored.\n",
			       atapi_enabled ? "not supported with this driver"
					     : "disabled");
		ata_dev_disable(dev);
		return 0;
	}

	rc = ata_do_link_spd_horkage(dev);
	if (rc)
		return rc;

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __func__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;
	dev->multi_count = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			/* CPRM may make this media unusable */
			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2413 ata_dev_printk(dev, KERN_WARNING, 2413 ata_dev_printk(dev, KERN_WARNING,
2414 "supports DRM functions and may " 2414 "supports DRM functions and may "
2415 "not be fully accessable.\n"); 2415 "not be fully accessable.\n");
2416 snprintf(revbuf, 7, "CFA"); 2416 snprintf(revbuf, 7, "CFA");
2417 } else { 2417 } else {
2418 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); 2418 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2419 /* Warn the user if the device has TPM extensions */ 2419 /* Warn the user if the device has TPM extensions */
2420 if (ata_id_has_tpm(id)) 2420 if (ata_id_has_tpm(id))
2421 ata_dev_printk(dev, KERN_WARNING, 2421 ata_dev_printk(dev, KERN_WARNING,
2422 "supports DRM functions and may " 2422 "supports DRM functions and may "
2423 "not be fully accessable.\n"); 2423 "not be fully accessable.\n");
2424 } 2424 }
2425 2425
2426 dev->n_sectors = ata_id_n_sectors(id); 2426 dev->n_sectors = ata_id_n_sectors(id);
2427 2427
2428 /* get current R/W Multiple count setting */ 2428 /* get current R/W Multiple count setting */
2429 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) { 2429 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2430 unsigned int max = dev->id[47] & 0xff; 2430 unsigned int max = dev->id[47] & 0xff;
2431 unsigned int cnt = dev->id[59] & 0xff; 2431 unsigned int cnt = dev->id[59] & 0xff;
2432 /* only recognize/allow powers of two here */ 2432 /* only recognize/allow powers of two here */
2433 if (is_power_of_2(max) && is_power_of_2(cnt)) 2433 if (is_power_of_2(max) && is_power_of_2(cnt))
2434 if (cnt <= max) 2434 if (cnt <= max)
2435 dev->multi_count = cnt; 2435 dev->multi_count = cnt;
2436 } 2436 }
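
The word 47/59 decode above can be read as a standalone helper. A minimal sketch over a raw 256-word IDENTIFY buffer (is_pow2() and decode_multi_count() are made-up names; the kernel uses is_power_of_2() on struct ata_device):

#include <stdbool.h>
#include <stdint.h>

static bool is_pow2(unsigned int v)
{
	return v && !(v & (v - 1));
}

static unsigned int decode_multi_count(const uint16_t *id)
{
	unsigned int max = id[47] & 0xff;	/* max sectors per R/W MULTIPLE */
	unsigned int cnt = id[59] & 0xff;	/* currently configured count */

	/* word 47 high byte must read 0x80; word 59 bit 8 flags validity */
	if ((id[47] >> 8) != 0x80 || !(id[59] & 0x100))
		return 0;

	/* accept only sane powers of two within the advertised maximum */
	return (is_pow2(max) && is_pow2(cnt) && cnt <= max) ? cnt : 0;
}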
2437 2437
2438 if (ata_id_has_lba(id)) { 2438 if (ata_id_has_lba(id)) {
2439 const char *lba_desc; 2439 const char *lba_desc;
2440 char ncq_desc[24]; 2440 char ncq_desc[24];
2441 2441
2442 lba_desc = "LBA"; 2442 lba_desc = "LBA";
2443 dev->flags |= ATA_DFLAG_LBA; 2443 dev->flags |= ATA_DFLAG_LBA;
2444 if (ata_id_has_lba48(id)) { 2444 if (ata_id_has_lba48(id)) {
2445 dev->flags |= ATA_DFLAG_LBA48; 2445 dev->flags |= ATA_DFLAG_LBA48;
2446 lba_desc = "LBA48"; 2446 lba_desc = "LBA48";
2447 2447
2448 if (dev->n_sectors >= (1UL << 28) && 2448 if (dev->n_sectors >= (1UL << 28) &&
2449 ata_id_has_flush_ext(id)) 2449 ata_id_has_flush_ext(id))
2450 dev->flags |= ATA_DFLAG_FLUSH_EXT; 2450 dev->flags |= ATA_DFLAG_FLUSH_EXT;
2451 } 2451 }
2452 2452
2453 /* config NCQ */ 2453 /* config NCQ */
2454 rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); 2454 rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2455 if (rc) 2455 if (rc)
2456 return rc; 2456 return rc;
2457 2457
2458 /* print device info to dmesg */ 2458 /* print device info to dmesg */
2459 if (ata_msg_drv(ap) && print_info) { 2459 if (ata_msg_drv(ap) && print_info) {
2460 ata_dev_printk(dev, KERN_INFO, 2460 ata_dev_printk(dev, KERN_INFO,
2461 "%s: %s, %s, max %s\n", 2461 "%s: %s, %s, max %s\n",
2462 revbuf, modelbuf, fwrevbuf, 2462 revbuf, modelbuf, fwrevbuf,
2463 ata_mode_string(xfer_mask)); 2463 ata_mode_string(xfer_mask));
2464 ata_dev_printk(dev, KERN_INFO, 2464 ata_dev_printk(dev, KERN_INFO,
2465 "%Lu sectors, multi %u: %s %s\n", 2465 "%Lu sectors, multi %u: %s %s\n",
2466 (unsigned long long)dev->n_sectors, 2466 (unsigned long long)dev->n_sectors,
2467 dev->multi_count, lba_desc, ncq_desc); 2467 dev->multi_count, lba_desc, ncq_desc);
2468 } 2468 }
2469 } else { 2469 } else {
2470 /* CHS */ 2470 /* CHS */
2471 2471
2472 /* Default translation */ 2472 /* Default translation */
2473 dev->cylinders = id[1]; 2473 dev->cylinders = id[1];
2474 dev->heads = id[3]; 2474 dev->heads = id[3];
2475 dev->sectors = id[6]; 2475 dev->sectors = id[6];
2476 2476
2477 if (ata_id_current_chs_valid(id)) { 2477 if (ata_id_current_chs_valid(id)) {
2478 /* Current CHS translation is valid. */ 2478 /* Current CHS translation is valid. */
2479 dev->cylinders = id[54]; 2479 dev->cylinders = id[54];
2480 dev->heads = id[55]; 2480 dev->heads = id[55];
2481 dev->sectors = id[56]; 2481 dev->sectors = id[56];
2482 } 2482 }
2483 2483
2484 /* print device info to dmesg */ 2484 /* print device info to dmesg */
2485 if (ata_msg_drv(ap) && print_info) { 2485 if (ata_msg_drv(ap) && print_info) {
2486 ata_dev_printk(dev, KERN_INFO, 2486 ata_dev_printk(dev, KERN_INFO,
2487 "%s: %s, %s, max %s\n", 2487 "%s: %s, %s, max %s\n",
2488 revbuf, modelbuf, fwrevbuf, 2488 revbuf, modelbuf, fwrevbuf,
2489 ata_mode_string(xfer_mask)); 2489 ata_mode_string(xfer_mask));
2490 ata_dev_printk(dev, KERN_INFO, 2490 ata_dev_printk(dev, KERN_INFO,
2491 "%Lu sectors, multi %u, CHS %u/%u/%u\n", 2491 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
2492 (unsigned long long)dev->n_sectors, 2492 (unsigned long long)dev->n_sectors,
2493 dev->multi_count, dev->cylinders, 2493 dev->multi_count, dev->cylinders,
2494 dev->heads, dev->sectors); 2494 dev->heads, dev->sectors);
2495 } 2495 }
2496 } 2496 }
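
The CHS fallback follows the same pattern; a sketch over the same raw buffer (decode_chs() is an invented name, and the real ata_id_current_chs_valid() additionally insists the current-geometry words are nonzero, which is glossed over here):

#include <stdint.h>

struct chs_geom { uint16_t cyl, heads, sectors; };

static struct chs_geom decode_chs(const uint16_t *id)
{
	/* default translation: words 1/3/6 */
	struct chs_geom g = { id[1], id[3], id[6] };

	/* word 53 bit 0: words 54-56 hold the *current* translation */
	if (id[53] & 1) {
		g.cyl     = id[54];
		g.heads   = id[55];
		g.sectors = id[56];
	}
	return g;
}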
2497 2497
2498 dev->cdb_len = 16; 2498 dev->cdb_len = 16;
2499 } 2499 }
2500 2500
2501 /* ATAPI-specific feature tests */ 2501 /* ATAPI-specific feature tests */
2502 else if (dev->class == ATA_DEV_ATAPI) { 2502 else if (dev->class == ATA_DEV_ATAPI) {
2503 const char *cdb_intr_string = ""; 2503 const char *cdb_intr_string = "";
2504 const char *atapi_an_string = ""; 2504 const char *atapi_an_string = "";
2505 const char *dma_dir_string = ""; 2505 const char *dma_dir_string = "";
2506 u32 sntf; 2506 u32 sntf;
2507 2507
2508 rc = atapi_cdb_len(id); 2508 rc = atapi_cdb_len(id);
2509 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { 2509 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2510 if (ata_msg_warn(ap)) 2510 if (ata_msg_warn(ap))
2511 ata_dev_printk(dev, KERN_WARNING, 2511 ata_dev_printk(dev, KERN_WARNING,
2512 "unsupported CDB len\n"); 2512 "unsupported CDB len\n");
2513 rc = -EINVAL; 2513 rc = -EINVAL;
2514 goto err_out_nosup; 2514 goto err_out_nosup;
2515 } 2515 }
2516 dev->cdb_len = (unsigned int) rc; 2516 dev->cdb_len = (unsigned int) rc;
2517 2517
2518 /* Enable ATAPI AN if both the host and device have 2518 /* Enable ATAPI AN if both the host and device have
2519 * the support. If PMP is attached, SNTF is required 2519 * the support. If PMP is attached, SNTF is required
2520 * to enable ATAPI AN to discern between PHY status 2520 * to enable ATAPI AN to discern between PHY status
2521 * changed notifications and ATAPI ANs. 2521 * changed notifications and ATAPI ANs.
2522 */ 2522 */
2523 if (atapi_an && 2523 if (atapi_an &&
2524 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && 2524 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2525 (!sata_pmp_attached(ap) || 2525 (!sata_pmp_attached(ap) ||
2526 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { 2526 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2527 unsigned int err_mask; 2527 unsigned int err_mask;
2528 2528
2529 /* issue SET feature command to turn this on */ 2529 /* issue SET feature command to turn this on */
2530 err_mask = ata_dev_set_feature(dev, 2530 err_mask = ata_dev_set_feature(dev,
2531 SETFEATURES_SATA_ENABLE, SATA_AN); 2531 SETFEATURES_SATA_ENABLE, SATA_AN);
2532 if (err_mask) 2532 if (err_mask)
2533 ata_dev_printk(dev, KERN_ERR, 2533 ata_dev_printk(dev, KERN_ERR,
2534 "failed to enable ATAPI AN " 2534 "failed to enable ATAPI AN "
2535 "(err_mask=0x%x)\n", err_mask); 2535 "(err_mask=0x%x)\n", err_mask);
2536 else { 2536 else {
2537 dev->flags |= ATA_DFLAG_AN; 2537 dev->flags |= ATA_DFLAG_AN;
2538 atapi_an_string = ", ATAPI AN"; 2538 atapi_an_string = ", ATAPI AN";
2539 } 2539 }
2540 } 2540 }
2541 2541
2542 if (ata_id_cdb_intr(dev->id)) { 2542 if (ata_id_cdb_intr(dev->id)) {
2543 dev->flags |= ATA_DFLAG_CDB_INTR; 2543 dev->flags |= ATA_DFLAG_CDB_INTR;
2544 cdb_intr_string = ", CDB intr"; 2544 cdb_intr_string = ", CDB intr";
2545 } 2545 }
2546 2546
2547 if (atapi_dmadir || atapi_id_dmadir(dev->id)) { 2547 if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2548 dev->flags |= ATA_DFLAG_DMADIR; 2548 dev->flags |= ATA_DFLAG_DMADIR;
2549 dma_dir_string = ", DMADIR"; 2549 dma_dir_string = ", DMADIR";
2550 } 2550 }
2551 2551
2552 /* print device info to dmesg */ 2552 /* print device info to dmesg */
2553 if (ata_msg_drv(ap) && print_info) 2553 if (ata_msg_drv(ap) && print_info)
2554 ata_dev_printk(dev, KERN_INFO, 2554 ata_dev_printk(dev, KERN_INFO,
2555 "ATAPI: %s, %s, max %s%s%s%s\n", 2555 "ATAPI: %s, %s, max %s%s%s%s\n",
2556 modelbuf, fwrevbuf, 2556 modelbuf, fwrevbuf,
2557 ata_mode_string(xfer_mask), 2557 ata_mode_string(xfer_mask),
2558 cdb_intr_string, atapi_an_string, 2558 cdb_intr_string, atapi_an_string,
2559 dma_dir_string); 2559 dma_dir_string);
2560 } 2560 }
2561 2561
2562 /* determine max_sectors */ 2562 /* determine max_sectors */
2563 dev->max_sectors = ATA_MAX_SECTORS; 2563 dev->max_sectors = ATA_MAX_SECTORS;
2564 if (dev->flags & ATA_DFLAG_LBA48) 2564 if (dev->flags & ATA_DFLAG_LBA48)
2565 dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2565 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2566 2566
2567 if (!(dev->horkage & ATA_HORKAGE_IPM)) { 2567 if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2568 if (ata_id_has_hipm(dev->id)) 2568 if (ata_id_has_hipm(dev->id))
2569 dev->flags |= ATA_DFLAG_HIPM; 2569 dev->flags |= ATA_DFLAG_HIPM;
2570 if (ata_id_has_dipm(dev->id)) 2570 if (ata_id_has_dipm(dev->id))
2571 dev->flags |= ATA_DFLAG_DIPM; 2571 dev->flags |= ATA_DFLAG_DIPM;
2572 } 2572 }
2573 2573
2574 /* Limit PATA drive on SATA cable bridge transfers to udma5, 2574 /* Limit PATA drive on SATA cable bridge transfers to udma5,
2575 200 sectors */ 2575 200 sectors */
2576 if (ata_dev_knobble(dev)) { 2576 if (ata_dev_knobble(dev)) {
2577 if (ata_msg_drv(ap) && print_info) 2577 if (ata_msg_drv(ap) && print_info)
2578 ata_dev_printk(dev, KERN_INFO, 2578 ata_dev_printk(dev, KERN_INFO,
2579 "applying bridge limits\n"); 2579 "applying bridge limits\n");
2580 dev->udma_mask &= ATA_UDMA5; 2580 dev->udma_mask &= ATA_UDMA5;
2581 dev->max_sectors = ATA_MAX_SECTORS; 2581 dev->max_sectors = ATA_MAX_SECTORS;
2582 } 2582 }
2583 2583
2584 if ((dev->class == ATA_DEV_ATAPI) && 2584 if ((dev->class == ATA_DEV_ATAPI) &&
2585 (atapi_command_packet_set(id) == TYPE_TAPE)) { 2585 (atapi_command_packet_set(id) == TYPE_TAPE)) {
2586 dev->max_sectors = ATA_MAX_SECTORS_TAPE; 2586 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2587 dev->horkage |= ATA_HORKAGE_STUCK_ERR; 2587 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2588 } 2588 }
2589 2589
2590 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128) 2590 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2591 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, 2591 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2592 dev->max_sectors); 2592 dev->max_sectors);
2593 2593
2594 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) { 2594 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2595 dev->horkage |= ATA_HORKAGE_IPM; 2595 dev->horkage |= ATA_HORKAGE_IPM;
2596 2596
2597 /* reset link pm_policy for this port to no pm */ 2597 /* reset link pm_policy for this port to no pm */
2598 ap->pm_policy = MAX_PERFORMANCE; 2598 ap->pm_policy = MAX_PERFORMANCE;
2599 } 2599 }
2600 2600
2601 if (ap->ops->dev_config) 2601 if (ap->ops->dev_config)
2602 ap->ops->dev_config(dev); 2602 ap->ops->dev_config(dev);
2603 2603
2604 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) { 2604 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2605 /* Let the user know. We don't want to disallow opens for 2605 /* Let the user know. We don't want to disallow opens for
2606 rescue purposes, or in case the vendor is just a blithering 2606 rescue purposes, or in case the vendor is just a blithering
2607 idiot. Do this after the dev_config call as some controllers 2607 idiot. Do this after the dev_config call as some controllers
2608 with buggy firmware may want to avoid reporting false device 2608 with buggy firmware may want to avoid reporting false device
2609 bugs */ 2609 bugs */
2610 2610
2611 if (print_info) { 2611 if (print_info) {
2612 ata_dev_printk(dev, KERN_WARNING, 2612 ata_dev_printk(dev, KERN_WARNING,
2613 "Drive reports diagnostics failure. This may indicate a drive\n"); 2613 "Drive reports diagnostics failure. This may indicate a drive\n");
2614 ata_dev_printk(dev, KERN_WARNING, 2614 ata_dev_printk(dev, KERN_WARNING,
2615 "fault or invalid emulation. Contact drive vendor for information.\n"); 2615 "fault or invalid emulation. Contact drive vendor for information.\n");
2616 } 2616 }
2617 } 2617 }
2618 2618
2619 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) { 2619 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2620 ata_dev_printk(dev, KERN_WARNING, "WARNING: device requires " 2620 ata_dev_printk(dev, KERN_WARNING, "WARNING: device requires "
2621 "firmware update to be fully functional.\n"); 2621 "firmware update to be fully functional.\n");
2622 ata_dev_printk(dev, KERN_WARNING, " contact the vendor " 2622 ata_dev_printk(dev, KERN_WARNING, " contact the vendor "
2623 "or visit http://ata.wiki.kernel.org.\n"); 2623 "or visit http://ata.wiki.kernel.org.\n");
2624 } 2624 }
2625 2625
2626 return 0; 2626 return 0;
2627 2627
2628 err_out_nosup: 2628 err_out_nosup:
2629 if (ata_msg_probe(ap)) 2629 if (ata_msg_probe(ap))
2630 ata_dev_printk(dev, KERN_DEBUG, 2630 ata_dev_printk(dev, KERN_DEBUG,
2631 "%s: EXIT, err\n", __func__); 2631 "%s: EXIT, err\n", __func__);
2632 return rc; 2632 return rc;
2633 } 2633 }
2634 2634
2635 /** 2635 /**
2636 * ata_cable_40wire - return 40 wire cable type 2636 * ata_cable_40wire - return 40 wire cable type
2637 * @ap: port 2637 * @ap: port
2638 * 2638 *
2639 * Helper method for drivers which want to hardwire 40 wire cable 2639 * Helper method for drivers which want to hardwire 40 wire cable
2640 * detection. 2640 * detection.
2641 */ 2641 */
2642 2642
2643 int ata_cable_40wire(struct ata_port *ap) 2643 int ata_cable_40wire(struct ata_port *ap)
2644 { 2644 {
2645 return ATA_CBL_PATA40; 2645 return ATA_CBL_PATA40;
2646 } 2646 }
2647 2647
2648 /** 2648 /**
2649 * ata_cable_80wire - return 80 wire cable type 2649 * ata_cable_80wire - return 80 wire cable type
2650 * @ap: port 2650 * @ap: port
2651 * 2651 *
2652 * Helper method for drivers which want to hardwire 80 wire cable 2652 * Helper method for drivers which want to hardwire 80 wire cable
2653 * detection. 2653 * detection.
2654 */ 2654 */
2655 2655
2656 int ata_cable_80wire(struct ata_port *ap) 2656 int ata_cable_80wire(struct ata_port *ap)
2657 { 2657 {
2658 return ATA_CBL_PATA80; 2658 return ATA_CBL_PATA80;
2659 } 2659 }
2660 2660
2661 /** 2661 /**
2662 * ata_cable_unknown - return unknown PATA cable. 2662 * ata_cable_unknown - return unknown PATA cable.
2663 * @ap: port 2663 * @ap: port
2664 * 2664 *
2665 * Helper method for drivers which have no PATA cable detection. 2665 * Helper method for drivers which have no PATA cable detection.
2666 */ 2666 */
2667 2667
2668 int ata_cable_unknown(struct ata_port *ap) 2668 int ata_cable_unknown(struct ata_port *ap)
2669 { 2669 {
2670 return ATA_CBL_PATA_UNK; 2670 return ATA_CBL_PATA_UNK;
2671 } 2671 }
2672 2672
2673 /** 2673 /**
2674 * ata_cable_ignore - return ignored PATA cable. 2674 * ata_cable_ignore - return ignored PATA cable.
2675 * @ap: port 2675 * @ap: port
2676 * 2676 *
2677 * Helper method for drivers which don't use cable type to limit 2677 * Helper method for drivers which don't use cable type to limit
2678 * transfer mode. 2678 * transfer mode.
2679 */ 2679 */
2680 int ata_cable_ignore(struct ata_port *ap) 2680 int ata_cable_ignore(struct ata_port *ap)
2681 { 2681 {
2682 return ATA_CBL_PATA_IGN; 2682 return ATA_CBL_PATA_IGN;
2683 } 2683 }
2684 2684
2685 /** 2685 /**
2686 * ata_cable_sata - return SATA cable type 2686 * ata_cable_sata - return SATA cable type
2687 * @ap: port 2687 * @ap: port
2688 * 2688 *
2689 * Helper method for drivers which have SATA cables 2689 * Helper method for drivers which have SATA cables
2690 */ 2690 */
2691 2691
2692 int ata_cable_sata(struct ata_port *ap) 2692 int ata_cable_sata(struct ata_port *ap)
2693 { 2693 {
2694 return ATA_CBL_SATA; 2694 return ATA_CBL_SATA;
2695 } 2695 }
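
All five helpers exist to be dropped straight into a driver's method table. A hypothetical SFF driver whose board only ever ships 40-wire cabling might wire up (pata_example_ops and its set_piomode hook are invented names; the .inherits/.cable_detect fields are the real libata idiom):

static struct ata_port_operations pata_example_ops = {
	.inherits	= &ata_sff_port_ops,
	.cable_detect	= ata_cable_40wire,	/* hardwired, no probing */
	.set_piomode	= pata_example_set_piomode,
};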
2696 2696
2697 /** 2697 /**
2698 * ata_bus_probe - Reset and probe ATA bus 2698 * ata_bus_probe - Reset and probe ATA bus
2699 * @ap: Bus to probe 2699 * @ap: Bus to probe
2700 * 2700 *
2701 * Master ATA bus probing function. Initiates a hardware-dependent 2701 * Master ATA bus probing function. Initiates a hardware-dependent
2702 * bus reset, then attempts to identify any devices found on 2702 * bus reset, then attempts to identify any devices found on
2703 * the bus. 2703 * the bus.
2704 * 2704 *
2705 * LOCKING: 2705 * LOCKING:
2706 * PCI/etc. bus probe sem. 2706 * PCI/etc. bus probe sem.
2707 * 2707 *
2708 * RETURNS: 2708 * RETURNS:
2709 * Zero on success, negative errno otherwise. 2709 * Zero on success, negative errno otherwise.
2710 */ 2710 */
2711 2711
2712 int ata_bus_probe(struct ata_port *ap) 2712 int ata_bus_probe(struct ata_port *ap)
2713 { 2713 {
2714 unsigned int classes[ATA_MAX_DEVICES]; 2714 unsigned int classes[ATA_MAX_DEVICES];
2715 int tries[ATA_MAX_DEVICES]; 2715 int tries[ATA_MAX_DEVICES];
2716 int rc; 2716 int rc;
2717 struct ata_device *dev; 2717 struct ata_device *dev;
2718 2718
2719 ata_for_each_dev(dev, &ap->link, ALL) 2719 ata_for_each_dev(dev, &ap->link, ALL)
2720 tries[dev->devno] = ATA_PROBE_MAX_TRIES; 2720 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2721 2721
2722 retry: 2722 retry:
2723 ata_for_each_dev(dev, &ap->link, ALL) { 2723 ata_for_each_dev(dev, &ap->link, ALL) {
2724 /* If we issue an SRST then an ATA drive (not ATAPI) 2724 /* If we issue an SRST then an ATA drive (not ATAPI)
2725 * may change configuration and be in PIO0 timing. If 2725 * may change configuration and be in PIO0 timing. If
2726 * we do a hard reset (or are coming from power on) 2726 * we do a hard reset (or are coming from power on)
2727 * this is true for ATA or ATAPI. Until we've set a 2727 * this is true for ATA or ATAPI. Until we've set a
2728 * suitable controller mode we should not touch the 2728 * suitable controller mode we should not touch the
2729 * bus as we may be talking too fast. 2729 * bus as we may be talking too fast.
2730 */ 2730 */
2731 dev->pio_mode = XFER_PIO_0; 2731 dev->pio_mode = XFER_PIO_0;
2732 2732
2733 /* If the controller has a pio mode setup function 2733 /* If the controller has a pio mode setup function
2734 * then use it to set the chipset to rights. Don't 2734 * then use it to set the chipset to rights. Don't
2735 * touch the DMA setup as that will be dealt with when 2735 * touch the DMA setup as that will be dealt with when
2736 * configuring devices. 2736 * configuring devices.
2737 */ 2737 */
2738 if (ap->ops->set_piomode) 2738 if (ap->ops->set_piomode)
2739 ap->ops->set_piomode(ap, dev); 2739 ap->ops->set_piomode(ap, dev);
2740 } 2740 }
2741 2741
2742 /* reset and determine device classes */ 2742 /* reset and determine device classes */
2743 ap->ops->phy_reset(ap); 2743 ap->ops->phy_reset(ap);
2744 2744
2745 ata_for_each_dev(dev, &ap->link, ALL) { 2745 ata_for_each_dev(dev, &ap->link, ALL) {
2746 if (dev->class != ATA_DEV_UNKNOWN) 2746 if (dev->class != ATA_DEV_UNKNOWN)
2747 classes[dev->devno] = dev->class; 2747 classes[dev->devno] = dev->class;
2748 else 2748 else
2749 classes[dev->devno] = ATA_DEV_NONE; 2749 classes[dev->devno] = ATA_DEV_NONE;
2750 2750
2751 dev->class = ATA_DEV_UNKNOWN; 2751 dev->class = ATA_DEV_UNKNOWN;
2752 } 2752 }
2753 2753
2754 /* read IDENTIFY page and configure devices. We have to do the identify 2754 /* read IDENTIFY page and configure devices. We have to do the identify
2755 specific sequence bass-ackwards so that PDIAG- is released by 2755 specific sequence bass-ackwards so that PDIAG- is released by
2756 the slave device */ 2756 the slave device */
2757 2757
2758 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) { 2758 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2759 if (tries[dev->devno]) 2759 if (tries[dev->devno])
2760 dev->class = classes[dev->devno]; 2760 dev->class = classes[dev->devno];
2761 2761
2762 if (!ata_dev_enabled(dev)) 2762 if (!ata_dev_enabled(dev))
2763 continue; 2763 continue;
2764 2764
2765 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET, 2765 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2766 dev->id); 2766 dev->id);
2767 if (rc) 2767 if (rc)
2768 goto fail; 2768 goto fail;
2769 } 2769 }
2770 2770
2771 /* Now ask for the cable type as PDIAG- should have been released */ 2771 /* Now ask for the cable type as PDIAG- should have been released */
2772 if (ap->ops->cable_detect) 2772 if (ap->ops->cable_detect)
2773 ap->cbl = ap->ops->cable_detect(ap); 2773 ap->cbl = ap->ops->cable_detect(ap);
2774 2774
2775 /* We may have SATA bridge glue hiding here irrespective of 2775 /* We may have SATA bridge glue hiding here irrespective of
2776 * the reported cable types and sensed types. When SATA 2776 * the reported cable types and sensed types. When SATA
2777 * drives indicate we have a bridge, we don't know which end 2777 * drives indicate we have a bridge, we don't know which end
2778 * of the link the bridge is, which is a problem. 2778 * of the link the bridge is, which is a problem.
2779 */ 2779 */
2780 ata_for_each_dev(dev, &ap->link, ENABLED) 2780 ata_for_each_dev(dev, &ap->link, ENABLED)
2781 if (ata_id_is_sata(dev->id)) 2781 if (ata_id_is_sata(dev->id))
2782 ap->cbl = ATA_CBL_SATA; 2782 ap->cbl = ATA_CBL_SATA;
2783 2783
2784 /* After the identify sequence we can now set up the devices. We do 2784 /* After the identify sequence we can now set up the devices. We do
2785 this in the normal order so that the user doesn't get confused */ 2785 this in the normal order so that the user doesn't get confused */
2786 2786
2787 ata_for_each_dev(dev, &ap->link, ENABLED) { 2787 ata_for_each_dev(dev, &ap->link, ENABLED) {
2788 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO; 2788 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2789 rc = ata_dev_configure(dev); 2789 rc = ata_dev_configure(dev);
2790 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO; 2790 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2791 if (rc) 2791 if (rc)
2792 goto fail; 2792 goto fail;
2793 } 2793 }
2794 2794
2795 /* configure transfer mode */ 2795 /* configure transfer mode */
2796 rc = ata_set_mode(&ap->link, &dev); 2796 rc = ata_set_mode(&ap->link, &dev);
2797 if (rc) 2797 if (rc)
2798 goto fail; 2798 goto fail;
2799 2799
2800 ata_for_each_dev(dev, &ap->link, ENABLED) 2800 ata_for_each_dev(dev, &ap->link, ENABLED)
2801 return 0; 2801 return 0;
2802 2802
2803 return -ENODEV; 2803 return -ENODEV;
2804 2804
2805 fail: 2805 fail:
2806 tries[dev->devno]--; 2806 tries[dev->devno]--;
2807 2807
2808 switch (rc) { 2808 switch (rc) {
2809 case -EINVAL: 2809 case -EINVAL:
2810 /* eeek, something went very wrong, give up */ 2810 /* eeek, something went very wrong, give up */
2811 tries[dev->devno] = 0; 2811 tries[dev->devno] = 0;
2812 break; 2812 break;
2813 2813
2814 case -ENODEV: 2814 case -ENODEV:
2815 /* give it just one more chance */ 2815 /* give it just one more chance */
2816 tries[dev->devno] = min(tries[dev->devno], 1); 2816 tries[dev->devno] = min(tries[dev->devno], 1);
2817 case -EIO: /* fall through from -ENODEV */ 2817 case -EIO: /* fall through from -ENODEV */
2818 if (tries[dev->devno] == 1) { 2818 if (tries[dev->devno] == 1) {
2819 /* This is the last chance, better to slow 2819 /* This is the last chance, better to slow
2820 * down than lose it. 2820 * down than lose it.
2821 */ 2821 */
2822 sata_down_spd_limit(&ap->link, 0); 2822 sata_down_spd_limit(&ap->link, 0);
2823 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 2823 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2824 } 2824 }
2825 } 2825 }
2826 2826
2827 if (!tries[dev->devno]) 2827 if (!tries[dev->devno])
2828 ata_dev_disable(dev); 2828 ata_dev_disable(dev);
2829 2829
2830 goto retry; 2830 goto retry;
2831 } 2831 }
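
The retry accounting in the fail path is easiest to follow as a trace. Assuming ATA_PROBE_MAX_TRIES is 3 and IDENTIFY keeps failing with -EIO:

	1st failure: tries 3 -> 2, plain retry from the top
	2nd failure: tries 2 -> 1, last chance: sata_down_spd_limit()
	             plus ATA_DNXFER_PIO before retrying
	3rd failure: tries 1 -> 0, ata_dev_disable()

-EINVAL zeroes the count immediately (give up), while -ENODEV clamps it to a single further attempt and then shares the -EIO slow-down path.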
2832 2832
2833 /** 2833 /**
2834 * sata_print_link_status - Print SATA link status 2834 * sata_print_link_status - Print SATA link status
2835 * @link: SATA link to printk link status about 2835 * @link: SATA link to printk link status about
2836 * 2836 *
2837 * This function prints link speed and status of a SATA link. 2837 * This function prints link speed and status of a SATA link.
2838 * 2838 *
2839 * LOCKING: 2839 * LOCKING:
2840 * None. 2840 * None.
2841 */ 2841 */
2842 static void sata_print_link_status(struct ata_link *link) 2842 static void sata_print_link_status(struct ata_link *link)
2843 { 2843 {
2844 u32 sstatus, scontrol, tmp; 2844 u32 sstatus, scontrol, tmp;
2845 2845
2846 if (sata_scr_read(link, SCR_STATUS, &sstatus)) 2846 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2847 return; 2847 return;
2848 sata_scr_read(link, SCR_CONTROL, &scontrol); 2848 sata_scr_read(link, SCR_CONTROL, &scontrol);
2849 2849
2850 if (ata_phys_link_online(link)) { 2850 if (ata_phys_link_online(link)) {
2851 tmp = (sstatus >> 4) & 0xf; 2851 tmp = (sstatus >> 4) & 0xf;
2852 ata_link_printk(link, KERN_INFO, 2852 ata_link_printk(link, KERN_INFO,
2853 "SATA link up %s (SStatus %X SControl %X)\n", 2853 "SATA link up %s (SStatus %X SControl %X)\n",
2854 sata_spd_string(tmp), sstatus, scontrol); 2854 sata_spd_string(tmp), sstatus, scontrol);
2855 } else { 2855 } else {
2856 ata_link_printk(link, KERN_INFO, 2856 ata_link_printk(link, KERN_INFO,
2857 "SATA link down (SStatus %X SControl %X)\n", 2857 "SATA link down (SStatus %X SControl %X)\n",
2858 sstatus, scontrol); 2858 sstatus, scontrol);
2859 } 2859 }
2860 } 2860 }
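
This is where the familiar boot-log line comes from. For a gen2 link, SStatus 0x123 decodes as DET=3 (device present, phy online), SPD=2 and IPM=1, so (sstatus >> 4) & 0xf is 2 and the output reads:

	ata1: SATA link up 3.0 Gbps (SStatus 123 SControl 300)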
2861 2861
2862 /** 2862 /**
2863 * ata_dev_pair - return other device on cable 2863 * ata_dev_pair - return other device on cable
2864 * @adev: device 2864 * @adev: device
2865 * 2865 *
2866 * Obtain the other device on the same cable; if none is 2866 * Obtain the other device on the same cable; if none is
2867 * present, NULL is returned. 2867 * present, NULL is returned.
2868 */ 2868 */
2869 2869
2870 struct ata_device *ata_dev_pair(struct ata_device *adev) 2870 struct ata_device *ata_dev_pair(struct ata_device *adev)
2871 { 2871 {
2872 struct ata_link *link = adev->link; 2872 struct ata_link *link = adev->link;
2873 struct ata_device *pair = &link->device[1 - adev->devno]; 2873 struct ata_device *pair = &link->device[1 - adev->devno];
2874 if (!ata_dev_enabled(pair)) 2874 if (!ata_dev_enabled(pair))
2875 return NULL; 2875 return NULL;
2876 return pair; 2876 return pair;
2877 } 2877 }
2878 2878
2879 /** 2879 /**
2880 * sata_down_spd_limit - adjust SATA spd limit downward 2880 * sata_down_spd_limit - adjust SATA spd limit downward
2881 * @link: Link to adjust SATA spd limit for 2881 * @link: Link to adjust SATA spd limit for
2882 * @spd_limit: Additional limit 2882 * @spd_limit: Additional limit
2883 * 2883 *
2884 * Adjust SATA spd limit of @link downward. Note that this 2884 * Adjust SATA spd limit of @link downward. Note that this
2885 * function only adjusts the limit. The change must be applied 2885 * function only adjusts the limit. The change must be applied
2886 * using sata_set_spd(). 2886 * using sata_set_spd().
2887 * 2887 *
2888 * If @spd_limit is non-zero, the speed is limited to equal to or 2888 * If @spd_limit is non-zero, the speed is limited to equal to or
2889 * lower than @spd_limit if such speed is supported. If 2889 * lower than @spd_limit if such speed is supported. If
2890 * @spd_limit is slower than any supported speed, only the lowest 2890 * @spd_limit is slower than any supported speed, only the lowest
2891 * supported speed is allowed. 2891 * supported speed is allowed.
2892 * 2892 *
2893 * LOCKING: 2893 * LOCKING:
2894 * Inherited from caller. 2894 * Inherited from caller.
2895 * 2895 *
2896 * RETURNS: 2896 * RETURNS:
2897 * 0 on success, negative errno on failure 2897 * 0 on success, negative errno on failure
2898 */ 2898 */
2899 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit) 2899 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
2900 { 2900 {
2901 u32 sstatus, spd, mask; 2901 u32 sstatus, spd, mask;
2902 int rc, bit; 2902 int rc, bit;
2903 2903
2904 if (!sata_scr_valid(link)) 2904 if (!sata_scr_valid(link))
2905 return -EOPNOTSUPP; 2905 return -EOPNOTSUPP;
2906 2906
2907 /* If SCR can be read, use it to determine the current SPD. 2907 /* If SCR can be read, use it to determine the current SPD.
2908 * If not, use cached value in link->sata_spd. 2908 * If not, use cached value in link->sata_spd.
2909 */ 2909 */
2910 rc = sata_scr_read(link, SCR_STATUS, &sstatus); 2910 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2911 if (rc == 0 && ata_sstatus_online(sstatus)) 2911 if (rc == 0 && ata_sstatus_online(sstatus))
2912 spd = (sstatus >> 4) & 0xf; 2912 spd = (sstatus >> 4) & 0xf;
2913 else 2913 else
2914 spd = link->sata_spd; 2914 spd = link->sata_spd;
2915 2915
2916 mask = link->sata_spd_limit; 2916 mask = link->sata_spd_limit;
2917 if (mask <= 1) 2917 if (mask <= 1)
2918 return -EINVAL; 2918 return -EINVAL;
2919 2919
2920 /* unconditionally mask off the highest bit */ 2920 /* unconditionally mask off the highest bit */
2921 bit = fls(mask) - 1; 2921 bit = fls(mask) - 1;
2922 mask &= ~(1 << bit); 2922 mask &= ~(1 << bit);
2923 2923
2924 /* Mask off all speeds higher than or equal to the current 2924 /* Mask off all speeds higher than or equal to the current
2925 * one. Force 1.5Gbps if current SPD is not available. 2925 * one. Force 1.5Gbps if current SPD is not available.
2926 */ 2926 */
2927 if (spd > 1) 2927 if (spd > 1)
2928 mask &= (1 << (spd - 1)) - 1; 2928 mask &= (1 << (spd - 1)) - 1;
2929 else 2929 else
2930 mask &= 1; 2930 mask &= 1;
2931 2931
2932 /* were we already at the bottom? */ 2932 /* were we already at the bottom? */
2933 if (!mask) 2933 if (!mask)
2934 return -EINVAL; 2934 return -EINVAL;
2935 2935
2936 if (spd_limit) { 2936 if (spd_limit) {
2937 if (mask & ((1 << spd_limit) - 1)) 2937 if (mask & ((1 << spd_limit) - 1))
2938 mask &= (1 << spd_limit) - 1; 2938 mask &= (1 << spd_limit) - 1;
2939 else { 2939 else {
2940 bit = ffs(mask) - 1; 2940 bit = ffs(mask) - 1;
2941 mask = 1 << bit; 2941 mask = 1 << bit;
2942 } 2942 }
2943 } 2943 }
2944 2944
2945 link->sata_spd_limit = mask; 2945 link->sata_spd_limit = mask;
2946 2946
2947 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n", 2947 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2948 sata_spd_string(fls(mask))); 2948 sata_spd_string(fls(mask)));
2949 2949
2950 return 0; 2950 return 0;
2951 } 2951 }
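
A worked pass through the mask arithmetic, where bit n set means gen n+1 is allowed. Assume a link limited to gen1|gen2 (mask 0b011) currently running at gen2 (spd 2), with no extra @spd_limit:

	drop the highest allowed gen:  mask &= ~(1 << (fls(mask) - 1)) -> 0b001
	drop gens >= the current one:  mask &= (1 << (spd - 1)) - 1    -> 0b001

link->sata_spd_limit ends up 0b001 and the warning prints "limiting SATA link speed to 1.5 Gbps"; the new ceiling only bites after sata_set_spd() and a hardreset.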
2952 2952
2953 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol) 2953 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2954 { 2954 {
2955 struct ata_link *host_link = &link->ap->link; 2955 struct ata_link *host_link = &link->ap->link;
2956 u32 limit, target, spd; 2956 u32 limit, target, spd;
2957 2957
2958 limit = link->sata_spd_limit; 2958 limit = link->sata_spd_limit;
2959 2959
2960 /* Don't configure downstream link faster than upstream link. 2960 /* Don't configure downstream link faster than upstream link.
2961 * It doesn't speed up anything and some PMPs choke on such 2961 * It doesn't speed up anything and some PMPs choke on such
2962 * configuration. 2962 * configuration.
2963 */ 2963 */
2964 if (!ata_is_host_link(link) && host_link->sata_spd) 2964 if (!ata_is_host_link(link) && host_link->sata_spd)
2965 limit &= (1 << host_link->sata_spd) - 1; 2965 limit &= (1 << host_link->sata_spd) - 1;
2966 2966
2967 if (limit == UINT_MAX) 2967 if (limit == UINT_MAX)
2968 target = 0; 2968 target = 0;
2969 else 2969 else
2970 target = fls(limit); 2970 target = fls(limit);
2971 2971
2972 spd = (*scontrol >> 4) & 0xf; 2972 spd = (*scontrol >> 4) & 0xf;
2973 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4); 2973 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2974 2974
2975 return spd != target; 2975 return spd != target;
2976 } 2976 }
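
SControl carries the negotiated-speed ceiling in bits 7:4 (0 meaning unrestricted). Taking the familiar register value 0x300 (IPM=3, SPD=0, DET=0) and a gen2 limit, so target is 2:

	spd       = (0x300 >> 4) & 0xf         -> 0
	*scontrol = (0x300 & ~0xf0) | (2 << 4) -> 0x320

spd != target, so the caller learns a hardreset is required for the new ceiling to take effect.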
2977 2977
2978 /** 2978 /**
2979 * sata_set_spd_needed - is SATA spd configuration needed 2979 * sata_set_spd_needed - is SATA spd configuration needed
2980 * @link: Link in question 2980 * @link: Link in question
2981 * 2981 *
2982 * Test whether the spd limit in SControl matches 2982 * Test whether the spd limit in SControl matches
2983 * @link->sata_spd_limit. This function is used to determine 2983 * @link->sata_spd_limit. This function is used to determine
2984 * whether hardreset is necessary to apply SATA spd 2984 * whether hardreset is necessary to apply SATA spd
2985 * configuration. 2985 * configuration.
2986 * 2986 *
2987 * LOCKING: 2987 * LOCKING:
2988 * Inherited from caller. 2988 * Inherited from caller.
2989 * 2989 *
2990 * RETURNS: 2990 * RETURNS:
2991 * 1 if SATA spd configuration is needed, 0 otherwise. 2991 * 1 if SATA spd configuration is needed, 0 otherwise.
2992 */ 2992 */
2993 static int sata_set_spd_needed(struct ata_link *link) 2993 static int sata_set_spd_needed(struct ata_link *link)
2994 { 2994 {
2995 u32 scontrol; 2995 u32 scontrol;
2996 2996
2997 if (sata_scr_read(link, SCR_CONTROL, &scontrol)) 2997 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2998 return 1; 2998 return 1;
2999 2999
3000 return __sata_set_spd_needed(link, &scontrol); 3000 return __sata_set_spd_needed(link, &scontrol);
3001 } 3001 }
3002 3002
3003 /** 3003 /**
3004 * sata_set_spd - set SATA spd according to spd limit 3004 * sata_set_spd - set SATA spd according to spd limit
3005 * @link: Link to set SATA spd for 3005 * @link: Link to set SATA spd for
3006 * 3006 *
3007 * Set SATA spd of @link according to sata_spd_limit. 3007 * Set SATA spd of @link according to sata_spd_limit.
3008 * 3008 *
3009 * LOCKING: 3009 * LOCKING:
3010 * Inherited from caller. 3010 * Inherited from caller.
3011 * 3011 *
3012 * RETURNS: 3012 * RETURNS:
3013 * 0 if spd doesn't need to be changed, 1 if spd has been 3013 * 0 if spd doesn't need to be changed, 1 if spd has been
3014 * changed. Negative errno if SCR registers are inaccessible. 3014 * changed. Negative errno if SCR registers are inaccessible.
3015 */ 3015 */
3016 int sata_set_spd(struct ata_link *link) 3016 int sata_set_spd(struct ata_link *link)
3017 { 3017 {
3018 u32 scontrol; 3018 u32 scontrol;
3019 int rc; 3019 int rc;
3020 3020
3021 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3021 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3022 return rc; 3022 return rc;
3023 3023
3024 if (!__sata_set_spd_needed(link, &scontrol)) 3024 if (!__sata_set_spd_needed(link, &scontrol))
3025 return 0; 3025 return 0;
3026 3026
3027 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3027 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3028 return rc; 3028 return rc;
3029 3029
3030 return 1; 3030 return 1;
3031 } 3031 }
3032 3032
3033 /* 3033 /*
3034 * This mode timing computation functionality is ported over from 3034 * This mode timing computation functionality is ported over from
3035 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik 3035 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
3036 */ 3036 */
3037 /* 3037 /*
3038 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). 3038 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
3039 * These were taken from ATA/ATAPI-6 standard, rev 0a, except 3039 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
3040 * for UDMA6, which is currently supported only by Maxtor drives. 3040 * for UDMA6, which is currently supported only by Maxtor drives.
3041 * 3041 *
3042 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0. 3042 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
3043 */ 3043 */
3044 3044
3045 static const struct ata_timing ata_timing[] = { 3045 static const struct ata_timing ata_timing[] = {
3046 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */ 3046 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */
3047 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 }, 3047 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 },
3048 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 }, 3048 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 },
3049 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 }, 3049 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 },
3050 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 }, 3050 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 },
3051 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 }, 3051 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 },
3052 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 }, 3052 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 },
3053 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 }, 3053 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 },
3054 3054
3055 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 }, 3055 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 },
3056 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 }, 3056 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 },
3057 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 }, 3057 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 },
3058 3058
3059 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 }, 3059 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 },
3060 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 }, 3060 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 },
3061 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 }, 3061 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 },
3062 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 }, 3062 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 },
3063 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 }, 3063 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 },
3064 3064
3065 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */ 3065 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */
3066 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 }, 3066 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 },
3067 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 }, 3067 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 },
3068 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 }, 3068 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 },
3069 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 }, 3069 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 },
3070 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 }, 3070 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 },
3071 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 }, 3071 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 },
3072 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 }, 3072 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 },
3073 3073
3074 { 0xFF } 3074 { 0xFF }
3075 }; 3075 };
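
As a reading aid, the positional initializers above follow the struct ata_timing field order:

	{ mode, setup, act8b, rec8b, cyc8b, active, recover,
	  dmack_hold, cycle, udma }

with every field after mode given in nanoseconds.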
3076 3076
3077 #define ENOUGH(v, unit) (((v)-1)/(unit)+1) 3077 #define ENOUGH(v, unit) (((v)-1)/(unit)+1)
3078 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0) 3078 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
3079 3079
3080 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) 3080 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
3081 { 3081 {
3082 q->setup = EZ(t->setup * 1000, T); 3082 q->setup = EZ(t->setup * 1000, T);
3083 q->act8b = EZ(t->act8b * 1000, T); 3083 q->act8b = EZ(t->act8b * 1000, T);
3084 q->rec8b = EZ(t->rec8b * 1000, T); 3084 q->rec8b = EZ(t->rec8b * 1000, T);
3085 q->cyc8b = EZ(t->cyc8b * 1000, T); 3085 q->cyc8b = EZ(t->cyc8b * 1000, T);
3086 q->active = EZ(t->active * 1000, T); 3086 q->active = EZ(t->active * 1000, T);
3087 q->recover = EZ(t->recover * 1000, T); 3087 q->recover = EZ(t->recover * 1000, T);
3088 q->dmack_hold = EZ(t->dmack_hold * 1000, T); 3088 q->dmack_hold = EZ(t->dmack_hold * 1000, T);
3089 q->cycle = EZ(t->cycle * 1000, T); 3089 q->cycle = EZ(t->cycle * 1000, T);
3090 q->udma = EZ(t->udma * 1000, UT); 3090 q->udma = EZ(t->udma * 1000, UT);
3091 } 3091 }
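
ENOUGH() is a ceiling division for nonzero values and EZ() additionally keeps an unspecified (zero) field at zero clocks. A worked example with assumed numbers: a 70 ns setup time against a 33 MHz bus clock, where a caller passes the clock period as 1000000000 / 33333 = 30000 (the * 1000 above brings both sides to the same unit):

	q->setup = EZ(70 * 1000, 30000)
	         = (70000 - 1) / 30000 + 1
	         = 3 clocks

so timings are always rounded up, never truncated below spec.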
3092 3092
3093 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, 3093 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
3094 struct ata_timing *m, unsigned int what) 3094 struct ata_timing *m, unsigned int what)
3095 { 3095 {
3096 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup); 3096 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
3097 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b); 3097 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
3098 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b); 3098 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
3099 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b); 3099 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
3100 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active); 3100 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
3101 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover); 3101 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
3102 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold); 3102 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
3103 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle); 3103 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
3104 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); 3104 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
3105 } 3105 }
3106 3106
3107 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode) 3107 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
3108 { 3108 {
3109 const struct ata_timing *t = ata_timing; 3109 const struct ata_timing *t = ata_timing;
3110 3110
3111 while (xfer_mode > t->mode) 3111 while (xfer_mode > t->mode)
3112 t++; 3112 t++;
3113 3113
3114 if (xfer_mode == t->mode) 3114 if (xfer_mode == t->mode)
3115 return t; 3115 return t;
3116 return NULL; 3116 return NULL;
3117 } 3117 }
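
Because ata_timing[] is sorted by mode number and capped by the 0xFF sentinel, the scan terminates for any input and unknown modes yield NULL. A usage sketch:

	const struct ata_timing *t = ata_timing_find_mode(XFER_UDMA_5);

	if (t)	/* per the table above, t->udma == 20 (ns) */
		pr_debug("udma5 cycle: %u ns\n", t->udma);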
3118 3118
3119 int ata_timing_compute(struct ata_device *adev, unsigned short speed, 3119 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
3120 struct ata_timing *t, int T, int UT) 3120 struct ata_timing *t, int T, int UT)
3121 { 3121 {
3122 const u16 *id = adev->id; 3122 const u16 *id = adev->id;
3123 const struct ata_timing *s; 3123 const struct ata_timing *s;
3124 struct ata_timing p; 3124 struct ata_timing p;
3125 3125
3126 /* 3126 /*
3127 * Find the mode. 3127 * Find the mode.
3128 */ 3128 */
3129 3129
3130 if (!(s = ata_timing_find_mode(speed))) 3130 if (!(s = ata_timing_find_mode(speed)))
3131 return -EINVAL; 3131 return -EINVAL;
3132 3132
3133 memcpy(t, s, sizeof(*s)); 3133 memcpy(t, s, sizeof(*s));
3134 3134
3135 /* 3135 /*
3136 * If the drive is an EIDE drive, it can tell us it needs extended 3136 * If the drive is an EIDE drive, it can tell us it needs extended
3137 * PIO/MW_DMA cycle timing. 3137 * PIO/MW_DMA cycle timing.
3138 */ 3138 */
3139 3139
3140 if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ 3140 if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
3141 memset(&p, 0, sizeof(p)); 3141 memset(&p, 0, sizeof(p));
3142 3142
3143 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) { 3143 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
3144 if (speed <= XFER_PIO_2) 3144 if (speed <= XFER_PIO_2)
3145 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO]; 3145 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
3146 else if ((speed <= XFER_PIO_4) || 3146 else if ((speed <= XFER_PIO_4) ||
3147 (speed == XFER_PIO_5 && !ata_id_is_cfa(id))) 3147 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
3148 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY]; 3148 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
3149 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) 3149 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
3150 p.cycle = id[ATA_ID_EIDE_DMA_MIN]; 3150 p.cycle = id[ATA_ID_EIDE_DMA_MIN];
3151 3151
3152 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B); 3152 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3153 } 3153 }
3154 3154
3155 /* 3155 /*
3156 * Convert the timing to bus clock counts. 3156 * Convert the timing to bus clock counts.
3157 */ 3157 */
3158 3158
3159 ata_timing_quantize(t, t, T, UT); 3159 ata_timing_quantize(t, t, T, UT);
3160 3160
3161 /* 3161 /*
3162 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, 3162 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3163 * S.M.A.R.T. and some other commands. We have to ensure that the 3163 * S.M.A.R.T. and some other commands. We have to ensure that the
3164 * DMA cycle timing is no faster than the fastest PIO timing. 3164 * DMA cycle timing is no faster than the fastest PIO timing.
3165 */ 3165 */
3166 3166
3167 if (speed > XFER_PIO_6) { 3167 if (speed > XFER_PIO_6) {
3168 ata_timing_compute(adev, adev->pio_mode, &p, T, UT); 3168 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3169 ata_timing_merge(&p, t, t, ATA_TIMING_ALL); 3169 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3170 } 3170 }
3171 3171
3172 /* 3172 /*
3173 * Lengthen active & recovery time so that cycle time is correct. 3173 * Lengthen active & recovery time so that cycle time is correct.
3174 */ 3174 */
3175 3175
3176 if (t->act8b + t->rec8b < t->cyc8b) { 3176 if (t->act8b + t->rec8b < t->cyc8b) {
3177 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2; 3177 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3178 t->rec8b = t->cyc8b - t->act8b; 3178 t->rec8b = t->cyc8b - t->act8b;
3179 } 3179 }
3180 3180
3181 if (t->active + t->recover < t->cycle) { 3181 if (t->active + t->recover < t->cycle) {
3182 t->active += (t->cycle - (t->active + t->recover)) / 2; 3182 t->active += (t->cycle - (t->active + t->recover)) / 2;
3183 t->recover = t->cycle - t->active; 3183 t->recover = t->cycle - t->active;
3184 } 3184 }
3185 3185
3186 /* In a few cases quantisation may produce enough errors to 3186 /* In a few cases quantisation may produce enough errors to
3187 leave t->cycle too low for the sum of active and recovery 3187 leave t->cycle too low for the sum of active and recovery
3188 if so we must correct this */ 3188 if so we must correct this */
3189 if (t->active + t->recover > t->cycle) 3189 if (t->active + t->recover > t->cycle)
3190 t->cycle = t->active + t->recover; 3190 t->cycle = t->active + t->recover;
3191 3191
3192 return 0; 3192 return 0;
3193 } 3193 }
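
The two lengthening blocks only ever stretch times. Suppose quantization produced active = 2, recover = 2 but cycle = 6 clocks:

	active  += (6 - (2 + 2)) / 2  -> 3
	recover  = 6 - 3              -> 3

and if rounding instead left active + recover above cycle, the final check raises cycle to the sum, so quantization error always lands on the slow, safe side.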
3194 3194
3195 /** 3195 /**
3196 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration 3196 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3197 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine. 3197 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3198 * @cycle: cycle duration in ns 3198 * @cycle: cycle duration in ns
3199 * 3199 *
3200 * Return matching xfer mode for @cycle. The returned mode is of 3200 * Return matching xfer mode for @cycle. The returned mode is of
3201 * the transfer type specified by @xfer_shift. If @cycle is too 3201 * the transfer type specified by @xfer_shift. If @cycle is too
3202 * slow for @xfer_shift, 0xff is returned. If @cycle is faster 3202 * slow for @xfer_shift, 0xff is returned. If @cycle is faster
3203 * than the fastest known mode, the fastest mode is returned. 3203 * than the fastest known mode, the fastest mode is returned.
3204 * 3204 *
3205 * LOCKING: 3205 * LOCKING:
3206 * None. 3206 * None.
3207 * 3207 *
3208 * RETURNS: 3208 * RETURNS:
3209 * Matching xfer_mode, 0xff if no match found. 3209 * Matching xfer_mode, 0xff if no match found.
3210 */ 3210 */
3211 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle) 3211 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3212 { 3212 {
3213 u8 base_mode = 0xff, last_mode = 0xff; 3213 u8 base_mode = 0xff, last_mode = 0xff;
3214 const struct ata_xfer_ent *ent; 3214 const struct ata_xfer_ent *ent;
3215 const struct ata_timing *t; 3215 const struct ata_timing *t;
3216 3216
3217 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 3217 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3218 if (ent->shift == xfer_shift) 3218 if (ent->shift == xfer_shift)
3219 base_mode = ent->base; 3219 base_mode = ent->base;
3220 3220
3221 for (t = ata_timing_find_mode(base_mode); 3221 for (t = ata_timing_find_mode(base_mode);
3222 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) { 3222 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3223 unsigned short this_cycle; 3223 unsigned short this_cycle;
3224 3224
3225 switch (xfer_shift) { 3225 switch (xfer_shift) {
3226 case ATA_SHIFT_PIO: 3226 case ATA_SHIFT_PIO:
3227 case ATA_SHIFT_MWDMA: 3227 case ATA_SHIFT_MWDMA:
3228 this_cycle = t->cycle; 3228 this_cycle = t->cycle;
3229 break; 3229 break;
3230 case ATA_SHIFT_UDMA: 3230 case ATA_SHIFT_UDMA:
3231 this_cycle = t->udma; 3231 this_cycle = t->udma;
3232 break; 3232 break;
3233 default: 3233 default:
3234 return 0xff; 3234 return 0xff;
3235 } 3235 }
3236 3236
3237 if (cycle > this_cycle) 3237 if (cycle > this_cycle)
3238 break; 3238 break;
3239 3239
3240 last_mode = t->mode; 3240 last_mode = t->mode;
3241 } 3241 }
3242 3242
3243 return last_mode; 3243 return last_mode;
3244 } 3244 }
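
Sample lookups against the UDMA rows of the timing table above:

	u8 m;

	m = ata_timing_cycle2mode(ATA_SHIFT_UDMA, 60);	/* XFER_UDMA_2 (60 ns)  */
	m = ata_timing_cycle2mode(ATA_SHIFT_UDMA, 40);	/* XFER_UDMA_3 (45 ns)  */
	m = ata_timing_cycle2mode(ATA_SHIFT_UDMA, 200);	/* 0xff: slower than UDMA0 */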
3245 3245
3246 /** 3246 /**
3247 * ata_down_xfermask_limit - adjust dev xfer masks downward 3247 * ata_down_xfermask_limit - adjust dev xfer masks downward
3248 * @dev: Device to adjust xfer masks 3248 * @dev: Device to adjust xfer masks
3249 * @sel: ATA_DNXFER_* selector 3249 * @sel: ATA_DNXFER_* selector
3250 * 3250 *
3251 * Adjust xfer masks of @dev downward. Note that this function 3251 * Adjust xfer masks of @dev downward. Note that this function
3252 * does not apply the change. Invoking ata_set_mode() afterwards 3252 * does not apply the change. Invoking ata_set_mode() afterwards
3253 * will apply the limit. 3253 * will apply the limit.
3254 * 3254 *
3255 * LOCKING: 3255 * LOCKING:
3256 * Inherited from caller. 3256 * Inherited from caller.
3257 * 3257 *
3258 * RETURNS: 3258 * RETURNS:
3259 * 0 on success, negative errno on failure 3259 * 0 on success, negative errno on failure
3260 */ 3260 */
3261 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel) 3261 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3262 { 3262 {
3263 char buf[32]; 3263 char buf[32];
3264 unsigned long orig_mask, xfer_mask; 3264 unsigned long orig_mask, xfer_mask;
3265 unsigned long pio_mask, mwdma_mask, udma_mask; 3265 unsigned long pio_mask, mwdma_mask, udma_mask;
3266 int quiet, highbit; 3266 int quiet, highbit;
3267 3267
3268 quiet = !!(sel & ATA_DNXFER_QUIET); 3268 quiet = !!(sel & ATA_DNXFER_QUIET);
3269 sel &= ~ATA_DNXFER_QUIET; 3269 sel &= ~ATA_DNXFER_QUIET;
3270 3270
3271 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask, 3271 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3272 dev->mwdma_mask, 3272 dev->mwdma_mask,
3273 dev->udma_mask); 3273 dev->udma_mask);
3274 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask); 3274 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3275 3275
3276 switch (sel) { 3276 switch (sel) {
3277 case ATA_DNXFER_PIO: 3277 case ATA_DNXFER_PIO:
3278 highbit = fls(pio_mask) - 1; 3278 highbit = fls(pio_mask) - 1;
3279 pio_mask &= ~(1 << highbit); 3279 pio_mask &= ~(1 << highbit);
3280 break; 3280 break;
3281 3281
3282 case ATA_DNXFER_DMA: 3282 case ATA_DNXFER_DMA:
3283 if (udma_mask) { 3283 if (udma_mask) {
3284 highbit = fls(udma_mask) - 1; 3284 highbit = fls(udma_mask) - 1;
3285 udma_mask &= ~(1 << highbit); 3285 udma_mask &= ~(1 << highbit);
3286 if (!udma_mask) 3286 if (!udma_mask)
3287 return -ENOENT; 3287 return -ENOENT;
3288 } else if (mwdma_mask) { 3288 } else if (mwdma_mask) {
3289 highbit = fls(mwdma_mask) - 1; 3289 highbit = fls(mwdma_mask) - 1;
3290 mwdma_mask &= ~(1 << highbit); 3290 mwdma_mask &= ~(1 << highbit);
3291 if (!mwdma_mask) 3291 if (!mwdma_mask)
3292 return -ENOENT; 3292 return -ENOENT;
3293 } 3293 }
3294 break; 3294 break;
3295 3295
3296 case ATA_DNXFER_40C: 3296 case ATA_DNXFER_40C:
3297 udma_mask &= ATA_UDMA_MASK_40C; 3297 udma_mask &= ATA_UDMA_MASK_40C;
3298 break; 3298 break;
3299 3299
3300 case ATA_DNXFER_FORCE_PIO0: 3300 case ATA_DNXFER_FORCE_PIO0:
3301 pio_mask &= 1; /* fall through */ 3301 pio_mask &= 1; /* fall through */
3302 case ATA_DNXFER_FORCE_PIO: 3302 case ATA_DNXFER_FORCE_PIO:
3303 mwdma_mask = 0; 3303 mwdma_mask = 0;
3304 udma_mask = 0; 3304 udma_mask = 0;
3305 break; 3305 break;
3306 3306
3307 default: 3307 default:
3308 BUG(); 3308 BUG();
3309 } 3309 }
3310 3310
3311 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); 3311 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3312 3312
3313 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask) 3313 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3314 return -ENOENT; 3314 return -ENOENT;
3315 3315
3316 if (!quiet) { 3316 if (!quiet) {
3317 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA)) 3317 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3318 snprintf(buf, sizeof(buf), "%s:%s", 3318 snprintf(buf, sizeof(buf), "%s:%s",
3319 ata_mode_string(xfer_mask), 3319 ata_mode_string(xfer_mask),
3320 ata_mode_string(xfer_mask & ATA_MASK_PIO)); 3320 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3321 else 3321 else
3322 snprintf(buf, sizeof(buf), "%s", 3322 snprintf(buf, sizeof(buf), "%s",
3323 ata_mode_string(xfer_mask)); 3323 ata_mode_string(xfer_mask));
3324 3324
3325 ata_dev_printk(dev, KERN_WARNING, 3325 ata_dev_printk(dev, KERN_WARNING,
3326 "limiting speed to %s\n", buf); 3326 "limiting speed to %s\n", buf);
3327 } 3327 }
3328 3328
3329 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, 3329 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3330 &dev->udma_mask); 3330 &dev->udma_mask);
3331 3331
3332 return 0; 3332 return 0;
3333 } 3333 }
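
The ATA_DNXFER_DMA branch trims one step off the fastest DMA class still enabled. With an assumed full UDMA mask:

	udma_mask = 0x7f (UDMA0-6)
	highbit   = fls(0x7f) - 1 = 6
	udma_mask &= ~(1 << 6)    -> 0x3f (UDMA0-5)

MWDMA is only touched once UDMA is already empty, and clearing the last bit of the class being trimmed (or computing no change at all) returns -ENOENT so the caller knows there is nothing left to back off to.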
3334 3334
3335 static int ata_dev_set_mode(struct ata_device *dev) 3335 static int ata_dev_set_mode(struct ata_device *dev)
3336 { 3336 {
3337 struct ata_port *ap = dev->link->ap; 3337 struct ata_port *ap = dev->link->ap;
3338 struct ata_eh_context *ehc = &dev->link->eh_context; 3338 struct ata_eh_context *ehc = &dev->link->eh_context;
3339 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER; 3339 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3340 const char *dev_err_whine = ""; 3340 const char *dev_err_whine = "";
3341 int ign_dev_err = 0; 3341 int ign_dev_err = 0;
3342 unsigned int err_mask = 0; 3342 unsigned int err_mask = 0;
3343 int rc; 3343 int rc;
3344 3344
3345 dev->flags &= ~ATA_DFLAG_PIO; 3345 dev->flags &= ~ATA_DFLAG_PIO;
3346 if (dev->xfer_shift == ATA_SHIFT_PIO) 3346 if (dev->xfer_shift == ATA_SHIFT_PIO)
3347 dev->flags |= ATA_DFLAG_PIO; 3347 dev->flags |= ATA_DFLAG_PIO;
3348 3348
3349 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id)) 3349 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3350 dev_err_whine = " (SET_XFERMODE skipped)"; 3350 dev_err_whine = " (SET_XFERMODE skipped)";
3351 else { 3351 else {
3352 if (nosetxfer) 3352 if (nosetxfer)
3353 ata_dev_printk(dev, KERN_WARNING, 3353 ata_dev_printk(dev, KERN_WARNING,
3354 "NOSETXFER but PATA detected - can't " 3354 "NOSETXFER but PATA detected - can't "
3355 "skip SETXFER, might malfunction\n"); 3355 "skip SETXFER, might malfunction\n");
3356 err_mask = ata_dev_set_xfermode(dev); 3356 err_mask = ata_dev_set_xfermode(dev);
3357 } 3357 }
3358 3358
3359 if (err_mask & ~AC_ERR_DEV) 3359 if (err_mask & ~AC_ERR_DEV)
3360 goto fail; 3360 goto fail;
3361 3361
3362 /* revalidate */ 3362 /* revalidate */
3363 ehc->i.flags |= ATA_EHI_POST_SETMODE; 3363 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3364 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0); 3364 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3365 ehc->i.flags &= ~ATA_EHI_POST_SETMODE; 3365 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3366 if (rc) 3366 if (rc)
3367 return rc; 3367 return rc;
3368 3368
3369 if (dev->xfer_shift == ATA_SHIFT_PIO) { 3369 if (dev->xfer_shift == ATA_SHIFT_PIO) {
3370 /* Old CFA may refuse this command, which is just fine */ 3370 /* Old CFA may refuse this command, which is just fine */
3371 if (ata_id_is_cfa(dev->id)) 3371 if (ata_id_is_cfa(dev->id))
3372 ign_dev_err = 1; 3372 ign_dev_err = 1;
3373 /* Catch several broken garbage emulations plus some pre 3373 /* Catch several broken garbage emulations plus some pre
3374 ATA devices */ 3374 ATA devices */
3375 if (ata_id_major_version(dev->id) == 0 && 3375 if (ata_id_major_version(dev->id) == 0 &&
3376 dev->pio_mode <= XFER_PIO_2) 3376 dev->pio_mode <= XFER_PIO_2)
3377 ign_dev_err = 1; 3377 ign_dev_err = 1;
3378 /* Some very old devices and some bad newer ones fail 3378 /* Some very old devices and some bad newer ones fail
3379 any kind of SET_XFERMODE request but support PIO0-2 3379 any kind of SET_XFERMODE request but support PIO0-2
3380 timings and no IORDY */ 3380 timings and no IORDY */
3381 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2) 3381 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3382 ign_dev_err = 1; 3382 ign_dev_err = 1;
3383 } 3383 }
3384 /* Early MWDMA devices do DMA but don't allow DMA mode setting. 3384 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3385 Don't fail an MWDMA0 set if the device indicates it is in MWDMA0 */ 3385 Don't fail an MWDMA0 set if the device indicates it is in MWDMA0 */
3386 if (dev->xfer_shift == ATA_SHIFT_MWDMA && 3386 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3387 dev->dma_mode == XFER_MW_DMA_0 && 3387 dev->dma_mode == XFER_MW_DMA_0 &&
3388 (dev->id[63] >> 8) & 1) 3388 (dev->id[63] >> 8) & 1)
3389 ign_dev_err = 1; 3389 ign_dev_err = 1;
3390 3390
3391 /* if the device is actually configured correctly, ignore dev err */ 3391 /* if the device is actually configured correctly, ignore dev err */
3392 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id))) 3392 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3393 ign_dev_err = 1; 3393 ign_dev_err = 1;
3394 3394
3395 if (err_mask & AC_ERR_DEV) { 3395 if (err_mask & AC_ERR_DEV) {
3396 if (!ign_dev_err) 3396 if (!ign_dev_err)
3397 goto fail; 3397 goto fail;
3398 else 3398 else
3399 dev_err_whine = " (device error ignored)"; 3399 dev_err_whine = " (device error ignored)";
3400 } 3400 }
3401 3401
3402 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", 3402 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3403 dev->xfer_shift, (int)dev->xfer_mode); 3403 dev->xfer_shift, (int)dev->xfer_mode);
3404 3404
3405 ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n", 3405 ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
3406 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)), 3406 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3407 dev_err_whine); 3407 dev_err_whine);
3408 3408
3409 return 0; 3409 return 0;
3410 3410
3411 fail: 3411 fail:
3412 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode " 3412 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3413 "(err_mask=0x%x)\n", err_mask); 3413 "(err_mask=0x%x)\n", err_mask);
3414 return -EIO; 3414 return -EIO;
3415 } 3415 }

/**
 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
 * @link: link on which timings will be programmed
 * @r_failed_dev: out parameter for failed device
 *
 * Standard implementation of the function used to tune and set
 * ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 * ata_dev_set_mode() fails, pointer to the failing device is
 * returned in @r_failed_dev.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * 0 on success, negative errno otherwise
 */

int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
        struct ata_port *ap = link->ap;
        struct ata_device *dev;
        int rc = 0, used_dma = 0, found = 0;

        /* step 1: calculate xfer_mask */
        ata_for_each_dev(dev, link, ENABLED) {
                unsigned long pio_mask, dma_mask;
                unsigned int mode_mask;

                mode_mask = ATA_DMA_MASK_ATA;
                if (dev->class == ATA_DEV_ATAPI)
                        mode_mask = ATA_DMA_MASK_ATAPI;
                else if (ata_id_is_cfa(dev->id))
                        mode_mask = ATA_DMA_MASK_CFA;

                ata_dev_xfermask(dev);
                ata_force_xfermask(dev);

                pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);

                if (libata_dma_mask & mode_mask)
                        dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
                                                     dev->udma_mask);
                else
                        dma_mask = 0;

                dev->pio_mode = ata_xfer_mask2mode(pio_mask);
                dev->dma_mode = ata_xfer_mask2mode(dma_mask);

                found = 1;
                if (ata_dma_enabled(dev))
                        used_dma = 1;
        }
        if (!found)
                goto out;

        /* step 2: always set host PIO timings */
        ata_for_each_dev(dev, link, ENABLED) {
                if (dev->pio_mode == 0xff) {
                        ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
                        rc = -EINVAL;
                        goto out;
                }

                dev->xfer_mode = dev->pio_mode;
                dev->xfer_shift = ATA_SHIFT_PIO;
                if (ap->ops->set_piomode)
                        ap->ops->set_piomode(ap, dev);
        }

        /* step 3: set host DMA timings */
        ata_for_each_dev(dev, link, ENABLED) {
                if (!ata_dma_enabled(dev))
                        continue;

                dev->xfer_mode = dev->dma_mode;
                dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
                if (ap->ops->set_dmamode)
                        ap->ops->set_dmamode(ap, dev);
        }

        /* step 4: update devices' xfer mode */
        ata_for_each_dev(dev, link, ENABLED) {
                rc = ata_dev_set_mode(dev);
                if (rc)
                        goto out;
        }

        /* Record simplex status.  If we selected DMA then the other
         * host channels are not permitted to do so.
         */
        if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
                ap->host->simplex_claimed = ap;

 out:
        if (rc)
                *r_failed_dev = dev;
        return rc;
}
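
/*
 * Illustrative usage sketch (not part of this file): an LLD that needs a
 * board-specific tweak before the four steps above can override the
 * ->set_mode hook and still delegate the real work to ata_do_set_mode().
 * "example_set_mode" and the quirk it applies are assumptions, not code
 * from any real driver.
 */
static int example_set_mode(struct ata_link *link,
                            struct ata_device **r_failed_dev)
{
        struct ata_device *dev;

        /* hypothetical quirk: force every device on this link to PIO */
        ata_for_each_dev(dev, link, ENABLED)
                dev->mwdma_mask = dev->udma_mask = 0;

        return ata_do_set_mode(link, r_failed_dev);
}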

/**
 * ata_wait_ready - wait for link to become ready
 * @link: link to be waited on
 * @deadline: deadline jiffies for the operation
 * @check_ready: callback to check link readiness
 *
 * Wait for @link to become ready.  @check_ready should return
 * positive number if @link is ready, 0 if it isn't, -ENODEV if
 * link doesn't seem to be occupied, other errno for other error
 * conditions.
 *
 * Transient -ENODEV conditions are allowed for
 * ATA_TMOUT_FF_WAIT.
 *
 * LOCKING:
 * EH context.
 *
 * RETURNS:
 * 0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_ready(struct ata_link *link, unsigned long deadline,
                   int (*check_ready)(struct ata_link *link))
{
        unsigned long start = jiffies;
        unsigned long nodev_deadline;
        int warned = 0;

        /* choose which 0xff timeout to use, read comment in libata.h */
        if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
                nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
        else
                nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);

        /* Slave readiness can't be tested separately from master.  On
         * M/S emulation configuration, this function should be called
         * only on the master and it will handle both master and slave.
         */
        WARN_ON(link == link->ap->slave_link);

        if (time_after(nodev_deadline, deadline))
                nodev_deadline = deadline;

        while (1) {
                unsigned long now = jiffies;
                int ready, tmp;

                ready = tmp = check_ready(link);
                if (ready > 0)
                        return 0;

                /*
                 * -ENODEV could be transient.  Ignore -ENODEV if link
                 * is online.  Also, some SATA devices take a long
                 * time to clear 0xff after reset.  Wait for
                 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
                 * offline.
                 *
                 * Note that some PATA controllers (pata_ali) explode
                 * if status register is read more than once when
                 * there's no device attached.
                 */
                if (ready == -ENODEV) {
                        if (ata_link_online(link))
                                ready = 0;
                        else if ((link->ap->flags & ATA_FLAG_SATA) &&
                                 !ata_link_offline(link) &&
                                 time_before(now, nodev_deadline))
                                ready = 0;
                }

                if (ready)
                        return ready;
                if (time_after(now, deadline))
                        return -EBUSY;

                if (!warned && time_after(now, start + 5 * HZ) &&
                    (deadline - now > 3 * HZ)) {
                        ata_link_printk(link, KERN_WARNING,
                                "link is slow to respond, please be patient "
                                "(ready=%d)\n", tmp);
                        warned = 1;
                }

                msleep(50);
        }
}
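
/*
 * Illustrative usage sketch (not part of this file): the @check_ready
 * callback is all a caller supplies.  A hypothetical MMIO controller
 * with a per-port ready register might poll it like this;
 * example_ready_reg() and the bit layout are assumptions.
 */
static int example_check_ready(struct ata_link *link)
{
        u32 status = readl(example_ready_reg(link->ap));

        if (status == 0xffffffff)       /* bus floats - looks unoccupied */
                return -ENODEV;
        return status & 1;              /* 1: ready, 0: keep polling */
}

/* ...then: rc = ata_wait_ready(link, deadline, example_check_ready); */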

/**
 * ata_wait_after_reset - wait for link to become ready after reset
 * @link: link to be waited on
 * @deadline: deadline jiffies for the operation
 * @check_ready: callback to check link readiness
 *
 * Wait for @link to become ready after reset.
 *
 * LOCKING:
 * EH context.
 *
 * RETURNS:
 * 0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
                         int (*check_ready)(struct ata_link *link))
{
        msleep(ATA_WAIT_AFTER_RESET);

        return ata_wait_ready(link, deadline, check_ready);
}

/**
 * sata_link_debounce - debounce SATA phy status
 * @link: ATA link to debounce SATA phy status for
 * @params: timing parameters { interval, duration, timeout } in msec
 * @deadline: deadline jiffies for the operation
 *
 * Make sure SStatus of @link reaches a stable state, determined by
 * holding the same value where DET is not 1 for @duration polled
 * every @interval, before @timeout.  The timeout constrains the
 * beginning of the stable state.  Because DET gets stuck at 1 on
 * some controllers after hot unplugging, this function waits until
 * the timeout and then returns 0 if DET is stable at 1.
 *
 * @timeout is further limited by @deadline.  The sooner of the
 * two is used.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
                       unsigned long deadline)
{
        unsigned long interval = params[0];
        unsigned long duration = params[1];
        unsigned long last_jiffies, t;
        u32 last, cur;
        int rc;

        t = ata_deadline(jiffies, params[2]);
        if (time_before(t, deadline))
                deadline = t;

        if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
                return rc;
        cur &= 0xf;

        last = cur;
        last_jiffies = jiffies;

        while (1) {
                msleep(interval);
                if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
                        return rc;
                cur &= 0xf;

                /* DET stable? */
                if (cur == last) {
                        if (cur == 1 && time_before(jiffies, deadline))
                                continue;
                        if (time_after(jiffies,
                                       ata_deadline(last_jiffies, duration)))
                                return 0;
                        continue;
                }

                /* unstable, start over */
                last = cur;
                last_jiffies = jiffies;

                /* Check deadline.  If debouncing failed, return
                 * -EPIPE to tell upper layer to lower link speed.
                 */
                if (time_after(jiffies, deadline))
                        return -EPIPE;
        }
}
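
/*
 * Illustrative usage sketch (not part of this file): callers normally
 * pass one of the standard timing tables declared in <linux/libata.h>
 * (sata_deb_timing_normal, _hotplug or _long) instead of ad-hoc numbers.
 * "example_debounce_hotplug" is an assumption.
 */
static int example_debounce_hotplug(struct ata_link *link)
{
        unsigned long deadline = ata_deadline(jiffies,
                                              ATA_TMOUT_INTERNAL_QUICK);

        return sata_link_debounce(link, sata_deb_timing_hotplug, deadline);
}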

/**
 * sata_link_resume - resume SATA link
 * @link: ATA link to resume SATA
 * @params: timing parameters { interval, duration, timeout } in msec
 * @deadline: deadline jiffies for the operation
 *
 * Resume SATA phy @link and debounce it.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
                     unsigned long deadline)
{
        int tries = ATA_LINK_RESUME_TRIES;
        u32 scontrol, serror;
        int rc;

        if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
                return rc;

        /*
         * Writes to SControl sometimes get ignored under certain
         * controllers (ata_piix SIDPR).  Make sure DET actually is
         * cleared.
         */
        do {
                scontrol = (scontrol & 0x0f0) | 0x300;
                if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
                        return rc;
                /*
                 * Some PHYs react badly if SStatus is pounded
                 * immediately after resuming.  Delay 200ms before
                 * debouncing.
                 */
                msleep(200);

                /* is SControl restored correctly? */
                if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
                        return rc;
        } while ((scontrol & 0xf0f) != 0x300 && --tries);

        if ((scontrol & 0xf0f) != 0x300) {
                ata_link_printk(link, KERN_ERR,
                                "failed to resume link (SControl %X)\n",
                                scontrol);
                return 0;
        }

        if (tries < ATA_LINK_RESUME_TRIES)
                ata_link_printk(link, KERN_WARNING,
                                "link resume succeeded after %d retries\n",
                                ATA_LINK_RESUME_TRIES - tries);

        if ((rc = sata_link_debounce(link, params, deadline)))
                return rc;

        /* clear SError, some PHYs require this even for SRST to work */
        if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
                rc = sata_scr_write(link, SCR_ERROR, serror);

        return rc != -EINVAL ? rc : 0;
}

/**
 * ata_std_prereset - prepare for reset
 * @link: ATA link to be reset
 * @deadline: deadline jiffies for the operation
 *
 * @link is about to be reset.  Initialize it.  Failure from
 * prereset makes libata abort the whole reset sequence and give up
 * that port, so prereset should be best-effort.  It does its best
 * to prepare for the reset sequence but if things go wrong, it
 * should just whine, not fail.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
        struct ata_port *ap = link->ap;
        struct ata_eh_context *ehc = &link->eh_context;
        const unsigned long *timing = sata_ehc_deb_timing(ehc);
        int rc;

        /* if we're about to do hardreset, nothing more to do */
        if (ehc->i.action & ATA_EH_HARDRESET)
                return 0;

        /* if SATA, resume link */
        if (ap->flags & ATA_FLAG_SATA) {
                rc = sata_link_resume(link, timing, deadline);
                /* whine about phy resume failure but proceed */
                if (rc && rc != -EOPNOTSUPP)
                        ata_link_printk(link, KERN_WARNING, "failed to resume "
                                        "link for reset (errno=%d)\n", rc);
        }

        /* no point in trying softreset on offline link */
        if (ata_phys_link_offline(link))
                ehc->i.action &= ~ATA_EH_SOFTRESET;

        return 0;
}
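
/*
 * Illustrative usage sketch (not part of this file): a driver prereset
 * typically adds its own preparation and then falls through to
 * ata_std_prereset().  "example_prereset" and example_board_is_flaky()
 * are assumptions.
 */
static int example_prereset(struct ata_link *link, unsigned long deadline)
{
        /* hypothetical board where softreset is unreliable */
        if (example_board_is_flaky(link->ap))
                link->eh_context.i.action |= ATA_EH_HARDRESET;

        return ata_std_prereset(link, deadline);
}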

/**
 * sata_link_hardreset - reset link via SATA phy reset
 * @link: link to reset
 * @timing: timing parameters { interval, duration, timeout } in msec
 * @deadline: deadline jiffies for the operation
 * @online: optional out parameter indicating link onlineness
 * @check_ready: optional callback to check link readiness
 *
 * SATA phy-reset @link using DET bits of SControl register.
 * After hardreset, link readiness is waited upon using
 * ata_wait_ready() if @check_ready is specified.  LLDs may omit
 * @check_ready and do the waiting themselves after this function
 * returns.  Device classification is the LLD's responsibility.
 *
 * *@online is set to one iff reset succeeded and @link is online
 * after reset.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
                        unsigned long deadline,
                        bool *online, int (*check_ready)(struct ata_link *))
{
        u32 scontrol;
        int rc;

        DPRINTK("ENTER\n");

        if (online)
                *online = false;

        if (sata_set_spd_needed(link)) {
                /* SATA spec says nothing about how to reconfigure
                 * spd.  To be on the safe side, turn off phy during
                 * reconfiguration.  This works for at least ICH7 AHCI
                 * and Sil3124.
                 */
                if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
                        goto out;

                scontrol = (scontrol & 0x0f0) | 0x304;

                if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
                        goto out;

                sata_set_spd(link);
        }

        /* issue phy wake/reset */
        if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
                goto out;

        scontrol = (scontrol & 0x0f0) | 0x301;

        if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
                goto out;

        /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
         * 10.4.2 says at least 1 ms.
         */
        msleep(1);

        /* bring link back */
        rc = sata_link_resume(link, timing, deadline);
        if (rc)
                goto out;
        /* if link is offline nothing more to do */
        if (ata_phys_link_offline(link))
                goto out;

        /* Link is online.  From this point, -ENODEV too is an error. */
        if (online)
                *online = true;

        if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
                /* If PMP is supported, we have to do follow-up SRST.
                 * Some PMPs don't send D2H Reg FIS after hardreset if
                 * the first port is empty.  Wait only for
                 * ATA_TMOUT_PMP_SRST_WAIT.
                 */
                if (check_ready) {
                        unsigned long pmp_deadline;

                        pmp_deadline = ata_deadline(jiffies,
                                                    ATA_TMOUT_PMP_SRST_WAIT);
                        if (time_after(pmp_deadline, deadline))
                                pmp_deadline = deadline;
                        ata_wait_ready(link, pmp_deadline, check_ready);
                }
                rc = -EAGAIN;
                goto out;
        }

        rc = 0;
        if (check_ready)
                rc = ata_wait_ready(link, deadline, check_ready);
 out:
        if (rc && rc != -EAGAIN) {
                /* online is set iff link is online && reset succeeded */
                if (online)
                        *online = false;
                ata_link_printk(link, KERN_ERR,
                                "COMRESET failed (errno=%d)\n", rc);
        }
        DPRINTK("EXIT, rc=%d\n", rc);
        return rc;
}
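
/*
 * Illustrative usage sketch (not part of this file): a typical LLD
 * hardreset is built on sata_link_hardreset(), supplying its own
 * readiness test and doing the classification itself afterwards.
 * example_check_ready() is the hypothetical callback sketched after
 * ata_wait_ready() above; example_read_sig() is also an assumption.
 */
static int example_hardreset(struct ata_link *link, unsigned int *class,
                             unsigned long deadline)
{
        const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
        bool online;
        int rc;

        rc = sata_link_hardreset(link, timing, deadline, &online,
                                 example_check_ready);
        if (online)
                *class = example_read_sig(link->ap); /* classify from sig FIS */

        return rc;
}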

/**
 * sata_std_hardreset - COMRESET w/o waiting or classification
 * @link: link to reset
 * @class: resulting class of attached device
 * @deadline: deadline jiffies for the operation
 *
 * Standard SATA COMRESET w/o waiting or classification.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 if link offline, -EAGAIN if link online, -errno on errors.
 */
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
                       unsigned long deadline)
{
        const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
        bool online;
        int rc;

        /* do hardreset */
        rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
        return online ? -EAGAIN : rc;
}
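
/*
 * Illustrative usage sketch (not part of this file): drivers that want
 * exactly this behaviour just wire sata_std_hardreset() into their port
 * operations; "example_port_ops" is an assumption.
 */
static struct ata_port_operations example_port_ops = {
        .inherits       = &sata_port_ops,
        .hardreset      = sata_std_hardreset,
};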

/**
 * ata_std_postreset - standard postreset callback
 * @link: the target ata_link
 * @classes: classes of attached devices
 *
 * This function is invoked after a successful reset.  Note that
 * the device might have been reset more than once using
 * different reset methods before postreset is invoked.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
        u32 serror;

        DPRINTK("ENTER\n");

        /* reset complete, clear SError */
        if (!sata_scr_read(link, SCR_ERROR, &serror))
                sata_scr_write(link, SCR_ERROR, serror);

        /* print link status */
        sata_print_link_status(link);

        DPRINTK("EXIT\n");
}

/**
 * ata_dev_same_device - Determine whether new ID matches configured device
 * @dev: device to compare against
 * @new_class: class of the new device
 * @new_id: IDENTIFY page of the new device
 *
 * Compare @new_class and @new_id against @dev and determine
 * whether @dev is the device indicated by @new_class and
 * @new_id.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
 */
static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
                               const u16 *new_id)
{
        const u16 *old_id = dev->id;
        unsigned char model[2][ATA_ID_PROD_LEN + 1];
        unsigned char serial[2][ATA_ID_SERNO_LEN + 1];

        if (dev->class != new_class) {
                ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
                               dev->class, new_class);
                return 0;
        }

        ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
        ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
        ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
        ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));

        if (strcmp(model[0], model[1])) {
                ata_dev_printk(dev, KERN_INFO, "model number mismatch "
                               "'%s' != '%s'\n", model[0], model[1]);
                return 0;
        }

        if (strcmp(serial[0], serial[1])) {
                ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
                               "'%s' != '%s'\n", serial[0], serial[1]);
                return 0;
        }

        return 1;
}

/**
 * ata_dev_reread_id - Re-read IDENTIFY data
 * @dev: target ATA device
 * @readid_flags: read ID flags
 *
 * Re-read IDENTIFY page and make sure @dev is still attached to
 * the port.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, negative errno otherwise
 */
int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
{
        unsigned int class = dev->class;
        u16 *id = (void *)dev->link->ap->sector_buf;
        int rc;

        /* read ID data */
        rc = ata_dev_read_id(dev, &class, readid_flags, id);
        if (rc)
                return rc;

        /* is the device still there? */
        if (!ata_dev_same_device(dev, class, id))
                return -ENODEV;

        memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
        return 0;
}

/**
 * ata_dev_revalidate - Revalidate ATA device
 * @dev: device to revalidate
 * @new_class: new class code
 * @readid_flags: read ID flags
 *
 * Re-read IDENTIFY page, make sure @dev is still attached to the
 * port and reconfigure it according to the new IDENTIFY page.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
                       unsigned int readid_flags)
{
        u64 n_sectors = dev->n_sectors;
        u64 n_native_sectors = dev->n_native_sectors;
        int rc;

        if (!ata_dev_enabled(dev))
                return -ENODEV;

        /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
        if (ata_class_enabled(new_class) &&
            new_class != ATA_DEV_ATA &&
            new_class != ATA_DEV_ATAPI &&
            new_class != ATA_DEV_SEMB) {
                ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
                               dev->class, new_class);
                rc = -ENODEV;
                goto fail;
        }

        /* re-read ID */
        rc = ata_dev_reread_id(dev, readid_flags);
        if (rc)
                goto fail;

        /* configure device according to the new ID */
        rc = ata_dev_configure(dev);
        if (rc)
                goto fail;

        /* verify n_sectors hasn't changed */
        if (dev->class != ATA_DEV_ATA || !n_sectors ||
            dev->n_sectors == n_sectors)
                return 0;

        /* n_sectors has changed */
        ata_dev_printk(dev, KERN_WARNING, "n_sectors mismatch %llu != %llu\n",
                       (unsigned long long)n_sectors,
                       (unsigned long long)dev->n_sectors);

        /*
         * Something could have caused HPA to be unlocked
         * involuntarily.  If n_native_sectors hasn't changed and the
         * new size matches it, keep the device.
         */
        if (dev->n_native_sectors == n_native_sectors &&
            dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
                ata_dev_printk(dev, KERN_WARNING,
                               "new n_sectors matches native, probably "
                               "late HPA unlock, n_sectors updated\n");
                /* use the larger n_sectors */
                return 0;
        }

        /*
         * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
         * unlocking HPA in those cases.
         *
         * https://bugzilla.kernel.org/show_bug.cgi?id=15396
         */
        if (dev->n_native_sectors == n_native_sectors &&
            dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
            !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
                ata_dev_printk(dev, KERN_WARNING,
                               "old n_sectors matches native, probably "
                               "late HPA lock, will try to unlock HPA\n");
                /* try unlocking HPA */
                dev->flags |= ATA_DFLAG_UNLOCK_HPA;
                rc = -EIO;
        } else
                rc = -ENODEV;

        /* restore original n_[native_]sectors and fail */
        dev->n_native_sectors = n_native_sectors;
        dev->n_sectors = n_sectors;
 fail:
        ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
        return rc;
}
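
/*
 * Illustrative usage sketch (not part of this file): EH-side callers
 * pass the expected class (or ATA_DEV_UNKNOWN) plus read-ID flags,
 * e.g. ATA_READID_POSTRESET right after a reset.
 * "example_revalidate_after_reset" is an assumption.
 */
static int example_revalidate_after_reset(struct ata_device *dev)
{
        return ata_dev_revalidate(dev, ATA_DEV_UNKNOWN,
                                  ATA_READID_POSTRESET);
}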

struct ata_blacklist_entry {
        const char *model_num;
        const char *model_rev;
        unsigned long horkage;
};

static const struct ata_blacklist_entry ata_device_blacklist [] = {
        /* Devices with DMA related problems under Linux */
        { "WDC AC11000H",       NULL,           ATA_HORKAGE_NODMA },
        { "WDC AC22100H",       NULL,           ATA_HORKAGE_NODMA },
        { "WDC AC32500H",       NULL,           ATA_HORKAGE_NODMA },
        { "WDC AC33100H",       NULL,           ATA_HORKAGE_NODMA },
        { "WDC AC31600H",       NULL,           ATA_HORKAGE_NODMA },
        { "WDC AC32100H",       "24.09P07",     ATA_HORKAGE_NODMA },
        { "WDC AC23200L",       "21.10N21",     ATA_HORKAGE_NODMA },
        { "Compaq CRD-8241B",   NULL,           ATA_HORKAGE_NODMA },
        { "CRD-8400B",          NULL,           ATA_HORKAGE_NODMA },
        { "CRD-848[02]B",       NULL,           ATA_HORKAGE_NODMA },
        { "CRD-84",             NULL,           ATA_HORKAGE_NODMA },
        { "SanDisk SDP3B",      NULL,           ATA_HORKAGE_NODMA },
        { "SanDisk SDP3B-64",   NULL,           ATA_HORKAGE_NODMA },
        { "SANYO CD-ROM CRD",   NULL,           ATA_HORKAGE_NODMA },
        { "HITACHI CDR-8",      NULL,           ATA_HORKAGE_NODMA },
        { "HITACHI CDR-8[34]35",NULL,           ATA_HORKAGE_NODMA },
        { "Toshiba CD-ROM XM-6202B", NULL,      ATA_HORKAGE_NODMA },
        { "TOSHIBA CD-ROM XM-1702BC", NULL,     ATA_HORKAGE_NODMA },
        { "CD-532E-A",          NULL,           ATA_HORKAGE_NODMA },
        { "E-IDE CD-ROM CR-840",NULL,           ATA_HORKAGE_NODMA },
        { "CD-ROM Drive/F5A",   NULL,           ATA_HORKAGE_NODMA },
        { "WPI CDD-820",        NULL,           ATA_HORKAGE_NODMA },
        { "SAMSUNG CD-ROM SC-148C", NULL,       ATA_HORKAGE_NODMA },
        { "SAMSUNG CD-ROM SC",  NULL,           ATA_HORKAGE_NODMA },
        { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
        { "_NEC DV5800A",       NULL,           ATA_HORKAGE_NODMA },
        { "SAMSUNG CD-ROM SN-124", "N001",      ATA_HORKAGE_NODMA },
        { "Seagate STT20000A",  NULL,           ATA_HORKAGE_NODMA },
        /* Odd clown on sil3726/4726 PMPs */
        { "Config Disk",        NULL,           ATA_HORKAGE_DISABLE },

        /* Weird ATAPI devices */
        { "TORiSAN DVD-ROM DRD-N216", NULL,     ATA_HORKAGE_MAX_SEC_128 },
        { "QUANTUM DAT DAT72-000", NULL,        ATA_HORKAGE_ATAPI_MOD16_DMA },

        /* Devices we expect to fail diagnostics */

        /* Devices where NCQ should be avoided */
        /* NCQ is slow */
        { "WDC WD740ADFD-00",   NULL,           ATA_HORKAGE_NONCQ },
        { "WDC WD740ADFD-00NLR1", NULL,         ATA_HORKAGE_NONCQ, },
        /* http://thread.gmane.org/gmane.linux.ide/14907 */
        { "FUJITSU MHT2060BH",  NULL,           ATA_HORKAGE_NONCQ },
        /* NCQ is broken */
        { "Maxtor *",           "BANC*",        ATA_HORKAGE_NONCQ },
        { "Maxtor 7V300F0",     "VA111630",     ATA_HORKAGE_NONCQ },
        { "ST380817AS",         "3.42",         ATA_HORKAGE_NONCQ },
        { "ST3160023AS",        "3.42",         ATA_HORKAGE_NONCQ },
        { "OCZ CORE_SSD",       "02.10104",     ATA_HORKAGE_NONCQ },

        /* Seagate NCQ + FLUSH CACHE firmware bug */
        { "ST31500341AS",       "SD1[5-9]",     ATA_HORKAGE_NONCQ |
                                                ATA_HORKAGE_FIRMWARE_WARN },

        { "ST31000333AS",       "SD1[5-9]",     ATA_HORKAGE_NONCQ |
                                                ATA_HORKAGE_FIRMWARE_WARN },

        { "ST3640[36]23AS",     "SD1[5-9]",     ATA_HORKAGE_NONCQ |
                                                ATA_HORKAGE_FIRMWARE_WARN },

        { "ST3320[68]13AS",     "SD1[5-9]",     ATA_HORKAGE_NONCQ |
                                                ATA_HORKAGE_FIRMWARE_WARN },

        /* Blacklist entries taken from Silicon Image 3124/3132
           Windows driver .inf file - also several Linux problem reports */
        { "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
        { "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
        { "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },

        /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
        { "C300-CTFDDAC128MAG", "0001",         ATA_HORKAGE_NONCQ, },

        /* devices which puke on READ_NATIVE_MAX */
        { "HDS724040KLSA80",    "KFAOA20N",     ATA_HORKAGE_BROKEN_HPA, },
        { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
        { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
        { "MAXTOR 6L080L4",     "A93.0500",     ATA_HORKAGE_BROKEN_HPA },

        /* this one allows HPA unlocking but fails IOs on the area */
        { "OCZ-VERTEX",         "1.30",         ATA_HORKAGE_BROKEN_HPA },

        /* Devices which report 1 sector over size HPA */
        { "ST340823A",          NULL,           ATA_HORKAGE_HPA_SIZE, },
        { "ST320413A",          NULL,           ATA_HORKAGE_HPA_SIZE, },
        { "ST310211A",          NULL,           ATA_HORKAGE_HPA_SIZE, },

        /* Devices which get the IVB wrong */
        { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
        /* Maybe we should just blacklist TSSTcorp... */
        { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, },

        /* Devices that do not need bridging limits applied */
        { "MTRON MSP-SATA*",    NULL,           ATA_HORKAGE_BRIDGE_OK, },

        /* Devices which aren't very happy with higher link speeds */
        { "WD My Book",         NULL,           ATA_HORKAGE_1_5_GBPS, },

        /*
         * Devices which choke on SETXFER.  Applies only if both the
         * device and controller are SATA.
         */
        { "PIONEER DVD-RW DVRTD08", "1.00",     ATA_HORKAGE_NOSETXFER },

        /* End Marker */
        { }
};
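
/*
 * Illustrative sketch (not part of this file): a new quirk entry goes
 * just above the end marker, using glob patterns (see glob_match()
 * below) for the model and revision strings.  The device below is
 * hypothetical:
 *
 *      { "EXAMPLE SSD 2[05]0GB",  "FW1.[0-3]*",  ATA_HORKAGE_NONCQ },
 */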
4265 4265
4266 /** 4266 /**
4267 * glob_match - match a text string against a glob-style pattern 4267 * glob_match - match a text string against a glob-style pattern
4268 * @text: the string to be examined 4268 * @text: the string to be examined
4269 * @pattern: the glob-style pattern to be matched against 4269 * @pattern: the glob-style pattern to be matched against
4270 * 4270 *
4271 * Either/both of text and pattern can be empty strings. 4271 * Either/both of text and pattern can be empty strings.
4272 * 4272 *
4273 * Match text against a glob-style pattern, with wildcards and simple sets: 4273 * Match text against a glob-style pattern, with wildcards and simple sets:
4274 * 4274 *
4275 * ? matches any single character. 4275 * ? matches any single character.
4276 * * matches any run of characters. 4276 * * matches any run of characters.
4277 * [xyz] matches a single character from the set: x, y, or z. 4277 * [xyz] matches a single character from the set: x, y, or z.
4278 * [a-d] matches a single character from the range: a, b, c, or d. 4278 * [a-d] matches a single character from the range: a, b, c, or d.
4279 * [a-d0-9] matches a single character from either range. 4279 * [a-d0-9] matches a single character from either range.
4280 * 4280 *
4281 * The special characters ?, [, -, or *, can be matched using a set, eg. [*] 4281 * The special characters ?, [, -, or *, can be matched using a set, eg. [*]
4282 * Behaviour with malformed patterns is undefined, though generally reasonable. 4282 * Behaviour with malformed patterns is undefined, though generally reasonable.
4283 * 4283 *
4284 * Sample patterns: "SD1?", "SD1[0-5]", "*R0", "SD*1?[012]*xx" 4284 * Sample patterns: "SD1?", "SD1[0-5]", "*R0", "SD*1?[012]*xx"
4285 * 4285 *
4286 * This function uses one level of recursion per '*' in pattern. 4286 * This function uses one level of recursion per '*' in pattern.
4287 * Since it calls _nothing_ else, and has _no_ explicit local variables, 4287 * Since it calls _nothing_ else, and has _no_ explicit local variables,
4288 * this will not cause stack problems for any reasonable use here. 4288 * this will not cause stack problems for any reasonable use here.
4289 * 4289 *
4290 * RETURNS: 4290 * RETURNS:
4291 * 0 on match, 1 otherwise. 4291 * 0 on match, 1 otherwise.
4292 */ 4292 */
4293 static int glob_match(const char *text, const char *pattern) 4293 static int glob_match(const char *text, const char *pattern)
4294 { 4294 {
4295 do { 4295 do {
4296 /* Match single character or a '?' wildcard */ 4296 /* Match single character or a '?' wildcard */
4297 if (*text == *pattern || *pattern == '?') { 4297 if (*text == *pattern || *pattern == '?') {
4298 if (!*pattern++) 4298 if (!*pattern++)
4299 return 0; /* End of both strings: match */ 4299 return 0; /* End of both strings: match */
4300 } else { 4300 } else {
4301 /* Match single char against a '[' bracketed ']' pattern set */ 4301 /* Match single char against a '[' bracketed ']' pattern set */
4302 if (!*text || *pattern != '[') 4302 if (!*text || *pattern != '[')
4303 break; /* Not a pattern set */ 4303 break; /* Not a pattern set */
4304 while (*++pattern && *pattern != ']' && *text != *pattern) { 4304 while (*++pattern && *pattern != ']' && *text != *pattern) {
4305 if (*pattern == '-' && *(pattern - 1) != '[') 4305 if (*pattern == '-' && *(pattern - 1) != '[')
4306 if (*text > *(pattern - 1) && *text < *(pattern + 1)) { 4306 if (*text > *(pattern - 1) && *text < *(pattern + 1)) {
4307 ++pattern; 4307 ++pattern;
4308 break; 4308 break;
4309 } 4309 }
4310 } 4310 }
4311 if (!*pattern || *pattern == ']') 4311 if (!*pattern || *pattern == ']')
4312 return 1; /* No match */ 4312 return 1; /* No match */
4313 while (*pattern && *pattern++ != ']'); 4313 while (*pattern && *pattern++ != ']');
4314 } 4314 }
4315 } while (*++text && *pattern); 4315 } while (*++text && *pattern);
4316 4316
4317 /* Match any run of chars against a '*' wildcard */ 4317 /* Match any run of chars against a '*' wildcard */
4318 if (*pattern == '*') { 4318 if (*pattern == '*') {
4319 if (!*++pattern) 4319 if (!*++pattern)
4320 return 0; /* Match: avoid recursion at end of pattern */ 4320 return 0; /* Match: avoid recursion at end of pattern */
4321 /* Loop to handle additional pattern chars after the wildcard */ 4321 /* Loop to handle additional pattern chars after the wildcard */
4322 while (*text) { 4322 while (*text) {
4323 if (glob_match(text, pattern) == 0) 4323 if (glob_match(text, pattern) == 0)
4324 return 0; /* Remainder matched */ 4324 return 0; /* Remainder matched */
4325 ++text; /* Absorb (match) this char and try again */ 4325 ++text; /* Absorb (match) this char and try again */
4326 } 4326 }
4327 } 4327 }
4328 if (!*text && !*pattern) 4328 if (!*text && !*pattern)
4329 return 0; /* End of both strings: match */ 4329 return 0; /* End of both strings: match */
4330 return 1; /* No match */ 4330 return 1; /* No match */
4331 } 4331 }
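/*
 * Illustrative sketch, not part of the original code: how the
 * strcmp()-like return convention of glob_match() (0 == match)
 * reads at a call site.  The model strings below are hypothetical
 * examples and are not taken from the blacklist table above.
 */
static int __maybe_unused glob_match_demo(void)
{
	/* "ST3?0*": '?' consumes the '2', '*' consumes the tail */
	if (glob_match("ST320413A", "ST3?0*"))
		return 1;	/* unexpected: should have matched */

	/* "[a-d]*": 'Z' falls outside the bracketed range */
	if (!glob_match("ZEBRA", "[a-d]*"))
		return 1;	/* unexpected: should not have matched */

	return 0;		/* both cases behaved as described */
}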
4332 4332
4333 static unsigned long ata_dev_blacklisted(const struct ata_device *dev) 4333 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4334 { 4334 {
4335 unsigned char model_num[ATA_ID_PROD_LEN + 1]; 4335 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4336 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1]; 4336 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4337 const struct ata_blacklist_entry *ad = ata_device_blacklist; 4337 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4338 4338
4339 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); 4339 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4340 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev)); 4340 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4341 4341
4342 while (ad->model_num) { 4342 while (ad->model_num) {
4343 if (!glob_match(model_num, ad->model_num)) { 4343 if (!glob_match(model_num, ad->model_num)) {
4344 if (ad->model_rev == NULL) 4344 if (ad->model_rev == NULL)
4345 return ad->horkage; 4345 return ad->horkage;
4346 if (!glob_match(model_rev, ad->model_rev)) 4346 if (!glob_match(model_rev, ad->model_rev))
4347 return ad->horkage; 4347 return ad->horkage;
4348 } 4348 }
4349 ad++; 4349 ad++;
4350 } 4350 }
4351 return 0; 4351 return 0;
4352 } 4352 }
4353 4353
4354 static int ata_dma_blacklisted(const struct ata_device *dev) 4354 static int ata_dma_blacklisted(const struct ata_device *dev)
4355 { 4355 {
4356 /* We don't support polling DMA. 4356 /* We don't support polling DMA.
4357 * DMA-blacklist those ATAPI devices with CDB-intr (and use PIO) 4357 * DMA-blacklist those ATAPI devices with CDB-intr (and use PIO)
4358 * if the LLDD handles only interrupts in the HSM_ST_LAST state. 4358 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4359 */ 4359 */
4360 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) && 4360 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4361 (dev->flags & ATA_DFLAG_CDB_INTR)) 4361 (dev->flags & ATA_DFLAG_CDB_INTR))
4362 return 1; 4362 return 1;
4363 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0; 4363 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4364 } 4364 }
4365 4365
4366 /** 4366 /**
4367 * ata_is_40wire - check drive side detection 4367 * ata_is_40wire - check drive side detection
4368 * @dev: device 4368 * @dev: device
4369 * 4369 *
4370 * Perform drive side detection decoding, allowing for device vendors 4370 * Perform drive side detection decoding, allowing for device vendors
4371 * who can't follow the documentation. 4371 * who can't follow the documentation.
4372 */ 4372 */
4373 4373
4374 static int ata_is_40wire(struct ata_device *dev) 4374 static int ata_is_40wire(struct ata_device *dev)
4375 { 4375 {
4376 if (dev->horkage & ATA_HORKAGE_IVB) 4376 if (dev->horkage & ATA_HORKAGE_IVB)
4377 return ata_drive_40wire_relaxed(dev->id); 4377 return ata_drive_40wire_relaxed(dev->id);
4378 return ata_drive_40wire(dev->id); 4378 return ata_drive_40wire(dev->id);
4379 } 4379 }
4380 4380
4381 /** 4381 /**
4382 * cable_is_40wire - 40/80/SATA decider 4382 * cable_is_40wire - 40/80/SATA decider
4383 * @ap: port to consider 4383 * @ap: port to consider
4384 * 4384 *
4385 * This function encapsulates the policy for speed management 4385 * This function encapsulates the policy for speed management
4386 * in one place. At the moment we don't cache the result but 4386 * in one place. At the moment we don't cache the result but
4387 * there is a good case for setting ap->cbl to the result when 4387 * there is a good case for setting ap->cbl to the result when
4388 * we are called with unknown cables (and figuring out if it 4388 * we are called with unknown cables (and figuring out if it
4389 * impacts hotplug at all). 4389 * impacts hotplug at all).
4390 * 4390 *
4391 * Return 1 if the cable appears to be 40 wire. 4391 * Return 1 if the cable appears to be 40 wire.
4392 */ 4392 */
4393 4393
4394 static int cable_is_40wire(struct ata_port *ap) 4394 static int cable_is_40wire(struct ata_port *ap)
4395 { 4395 {
4396 struct ata_link *link; 4396 struct ata_link *link;
4397 struct ata_device *dev; 4397 struct ata_device *dev;
4398 4398
4399 /* If the controller thinks we are 40 wire, we are. */ 4399 /* If the controller thinks we are 40 wire, we are. */
4400 if (ap->cbl == ATA_CBL_PATA40) 4400 if (ap->cbl == ATA_CBL_PATA40)
4401 return 1; 4401 return 1;
4402 4402
4403 /* If the controller thinks we are 80 wire, we are. */ 4403 /* If the controller thinks we are 80 wire, we are. */
4404 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA) 4404 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4405 return 0; 4405 return 0;
4406 4406
4407 /* If the system is known to use a 40 wire short cable (e.g. 4407 /* If the system is known to use a 40 wire short cable (e.g.
4408 * laptop), then we allow 80 wire modes even if the drive 4408 * laptop), then we allow 80 wire modes even if the drive
4409 * isn't sure. 4409 * isn't sure.
4410 */ 4410 */
4411 if (ap->cbl == ATA_CBL_PATA40_SHORT) 4411 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4412 return 0; 4412 return 0;
4413 4413
4414 /* If the controller doesn't know, we scan. 4414 /* If the controller doesn't know, we scan.
4415 * 4415 *
4416 * Note: We look for all 40 wire detects at this point. Any 4416 * Note: We look for all 40 wire detects at this point. Any
4417 * 80 wire detect is taken to be 80 wire cable because 4417 * 80 wire detect is taken to be 80 wire cable because
4418 * - in many setups only the one drive (slave if present) will 4418 * - in many setups only the one drive (slave if present) will
4419 * give a valid detect 4419 * give a valid detect
4420 * - if you have a non detect capable drive you don't want it 4420 * - if you have a non detect capable drive you don't want it
4421 * to colour the choice 4421 * to colour the choice
4422 */ 4422 */
4423 ata_for_each_link(link, ap, EDGE) { 4423 ata_for_each_link(link, ap, EDGE) {
4424 ata_for_each_dev(dev, link, ENABLED) { 4424 ata_for_each_dev(dev, link, ENABLED) {
4425 if (!ata_is_40wire(dev)) 4425 if (!ata_is_40wire(dev))
4426 return 0; 4426 return 0;
4427 } 4427 }
4428 } 4428 }
4429 return 1; 4429 return 1;
4430 } 4430 }
4431 4431
4432 /** 4432 /**
4433 * ata_dev_xfermask - Compute supported xfermask of the given device 4433 * ata_dev_xfermask - Compute supported xfermask of the given device
4434 * @dev: Device to compute xfermask for 4434 * @dev: Device to compute xfermask for
4435 * 4435 *
4436 * Compute supported xfermask of @dev and store it in 4436 * Compute supported xfermask of @dev and store it in
4437 * dev->*_mask. This function is responsible for applying all 4437 * dev->*_mask. This function is responsible for applying all
4438 * known limits including host controller limits, device 4438 * known limits including host controller limits, device
4439 * blacklist, etc... 4439 * blacklist, etc...
4440 * 4440 *
4441 * LOCKING: 4441 * LOCKING:
4442 * None. 4442 * None.
4443 */ 4443 */
4444 static void ata_dev_xfermask(struct ata_device *dev) 4444 static void ata_dev_xfermask(struct ata_device *dev)
4445 { 4445 {
4446 struct ata_link *link = dev->link; 4446 struct ata_link *link = dev->link;
4447 struct ata_port *ap = link->ap; 4447 struct ata_port *ap = link->ap;
4448 struct ata_host *host = ap->host; 4448 struct ata_host *host = ap->host;
4449 unsigned long xfer_mask; 4449 unsigned long xfer_mask;
4450 4450
4451 /* controller modes available */ 4451 /* controller modes available */
4452 xfer_mask = ata_pack_xfermask(ap->pio_mask, 4452 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4453 ap->mwdma_mask, ap->udma_mask); 4453 ap->mwdma_mask, ap->udma_mask);
4454 4454
4455 /* drive modes available */ 4455 /* drive modes available */
4456 xfer_mask &= ata_pack_xfermask(dev->pio_mask, 4456 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4457 dev->mwdma_mask, dev->udma_mask); 4457 dev->mwdma_mask, dev->udma_mask);
4458 xfer_mask &= ata_id_xfermask(dev->id); 4458 xfer_mask &= ata_id_xfermask(dev->id);
4459 4459
4460 /* 4460 /*
4461 * CFA Advanced TrueIDE timings are not allowed on a shared 4461 * CFA Advanced TrueIDE timings are not allowed on a shared
4462 * cable 4462 * cable
4463 */ 4463 */
4464 if (ata_dev_pair(dev)) { 4464 if (ata_dev_pair(dev)) {
4465 /* No PIO5 or PIO6 */ 4465 /* No PIO5 or PIO6 */
4466 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5)); 4466 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4467 /* No MWDMA3 or MWDMA4 */ 4467 /* No MWDMA3 or MWDMA4 */
4468 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3)); 4468 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4469 } 4469 }
4470 4470
4471 if (ata_dma_blacklisted(dev)) { 4471 if (ata_dma_blacklisted(dev)) {
4472 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4472 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4473 ata_dev_printk(dev, KERN_WARNING, 4473 ata_dev_printk(dev, KERN_WARNING,
4474 "device is on DMA blacklist, disabling DMA\n"); 4474 "device is on DMA blacklist, disabling DMA\n");
4475 } 4475 }
4476 4476
4477 if ((host->flags & ATA_HOST_SIMPLEX) && 4477 if ((host->flags & ATA_HOST_SIMPLEX) &&
4478 host->simplex_claimed && host->simplex_claimed != ap) { 4478 host->simplex_claimed && host->simplex_claimed != ap) {
4479 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4479 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4480 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by " 4480 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4481 "other device, disabling DMA\n"); 4481 "other device, disabling DMA\n");
4482 } 4482 }
4483 4483
4484 if (ap->flags & ATA_FLAG_NO_IORDY) 4484 if (ap->flags & ATA_FLAG_NO_IORDY)
4485 xfer_mask &= ata_pio_mask_no_iordy(dev); 4485 xfer_mask &= ata_pio_mask_no_iordy(dev);
4486 4486
4487 if (ap->ops->mode_filter) 4487 if (ap->ops->mode_filter)
4488 xfer_mask = ap->ops->mode_filter(dev, xfer_mask); 4488 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4489 4489
4490 /* Apply cable rule here. Don't apply it early because when 4490 /* Apply cable rule here. Don't apply it early because when
4491 * we handle hot plug the cable type can itself change. 4491 * we handle hot plug the cable type can itself change.
4492 * Check this last so that we know if the transfer rate was 4492 * Check this last so that we know if the transfer rate was
4493 * solely limited by the cable. 4493 * solely limited by the cable.
4494 * Unknown or 80 wire cables reported host side are checked 4494 * Unknown or 80 wire cables reported host side are checked
4495 * drive side as well. Cases where we know a 40wire cable 4495 * drive side as well. Cases where we know a 40wire cable
4496 * is used safely for 80 are not checked here. 4496 * is used safely for 80 are not checked here.
4497 */ 4497 */
4498 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA)) 4498 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4499 /* UDMA/44 or higher would be available */ 4499 /* UDMA/44 or higher would be available */
4500 if (cable_is_40wire(ap)) { 4500 if (cable_is_40wire(ap)) {
4501 ata_dev_printk(dev, KERN_WARNING, 4501 ata_dev_printk(dev, KERN_WARNING,
4502 "limited to UDMA/33 due to 40-wire cable\n"); 4502 "limited to UDMA/33 due to 40-wire cable\n");
4503 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); 4503 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4504 } 4504 }
4505 4505
4506 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, 4506 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4507 &dev->mwdma_mask, &dev->udma_mask); 4507 &dev->mwdma_mask, &dev->udma_mask);
4508 } 4508 }
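/*
 * Illustrative sketch, not part of the original code: the packed-mask
 * arithmetic used above keeps PIO, MWDMA and UDMA capability bits in
 * one unsigned long so whole ranges of modes can be cleared with a
 * single shifted mask.  The DEMO_SHIFT_* values are hypothetical,
 * chosen only for illustration; the real shifts come from the
 * ATA_SHIFT_* constants in <linux/ata.h>.
 */
enum { DEMO_SHIFT_PIO = 0, DEMO_SHIFT_MWDMA = 8, DEMO_SHIFT_UDMA = 16 };

static unsigned long __maybe_unused demo_pack_xfermask(unsigned long pio,
		unsigned long mwdma, unsigned long udma)
{
	return (pio << DEMO_SHIFT_PIO) | (mwdma << DEMO_SHIFT_MWDMA) |
	       (udma << DEMO_SHIFT_UDMA);
}

/*
 * E.g. with pio = 0x7f (PIO0-6 supported), clearing PIO5/PIO6 the
 * way ata_dev_xfermask() does for paired CFA devices:
 *
 *	mask = demo_pack_xfermask(0x7f, 0, 0);
 *	mask &= ~(0x03UL << (DEMO_SHIFT_PIO + 5));	mask is now 0x1f
 */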
4509 4509
4510 /** 4510 /**
4511 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command 4511 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4512 * @dev: Device to which command will be sent 4512 * @dev: Device to which command will be sent
4513 * 4513 *
4514 * Issue SET FEATURES - XFER MODE command to device @dev 4514 * Issue SET FEATURES - XFER MODE command to device @dev
4515 * on port @ap. 4515 * on port @ap.
4516 * 4516 *
4517 * LOCKING: 4517 * LOCKING:
4518 * PCI/etc. bus probe sem. 4518 * PCI/etc. bus probe sem.
4519 * 4519 *
4520 * RETURNS: 4520 * RETURNS:
4521 * 0 on success, AC_ERR_* mask otherwise. 4521 * 0 on success, AC_ERR_* mask otherwise.
4522 */ 4522 */
4523 4523
4524 static unsigned int ata_dev_set_xfermode(struct ata_device *dev) 4524 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4525 { 4525 {
4526 struct ata_taskfile tf; 4526 struct ata_taskfile tf;
4527 unsigned int err_mask; 4527 unsigned int err_mask;
4528 4528
4529 /* set up set-features taskfile */ 4529 /* set up set-features taskfile */
4530 DPRINTK("set features - xfer mode\n"); 4530 DPRINTK("set features - xfer mode\n");
4531 4531
4532 /* Some controllers and ATAPI devices show flaky interrupt 4532 /* Some controllers and ATAPI devices show flaky interrupt
4533 * behavior after setting xfer mode. Use polling instead. 4533 * behavior after setting xfer mode. Use polling instead.
4534 */ 4534 */
4535 ata_tf_init(dev, &tf); 4535 ata_tf_init(dev, &tf);
4536 tf.command = ATA_CMD_SET_FEATURES; 4536 tf.command = ATA_CMD_SET_FEATURES;
4537 tf.feature = SETFEATURES_XFER; 4537 tf.feature = SETFEATURES_XFER;
4538 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING; 4538 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4539 tf.protocol = ATA_PROT_NODATA; 4539 tf.protocol = ATA_PROT_NODATA;
4540 /* If we are using IORDY we must send the mode setting command */ 4540 /* If we are using IORDY we must send the mode setting command */
4541 if (ata_pio_need_iordy(dev)) 4541 if (ata_pio_need_iordy(dev))
4542 tf.nsect = dev->xfer_mode; 4542 tf.nsect = dev->xfer_mode;
4543 /* If the device has IORDY and the controller does not - turn it off */ 4543 /* If the device has IORDY and the controller does not - turn it off */
4544 else if (ata_id_has_iordy(dev->id)) 4544 else if (ata_id_has_iordy(dev->id))
4545 tf.nsect = 0x01; 4545 tf.nsect = 0x01;
4546 else /* In the ancient relic department - skip all of this */ 4546 else /* In the ancient relic department - skip all of this */
4547 return 0; 4547 return 0;
4548 4548
4549 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4549 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4550 4550
4551 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4551 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4552 return err_mask; 4552 return err_mask;
4553 } 4553 }
4554 /** 4554 /**
4555 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES 4555 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4556 * @dev: Device to which command will be sent 4556 * @dev: Device to which command will be sent
4557 * @enable: Whether to enable or disable the feature 4557 * @enable: Whether to enable or disable the feature
4558 * @feature: The feature to set, passed in the sector count field 4558 * @feature: The feature to set, passed in the sector count field
4559 * 4559 *
4560 * Issue SET FEATURES - SATA FEATURES command to device @dev 4560 * Issue SET FEATURES - SATA FEATURES command to device @dev
4561 * on port @ap with the sector count set to @feature. 4561 * on port @ap with the sector count set to @feature.
4562 * 4562 *
4563 * LOCKING: 4563 * LOCKING:
4564 * PCI/etc. bus probe sem. 4564 * PCI/etc. bus probe sem.
4565 * 4565 *
4566 * RETURNS: 4566 * RETURNS:
4567 * 0 on success, AC_ERR_* mask otherwise. 4567 * 0 on success, AC_ERR_* mask otherwise.
4568 */ 4568 */
4569 static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, 4569 static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4570 u8 feature) 4570 u8 feature)
4571 { 4571 {
4572 struct ata_taskfile tf; 4572 struct ata_taskfile tf;
4573 unsigned int err_mask; 4573 unsigned int err_mask;
4574 4574
4575 /* set up set-features taskfile */ 4575 /* set up set-features taskfile */
4576 DPRINTK("set features - SATA features\n"); 4576 DPRINTK("set features - SATA features\n");
4577 4577
4578 ata_tf_init(dev, &tf); 4578 ata_tf_init(dev, &tf);
4579 tf.command = ATA_CMD_SET_FEATURES; 4579 tf.command = ATA_CMD_SET_FEATURES;
4580 tf.feature = enable; 4580 tf.feature = enable;
4581 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 4581 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4582 tf.protocol = ATA_PROT_NODATA; 4582 tf.protocol = ATA_PROT_NODATA;
4583 tf.nsect = feature; 4583 tf.nsect = feature;
4584 4584
4585 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4585 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4586 4586
4587 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4587 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4588 return err_mask; 4588 return err_mask;
4589 } 4589 }
4590 4590
4591 /** 4591 /**
4592 * ata_dev_init_params - Issue INIT DEV PARAMS command 4592 * ata_dev_init_params - Issue INIT DEV PARAMS command
4593 * @dev: Device to which command will be sent 4593 * @dev: Device to which command will be sent
4594 * @heads: Number of heads (taskfile parameter) 4594 * @heads: Number of heads (taskfile parameter)
4595 * @sectors: Number of sectors (taskfile parameter) 4595 * @sectors: Number of sectors (taskfile parameter)
4596 * 4596 *
4597 * LOCKING: 4597 * LOCKING:
4598 * Kernel thread context (may sleep) 4598 * Kernel thread context (may sleep)
4599 * 4599 *
4600 * RETURNS: 4600 * RETURNS:
4601 * 0 on success, AC_ERR_* mask otherwise. 4601 * 0 on success, AC_ERR_* mask otherwise.
4602 */ 4602 */
4603 static unsigned int ata_dev_init_params(struct ata_device *dev, 4603 static unsigned int ata_dev_init_params(struct ata_device *dev,
4604 u16 heads, u16 sectors) 4604 u16 heads, u16 sectors)
4605 { 4605 {
4606 struct ata_taskfile tf; 4606 struct ata_taskfile tf;
4607 unsigned int err_mask; 4607 unsigned int err_mask;
4608 4608
4609 /* Number of sectors per track 1-255. Number of heads 1-16 */ 4609 /* Number of sectors per track 1-255. Number of heads 1-16 */
4610 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) 4610 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4611 return AC_ERR_INVALID; 4611 return AC_ERR_INVALID;
4612 4612
4613 /* set up init dev params taskfile */ 4613 /* set up init dev params taskfile */
4614 DPRINTK("init dev params \n"); 4614 DPRINTK("init dev params \n");
4615 4615
4616 ata_tf_init(dev, &tf); 4616 ata_tf_init(dev, &tf);
4617 tf.command = ATA_CMD_INIT_DEV_PARAMS; 4617 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4618 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 4618 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4619 tf.protocol = ATA_PROT_NODATA; 4619 tf.protocol = ATA_PROT_NODATA;
4620 tf.nsect = sectors; 4620 tf.nsect = sectors;
4621 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ 4621 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4622 4622
4623 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4623 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4624 /* A clean abort indicates an original or just-out-of-spec drive, 4624 /* A clean abort indicates an original or just-out-of-spec drive,
4625 and we should continue as we issue the setup based on the 4625 and we should continue as we issue the setup based on the
4626 drive's reported working geometry */ 4626 drive's reported working geometry */
4627 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) 4627 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4628 err_mask = 0; 4628 err_mask = 0;
4629 4629
4630 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4630 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4631 return err_mask; 4631 return err_mask;
4632 } 4632 }
4633 4633
4634 /** 4634 /**
4635 * ata_sg_clean - Unmap DMA memory associated with command 4635 * ata_sg_clean - Unmap DMA memory associated with command
4636 * @qc: Command containing DMA memory to be released 4636 * @qc: Command containing DMA memory to be released
4637 * 4637 *
4638 * Unmap all mapped DMA memory associated with this command. 4638 * Unmap all mapped DMA memory associated with this command.
4639 * 4639 *
4640 * LOCKING: 4640 * LOCKING:
4641 * spin_lock_irqsave(host lock) 4641 * spin_lock_irqsave(host lock)
4642 */ 4642 */
4643 void ata_sg_clean(struct ata_queued_cmd *qc) 4643 void ata_sg_clean(struct ata_queued_cmd *qc)
4644 { 4644 {
4645 struct ata_port *ap = qc->ap; 4645 struct ata_port *ap = qc->ap;
4646 struct scatterlist *sg = qc->sg; 4646 struct scatterlist *sg = qc->sg;
4647 int dir = qc->dma_dir; 4647 int dir = qc->dma_dir;
4648 4648
4649 WARN_ON_ONCE(sg == NULL); 4649 WARN_ON_ONCE(sg == NULL);
4650 4650
4651 VPRINTK("unmapping %u sg elements\n", qc->n_elem); 4651 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4652 4652
4653 if (qc->n_elem) 4653 if (qc->n_elem)
4654 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir); 4654 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4655 4655
4656 qc->flags &= ~ATA_QCFLAG_DMAMAP; 4656 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4657 qc->sg = NULL; 4657 qc->sg = NULL;
4658 } 4658 }
4659 4659
4660 /** 4660 /**
4661 * atapi_check_dma - Check whether ATAPI DMA can be supported 4661 * atapi_check_dma - Check whether ATAPI DMA can be supported
4662 * @qc: Metadata associated with taskfile to check 4662 * @qc: Metadata associated with taskfile to check
4663 * 4663 *
4664 * Allow low-level driver to filter ATA PACKET commands, returning 4664 * Allow low-level driver to filter ATA PACKET commands, returning
4665 * a status indicating whether or not it is OK to use DMA for the 4665 * a status indicating whether or not it is OK to use DMA for the
4666 * supplied PACKET command. 4666 * supplied PACKET command.
4667 * 4667 *
4668 * LOCKING: 4668 * LOCKING:
4669 * spin_lock_irqsave(host lock) 4669 * spin_lock_irqsave(host lock)
4670 * 4670 *
4671 * RETURNS: 0 when ATAPI DMA can be used 4671 * RETURNS: 0 when ATAPI DMA can be used
4672 * nonzero otherwise 4672 * nonzero otherwise
4673 */ 4673 */
4674 int atapi_check_dma(struct ata_queued_cmd *qc) 4674 int atapi_check_dma(struct ata_queued_cmd *qc)
4675 { 4675 {
4676 struct ata_port *ap = qc->ap; 4676 struct ata_port *ap = qc->ap;
4677 4677
4678 /* Don't allow DMA if the length isn't a multiple of 16 bytes. Quite a 4678 /* Don't allow DMA if the length isn't a multiple of 16 bytes. Quite a
4679 * few ATAPI devices choke on such DMA requests. 4679 * few ATAPI devices choke on such DMA requests.
4680 */ 4680 */
4681 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) && 4681 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4682 unlikely(qc->nbytes & 15)) 4682 unlikely(qc->nbytes & 15))
4683 return 1; 4683 return 1;
4684 4684
4685 if (ap->ops->check_atapi_dma) 4685 if (ap->ops->check_atapi_dma)
4686 return ap->ops->check_atapi_dma(qc); 4686 return ap->ops->check_atapi_dma(qc);
4687 4687
4688 return 0; 4688 return 0;
4689 } 4689 }
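/*
 * Illustrative aside, not part of the original code: for a power of
 * two N, "x & (N - 1)" equals "x % N", so the "qc->nbytes & 15" test
 * above is a cheap way of asking whether the transfer length is a
 * multiple of 16 bytes.
 */
static inline int __maybe_unused demo_is_mod16(unsigned int nbytes)
{
	return (nbytes & 15) == 0;	/* same as nbytes % 16 == 0 */
}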
4690 4690
4691 /** 4691 /**
4692 * ata_std_qc_defer - Check whether a qc needs to be deferred 4692 * ata_std_qc_defer - Check whether a qc needs to be deferred
4693 * @qc: ATA command in question 4693 * @qc: ATA command in question
4694 * 4694 *
4695 * Non-NCQ commands cannot run with any other command, NCQ or 4695 * Non-NCQ commands cannot run with any other command, NCQ or
4696 * not. As the upper layer only knows the queue depth, we are 4696 * not. As the upper layer only knows the queue depth, we are
4697 * responsible for maintaining exclusion. This function checks 4697 * responsible for maintaining exclusion. This function checks
4698 * whether a new command @qc can be issued. 4698 * whether a new command @qc can be issued.
4699 * 4699 *
4700 * LOCKING: 4700 * LOCKING:
4701 * spin_lock_irqsave(host lock) 4701 * spin_lock_irqsave(host lock)
4702 * 4702 *
4703 * RETURNS: 4703 * RETURNS:
4704 * ATA_DEFER_* if deferring is needed, 0 otherwise. 4704 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4705 */ 4705 */
4706 int ata_std_qc_defer(struct ata_queued_cmd *qc) 4706 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4707 { 4707 {
4708 struct ata_link *link = qc->dev->link; 4708 struct ata_link *link = qc->dev->link;
4709 4709
4710 if (qc->tf.protocol == ATA_PROT_NCQ) { 4710 if (qc->tf.protocol == ATA_PROT_NCQ) {
4711 if (!ata_tag_valid(link->active_tag)) 4711 if (!ata_tag_valid(link->active_tag))
4712 return 0; 4712 return 0;
4713 } else { 4713 } else {
4714 if (!ata_tag_valid(link->active_tag) && !link->sactive) 4714 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4715 return 0; 4715 return 0;
4716 } 4716 }
4717 4717
4718 return ATA_DEFER_LINK; 4718 return ATA_DEFER_LINK;
4719 } 4719 }
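/*
 * Illustrative table, not part of the original code: the exclusion
 * rule enforced by ata_std_qc_defer(), over hypothetical link states.
 *
 *	new qc      active_tag    sactive     result
 *	NCQ         invalid       any         issue (0)
 *	NCQ         valid         -           ATA_DEFER_LINK
 *	non-NCQ     invalid       0           issue (0)
 *	non-NCQ     invalid       non-zero    ATA_DEFER_LINK
 *	non-NCQ     valid         -           ATA_DEFER_LINK
 *
 * In short: NCQ commands may overlap other NCQ commands, while a
 * non-NCQ command requires the link to be completely idle.
 */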
4720 4720
4721 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { } 4721 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4722 4722
4723 /** 4723 /**
4724 * ata_sg_init - Associate command with scatter-gather table. 4724 * ata_sg_init - Associate command with scatter-gather table.
4725 * @qc: Command to be associated 4725 * @qc: Command to be associated
4726 * @sg: Scatter-gather table. 4726 * @sg: Scatter-gather table.
4727 * @n_elem: Number of elements in s/g table. 4727 * @n_elem: Number of elements in s/g table.
4728 * 4728 *
4729 * Initialize the data-related elements of queued_cmd @qc 4729 * Initialize the data-related elements of queued_cmd @qc
4730 * to point to a scatter-gather table @sg, containing @n_elem 4730 * to point to a scatter-gather table @sg, containing @n_elem
4731 * elements. 4731 * elements.
4732 * 4732 *
4733 * LOCKING: 4733 * LOCKING:
4734 * spin_lock_irqsave(host lock) 4734 * spin_lock_irqsave(host lock)
4735 */ 4735 */
4736 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 4736 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4737 unsigned int n_elem) 4737 unsigned int n_elem)
4738 { 4738 {
4739 qc->sg = sg; 4739 qc->sg = sg;
4740 qc->n_elem = n_elem; 4740 qc->n_elem = n_elem;
4741 qc->cursg = qc->sg; 4741 qc->cursg = qc->sg;
4742 } 4742 }
4743 4743
4744 /** 4744 /**
4745 * ata_sg_setup - DMA-map the scatter-gather table associated with a command. 4745 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4746 * @qc: Command with scatter-gather table to be mapped. 4746 * @qc: Command with scatter-gather table to be mapped.
4747 * 4747 *
4748 * DMA-map the scatter-gather table associated with queued_cmd @qc. 4748 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4749 * 4749 *
4750 * LOCKING: 4750 * LOCKING:
4751 * spin_lock_irqsave(host lock) 4751 * spin_lock_irqsave(host lock)
4752 * 4752 *
4753 * RETURNS: 4753 * RETURNS:
4754 * Zero on success, negative on error. 4754 * Zero on success, negative on error.
4755 * 4755 *
4756 */ 4756 */
4757 static int ata_sg_setup(struct ata_queued_cmd *qc) 4757 static int ata_sg_setup(struct ata_queued_cmd *qc)
4758 { 4758 {
4759 struct ata_port *ap = qc->ap; 4759 struct ata_port *ap = qc->ap;
4760 unsigned int n_elem; 4760 unsigned int n_elem;
4761 4761
4762 VPRINTK("ENTER, ata%u\n", ap->print_id); 4762 VPRINTK("ENTER, ata%u\n", ap->print_id);
4763 4763
4764 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir); 4764 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4765 if (n_elem < 1) 4765 if (n_elem < 1)
4766 return -1; 4766 return -1;
4767 4767
4768 DPRINTK("%d sg elements mapped\n", n_elem); 4768 DPRINTK("%d sg elements mapped\n", n_elem);
4769 qc->orig_n_elem = qc->n_elem; 4769 qc->orig_n_elem = qc->n_elem;
4770 qc->n_elem = n_elem; 4770 qc->n_elem = n_elem;
4771 qc->flags |= ATA_QCFLAG_DMAMAP; 4771 qc->flags |= ATA_QCFLAG_DMAMAP;
4772 4772
4773 return 0; 4773 return 0;
4774 } 4774 }
4775 4775
4776 /** 4776 /**
4777 * swap_buf_le16 - swap halves of 16-bit words in place 4777 * swap_buf_le16 - swap halves of 16-bit words in place
4778 * @buf: Buffer to swap 4778 * @buf: Buffer to swap
4779 * @buf_words: Number of 16-bit words in buffer. 4779 * @buf_words: Number of 16-bit words in buffer.
4780 * 4780 *
4781 * Swap halves of 16-bit words if needed to convert from 4781 * Swap halves of 16-bit words if needed to convert from
4782 * little-endian byte order to native cpu byte order, or 4782 * little-endian byte order to native cpu byte order, or
4783 * vice-versa. 4783 * vice-versa.
4784 * 4784 *
4785 * LOCKING: 4785 * LOCKING:
4786 * Inherited from caller. 4786 * Inherited from caller.
4787 */ 4787 */
4788 void swap_buf_le16(u16 *buf, unsigned int buf_words) 4788 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4789 { 4789 {
4790 #ifdef __BIG_ENDIAN 4790 #ifdef __BIG_ENDIAN
4791 unsigned int i; 4791 unsigned int i;
4792 4792
4793 for (i = 0; i < buf_words; i++) 4793 for (i = 0; i < buf_words; i++)
4794 buf[i] = le16_to_cpu(buf[i]); 4794 buf[i] = le16_to_cpu(buf[i]);
4795 #endif /* __BIG_ENDIAN */ 4795 #endif /* __BIG_ENDIAN */
4796 } 4796 }
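/*
 * Illustrative sketch, not part of the original code: typical use of
 * swap_buf_le16() on an IDENTIFY-style buffer (the caller and buffer
 * here are hypothetical).  On little-endian builds the call compiles
 * to nothing; on big-endian it byte-swaps every 16-bit word in place.
 */
static void __maybe_unused demo_fix_id_buffer(u16 *id_buf)
{
	/* device data arrives as little-endian 16-bit words */
	swap_buf_le16(id_buf, ATA_ID_WORDS);
}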
4797 4797
4798 /** 4798 /**
4799 * ata_qc_new - Request an available ATA command, for queueing 4799 * ata_qc_new - Request an available ATA command, for queueing
4800 * @ap: target port 4800 * @ap: target port
4801 * 4801 *
4802 * LOCKING: 4802 * LOCKING:
4803 * None. 4803 * None.
4804 */ 4804 */
4805 4805
4806 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) 4806 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4807 { 4807 {
4808 struct ata_queued_cmd *qc = NULL; 4808 struct ata_queued_cmd *qc = NULL;
4809 unsigned int i; 4809 unsigned int i;
4810 4810
4811 /* no command while frozen */ 4811 /* no command while frozen */
4812 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) 4812 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4813 return NULL; 4813 return NULL;
4814 4814
4815 /* the last tag is reserved for internal command. */ 4815 /* the last tag is reserved for internal command. */
4816 for (i = 0; i < ATA_MAX_QUEUE - 1; i++) 4816 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4817 if (!test_and_set_bit(i, &ap->qc_allocated)) { 4817 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4818 qc = __ata_qc_from_tag(ap, i); 4818 qc = __ata_qc_from_tag(ap, i);
4819 break; 4819 break;
4820 } 4820 }
4821 4821
4822 if (qc) 4822 if (qc)
4823 qc->tag = i; 4823 qc->tag = i;
4824 4824
4825 return qc; 4825 return qc;
4826 } 4826 }
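/*
 * Illustrative sketch, not part of the original code: because
 * test_and_set_bit() is atomic, the loop above amounts to a
 * lock-free first-fit tag allocator.  A generic version, with a
 * hypothetical bitmap and limit:
 */
static unsigned int __maybe_unused demo_alloc_tag(unsigned long *bitmap,
		unsigned int max_tags)
{
	unsigned int i;

	for (i = 0; i < max_tags; i++)
		if (!test_and_set_bit(i, bitmap))
			return i;	/* atomically claimed tag i */
	return max_tags;		/* all tags in use */
}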
4827 4827
4828 /** 4828 /**
4829 * ata_qc_new_init - Request an available ATA command, and initialize it 4829 * ata_qc_new_init - Request an available ATA command, and initialize it
4830 * @dev: Device from which we request an available command structure 4830 * @dev: Device from which we request an available command structure
4831 * 4831 *
4832 * LOCKING: 4832 * LOCKING:
4833 * None. 4833 * None.
4834 */ 4834 */
4835 4835
4836 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev) 4836 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4837 { 4837 {
4838 struct ata_port *ap = dev->link->ap; 4838 struct ata_port *ap = dev->link->ap;
4839 struct ata_queued_cmd *qc; 4839 struct ata_queued_cmd *qc;
4840 4840
4841 qc = ata_qc_new(ap); 4841 qc = ata_qc_new(ap);
4842 if (qc) { 4842 if (qc) {
4843 qc->scsicmd = NULL; 4843 qc->scsicmd = NULL;
4844 qc->ap = ap; 4844 qc->ap = ap;
4845 qc->dev = dev; 4845 qc->dev = dev;
4846 4846
4847 ata_qc_reinit(qc); 4847 ata_qc_reinit(qc);
4848 } 4848 }
4849 4849
4850 return qc; 4850 return qc;
4851 } 4851 }
4852 4852
4853 /** 4853 /**
4854 * ata_qc_free - free unused ata_queued_cmd 4854 * ata_qc_free - free unused ata_queued_cmd
4855 * @qc: Command to complete 4855 * @qc: Command to complete
4856 * 4856 *
4857 * Designed to free an unused ata_queued_cmd object 4857 * Designed to free an unused ata_queued_cmd object
4858 * in case something prevents using it. 4858 * in case something prevents using it.
4859 * 4859 *
4860 * LOCKING: 4860 * LOCKING:
4861 * spin_lock_irqsave(host lock) 4861 * spin_lock_irqsave(host lock)
4862 */ 4862 */
4863 void ata_qc_free(struct ata_queued_cmd *qc) 4863 void ata_qc_free(struct ata_queued_cmd *qc)
4864 { 4864 {
4865 struct ata_port *ap; 4865 struct ata_port *ap;
4866 unsigned int tag; 4866 unsigned int tag;
4867 4867
4868 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4868 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4869 ap = qc->ap; 4869 ap = qc->ap;
4870 4870
4871 qc->flags = 0; 4871 qc->flags = 0;
4872 tag = qc->tag; 4872 tag = qc->tag;
4873 if (likely(ata_tag_valid(tag))) { 4873 if (likely(ata_tag_valid(tag))) {
4874 qc->tag = ATA_TAG_POISON; 4874 qc->tag = ATA_TAG_POISON;
4875 clear_bit(tag, &ap->qc_allocated); 4875 clear_bit(tag, &ap->qc_allocated);
4876 } 4876 }
4877 } 4877 }
4878 4878
4879 void __ata_qc_complete(struct ata_queued_cmd *qc) 4879 void __ata_qc_complete(struct ata_queued_cmd *qc)
4880 { 4880 {
4881 struct ata_port *ap; 4881 struct ata_port *ap;
4882 struct ata_link *link; 4882 struct ata_link *link;
4883 4883
4884 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4884 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4885 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)); 4885 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
4886 ap = qc->ap; 4886 ap = qc->ap;
4887 link = qc->dev->link; 4887 link = qc->dev->link;
4888 4888
4889 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 4889 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4890 ata_sg_clean(qc); 4890 ata_sg_clean(qc);
4891 4891
4892 /* command should be marked inactive atomically with qc completion */ 4892 /* command should be marked inactive atomically with qc completion */
4893 if (qc->tf.protocol == ATA_PROT_NCQ) { 4893 if (qc->tf.protocol == ATA_PROT_NCQ) {
4894 link->sactive &= ~(1 << qc->tag); 4894 link->sactive &= ~(1 << qc->tag);
4895 if (!link->sactive) 4895 if (!link->sactive)
4896 ap->nr_active_links--; 4896 ap->nr_active_links--;
4897 } else { 4897 } else {
4898 link->active_tag = ATA_TAG_POISON; 4898 link->active_tag = ATA_TAG_POISON;
4899 ap->nr_active_links--; 4899 ap->nr_active_links--;
4900 } 4900 }
4901 4901
4902 /* clear exclusive status */ 4902 /* clear exclusive status */
4903 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL && 4903 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4904 ap->excl_link == link)) 4904 ap->excl_link == link))
4905 ap->excl_link = NULL; 4905 ap->excl_link = NULL;
4906 4906
4907 /* atapi: mark qc as inactive to prevent the interrupt handler 4907 /* atapi: mark qc as inactive to prevent the interrupt handler
4908 * from completing the command twice later, before the error handler 4908 * from completing the command twice later, before the error handler
4909 * is called. (when rc != 0 and atapi request sense is needed) 4909 * is called. (when rc != 0 and atapi request sense is needed)
4910 */ 4910 */
4911 qc->flags &= ~ATA_QCFLAG_ACTIVE; 4911 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4912 ap->qc_active &= ~(1 << qc->tag); 4912 ap->qc_active &= ~(1 << qc->tag);
4913 4913
4914 /* call completion callback */ 4914 /* call completion callback */
4915 qc->complete_fn(qc); 4915 qc->complete_fn(qc);
4916 } 4916 }
4917 4917
4918 static void fill_result_tf(struct ata_queued_cmd *qc) 4918 static void fill_result_tf(struct ata_queued_cmd *qc)
4919 { 4919 {
4920 struct ata_port *ap = qc->ap; 4920 struct ata_port *ap = qc->ap;
4921 4921
4922 qc->result_tf.flags = qc->tf.flags; 4922 qc->result_tf.flags = qc->tf.flags;
4923 ap->ops->qc_fill_rtf(qc); 4923 ap->ops->qc_fill_rtf(qc);
4924 } 4924 }
4925 4925
4926 static void ata_verify_xfer(struct ata_queued_cmd *qc) 4926 static void ata_verify_xfer(struct ata_queued_cmd *qc)
4927 { 4927 {
4928 struct ata_device *dev = qc->dev; 4928 struct ata_device *dev = qc->dev;
4929 4929
4930 if (ata_tag_internal(qc->tag)) 4930 if (ata_tag_internal(qc->tag))
4931 return; 4931 return;
4932 4932
4933 if (ata_is_nodata(qc->tf.protocol)) 4933 if (ata_is_nodata(qc->tf.protocol))
4934 return; 4934 return;
4935 4935
4936 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol)) 4936 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4937 return; 4937 return;
4938 4938
4939 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER; 4939 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4940 } 4940 }
4941 4941
4942 /** 4942 /**
4943 * ata_qc_complete - Complete an active ATA command 4943 * ata_qc_complete - Complete an active ATA command
4944 * @qc: Command to complete 4944 * @qc: Command to complete
4945 * 4945 *
4946 * Indicate to the mid and upper layers that an ATA 4946 * Indicate to the mid and upper layers that an ATA
4947 * command has completed, with either an ok or not-ok status. 4947 * command has completed, with either an ok or not-ok status.
4948 * 4948 *
4949 * LOCKING: 4949 * LOCKING:
4950 * spin_lock_irqsave(host lock) 4950 * spin_lock_irqsave(host lock)
4951 */ 4951 */
4952 void ata_qc_complete(struct ata_queued_cmd *qc) 4952 void ata_qc_complete(struct ata_queued_cmd *qc)
4953 { 4953 {
4954 struct ata_port *ap = qc->ap; 4954 struct ata_port *ap = qc->ap;
4955 4955
4956 /* XXX: New EH and old EH use different mechanisms to 4956 /* XXX: New EH and old EH use different mechanisms to
4957 * synchronize EH with regular execution path. 4957 * synchronize EH with regular execution path.
4958 * 4958 *
4959 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED. 4959 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4960 * Normal execution path is responsible for not accessing a 4960 * Normal execution path is responsible for not accessing a
4961 * failed qc. libata core enforces the rule by returning NULL 4961 * failed qc. libata core enforces the rule by returning NULL
4962 * from ata_qc_from_tag() for failed qcs. 4962 * from ata_qc_from_tag() for failed qcs.
4963 * 4963 *
4964 * Old EH depends on ata_qc_complete() nullifying completion 4964 * Old EH depends on ata_qc_complete() nullifying completion
4965 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does 4965 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4966 * not synchronize with interrupt handler. Only PIO task is 4966 * not synchronize with interrupt handler. Only PIO task is
4967 * taken care of. 4967 * taken care of.
4968 */ 4968 */
4969 if (ap->ops->error_handler) { 4969 if (ap->ops->error_handler) {
4970 struct ata_device *dev = qc->dev; 4970 struct ata_device *dev = qc->dev;
4971 struct ata_eh_info *ehi = &dev->link->eh_info; 4971 struct ata_eh_info *ehi = &dev->link->eh_info;
4972 4972
4973 if (unlikely(qc->err_mask)) 4973 if (unlikely(qc->err_mask))
4974 qc->flags |= ATA_QCFLAG_FAILED; 4974 qc->flags |= ATA_QCFLAG_FAILED;
4975 4975
4976 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { 4976 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4977 /* always fill result TF for failed qc */ 4977 /* always fill result TF for failed qc */
4978 fill_result_tf(qc); 4978 fill_result_tf(qc);
4979 4979
4980 if (!ata_tag_internal(qc->tag)) 4980 if (!ata_tag_internal(qc->tag))
4981 ata_qc_schedule_eh(qc); 4981 ata_qc_schedule_eh(qc);
4982 else 4982 else
4983 __ata_qc_complete(qc); 4983 __ata_qc_complete(qc);
4984 return; 4984 return;
4985 } 4985 }
4986 4986
4987 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN); 4987 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
4988 4988
4989 /* read result TF if requested */ 4989 /* read result TF if requested */
4990 if (qc->flags & ATA_QCFLAG_RESULT_TF) 4990 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4991 fill_result_tf(qc); 4991 fill_result_tf(qc);
4992 4992
4993 /* Some commands need post-processing after successful 4993 /* Some commands need post-processing after successful
4994 * completion. 4994 * completion.
4995 */ 4995 */
4996 switch (qc->tf.command) { 4996 switch (qc->tf.command) {
4997 case ATA_CMD_SET_FEATURES: 4997 case ATA_CMD_SET_FEATURES:
4998 if (qc->tf.feature != SETFEATURES_WC_ON && 4998 if (qc->tf.feature != SETFEATURES_WC_ON &&
4999 qc->tf.feature != SETFEATURES_WC_OFF) 4999 qc->tf.feature != SETFEATURES_WC_OFF)
5000 break; 5000 break;
5001 /* fall through */ 5001 /* fall through */
5002 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */ 5002 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
5003 case ATA_CMD_SET_MULTI: /* multi_count changed */ 5003 case ATA_CMD_SET_MULTI: /* multi_count changed */
5004 /* revalidate device */ 5004 /* revalidate device */
5005 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE; 5005 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
5006 ata_port_schedule_eh(ap); 5006 ata_port_schedule_eh(ap);
5007 break; 5007 break;
5008 5008
5009 case ATA_CMD_SLEEP: 5009 case ATA_CMD_SLEEP:
5010 dev->flags |= ATA_DFLAG_SLEEPING; 5010 dev->flags |= ATA_DFLAG_SLEEPING;
5011 break; 5011 break;
5012 } 5012 }
5013 5013
5014 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) 5014 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
5015 ata_verify_xfer(qc); 5015 ata_verify_xfer(qc);
5016 5016
5017 __ata_qc_complete(qc); 5017 __ata_qc_complete(qc);
5018 } else { 5018 } else {
5019 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED) 5019 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5020 return; 5020 return;
5021 5021
5022 /* read result TF if failed or requested */ 5022 /* read result TF if failed or requested */
5023 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF) 5023 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5024 fill_result_tf(qc); 5024 fill_result_tf(qc);
5025 5025
5026 __ata_qc_complete(qc); 5026 __ata_qc_complete(qc);
5027 } 5027 }
5028 } 5028 }
5029 5029
5030 /** 5030 /**
5031 * ata_qc_complete_multiple - Complete multiple qcs successfully 5031 * ata_qc_complete_multiple - Complete multiple qcs successfully
5032 * @ap: port in question 5032 * @ap: port in question
5033 * @qc_active: new qc_active mask 5033 * @qc_active: new qc_active mask
5034 * 5034 *
5035 * Complete in-flight commands. This function is meant to be 5035 * Complete in-flight commands. This function is meant to be
5036 * called from the low-level driver's interrupt routine to complete 5036 * called from the low-level driver's interrupt routine to complete
5037 * requests normally. ap->qc_active and @qc_active are compared 5037 * requests normally. ap->qc_active and @qc_active are compared
5038 * and commands are completed accordingly. 5038 * and commands are completed accordingly.
5039 * 5039 *
5040 * LOCKING: 5040 * LOCKING:
5041 * spin_lock_irqsave(host lock) 5041 * spin_lock_irqsave(host lock)
5042 * 5042 *
5043 * RETURNS: 5043 * RETURNS:
5044 * Number of completed commands on success, -errno otherwise. 5044 * Number of completed commands on success, -errno otherwise.
5045 */ 5045 */
5046 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active) 5046 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
5047 { 5047 {
5048 int nr_done = 0; 5048 int nr_done = 0;
5049 u32 done_mask; 5049 u32 done_mask;
5050 5050
5051 done_mask = ap->qc_active ^ qc_active; 5051 done_mask = ap->qc_active ^ qc_active;
5052 5052
5053 if (unlikely(done_mask & qc_active)) { 5053 if (unlikely(done_mask & qc_active)) {
5054 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition " 5054 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5055 "(%08x->%08x)\n", ap->qc_active, qc_active); 5055 "(%08x->%08x)\n", ap->qc_active, qc_active);
5056 return -EINVAL; 5056 return -EINVAL;
5057 } 5057 }
5058 5058
5059 while (done_mask) { 5059 while (done_mask) {
5060 struct ata_queued_cmd *qc; 5060 struct ata_queued_cmd *qc;
5061 unsigned int tag = __ffs(done_mask); 5061 unsigned int tag = __ffs(done_mask);
5062 5062
5063 qc = ata_qc_from_tag(ap, tag); 5063 qc = ata_qc_from_tag(ap, tag);
5064 if (qc) { 5064 if (qc) {
5065 ata_qc_complete(qc); 5065 ata_qc_complete(qc);
5066 nr_done++; 5066 nr_done++;
5067 } 5067 }
5068 done_mask &= ~(1 << tag); 5068 done_mask &= ~(1 << tag);
5069 } 5069 }
5070 5070
5071 return nr_done; 5071 return nr_done;
5072 } 5072 }
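/*
 * Worked example, not part of the original code, using hypothetical
 * masks: if ap->qc_active is 0x0b (tags 0, 1 and 3 in flight) and
 * the controller now reports qc_active 0x08 (only tag 3 still
 * active), then done_mask = 0x0b ^ 0x08 = 0x03 and tags 0 and 1 get
 * completed.  A bit set in done_mask *and* in @qc_active would mean
 * a tag appeared that libata never issued - the "illegal qc_active
 * transition" rejected above.
 */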
5073 5073
5074 /** 5074 /**
5075 * ata_qc_issue - issue taskfile to device 5075 * ata_qc_issue - issue taskfile to device
5076 * @qc: command to issue to device 5076 * @qc: command to issue to device
5077 * 5077 *
5078 * Prepare an ATA command for submission to the device. 5078 * Prepare an ATA command for submission to the device.
5079 * This includes mapping the data into a DMA-able 5079 * This includes mapping the data into a DMA-able
5080 * area, filling in the S/G table, and finally 5080 * area, filling in the S/G table, and finally
5081 * writing the taskfile to hardware, starting the command. 5081 * writing the taskfile to hardware, starting the command.
5082 * 5082 *
5083 * LOCKING: 5083 * LOCKING:
5084 * spin_lock_irqsave(host lock) 5084 * spin_lock_irqsave(host lock)
5085 */ 5085 */
5086 void ata_qc_issue(struct ata_queued_cmd *qc) 5086 void ata_qc_issue(struct ata_queued_cmd *qc)
5087 { 5087 {
5088 struct ata_port *ap = qc->ap; 5088 struct ata_port *ap = qc->ap;
5089 struct ata_link *link = qc->dev->link; 5089 struct ata_link *link = qc->dev->link;
5090 u8 prot = qc->tf.protocol; 5090 u8 prot = qc->tf.protocol;
5091 5091
5092 /* Make sure only one non-NCQ command is outstanding. The 5092 /* Make sure only one non-NCQ command is outstanding. The
5093 * check is skipped for old EH because it reuses active qc to 5093 * check is skipped for old EH because it reuses active qc to
5094 * request ATAPI sense. 5094 * request ATAPI sense.
5095 */ 5095 */
5096 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag)); 5096 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5097 5097
5098 if (ata_is_ncq(prot)) { 5098 if (ata_is_ncq(prot)) {
5099 WARN_ON_ONCE(link->sactive & (1 << qc->tag)); 5099 WARN_ON_ONCE(link->sactive & (1 << qc->tag));
5100 5100
5101 if (!link->sactive) 5101 if (!link->sactive)
5102 ap->nr_active_links++; 5102 ap->nr_active_links++;
5103 link->sactive |= 1 << qc->tag; 5103 link->sactive |= 1 << qc->tag;
5104 } else { 5104 } else {
5105 WARN_ON_ONCE(link->sactive); 5105 WARN_ON_ONCE(link->sactive);
5106 5106
5107 ap->nr_active_links++; 5107 ap->nr_active_links++;
5108 link->active_tag = qc->tag; 5108 link->active_tag = qc->tag;
5109 } 5109 }
5110 5110
5111 qc->flags |= ATA_QCFLAG_ACTIVE; 5111 qc->flags |= ATA_QCFLAG_ACTIVE;
5112 ap->qc_active |= 1 << qc->tag; 5112 ap->qc_active |= 1 << qc->tag;
5113 5113
5114 /* 5114 /*
5115 * We guarantee to LLDs that they will have at least one 5115 * We guarantee to LLDs that they will have at least one
5116 * non-zero sg if the command is a data command. 5116 * non-zero sg if the command is a data command.
5117 */ 5117 */
5118 if (WARN_ON_ONCE(ata_is_data(prot) && 5118 if (WARN_ON_ONCE(ata_is_data(prot) &&
5119 (!qc->sg || !qc->n_elem || !qc->nbytes))) 5119 (!qc->sg || !qc->n_elem || !qc->nbytes)))
5120 goto sys_err; 5120 goto sys_err;
5121 5121
5122 if (ata_is_dma(prot) || (ata_is_pio(prot) && 5122 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5123 (ap->flags & ATA_FLAG_PIO_DMA))) 5123 (ap->flags & ATA_FLAG_PIO_DMA)))
5124 if (ata_sg_setup(qc)) 5124 if (ata_sg_setup(qc))
5125 goto sys_err; 5125 goto sys_err;
5126 5126
5127 /* if device is sleeping, schedule reset and abort the link */ 5127 /* if device is sleeping, schedule reset and abort the link */
5128 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) { 5128 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5129 link->eh_info.action |= ATA_EH_RESET; 5129 link->eh_info.action |= ATA_EH_RESET;
5130 ata_ehi_push_desc(&link->eh_info, "waking up from sleep"); 5130 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5131 ata_link_abort(link); 5131 ata_link_abort(link);
5132 return; 5132 return;
5133 } 5133 }
5134 5134
5135 ap->ops->qc_prep(qc); 5135 ap->ops->qc_prep(qc);
5136 5136
5137 qc->err_mask |= ap->ops->qc_issue(qc); 5137 qc->err_mask |= ap->ops->qc_issue(qc);
5138 if (unlikely(qc->err_mask)) 5138 if (unlikely(qc->err_mask))
5139 goto err; 5139 goto err;
5140 return; 5140 return;
5141 5141
5142 sys_err: 5142 sys_err:
5143 qc->err_mask |= AC_ERR_SYSTEM; 5143 qc->err_mask |= AC_ERR_SYSTEM;
5144 err: 5144 err:
5145 ata_qc_complete(qc); 5145 ata_qc_complete(qc);
5146 } 5146 }
5147 5147
5148 /** 5148 /**
5149 * sata_scr_valid - test whether SCRs are accessible 5149 * sata_scr_valid - test whether SCRs are accessible
5150 * @link: ATA link to test SCR accessibility for 5150 * @link: ATA link to test SCR accessibility for
5151 * 5151 *
5152 * Test whether SCRs are accessible for @link. 5152 * Test whether SCRs are accessible for @link.
5153 * 5153 *
5154 * LOCKING: 5154 * LOCKING:
5155 * None. 5155 * None.
5156 * 5156 *
5157 * RETURNS: 5157 * RETURNS:
5158 * 1 if SCRs are accessible, 0 otherwise. 5158 * 1 if SCRs are accessible, 0 otherwise.
5159 */ 5159 */
5160 int sata_scr_valid(struct ata_link *link) 5160 int sata_scr_valid(struct ata_link *link)
5161 { 5161 {
5162 struct ata_port *ap = link->ap; 5162 struct ata_port *ap = link->ap;
5163 5163
5164 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read; 5164 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5165 } 5165 }
5166 5166
5167 /** 5167 /**
5168 * sata_scr_read - read SCR register of the specified port 5168 * sata_scr_read - read SCR register of the specified port
5169 * @link: ATA link to read SCR for 5169 * @link: ATA link to read SCR for
5170 * @reg: SCR to read 5170 * @reg: SCR to read
5171 * @val: Place to store read value 5171 * @val: Place to store read value
5172 * 5172 *
5173 * Read SCR register @reg of @link into *@val. This function is 5173 * Read SCR register @reg of @link into *@val. This function is
5174 * guaranteed to succeed if @link is ap->link, the cable type of 5174 * guaranteed to succeed if @link is ap->link, the cable type of
5175 * the port is SATA and the port implements ->scr_read. 5175 * the port is SATA and the port implements ->scr_read.
5176 * 5176 *
5177 * LOCKING: 5177 * LOCKING:
5178 * None if @link is ap->link. Kernel thread context otherwise. 5178 * None if @link is ap->link. Kernel thread context otherwise.
5179 * 5179 *
5180 * RETURNS: 5180 * RETURNS:
5181 * 0 on success, negative errno on failure. 5181 * 0 on success, negative errno on failure.
5182 */ 5182 */
5183 int sata_scr_read(struct ata_link *link, int reg, u32 *val) 5183 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5184 { 5184 {
5185 if (ata_is_host_link(link)) { 5185 if (ata_is_host_link(link)) {
5186 if (sata_scr_valid(link)) 5186 if (sata_scr_valid(link))
5187 return link->ap->ops->scr_read(link, reg, val); 5187 return link->ap->ops->scr_read(link, reg, val);
5188 return -EOPNOTSUPP; 5188 return -EOPNOTSUPP;
5189 } 5189 }
5190 5190
5191 return sata_pmp_scr_read(link, reg, val); 5191 return sata_pmp_scr_read(link, reg, val);
5192 } 5192 }
5193 5193
5194 /** 5194 /**
5195 * sata_scr_write - write SCR register of the specified port 5195 * sata_scr_write - write SCR register of the specified port
5196 * @link: ATA link to write SCR for 5196 * @link: ATA link to write SCR for
5197 * @reg: SCR to write 5197 * @reg: SCR to write
5198 * @val: value to write 5198 * @val: value to write
5199 * 5199 *
5200 * Write @val to SCR register @reg of @link. This function is 5200 * Write @val to SCR register @reg of @link. This function is
5201 * guaranteed to succeed if @link is ap->link, the cable type of 5201 * guaranteed to succeed if @link is ap->link, the cable type of
5202 * the port is SATA and the port implements ->scr_write. 5202 * the port is SATA and the port implements ->scr_write.
5203 * 5203 *
5204 * LOCKING: 5204 * LOCKING:
5205 * None if @link is ap->link. Kernel thread context otherwise. 5205 * None if @link is ap->link. Kernel thread context otherwise.
5206 * 5206 *
5207 * RETURNS: 5207 * RETURNS:
5208 * 0 on success, negative errno on failure. 5208 * 0 on success, negative errno on failure.
5209 */ 5209 */
5210 int sata_scr_write(struct ata_link *link, int reg, u32 val) 5210 int sata_scr_write(struct ata_link *link, int reg, u32 val)
5211 { 5211 {
5212 if (ata_is_host_link(link)) { 5212 if (ata_is_host_link(link)) {
5213 if (sata_scr_valid(link)) 5213 if (sata_scr_valid(link))
5214 return link->ap->ops->scr_write(link, reg, val); 5214 return link->ap->ops->scr_write(link, reg, val);
5215 return -EOPNOTSUPP; 5215 return -EOPNOTSUPP;
5216 } 5216 }
5217 5217
5218 return sata_pmp_scr_write(link, reg, val); 5218 return sata_pmp_scr_write(link, reg, val);
5219 } 5219 }
5220 5220
5221 /** 5221 /**
5222 * sata_scr_write_flush - write SCR register of the specified port and flush 5222 * sata_scr_write_flush - write SCR register of the specified port and flush
5223 * @link: ATA link to write SCR for 5223 * @link: ATA link to write SCR for
5224 * @reg: SCR to write 5224 * @reg: SCR to write
5225 * @val: value to write 5225 * @val: value to write
5226 * 5226 *
5227 * This function is identical to sata_scr_write() except that this 5227 * This function is identical to sata_scr_write() except that this
5228 * function performs a flush after writing to the register. 5228 * function performs a flush after writing to the register.
5229 * 5229 *
5230 * LOCKING: 5230 * LOCKING:
5231 * None if @link is ap->link. Kernel thread context otherwise. 5231 * None if @link is ap->link. Kernel thread context otherwise.
5232 * 5232 *
5233 * RETURNS: 5233 * RETURNS:
5234 * 0 on success, negative errno on failure. 5234 * 0 on success, negative errno on failure.
5235 */ 5235 */
5236 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val) 5236 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5237 { 5237 {
5238 if (ata_is_host_link(link)) { 5238 if (ata_is_host_link(link)) {
5239 int rc; 5239 int rc;
5240 5240
5241 if (sata_scr_valid(link)) { 5241 if (sata_scr_valid(link)) {
5242 rc = link->ap->ops->scr_write(link, reg, val); 5242 rc = link->ap->ops->scr_write(link, reg, val);
5243 if (rc == 0) 5243 if (rc == 0)
5244 rc = link->ap->ops->scr_read(link, reg, &val); 5244 rc = link->ap->ops->scr_read(link, reg, &val);
5245 return rc; 5245 return rc;
5246 } 5246 }
5247 return -EOPNOTSUPP; 5247 return -EOPNOTSUPP;
5248 } 5248 }
5249 5249
5250 return sata_pmp_scr_write(link, reg, val); 5250 return sata_pmp_scr_write(link, reg, val);
5251 } 5251 }
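
A minimal sketch of how these accessors combine in practice: a hypothetical
helper (not part of this file) that cycles the PHY via SControl's DET field,
using DET values from the SATA spec.

	static int example_phy_cycle(struct ata_link *link)
	{
		u32 scontrol;
		int rc;

		rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
		if (rc)
			return rc;		/* link has no SCR access */

		/* DET = 0x4: take the PHY offline; flush so it takes effect */
		scontrol = (scontrol & ~0xfU) | 0x4;
		rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/* DET = 0x0: no action requested, the PHY comes back up */
		return sata_scr_write(link, SCR_CONTROL, scontrol & ~0xfU);
	}
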
5252 5252
5253 /** 5253 /**
5254 * ata_phys_link_online - test whether the given link is online 5254 * ata_phys_link_online - test whether the given link is online
5255 * @link: ATA link to test 5255 * @link: ATA link to test
5256 * 5256 *
5257 * Test whether @link is online. Note that this function returns 5257 * Test whether @link is online. Note that this function returns
5258 * false if online status of @link cannot be obtained, so 5258 * false if online status of @link cannot be obtained, so
5259 * ata_link_online(link) != !ata_link_offline(link). 5259 * ata_link_online(link) != !ata_link_offline(link).
5260 * 5260 *
5261 * LOCKING: 5261 * LOCKING:
5262 * None. 5262 * None.
5263 * 5263 *
5264 * RETURNS: 5264 * RETURNS:
5265 * True if the port online status is available and online. 5265 * True if the port online status is available and online.
5266 */ 5266 */
5267 bool ata_phys_link_online(struct ata_link *link) 5267 bool ata_phys_link_online(struct ata_link *link)
5268 { 5268 {
5269 u32 sstatus; 5269 u32 sstatus;
5270 5270
5271 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5271 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5272 ata_sstatus_online(sstatus)) 5272 ata_sstatus_online(sstatus))
5273 return true; 5273 return true;
5274 return false; 5274 return false;
5275 } 5275 }
5276 5276
5277 /** 5277 /**
5278 * ata_phys_link_offline - test whether the given link is offline 5278 * ata_phys_link_offline - test whether the given link is offline
5279 * @link: ATA link to test 5279 * @link: ATA link to test
5280 * 5280 *
5281 * Test whether @link is offline. Note that this function 5281 * Test whether @link is offline. Note that this function
5282 * returns false if offline status of @link cannot be obtained, so 5282 * returns false if offline status of @link cannot be obtained, so
5283 * ata_link_online(link) != !ata_link_offline(link). 5283 * ata_link_online(link) != !ata_link_offline(link).
5284 * 5284 *
5285 * LOCKING: 5285 * LOCKING:
5286 * None. 5286 * None.
5287 * 5287 *
5288 * RETURNS: 5288 * RETURNS:
5289 * True if the port offline status is available and offline. 5289 * True if the port offline status is available and offline.
5290 */ 5290 */
5291 bool ata_phys_link_offline(struct ata_link *link) 5291 bool ata_phys_link_offline(struct ata_link *link)
5292 { 5292 {
5293 u32 sstatus; 5293 u32 sstatus;
5294 5294
5295 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5295 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5296 !ata_sstatus_online(sstatus)) 5296 !ata_sstatus_online(sstatus))
5297 return true; 5297 return true;
5298 return false; 5298 return false;
5299 } 5299 }
5300 5300
5301 /** 5301 /**
5302 * ata_link_online - test whether the given link is online 5302 * ata_link_online - test whether the given link is online
5303 * @link: ATA link to test 5303 * @link: ATA link to test
5304 * 5304 *
5305 * Test whether @link is online. This is identical to 5305 * Test whether @link is online. This is identical to
5306 * ata_phys_link_online() when there's no slave link. When 5306 * ata_phys_link_online() when there's no slave link. When
5307 * there's a slave link, this function should only be called on 5307 * there's a slave link, this function should only be called on
5308 * the master link and will return true if either of the M/S links is 5308 * the master link and will return true if either of the M/S links is
5309 * online. 5309 * online.
5310 * 5310 *
5311 * LOCKING: 5311 * LOCKING:
5312 * None. 5312 * None.
5313 * 5313 *
5314 * RETURNS: 5314 * RETURNS:
5315 * True if the port online status is available and online. 5315 * True if the port online status is available and online.
5316 */ 5316 */
5317 bool ata_link_online(struct ata_link *link) 5317 bool ata_link_online(struct ata_link *link)
5318 { 5318 {
5319 struct ata_link *slave = link->ap->slave_link; 5319 struct ata_link *slave = link->ap->slave_link;
5320 5320
5321 WARN_ON(link == slave); /* shouldn't be called on slave link */ 5321 WARN_ON(link == slave); /* shouldn't be called on slave link */
5322 5322
5323 return ata_phys_link_online(link) || 5323 return ata_phys_link_online(link) ||
5324 (slave && ata_phys_link_online(slave)); 5324 (slave && ata_phys_link_online(slave));
5325 } 5325 }
5326 5326
5327 /** 5327 /**
5328 * ata_link_offline - test whether the given link is offline 5328 * ata_link_offline - test whether the given link is offline
5329 * @link: ATA link to test 5329 * @link: ATA link to test
5330 * 5330 *
5331 * Test whether @link is offline. This is identical to 5331 * Test whether @link is offline. This is identical to
5332 * ata_phys_link_offline() when there's no slave link. When 5332 * ata_phys_link_offline() when there's no slave link. When
5333 * there's a slave link, this function should only be called on 5333 * there's a slave link, this function should only be called on
5334 * the master link and will return true if both M/S links are 5334 * the master link and will return true if both M/S links are
5335 * offline. 5335 * offline.
5336 * 5336 *
5337 * LOCKING: 5337 * LOCKING:
5338 * None. 5338 * None.
5339 * 5339 *
5340 * RETURNS: 5340 * RETURNS:
5341 * True if the port offline status is available and offline. 5341 * True if the port offline status is available and offline.
5342 */ 5342 */
5343 bool ata_link_offline(struct ata_link *link) 5343 bool ata_link_offline(struct ata_link *link)
5344 { 5344 {
5345 struct ata_link *slave = link->ap->slave_link; 5345 struct ata_link *slave = link->ap->slave_link;
5346 5346
5347 WARN_ON(link == slave); /* shouldn't be called on slave link */ 5347 WARN_ON(link == slave); /* shouldn't be called on slave link */
5348 5348
5349 return ata_phys_link_offline(link) && 5349 return ata_phys_link_offline(link) &&
5350 (!slave || ata_phys_link_offline(slave)); 5350 (!slave || ata_phys_link_offline(slave));
5351 } 5351 }
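
Because both predicates return false when link status cannot be read, link
state is effectively three-valued; a caller that cares about the unknown case
must test both, as in this hypothetical helper:

	static const char *example_link_state(struct ata_link *link)
	{
		if (ata_link_online(link))
			return "online";
		if (ata_link_offline(link))
			return "offline";
		/* SCRs unreadable: neither state could be established */
		return "unknown";
	}
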
5352 5352
5353 #ifdef CONFIG_PM 5353 #ifdef CONFIG_PM
5354 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg, 5354 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5355 unsigned int action, unsigned int ehi_flags, 5355 unsigned int action, unsigned int ehi_flags,
5356 int wait) 5356 int wait)
5357 { 5357 {
5358 unsigned long flags; 5358 unsigned long flags;
5359 int i, rc; 5359 int i, rc;
5360 5360
5361 for (i = 0; i < host->n_ports; i++) { 5361 for (i = 0; i < host->n_ports; i++) {
5362 struct ata_port *ap = host->ports[i]; 5362 struct ata_port *ap = host->ports[i];
5363 struct ata_link *link; 5363 struct ata_link *link;
5364 5364
5365 /* Previous resume operation might still be in 5365 /* Previous resume operation might still be in
5366 * progress. Wait for PM_PENDING to clear. 5366 * progress. Wait for PM_PENDING to clear.
5367 */ 5367 */
5368 if (ap->pflags & ATA_PFLAG_PM_PENDING) { 5368 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5369 ata_port_wait_eh(ap); 5369 ata_port_wait_eh(ap);
5370 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5370 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5371 } 5371 }
5372 5372
5373 /* request PM ops to EH */ 5373 /* request PM ops to EH */
5374 spin_lock_irqsave(ap->lock, flags); 5374 spin_lock_irqsave(ap->lock, flags);
5375 5375
5376 ap->pm_mesg = mesg; 5376 ap->pm_mesg = mesg;
5377 if (wait) { 5377 if (wait) {
5378 rc = 0; 5378 rc = 0;
5379 ap->pm_result = &rc; 5379 ap->pm_result = &rc;
5380 } 5380 }
5381 5381
5382 ap->pflags |= ATA_PFLAG_PM_PENDING; 5382 ap->pflags |= ATA_PFLAG_PM_PENDING;
5383 ata_for_each_link(link, ap, HOST_FIRST) { 5383 ata_for_each_link(link, ap, HOST_FIRST) {
5384 link->eh_info.action |= action; 5384 link->eh_info.action |= action;
5385 link->eh_info.flags |= ehi_flags; 5385 link->eh_info.flags |= ehi_flags;
5386 } 5386 }
5387 5387
5388 ata_port_schedule_eh(ap); 5388 ata_port_schedule_eh(ap);
5389 5389
5390 spin_unlock_irqrestore(ap->lock, flags); 5390 spin_unlock_irqrestore(ap->lock, flags);
5391 5391
5392 /* wait and check result */ 5392 /* wait and check result */
5393 if (wait) { 5393 if (wait) {
5394 ata_port_wait_eh(ap); 5394 ata_port_wait_eh(ap);
5395 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5395 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5396 if (rc) 5396 if (rc)
5397 return rc; 5397 return rc;
5398 } 5398 }
5399 } 5399 }
5400 5400
5401 return 0; 5401 return 0;
5402 } 5402 }
5403 5403
5404 /** 5404 /**
5405 * ata_host_suspend - suspend host 5405 * ata_host_suspend - suspend host
5406 * @host: host to suspend 5406 * @host: host to suspend
5407 * @mesg: PM message 5407 * @mesg: PM message
5408 * 5408 *
5409 * Suspend @host. Actual operation is performed by EH. This 5409 * Suspend @host. Actual operation is performed by EH. This
5410 * function requests EH to perform PM operations and waits for EH 5410 * function requests EH to perform PM operations and waits for EH
5411 * to finish. 5411 * to finish.
5412 * 5412 *
5413 * LOCKING: 5413 * LOCKING:
5414 * Kernel thread context (may sleep). 5414 * Kernel thread context (may sleep).
5415 * 5415 *
5416 * RETURNS: 5416 * RETURNS:
5417 * 0 on success, -errno on failure. 5417 * 0 on success, -errno on failure.
5418 */ 5418 */
5419 int ata_host_suspend(struct ata_host *host, pm_message_t mesg) 5419 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5420 { 5420 {
5421 unsigned int ehi_flags = ATA_EHI_QUIET;
5421 int rc; 5422 int rc;
5422 5423
5423 /* 5424 /*
5424 * disable link pm on all ports before requesting 5425 * disable link pm on all ports before requesting
5425 * any pm activity 5426 * any pm activity
5426 */ 5427 */
5427 ata_lpm_enable(host); 5428 ata_lpm_enable(host);
5428 5429
5429 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); 5430 /*
5431 * On some hardware, the device fails to respond after being
5432 * spun down for suspend. As the device won't be used before
5433 * being resumed, we don't need to touch it. Ask EH to skip
5434 * the usual stuff and proceed directly to suspend.
5435 *
5436 * http://thread.gmane.org/gmane.linux.ide/46764
5437 */
5438 if (mesg.event == PM_EVENT_SUSPEND)
5439 ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
5440
5441 rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
5430 if (rc == 0) 5442 if (rc == 0)
5431 host->dev->power.power_state = mesg; 5443 host->dev->power.power_state = mesg;
5432 return rc; 5444 return rc;
5433 } 5445 }
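
For context, PCI LLDs normally reach this through a thin wrapper along the
lines of libata's own ata_pci_device_suspend(); a sketch, relying on
ata_pci_device_do_suspend() further down in this file:

	int example_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
	{
		struct ata_host *host = dev_get_drvdata(&pdev->dev);
		int rc;

		rc = ata_host_suspend(host, mesg);	/* EH does the real work */
		if (rc)
			return rc;

		ata_pci_device_do_suspend(pdev, mesg);	/* save state, power down */
		return 0;
	}
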
5434 5446
5435 /** 5447 /**
5436 * ata_host_resume - resume host 5448 * ata_host_resume - resume host
5437 * @host: host to resume 5449 * @host: host to resume
5438 * 5450 *
5439 * Resume @host. Actual operation is performed by EH. This 5451 * Resume @host. Actual operation is performed by EH. This
5440 * function requests EH to perform PM operations and returns. 5452 * function requests EH to perform PM operations and returns.
5441 * Note that all resume operations are performed in parallel. 5453 * Note that all resume operations are performed in parallel.
5442 * 5454 *
5443 * LOCKING: 5455 * LOCKING:
5444 * Kernel thread context (may sleep). 5456 * Kernel thread context (may sleep).
5445 */ 5457 */
5446 void ata_host_resume(struct ata_host *host) 5458 void ata_host_resume(struct ata_host *host)
5447 { 5459 {
5448 ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET, 5460 ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
5449 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); 5461 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5450 host->dev->power.power_state = PMSG_ON; 5462 host->dev->power.power_state = PMSG_ON;
5451 5463
5452 /* reenable link pm */ 5464 /* reenable link pm */
5453 ata_lpm_disable(host); 5465 ata_lpm_disable(host);
5454 } 5466 }
5455 #endif 5467 #endif
5456 5468
5457 /** 5469 /**
5458 * ata_dev_init - Initialize an ata_device structure 5470 * ata_dev_init - Initialize an ata_device structure
5459 * @dev: Device structure to initialize 5471 * @dev: Device structure to initialize
5460 * 5472 *
5461 * Initialize @dev in preparation for probing. 5473 * Initialize @dev in preparation for probing.
5462 * 5474 *
5463 * LOCKING: 5475 * LOCKING:
5464 * Inherited from caller. 5476 * Inherited from caller.
5465 */ 5477 */
5466 void ata_dev_init(struct ata_device *dev) 5478 void ata_dev_init(struct ata_device *dev)
5467 { 5479 {
5468 struct ata_link *link = ata_dev_phys_link(dev); 5480 struct ata_link *link = ata_dev_phys_link(dev);
5469 struct ata_port *ap = link->ap; 5481 struct ata_port *ap = link->ap;
5470 unsigned long flags; 5482 unsigned long flags;
5471 5483
5472 /* SATA spd limit is bound to the attached device, reset together */ 5484 /* SATA spd limit is bound to the attached device, reset together */
5473 link->sata_spd_limit = link->hw_sata_spd_limit; 5485 link->sata_spd_limit = link->hw_sata_spd_limit;
5474 link->sata_spd = 0; 5486 link->sata_spd = 0;
5475 5487
5476 /* High bits of dev->flags are used to record warm plug 5488 /* High bits of dev->flags are used to record warm plug
5477 * requests which occur asynchronously. Synchronize using 5489 * requests which occur asynchronously. Synchronize using
5478 * host lock. 5490 * host lock.
5479 */ 5491 */
5480 spin_lock_irqsave(ap->lock, flags); 5492 spin_lock_irqsave(ap->lock, flags);
5481 dev->flags &= ~ATA_DFLAG_INIT_MASK; 5493 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5482 dev->horkage = 0; 5494 dev->horkage = 0;
5483 spin_unlock_irqrestore(ap->lock, flags); 5495 spin_unlock_irqrestore(ap->lock, flags);
5484 5496
5485 memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0, 5497 memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5486 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN); 5498 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5487 dev->pio_mask = UINT_MAX; 5499 dev->pio_mask = UINT_MAX;
5488 dev->mwdma_mask = UINT_MAX; 5500 dev->mwdma_mask = UINT_MAX;
5489 dev->udma_mask = UINT_MAX; 5501 dev->udma_mask = UINT_MAX;
5490 } 5502 }
5491 5503
5492 /** 5504 /**
5493 * ata_link_init - Initialize an ata_link structure 5505 * ata_link_init - Initialize an ata_link structure
5494 * @ap: ATA port link is attached to 5506 * @ap: ATA port link is attached to
5495 * @link: Link structure to initialize 5507 * @link: Link structure to initialize
5496 * @pmp: Port multiplier port number 5508 * @pmp: Port multiplier port number
5497 * 5509 *
5498 * Initialize @link. 5510 * Initialize @link.
5499 * 5511 *
5500 * LOCKING: 5512 * LOCKING:
5501 * Kernel thread context (may sleep) 5513 * Kernel thread context (may sleep)
5502 */ 5514 */
5503 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp) 5515 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5504 { 5516 {
5505 int i; 5517 int i;
5506 5518
5507 /* clear everything except for devices */ 5519 /* clear everything except for devices */
5508 memset(link, 0, offsetof(struct ata_link, device[0])); 5520 memset(link, 0, offsetof(struct ata_link, device[0]));
5509 5521
5510 link->ap = ap; 5522 link->ap = ap;
5511 link->pmp = pmp; 5523 link->pmp = pmp;
5512 link->active_tag = ATA_TAG_POISON; 5524 link->active_tag = ATA_TAG_POISON;
5513 link->hw_sata_spd_limit = UINT_MAX; 5525 link->hw_sata_spd_limit = UINT_MAX;
5514 5526
5515 /* can't use iterator, ap isn't initialized yet */ 5527 /* can't use iterator, ap isn't initialized yet */
5516 for (i = 0; i < ATA_MAX_DEVICES; i++) { 5528 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5517 struct ata_device *dev = &link->device[i]; 5529 struct ata_device *dev = &link->device[i];
5518 5530
5519 dev->link = link; 5531 dev->link = link;
5520 dev->devno = dev - link->device; 5532 dev->devno = dev - link->device;
5521 #ifdef CONFIG_ATA_ACPI 5533 #ifdef CONFIG_ATA_ACPI
5522 dev->gtf_filter = ata_acpi_gtf_filter; 5534 dev->gtf_filter = ata_acpi_gtf_filter;
5523 #endif 5535 #endif
5524 ata_dev_init(dev); 5536 ata_dev_init(dev);
5525 } 5537 }
5526 } 5538 }
5527 5539
5528 /** 5540 /**
5529 * sata_link_init_spd - Initialize link->sata_spd_limit 5541 * sata_link_init_spd - Initialize link->sata_spd_limit
5530 * @link: Link to configure sata_spd_limit for 5542 * @link: Link to configure sata_spd_limit for
5531 * 5543 *
5532 * Initialize @link->[hw_]sata_spd_limit to the currently 5544 * Initialize @link->[hw_]sata_spd_limit to the currently
5533 * configured value. 5545 * configured value.
5534 * 5546 *
5535 * LOCKING: 5547 * LOCKING:
5536 * Kernel thread context (may sleep). 5548 * Kernel thread context (may sleep).
5537 * 5549 *
5538 * RETURNS: 5550 * RETURNS:
5539 * 0 on success, -errno on failure. 5551 * 0 on success, -errno on failure.
5540 */ 5552 */
5541 int sata_link_init_spd(struct ata_link *link) 5553 int sata_link_init_spd(struct ata_link *link)
5542 { 5554 {
5543 u8 spd; 5555 u8 spd;
5544 int rc; 5556 int rc;
5545 5557
5546 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol); 5558 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5547 if (rc) 5559 if (rc)
5548 return rc; 5560 return rc;
5549 5561
5550 spd = (link->saved_scontrol >> 4) & 0xf; 5562 spd = (link->saved_scontrol >> 4) & 0xf;
5551 if (spd) 5563 if (spd)
5552 link->hw_sata_spd_limit &= (1 << spd) - 1; 5564 link->hw_sata_spd_limit &= (1 << spd) - 1;
5553 5565
5554 ata_force_link_limits(link); 5566 ata_force_link_limits(link);
5555 5567
5556 link->sata_spd_limit = link->hw_sata_spd_limit; 5568 link->sata_spd_limit = link->hw_sata_spd_limit;
5557 5569
5558 return 0; 5570 return 0;
5559 } 5571 }
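
The SPD field occupies SControl bits 7:4 and names the highest allowed
generation, so the mask arithmetic above works out as in this worked example:

	u32 scontrol = 0x320;			/* IPM = 3, SPD = 2, DET = 0 */
	u8 spd = (scontrol >> 4) & 0xf;		/* 2: capped at gen2 (3.0 Gbps) */
	u32 limit = (1 << spd) - 1;		/* 0x3: only gen1/gen2 bits remain */
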
5560 5572
5561 /** 5573 /**
5562 * ata_port_alloc - allocate and initialize basic ATA port resources 5574 * ata_port_alloc - allocate and initialize basic ATA port resources
5563 * @host: ATA host this allocated port belongs to 5575 * @host: ATA host this allocated port belongs to
5564 * 5576 *
5565 * Allocate and initialize basic ATA port resources. 5577 * Allocate and initialize basic ATA port resources.
5566 * 5578 *
5567 * RETURNS: 5579 * RETURNS:
5568 * Allocated ATA port on success, NULL on failure. 5580 * Allocated ATA port on success, NULL on failure.
5569 * 5581 *
5570 * LOCKING: 5582 * LOCKING:
5571 * Inherited from calling layer (may sleep). 5583 * Inherited from calling layer (may sleep).
5572 */ 5584 */
5573 struct ata_port *ata_port_alloc(struct ata_host *host) 5585 struct ata_port *ata_port_alloc(struct ata_host *host)
5574 { 5586 {
5575 struct ata_port *ap; 5587 struct ata_port *ap;
5576 5588
5577 DPRINTK("ENTER\n"); 5589 DPRINTK("ENTER\n");
5578 5590
5579 ap = kzalloc(sizeof(*ap), GFP_KERNEL); 5591 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5580 if (!ap) 5592 if (!ap)
5581 return NULL; 5593 return NULL;
5582 5594
5583 ap->pflags |= ATA_PFLAG_INITIALIZING; 5595 ap->pflags |= ATA_PFLAG_INITIALIZING;
5584 ap->lock = &host->lock; 5596 ap->lock = &host->lock;
5585 ap->print_id = -1; 5597 ap->print_id = -1;
5586 ap->host = host; 5598 ap->host = host;
5587 ap->dev = host->dev; 5599 ap->dev = host->dev;
5588 5600
5589 #if defined(ATA_VERBOSE_DEBUG) 5601 #if defined(ATA_VERBOSE_DEBUG)
5590 /* turn on all debugging levels */ 5602 /* turn on all debugging levels */
5591 ap->msg_enable = 0x00FF; 5603 ap->msg_enable = 0x00FF;
5592 #elif defined(ATA_DEBUG) 5604 #elif defined(ATA_DEBUG)
5593 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR; 5605 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5594 #else 5606 #else
5595 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; 5607 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5596 #endif 5608 #endif
5597 5609
5598 mutex_init(&ap->scsi_scan_mutex); 5610 mutex_init(&ap->scsi_scan_mutex);
5599 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); 5611 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5600 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); 5612 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5601 INIT_LIST_HEAD(&ap->eh_done_q); 5613 INIT_LIST_HEAD(&ap->eh_done_q);
5602 init_waitqueue_head(&ap->eh_wait_q); 5614 init_waitqueue_head(&ap->eh_wait_q);
5603 init_completion(&ap->park_req_pending); 5615 init_completion(&ap->park_req_pending);
5604 init_timer_deferrable(&ap->fastdrain_timer); 5616 init_timer_deferrable(&ap->fastdrain_timer);
5605 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn; 5617 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5606 ap->fastdrain_timer.data = (unsigned long)ap; 5618 ap->fastdrain_timer.data = (unsigned long)ap;
5607 5619
5608 ap->cbl = ATA_CBL_NONE; 5620 ap->cbl = ATA_CBL_NONE;
5609 5621
5610 ata_link_init(ap, &ap->link, 0); 5622 ata_link_init(ap, &ap->link, 0);
5611 5623
5612 #ifdef ATA_IRQ_TRAP 5624 #ifdef ATA_IRQ_TRAP
5613 ap->stats.unhandled_irq = 1; 5625 ap->stats.unhandled_irq = 1;
5614 ap->stats.idle_irq = 1; 5626 ap->stats.idle_irq = 1;
5615 #endif 5627 #endif
5616 ata_sff_port_init(ap); 5628 ata_sff_port_init(ap);
5617 5629
5618 return ap; 5630 return ap;
5619 } 5631 }
5620 5632
5621 static void ata_host_release(struct device *gendev, void *res) 5633 static void ata_host_release(struct device *gendev, void *res)
5622 { 5634 {
5623 struct ata_host *host = dev_get_drvdata(gendev); 5635 struct ata_host *host = dev_get_drvdata(gendev);
5624 int i; 5636 int i;
5625 5637
5626 for (i = 0; i < host->n_ports; i++) { 5638 for (i = 0; i < host->n_ports; i++) {
5627 struct ata_port *ap = host->ports[i]; 5639 struct ata_port *ap = host->ports[i];
5628 5640
5629 if (!ap) 5641 if (!ap)
5630 continue; 5642 continue;
5631 5643
5632 if (ap->scsi_host) 5644 if (ap->scsi_host)
5633 scsi_host_put(ap->scsi_host); 5645 scsi_host_put(ap->scsi_host);
5634 5646
5635 kfree(ap->pmp_link); 5647 kfree(ap->pmp_link);
5636 kfree(ap->slave_link); 5648 kfree(ap->slave_link);
5637 kfree(ap); 5649 kfree(ap);
5638 host->ports[i] = NULL; 5650 host->ports[i] = NULL;
5639 } 5651 }
5640 5652
5641 dev_set_drvdata(gendev, NULL); 5653 dev_set_drvdata(gendev, NULL);
5642 } 5654 }
5643 5655
5644 /** 5656 /**
5645 * ata_host_alloc - allocate and init basic ATA host resources 5657 * ata_host_alloc - allocate and init basic ATA host resources
5646 * @dev: generic device this host is associated with 5658 * @dev: generic device this host is associated with
5647 * @max_ports: maximum number of ATA ports associated with this host 5659 * @max_ports: maximum number of ATA ports associated with this host
5648 * 5660 *
5649 * Allocate and initialize basic ATA host resources. An LLD calls 5661 * Allocate and initialize basic ATA host resources. An LLD calls
5650 * this function to allocate a host, then initializes it fully and 5662 * this function to allocate a host, then initializes it fully and
5651 * attaches it using ata_host_register(). 5663 * attaches it using ata_host_register().
5652 * 5664 *
5653 * @max_ports ports are allocated and host->n_ports is 5665 * @max_ports ports are allocated and host->n_ports is
5654 * initialized to @max_ports. The caller is allowed to decrease 5666 * initialized to @max_ports. The caller is allowed to decrease
5655 * host->n_ports before calling ata_host_register(). The unused 5667 * host->n_ports before calling ata_host_register(). The unused
5656 * ports will be automatically freed on registration. 5668 * ports will be automatically freed on registration.
5657 * 5669 *
5658 * RETURNS: 5670 * RETURNS:
5659 * Allocated ATA host on success, NULL on failure. 5671 * Allocated ATA host on success, NULL on failure.
5660 * 5672 *
5661 * LOCKING: 5673 * LOCKING:
5662 * Inherited from calling layer (may sleep). 5674 * Inherited from calling layer (may sleep).
5663 */ 5675 */
5664 struct ata_host *ata_host_alloc(struct device *dev, int max_ports) 5676 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5665 { 5677 {
5666 struct ata_host *host; 5678 struct ata_host *host;
5667 size_t sz; 5679 size_t sz;
5668 int i; 5680 int i;
5669 5681
5670 DPRINTK("ENTER\n"); 5682 DPRINTK("ENTER\n");
5671 5683
5672 if (!devres_open_group(dev, NULL, GFP_KERNEL)) 5684 if (!devres_open_group(dev, NULL, GFP_KERNEL))
5673 return NULL; 5685 return NULL;
5674 5686
5675 /* alloc a container for our list of ATA ports (buses) */ 5687 /* alloc a container for our list of ATA ports (buses) */
5676 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *); 5688 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5678 host = devres_alloc(ata_host_release, sz, GFP_KERNEL); 5690 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5679 if (!host) 5691 if (!host)
5680 goto err_out; 5692 goto err_out;
5681 5693
5682 devres_add(dev, host); 5694 devres_add(dev, host);
5683 dev_set_drvdata(dev, host); 5695 dev_set_drvdata(dev, host);
5684 5696
5685 spin_lock_init(&host->lock); 5697 spin_lock_init(&host->lock);
5686 host->dev = dev; 5698 host->dev = dev;
5687 host->n_ports = max_ports; 5699 host->n_ports = max_ports;
5688 5700
5689 /* allocate ports bound to this host */ 5701 /* allocate ports bound to this host */
5690 for (i = 0; i < max_ports; i++) { 5702 for (i = 0; i < max_ports; i++) {
5691 struct ata_port *ap; 5703 struct ata_port *ap;
5692 5704
5693 ap = ata_port_alloc(host); 5705 ap = ata_port_alloc(host);
5694 if (!ap) 5706 if (!ap)
5695 goto err_out; 5707 goto err_out;
5696 5708
5697 ap->port_no = i; 5709 ap->port_no = i;
5698 host->ports[i] = ap; 5710 host->ports[i] = ap;
5699 } 5711 }
5700 5712
5701 devres_remove_group(dev, NULL); 5713 devres_remove_group(dev, NULL);
5702 return host; 5714 return host;
5703 5715
5704 err_out: 5716 err_out:
5705 devres_release_group(dev, NULL); 5717 devres_release_group(dev, NULL);
5706 return NULL; 5718 return NULL;
5707 } 5719 }
5708 5720
5709 /** 5721 /**
5710 * ata_host_alloc_pinfo - alloc host and init with port_info array 5722 * ata_host_alloc_pinfo - alloc host and init with port_info array
5711 * @dev: generic device this host is associated with 5723 * @dev: generic device this host is associated with
5712 * @ppi: array of ATA port_info to initialize host with 5724 * @ppi: array of ATA port_info to initialize host with
5713 * @n_ports: number of ATA ports attached to this host 5725 * @n_ports: number of ATA ports attached to this host
5714 * 5726 *
5715 * Allocate ATA host and initialize with info from @ppi. If NULL 5727 * Allocate ATA host and initialize with info from @ppi. If NULL
5716 * terminated, @ppi may contain fewer entries than @n_ports. The 5728 * terminated, @ppi may contain fewer entries than @n_ports. The
5717 * last entry will be used for the remaining ports. 5729 * last entry will be used for the remaining ports.
5718 * 5730 *
5719 * RETURNS: 5731 * RETURNS:
5720 * Allocated ATA host on success, NULL on failure. 5732 * Allocated ATA host on success, NULL on failure.
5721 * 5733 *
5722 * LOCKING: 5734 * LOCKING:
5723 * Inherited from calling layer (may sleep). 5735 * Inherited from calling layer (may sleep).
5724 */ 5736 */
5725 struct ata_host *ata_host_alloc_pinfo(struct device *dev, 5737 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5726 const struct ata_port_info * const * ppi, 5738 const struct ata_port_info * const * ppi,
5727 int n_ports) 5739 int n_ports)
5728 { 5740 {
5729 const struct ata_port_info *pi; 5741 const struct ata_port_info *pi;
5730 struct ata_host *host; 5742 struct ata_host *host;
5731 int i, j; 5743 int i, j;
5732 5744
5733 host = ata_host_alloc(dev, n_ports); 5745 host = ata_host_alloc(dev, n_ports);
5734 if (!host) 5746 if (!host)
5735 return NULL; 5747 return NULL;
5736 5748
5737 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) { 5749 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5738 struct ata_port *ap = host->ports[i]; 5750 struct ata_port *ap = host->ports[i];
5739 5751
5740 if (ppi[j]) 5752 if (ppi[j])
5741 pi = ppi[j++]; 5753 pi = ppi[j++];
5742 5754
5743 ap->pio_mask = pi->pio_mask; 5755 ap->pio_mask = pi->pio_mask;
5744 ap->mwdma_mask = pi->mwdma_mask; 5756 ap->mwdma_mask = pi->mwdma_mask;
5745 ap->udma_mask = pi->udma_mask; 5757 ap->udma_mask = pi->udma_mask;
5746 ap->flags |= pi->flags; 5758 ap->flags |= pi->flags;
5747 ap->link.flags |= pi->link_flags; 5759 ap->link.flags |= pi->link_flags;
5748 ap->ops = pi->port_ops; 5760 ap->ops = pi->port_ops;
5749 5761
5750 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops)) 5762 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5751 host->ops = pi->port_ops; 5763 host->ops = pi->port_ops;
5752 } 5764 }
5753 5765
5754 return host; 5766 return host;
5755 } 5767 }
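
Typical use, sketched with a hypothetical port_info (example_port_ops is
defined in the sketch after ata_finalize_port_ops() below); because @ppi is
NULL terminated, the single entry covers both ports:

	static const struct ata_port_info example_pi = {
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &example_port_ops,
	};

	const struct ata_port_info *ppi[] = { &example_pi, NULL };
	struct ata_host *host = ata_host_alloc_pinfo(dev, ppi, 2);
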
5756 5768
5757 /** 5769 /**
5758 * ata_slave_link_init - initialize slave link 5770 * ata_slave_link_init - initialize slave link
5759 * @ap: port to initialize slave link for 5771 * @ap: port to initialize slave link for
5760 * 5772 *
5761 * Create and initialize slave link for @ap. This enables slave 5773 * Create and initialize slave link for @ap. This enables slave
5762 * link handling on the port. 5774 * link handling on the port.
5763 * 5775 *
5764 * In libata, a port contains links and a link contains devices. 5776 * In libata, a port contains links and a link contains devices.
5765 * There is a single host link but if a PMP is attached to it, 5777 * There is a single host link but if a PMP is attached to it,
5766 * there can be multiple fan-out links. On SATA, there's usually 5778 * there can be multiple fan-out links. On SATA, there's usually
5767 * a single device connected to a link but PATA and SATA 5779 * a single device connected to a link but PATA and SATA
5768 * controllers emulating TF based interface can have two - master 5780 * controllers emulating TF based interface can have two - master
5769 * and slave. 5781 * and slave.
5770 * 5782 *
5771 * However, there are a few controllers which don't fit into this 5783 * However, there are a few controllers which don't fit into this
5772 * abstraction too well - SATA controllers which emulate TF 5784 * abstraction too well - SATA controllers which emulate TF
5773 * interface with both master and slave devices but also have 5785 * interface with both master and slave devices but also have
5774 * separate SCR register sets for each device. These controllers 5786 * separate SCR register sets for each device. These controllers
5775 * need separate links for physical link handling 5787 * need separate links for physical link handling
5776 * (e.g. onlineness, link speed) but should be treated like a 5788 * (e.g. onlineness, link speed) but should be treated like a
5777 * traditional M/S controller for everything else (e.g. command 5789 * traditional M/S controller for everything else (e.g. command
5778 * issue, softreset). 5790 * issue, softreset).
5779 * 5791 *
5780 * slave_link is libata's way of handling this class of 5792 * slave_link is libata's way of handling this class of
5781 * controllers without impacting core layer too much. For 5793 * controllers without impacting core layer too much. For
5782 * anything other than physical link handling, the default host 5794 * anything other than physical link handling, the default host
5783 * link is used for both master and slave. For physical link 5795 * link is used for both master and slave. For physical link
5784 * handling, separate @ap->slave_link is used. All dirty details 5796 * handling, separate @ap->slave_link is used. All dirty details
5785 * are implemented inside libata core layer. From LLD's POV, the 5797 * are implemented inside libata core layer. From LLD's POV, the
5786 * only difference is that prereset, hardreset and postreset are 5798 * only difference is that prereset, hardreset and postreset are
5787 * called once more for the slave link, so the reset sequence 5799 * called once more for the slave link, so the reset sequence
5788 * looks like the following. 5800 * looks like the following.
5789 * 5801 *
5790 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) -> 5802 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
5791 * softreset(M) -> postreset(M) -> postreset(S) 5803 * softreset(M) -> postreset(M) -> postreset(S)
5792 * 5804 *
5793 * Note that softreset is called only for the master. Softreset 5805 * Note that softreset is called only for the master. Softreset
5794 * resets both M/S by definition, so SRST on master should handle 5806 * resets both M/S by definition, so SRST on master should handle
5795 * both (the standard method will work just fine). 5807 * both (the standard method will work just fine).
5796 * 5808 *
5797 * LOCKING: 5809 * LOCKING:
5798 * Should be called before host is registered. 5810 * Should be called before host is registered.
5799 * 5811 *
5800 * RETURNS: 5812 * RETURNS:
5801 * 0 on success, -errno on failure. 5813 * 0 on success, -errno on failure.
5802 */ 5814 */
5803 int ata_slave_link_init(struct ata_port *ap) 5815 int ata_slave_link_init(struct ata_port *ap)
5804 { 5816 {
5805 struct ata_link *link; 5817 struct ata_link *link;
5806 5818
5807 WARN_ON(ap->slave_link); 5819 WARN_ON(ap->slave_link);
5808 WARN_ON(ap->flags & ATA_FLAG_PMP); 5820 WARN_ON(ap->flags & ATA_FLAG_PMP);
5809 5821
5810 link = kzalloc(sizeof(*link), GFP_KERNEL); 5822 link = kzalloc(sizeof(*link), GFP_KERNEL);
5811 if (!link) 5823 if (!link)
5812 return -ENOMEM; 5824 return -ENOMEM;
5813 5825
5814 ata_link_init(ap, link, 1); 5826 ata_link_init(ap, link, 1);
5815 ap->slave_link = link; 5827 ap->slave_link = link;
5816 return 0; 5828 return 0;
5817 } 5829 }
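
An LLD whose hardware has per-device SCR sets would call this once per port
in its probe path, before registering the host; a hedged sketch:

	for (i = 0; i < host->n_ports; i++) {
		rc = ata_slave_link_init(host->ports[i]);
		if (rc)
			return rc;
	}
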
5818 5830
5819 static void ata_host_stop(struct device *gendev, void *res) 5831 static void ata_host_stop(struct device *gendev, void *res)
5820 { 5832 {
5821 struct ata_host *host = dev_get_drvdata(gendev); 5833 struct ata_host *host = dev_get_drvdata(gendev);
5822 int i; 5834 int i;
5823 5835
5824 WARN_ON(!(host->flags & ATA_HOST_STARTED)); 5836 WARN_ON(!(host->flags & ATA_HOST_STARTED));
5825 5837
5826 for (i = 0; i < host->n_ports; i++) { 5838 for (i = 0; i < host->n_ports; i++) {
5827 struct ata_port *ap = host->ports[i]; 5839 struct ata_port *ap = host->ports[i];
5828 5840
5829 if (ap->ops->port_stop) 5841 if (ap->ops->port_stop)
5830 ap->ops->port_stop(ap); 5842 ap->ops->port_stop(ap);
5831 } 5843 }
5832 5844
5833 if (host->ops->host_stop) 5845 if (host->ops->host_stop)
5834 host->ops->host_stop(host); 5846 host->ops->host_stop(host);
5835 } 5847 }
5836 5848
5837 /** 5849 /**
5838 * ata_finalize_port_ops - finalize ata_port_operations 5850 * ata_finalize_port_ops - finalize ata_port_operations
5839 * @ops: ata_port_operations to finalize 5851 * @ops: ata_port_operations to finalize
5840 * 5852 *
5841 * An ata_port_operations can inherit from another ops and that 5853 * An ata_port_operations can inherit from another ops and that
5842 * ops can again inherit from another. This can go on as many 5854 * ops can again inherit from another. This can go on as many
5843 * times as necessary as long as there is no loop in the 5855 * times as necessary as long as there is no loop in the
5844 * inheritance chain. 5856 * inheritance chain.
5845 * 5857 *
5846 * Ops tables are finalized when the host is started. NULL or 5858 * Ops tables are finalized when the host is started. NULL or
5847 * unspecified entries are inherited from the closest ancestor 5859 * unspecified entries are inherited from the closest ancestor
5848 * which has the method and the entry is populated with it. 5860 * which has the method and the entry is populated with it.
5849 * After finalization, the ops table directly points to all the 5861 * After finalization, the ops table directly points to all the
5850 * methods and ->inherits is no longer necessary and cleared. 5862 * methods and ->inherits is no longer necessary and cleared.
5851 * 5863 *
5852 * Using ATA_OP_NULL, inheriting ops can force a method to NULL. 5864 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5853 * 5865 *
5854 * LOCKING: 5866 * LOCKING:
5855 * None. 5867 * None.
5856 */ 5868 */
5857 static void ata_finalize_port_ops(struct ata_port_operations *ops) 5869 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5858 { 5870 {
5859 static DEFINE_SPINLOCK(lock); 5871 static DEFINE_SPINLOCK(lock);
5860 const struct ata_port_operations *cur; 5872 const struct ata_port_operations *cur;
5861 void **begin = (void **)ops; 5873 void **begin = (void **)ops;
5862 void **end = (void **)&ops->inherits; 5874 void **end = (void **)&ops->inherits;
5863 void **pp; 5875 void **pp;
5864 5876
5865 if (!ops || !ops->inherits) 5877 if (!ops || !ops->inherits)
5866 return; 5878 return;
5867 5879
5868 spin_lock(&lock); 5880 spin_lock(&lock);
5869 5881
5870 for (cur = ops->inherits; cur; cur = cur->inherits) { 5882 for (cur = ops->inherits; cur; cur = cur->inherits) {
5871 void **inherit = (void **)cur; 5883 void **inherit = (void **)cur;
5872 5884
5873 for (pp = begin; pp < end; pp++, inherit++) 5885 for (pp = begin; pp < end; pp++, inherit++)
5874 if (!*pp) 5886 if (!*pp)
5875 *pp = *inherit; 5887 *pp = *inherit;
5876 } 5888 }
5877 5889
5878 for (pp = begin; pp < end; pp++) 5890 for (pp = begin; pp < end; pp++)
5879 if (IS_ERR(*pp)) 5891 if (IS_ERR(*pp))
5880 *pp = NULL; 5892 *pp = NULL;
5881 5893
5882 ops->inherits = NULL; 5894 ops->inherits = NULL;
5883 5895
5884 spin_unlock(&lock); 5896 spin_unlock(&lock);
5885 } 5897 }
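
This inheritance is what lets an LLD spell out only its deltas. An
illustrative ops table against the stock BMDMA ops (ATA_OP_NULL pins a slot
to NULL so nothing is inherited into it):

	static struct ata_port_operations example_port_ops = {
		.inherits	= &ata_bmdma_port_ops,	/* unset slots filled from here */
		.cable_detect	= ata_cable_40wire,	/* override a single method */
		.mode_filter	= ATA_OP_NULL,		/* force this slot to stay NULL */
	};
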
5886 5898
5887 /** 5899 /**
5888 * ata_host_start - start and freeze ports of an ATA host 5900 * ata_host_start - start and freeze ports of an ATA host
5889 * @host: ATA host to start ports for 5901 * @host: ATA host to start ports for
5890 * 5902 *
5891 * Start and then freeze ports of @host. Started status is 5903 * Start and then freeze ports of @host. Started status is
5892 * recorded in host->flags, so this function can be called 5904 * recorded in host->flags, so this function can be called
5893 * multiple times. Ports are guaranteed to get started only 5905 * multiple times. Ports are guaranteed to get started only
5894 * once. If host->ops isn't initialized yet, it's set to the 5906 * once. If host->ops isn't initialized yet, it's set to the
5895 * first non-dummy port ops. 5907 * first non-dummy port ops.
5896 * 5908 *
5897 * LOCKING: 5909 * LOCKING:
5898 * Inherited from calling layer (may sleep). 5910 * Inherited from calling layer (may sleep).
5899 * 5911 *
5900 * RETURNS: 5912 * RETURNS:
5901 * 0 if all ports are started successfully, -errno otherwise. 5913 * 0 if all ports are started successfully, -errno otherwise.
5902 */ 5914 */
5903 int ata_host_start(struct ata_host *host) 5915 int ata_host_start(struct ata_host *host)
5904 { 5916 {
5905 int have_stop = 0; 5917 int have_stop = 0;
5906 void *start_dr = NULL; 5918 void *start_dr = NULL;
5907 int i, rc; 5919 int i, rc;
5908 5920
5909 if (host->flags & ATA_HOST_STARTED) 5921 if (host->flags & ATA_HOST_STARTED)
5910 return 0; 5922 return 0;
5911 5923
5912 ata_finalize_port_ops(host->ops); 5924 ata_finalize_port_ops(host->ops);
5913 5925
5914 for (i = 0; i < host->n_ports; i++) { 5926 for (i = 0; i < host->n_ports; i++) {
5915 struct ata_port *ap = host->ports[i]; 5927 struct ata_port *ap = host->ports[i];
5916 5928
5917 ata_finalize_port_ops(ap->ops); 5929 ata_finalize_port_ops(ap->ops);
5918 5930
5919 if (!host->ops && !ata_port_is_dummy(ap)) 5931 if (!host->ops && !ata_port_is_dummy(ap))
5920 host->ops = ap->ops; 5932 host->ops = ap->ops;
5921 5933
5922 if (ap->ops->port_stop) 5934 if (ap->ops->port_stop)
5923 have_stop = 1; 5935 have_stop = 1;
5924 } 5936 }
5925 5937
5926 if (host->ops->host_stop) 5938 if (host->ops->host_stop)
5927 have_stop = 1; 5939 have_stop = 1;
5928 5940
5929 if (have_stop) { 5941 if (have_stop) {
5930 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL); 5942 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5931 if (!start_dr) 5943 if (!start_dr)
5932 return -ENOMEM; 5944 return -ENOMEM;
5933 } 5945 }
5934 5946
5935 for (i = 0; i < host->n_ports; i++) { 5947 for (i = 0; i < host->n_ports; i++) {
5936 struct ata_port *ap = host->ports[i]; 5948 struct ata_port *ap = host->ports[i];
5937 5949
5938 if (ap->ops->port_start) { 5950 if (ap->ops->port_start) {
5939 rc = ap->ops->port_start(ap); 5951 rc = ap->ops->port_start(ap);
5940 if (rc) { 5952 if (rc) {
5941 if (rc != -ENODEV) 5953 if (rc != -ENODEV)
5942 dev_printk(KERN_ERR, host->dev, 5954 dev_printk(KERN_ERR, host->dev,
5943 "failed to start port %d " 5955 "failed to start port %d "
5944 "(errno=%d)\n", i, rc); 5956 "(errno=%d)\n", i, rc);
5945 goto err_out; 5957 goto err_out;
5946 } 5958 }
5947 } 5959 }
5948 ata_eh_freeze_port(ap); 5960 ata_eh_freeze_port(ap);
5949 } 5961 }
5950 5962
5951 if (start_dr) 5963 if (start_dr)
5952 devres_add(host->dev, start_dr); 5964 devres_add(host->dev, start_dr);
5953 host->flags |= ATA_HOST_STARTED; 5965 host->flags |= ATA_HOST_STARTED;
5954 return 0; 5966 return 0;
5955 5967
5956 err_out: 5968 err_out:
5957 while (--i >= 0) { 5969 while (--i >= 0) {
5958 struct ata_port *ap = host->ports[i]; 5970 struct ata_port *ap = host->ports[i];
5959 5971
5960 if (ap->ops->port_stop) 5972 if (ap->ops->port_stop)
5961 ap->ops->port_stop(ap); 5973 ap->ops->port_stop(ap);
5962 } 5974 }
5963 devres_free(start_dr); 5975 devres_free(start_dr);
5964 return rc; 5976 return rc;
5965 } 5977 }
5966 5978
5967 /** 5979 /**
5968 * ata_host_init - Initialize a host struct 5980 * ata_host_init - Initialize a host struct
5969 * @host: host to initialize 5981 * @host: host to initialize
5970 * @dev: device host is attached to 5982 * @dev: device host is attached to
5971 * @flags: host flags 5983 * @flags: host flags
5972 * @ops: port_ops 5984 * @ops: port_ops
5973 * 5985 *
5974 * LOCKING: 5986 * LOCKING:
5975 * PCI/etc. bus probe sem. 5987 * PCI/etc. bus probe sem.
5976 * 5988 *
5977 */ 5989 */
5978 /* KILLME - the only user left is ipr */ 5990 /* KILLME - the only user left is ipr */
5979 void ata_host_init(struct ata_host *host, struct device *dev, 5991 void ata_host_init(struct ata_host *host, struct device *dev,
5980 unsigned long flags, struct ata_port_operations *ops) 5992 unsigned long flags, struct ata_port_operations *ops)
5981 { 5993 {
5982 spin_lock_init(&host->lock); 5994 spin_lock_init(&host->lock);
5983 host->dev = dev; 5995 host->dev = dev;
5984 host->flags = flags; 5996 host->flags = flags;
5985 host->ops = ops; 5997 host->ops = ops;
5986 } 5998 }
5987 5999
5988 6000
5989 static void async_port_probe(void *data, async_cookie_t cookie) 6001 static void async_port_probe(void *data, async_cookie_t cookie)
5990 { 6002 {
5991 int rc; 6003 int rc;
5992 struct ata_port *ap = data; 6004 struct ata_port *ap = data;
5993 6005
5994 /* 6006 /*
5995 * If we're not allowed to scan this host in parallel, 6007 * If we're not allowed to scan this host in parallel,
5996 * we need to wait until all previous scans have completed 6008 * we need to wait until all previous scans have completed
5997 * before going further. 6009 * before going further.
5998 * Jeff Garzik says this is only within a controller, so we 6010 * Jeff Garzik says this is only within a controller, so we
5999 * don't need to wait for port 0, only for later ports. 6011 * don't need to wait for port 0, only for later ports.
6000 */ 6012 */
6001 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0) 6013 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
6002 async_synchronize_cookie(cookie); 6014 async_synchronize_cookie(cookie);
6003 6015
6004 /* probe */ 6016 /* probe */
6005 if (ap->ops->error_handler) { 6017 if (ap->ops->error_handler) {
6006 struct ata_eh_info *ehi = &ap->link.eh_info; 6018 struct ata_eh_info *ehi = &ap->link.eh_info;
6007 unsigned long flags; 6019 unsigned long flags;
6008 6020
6009 /* kick EH for boot probing */ 6021 /* kick EH for boot probing */
6010 spin_lock_irqsave(ap->lock, flags); 6022 spin_lock_irqsave(ap->lock, flags);
6011 6023
6012 ehi->probe_mask |= ATA_ALL_DEVICES; 6024 ehi->probe_mask |= ATA_ALL_DEVICES;
6013 ehi->action |= ATA_EH_RESET | ATA_EH_LPM; 6025 ehi->action |= ATA_EH_RESET | ATA_EH_LPM;
6014 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; 6026 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6015 6027
6016 ap->pflags &= ~ATA_PFLAG_INITIALIZING; 6028 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6017 ap->pflags |= ATA_PFLAG_LOADING; 6029 ap->pflags |= ATA_PFLAG_LOADING;
6018 ata_port_schedule_eh(ap); 6030 ata_port_schedule_eh(ap);
6019 6031
6020 spin_unlock_irqrestore(ap->lock, flags); 6032 spin_unlock_irqrestore(ap->lock, flags);
6021 6033
6022 /* wait for EH to finish */ 6034 /* wait for EH to finish */
6023 ata_port_wait_eh(ap); 6035 ata_port_wait_eh(ap);
6024 } else { 6036 } else {
6025 DPRINTK("ata%u: bus probe begin\n", ap->print_id); 6037 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6026 rc = ata_bus_probe(ap); 6038 rc = ata_bus_probe(ap);
6027 DPRINTK("ata%u: bus probe end\n", ap->print_id); 6039 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6028 6040
6029 if (rc) { 6041 if (rc) {
6030 /* FIXME: do something useful here? 6042 /* FIXME: do something useful here?
6031 * Current libata behavior will 6043 * Current libata behavior will
6032 * tear down everything when 6044 * tear down everything when
6033 * the module is removed 6045 * the module is removed
6034 * or the h/w is unplugged. 6046 * or the h/w is unplugged.
6035 */ 6047 */
6036 } 6048 }
6037 } 6049 }
6038 6050
6039 /* in order to keep device order, we need to synchronize at this point */ 6051 /* in order to keep device order, we need to synchronize at this point */
6040 async_synchronize_cookie(cookie); 6052 async_synchronize_cookie(cookie);
6041 6053
6042 ata_scsi_scan_host(ap, 1); 6054 ata_scsi_scan_host(ap, 1);
6043 6055
6044 } 6056 }
6045 /** 6057 /**
6046 * ata_host_register - register initialized ATA host 6058 * ata_host_register - register initialized ATA host
6047 * @host: ATA host to register 6059 * @host: ATA host to register
6048 * @sht: template for SCSI host 6060 * @sht: template for SCSI host
6049 * 6061 *
6050 * Register initialized ATA host. @host is allocated using 6062 * Register initialized ATA host. @host is allocated using
6051 * ata_host_alloc() and fully initialized by LLD. This function 6063 * ata_host_alloc() and fully initialized by LLD. This function
6052 * starts ports, registers @host with ATA and SCSI layers and 6064 * starts ports, registers @host with ATA and SCSI layers and
6053 * probes registered devices. 6065 * probes registered devices.
6054 * 6066 *
6055 * LOCKING: 6067 * LOCKING:
6056 * Inherited from calling layer (may sleep). 6068 * Inherited from calling layer (may sleep).
6057 * 6069 *
6058 * RETURNS: 6070 * RETURNS:
6059 * 0 on success, -errno otherwise. 6071 * 0 on success, -errno otherwise.
6060 */ 6072 */
6061 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) 6073 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6062 { 6074 {
6063 int i, rc; 6075 int i, rc;
6064 6076
6065 /* host must have been started */ 6077 /* host must have been started */
6066 if (!(host->flags & ATA_HOST_STARTED)) { 6078 if (!(host->flags & ATA_HOST_STARTED)) {
6067 dev_printk(KERN_ERR, host->dev, 6079 dev_printk(KERN_ERR, host->dev,
6068 "BUG: trying to register unstarted host\n"); 6080 "BUG: trying to register unstarted host\n");
6069 WARN_ON(1); 6081 WARN_ON(1);
6070 return -EINVAL; 6082 return -EINVAL;
6071 } 6083 }
6072 6084
6073 /* Blow away unused ports. This happens when LLD can't 6085 /* Blow away unused ports. This happens when LLD can't
6074 * determine the exact number of ports to allocate at 6086 * determine the exact number of ports to allocate at
6075 * allocation time. 6087 * allocation time.
6076 */ 6088 */
6077 for (i = host->n_ports; host->ports[i]; i++) 6089 for (i = host->n_ports; host->ports[i]; i++)
6078 kfree(host->ports[i]); 6090 kfree(host->ports[i]);
6079 6091
6080 /* give ports names and add SCSI hosts */ 6092 /* give ports names and add SCSI hosts */
6081 for (i = 0; i < host->n_ports; i++) 6093 for (i = 0; i < host->n_ports; i++)
6082 host->ports[i]->print_id = ata_print_id++; 6094 host->ports[i]->print_id = ata_print_id++;
6083 6095
6084 rc = ata_scsi_add_hosts(host, sht); 6096 rc = ata_scsi_add_hosts(host, sht);
6085 if (rc) 6097 if (rc)
6086 return rc; 6098 return rc;
6087 6099
6088 /* associate with ACPI nodes */ 6100 /* associate with ACPI nodes */
6089 ata_acpi_associate(host); 6101 ata_acpi_associate(host);
6090 6102
6091 /* set cable, sata_spd_limit and report */ 6103 /* set cable, sata_spd_limit and report */
6092 for (i = 0; i < host->n_ports; i++) { 6104 for (i = 0; i < host->n_ports; i++) {
6093 struct ata_port *ap = host->ports[i]; 6105 struct ata_port *ap = host->ports[i];
6094 unsigned long xfer_mask; 6106 unsigned long xfer_mask;
6095 6107
6096 /* set SATA cable type if still unset */ 6108 /* set SATA cable type if still unset */
6097 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA)) 6109 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6098 ap->cbl = ATA_CBL_SATA; 6110 ap->cbl = ATA_CBL_SATA;
6099 6111
6100 /* init sata_spd_limit to the current value */ 6112 /* init sata_spd_limit to the current value */
6101 sata_link_init_spd(&ap->link); 6113 sata_link_init_spd(&ap->link);
6102 if (ap->slave_link) 6114 if (ap->slave_link)
6103 sata_link_init_spd(ap->slave_link); 6115 sata_link_init_spd(ap->slave_link);
6104 6116
6105 /* print per-port info to dmesg */ 6117 /* print per-port info to dmesg */
6106 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, 6118 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6107 ap->udma_mask); 6119 ap->udma_mask);
6108 6120
6109 if (!ata_port_is_dummy(ap)) { 6121 if (!ata_port_is_dummy(ap)) {
6110 ata_port_printk(ap, KERN_INFO, 6122 ata_port_printk(ap, KERN_INFO,
6111 "%cATA max %s %s\n", 6123 "%cATA max %s %s\n",
6112 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P', 6124 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6113 ata_mode_string(xfer_mask), 6125 ata_mode_string(xfer_mask),
6114 ap->link.eh_info.desc); 6126 ap->link.eh_info.desc);
6115 ata_ehi_clear_desc(&ap->link.eh_info); 6127 ata_ehi_clear_desc(&ap->link.eh_info);
6116 } else 6128 } else
6117 ata_port_printk(ap, KERN_INFO, "DUMMY\n"); 6129 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6118 } 6130 }
6119 6131
6120 /* perform each probe asynchronously */ 6132 /* perform each probe asynchronously */
6121 for (i = 0; i < host->n_ports; i++) { 6133 for (i = 0; i < host->n_ports; i++) {
6122 struct ata_port *ap = host->ports[i]; 6134 struct ata_port *ap = host->ports[i];
6123 async_schedule(async_port_probe, ap); 6135 async_schedule(async_port_probe, ap);
6124 } 6136 }
6125 6137
6126 return 0; 6138 return 0;
6127 } 6139 }
6128 6140
6129 /** 6141 /**
6130 * ata_host_activate - start host, request IRQ and register it 6142 * ata_host_activate - start host, request IRQ and register it
6131 * @host: target ATA host 6143 * @host: target ATA host
6132 * @irq: IRQ to request 6144 * @irq: IRQ to request
6133 * @irq_handler: irq_handler used when requesting IRQ 6145 * @irq_handler: irq_handler used when requesting IRQ
6134 * @irq_flags: irq_flags used when requesting IRQ 6146 * @irq_flags: irq_flags used when requesting IRQ
6135 * @sht: scsi_host_template to use when registering the host 6147 * @sht: scsi_host_template to use when registering the host
6136 * 6148 *
6137 * After allocating an ATA host and initializing it, most libata 6149 * After allocating an ATA host and initializing it, most libata
6138 * LLDs perform three steps to activate the host - start host, 6150 * LLDs perform three steps to activate the host - start host,
6139 * request IRQ and register it. This helper takes the necessary 6151 * request IRQ and register it. This helper takes the necessary
6140 * arguments and performs the three steps in one go. 6152 * arguments and performs the three steps in one go.
6141 * 6153 *
6142 * An invalid IRQ skips the IRQ registration and expects the host to 6154 * An invalid IRQ skips the IRQ registration and expects the host to
6143 * have set polling mode on the port. In this case, @irq_handler 6155 * have set polling mode on the port. In this case, @irq_handler
6144 * should be NULL. 6156 * should be NULL.
6145 * 6157 *
6146 * LOCKING: 6158 * LOCKING:
6147 * Inherited from calling layer (may sleep). 6159 * Inherited from calling layer (may sleep).
6148 * 6160 *
6149 * RETURNS: 6161 * RETURNS:
6150 * 0 on success, -errno otherwise. 6162 * 0 on success, -errno otherwise.
6151 */ 6163 */
6152 int ata_host_activate(struct ata_host *host, int irq, 6164 int ata_host_activate(struct ata_host *host, int irq,
6153 irq_handler_t irq_handler, unsigned long irq_flags, 6165 irq_handler_t irq_handler, unsigned long irq_flags,
6154 struct scsi_host_template *sht) 6166 struct scsi_host_template *sht)
6155 { 6167 {
6156 int i, rc; 6168 int i, rc;
6157 6169
6158 rc = ata_host_start(host); 6170 rc = ata_host_start(host);
6159 if (rc) 6171 if (rc)
6160 return rc; 6172 return rc;
6161 6173
6162 /* Special case for polling mode */ 6174 /* Special case for polling mode */
6163 if (!irq) { 6175 if (!irq) {
6164 WARN_ON(irq_handler); 6176 WARN_ON(irq_handler);
6165 return ata_host_register(host, sht); 6177 return ata_host_register(host, sht);
6166 } 6178 }
6167 6179
6168 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags, 6180 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6169 dev_driver_string(host->dev), host); 6181 dev_driver_string(host->dev), host);
6170 if (rc) 6182 if (rc)
6171 return rc; 6183 return rc;
6172 6184
6173 for (i = 0; i < host->n_ports; i++) 6185 for (i = 0; i < host->n_ports; i++)
6174 ata_port_desc(host->ports[i], "irq %d", irq); 6186 ata_port_desc(host->ports[i], "irq %d", irq);
6175 6187
6176 rc = ata_host_register(host, sht); 6188 rc = ata_host_register(host, sht);
6177 /* if failed, just free the IRQ and leave ports alone */ 6189 /* if failed, just free the IRQ and leave ports alone */
6178 if (rc) 6190 if (rc)
6179 devm_free_irq(host->dev, irq, host); 6191 devm_free_irq(host->dev, irq, host);
6180 6192
6181 return rc; 6193 return rc;
6182 } 6194 }
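
Put together, a minimal native PCI probe reduces to the following sketch
(example_pi as above; example_interrupt and example_sht are hypothetical LLD
symbols; BAR mapping elided):

	static int example_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
	{
		const struct ata_port_info *ppi[] = { &example_pi, NULL };
		struct ata_host *host;
		int rc;

		rc = pcim_enable_device(pdev);		/* devres-managed enable */
		if (rc)
			return rc;

		host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
		if (!host)
			return -ENOMEM;

		/* ... map BARs and fill each port's ioaddr here ... */

		return ata_host_activate(host, pdev->irq, example_interrupt,
					 IRQF_SHARED, &example_sht);
	}
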
6183 6195
6184 /** 6196 /**
6185 * ata_port_detach - Detach ATA port in preparation for device removal 6197 * ata_port_detach - Detach ATA port in preparation for device removal
6186 * @ap: ATA port to be detached 6198 * @ap: ATA port to be detached
6187 * 6199 *
6188 * Detach all ATA devices and the associated SCSI devices of @ap; 6200 * Detach all ATA devices and the associated SCSI devices of @ap;
6189 * then, remove the associated SCSI host. @ap is guaranteed to 6201 * then, remove the associated SCSI host. @ap is guaranteed to
6190 * be quiescent on return from this function. 6202 * be quiescent on return from this function.
6191 * 6203 *
6192 * LOCKING: 6204 * LOCKING:
6193 * Kernel thread context (may sleep). 6205 * Kernel thread context (may sleep).
6194 */ 6206 */
6195 static void ata_port_detach(struct ata_port *ap) 6207 static void ata_port_detach(struct ata_port *ap)
6196 { 6208 {
6197 unsigned long flags; 6209 unsigned long flags;
6198 6210
6199 if (!ap->ops->error_handler) 6211 if (!ap->ops->error_handler)
6200 goto skip_eh; 6212 goto skip_eh;
6201 6213
6202 /* tell EH we're leaving & flush EH */ 6214 /* tell EH we're leaving & flush EH */
6203 spin_lock_irqsave(ap->lock, flags); 6215 spin_lock_irqsave(ap->lock, flags);
6204 ap->pflags |= ATA_PFLAG_UNLOADING; 6216 ap->pflags |= ATA_PFLAG_UNLOADING;
6205 ata_port_schedule_eh(ap); 6217 ata_port_schedule_eh(ap);
6206 spin_unlock_irqrestore(ap->lock, flags); 6218 spin_unlock_irqrestore(ap->lock, flags);
6207 6219
6208 /* wait till EH commits suicide */ 6220 /* wait till EH commits suicide */
6209 ata_port_wait_eh(ap); 6221 ata_port_wait_eh(ap);
6210 6222
6211 /* it better be dead now */ 6223 /* it better be dead now */
6212 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED)); 6224 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6213 6225
6214 cancel_rearming_delayed_work(&ap->hotplug_task); 6226 cancel_rearming_delayed_work(&ap->hotplug_task);
6215 6227
6216 skip_eh: 6228 skip_eh:
6217 /* remove the associated SCSI host */ 6229 /* remove the associated SCSI host */
6218 scsi_remove_host(ap->scsi_host); 6230 scsi_remove_host(ap->scsi_host);
6219 } 6231 }
6220 6232
6221 /** 6233 /**
6222 * ata_host_detach - Detach all ports of an ATA host 6234 * ata_host_detach - Detach all ports of an ATA host
6223 * @host: Host to detach 6235 * @host: Host to detach
6224 * 6236 *
6225 * Detach all ports of @host. 6237 * Detach all ports of @host.
6226 * 6238 *
6227 * LOCKING: 6239 * LOCKING:
6228 * Kernel thread context (may sleep). 6240 * Kernel thread context (may sleep).
6229 */ 6241 */
6230 void ata_host_detach(struct ata_host *host) 6242 void ata_host_detach(struct ata_host *host)
6231 { 6243 {
6232 int i; 6244 int i;
6233 6245
6234 for (i = 0; i < host->n_ports; i++) 6246 for (i = 0; i < host->n_ports; i++)
6235 ata_port_detach(host->ports[i]); 6247 ata_port_detach(host->ports[i]);
6236 6248
6237 /* the host is dead now, dissociate ACPI */ 6249 /* the host is dead now, dissociate ACPI */
6238 ata_acpi_dissociate(host); 6250 ata_acpi_dissociate(host);
6239 } 6251 }
6240 6252
6241 #ifdef CONFIG_PCI 6253 #ifdef CONFIG_PCI
6242 6254
6243 /** 6255 /**
6244 * ata_pci_remove_one - PCI layer callback for device removal 6256 * ata_pci_remove_one - PCI layer callback for device removal
6245 * @pdev: PCI device that was removed 6257 * @pdev: PCI device that was removed
6246 * 6258 *
6247 * PCI layer indicates to libata via this hook that a hot-unplug or 6259 * PCI layer indicates to libata via this hook that a hot-unplug or
6248 * module unload event has occurred. Detach all ports. Resource 6260 * module unload event has occurred. Detach all ports. Resource
6249 * release is handled via devres. 6261 * release is handled via devres.
6250 * 6262 *
6251 * LOCKING: 6263 * LOCKING:
6252 * Inherited from PCI layer (may sleep). 6264 * Inherited from PCI layer (may sleep).
6253 */ 6265 */
6254 void ata_pci_remove_one(struct pci_dev *pdev) 6266 void ata_pci_remove_one(struct pci_dev *pdev)
6255 { 6267 {
6256 struct device *dev = &pdev->dev; 6268 struct device *dev = &pdev->dev;
6257 struct ata_host *host = dev_get_drvdata(dev); 6269 struct ata_host *host = dev_get_drvdata(dev);
6258 6270
6259 ata_host_detach(host); 6271 ata_host_detach(host);
6260 } 6272 }
6261 6273
6262 /* move to PCI subsystem */ 6274 /* move to PCI subsystem */
6263 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) 6275 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6264 { 6276 {
6265 unsigned long tmp = 0; 6277 unsigned long tmp = 0;
6266 6278
6267 switch (bits->width) { 6279 switch (bits->width) {
6268 case 1: { 6280 case 1: {
6269 u8 tmp8 = 0; 6281 u8 tmp8 = 0;
6270 pci_read_config_byte(pdev, bits->reg, &tmp8); 6282 pci_read_config_byte(pdev, bits->reg, &tmp8);
6271 tmp = tmp8; 6283 tmp = tmp8;
6272 break; 6284 break;
6273 } 6285 }
6274 case 2: { 6286 case 2: {
6275 u16 tmp16 = 0; 6287 u16 tmp16 = 0;
6276 pci_read_config_word(pdev, bits->reg, &tmp16); 6288 pci_read_config_word(pdev, bits->reg, &tmp16);
6277 tmp = tmp16; 6289 tmp = tmp16;
6278 break; 6290 break;
6279 } 6291 }
6280 case 4: { 6292 case 4: {
6281 u32 tmp32 = 0; 6293 u32 tmp32 = 0;
6282 pci_read_config_dword(pdev, bits->reg, &tmp32); 6294 pci_read_config_dword(pdev, bits->reg, &tmp32);
6283 tmp = tmp32; 6295 tmp = tmp32;
6284 break; 6296 break;
6285 } 6297 }
6286 6298
6287 default: 6299 default:
6288 return -EINVAL; 6300 return -EINVAL;
6289 } 6301 }
6290 6302
6291 tmp &= bits->mask; 6303 tmp &= bits->mask;
6292 6304
6293 return (tmp == bits->val) ? 1 : 0; 6305 return (tmp == bits->val) ? 1 : 0;
6294 } 6306 }
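/*
 * Illustrative sketch (not part of this diff): low-level drivers
 * use pci_test_config_bits() to check whether a port is enabled in
 * PCI config space.  The field order (reg, width, mask, val)
 * follows struct pci_bits in <linux/libata.h>; the offsets below
 * are example values loosely modeled on ata_piix's enable bits.
 */
static const struct pci_bits example_enable_bits[] = {
	{ 0x41U, 1U, 0x80UL, 0x80UL },	/* port 0 */
	{ 0x43U, 1U, 0x80UL, 0x80UL },	/* port 1 */
};

static int example_port_enabled(struct pci_dev *pdev, int port)
{
	/* 1 = enabled, 0 = disabled, -EINVAL on unsupported width */
	return pci_test_config_bits(pdev, &example_enable_bits[port]);
}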
6295 6307
6296 #ifdef CONFIG_PM 6308 #ifdef CONFIG_PM
6297 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg) 6309 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6298 { 6310 {
6299 pci_save_state(pdev); 6311 pci_save_state(pdev);
6300 pci_disable_device(pdev); 6312 pci_disable_device(pdev);
6301 6313
6302 if (mesg.event & PM_EVENT_SLEEP) 6314 if (mesg.event & PM_EVENT_SLEEP)
6303 pci_set_power_state(pdev, PCI_D3hot); 6315 pci_set_power_state(pdev, PCI_D3hot);
6304 } 6316 }
6305 6317
6306 int ata_pci_device_do_resume(struct pci_dev *pdev) 6318 int ata_pci_device_do_resume(struct pci_dev *pdev)
6307 { 6319 {
6308 int rc; 6320 int rc;
6309 6321
6310 pci_set_power_state(pdev, PCI_D0); 6322 pci_set_power_state(pdev, PCI_D0);
6311 pci_restore_state(pdev); 6323 pci_restore_state(pdev);
6312 6324
6313 rc = pcim_enable_device(pdev); 6325 rc = pcim_enable_device(pdev);
6314 if (rc) { 6326 if (rc) {
6315 dev_printk(KERN_ERR, &pdev->dev, 6327 dev_printk(KERN_ERR, &pdev->dev,
6316 "failed to enable device after resume (%d)\n", rc); 6328 "failed to enable device after resume (%d)\n", rc);
6317 return rc; 6329 return rc;
6318 } 6330 }
6319 6331
6320 pci_set_master(pdev); 6332 pci_set_master(pdev);
6321 return 0; 6333 return 0;
6322 } 6334 }
6323 6335
6324 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) 6336 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6325 { 6337 {
6326 struct ata_host *host = dev_get_drvdata(&pdev->dev); 6338 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6327 int rc = 0; 6339 int rc = 0;
6328 6340
6329 rc = ata_host_suspend(host, mesg); 6341 rc = ata_host_suspend(host, mesg);
6330 if (rc) 6342 if (rc)
6331 return rc; 6343 return rc;
6332 6344
6333 ata_pci_device_do_suspend(pdev, mesg); 6345 ata_pci_device_do_suspend(pdev, mesg);
6334 6346
6335 return 0; 6347 return 0;
6336 } 6348 }
6337 6349
6338 int ata_pci_device_resume(struct pci_dev *pdev) 6350 int ata_pci_device_resume(struct pci_dev *pdev)
6339 { 6351 {
6340 struct ata_host *host = dev_get_drvdata(&pdev->dev); 6352 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6341 int rc; 6353 int rc;
6342 6354
6343 rc = ata_pci_device_do_resume(pdev); 6355 rc = ata_pci_device_do_resume(pdev);
6344 if (rc == 0) 6356 if (rc == 0)
6345 ata_host_resume(host); 6357 ata_host_resume(host);
6346 return rc; 6358 return rc;
6347 } 6359 }
6348 #endif /* CONFIG_PM */ 6360 #endif /* CONFIG_PM */
6349 6361
6350 #endif /* CONFIG_PCI */ 6362 #endif /* CONFIG_PCI */
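/*
 * Illustrative sketch (not part of this diff): the suspend/resume
 * helpers above are meant to be wired directly into a driver's
 * struct pci_driver, as most libata PCI drivers do.  The names
 * example_pci_tbl and example_init_one are placeholders, not real
 * symbols.
 */
static struct pci_driver example_pci_driver = {
	.name		= "example_ata",
	.id_table	= example_pci_tbl,
	.probe		= example_init_one,
	.remove		= ata_pci_remove_one,	/* detach ports, devres frees the rest */
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
#endif
};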
6351 6363
6352 static int __init ata_parse_force_one(char **cur, 6364 static int __init ata_parse_force_one(char **cur,
6353 struct ata_force_ent *force_ent, 6365 struct ata_force_ent *force_ent,
6354 const char **reason) 6366 const char **reason)
6355 { 6367 {
6356 /* FIXME: Currently, there's no way to tag init const data and 6368 /* FIXME: Currently, there's no way to tag init const data and
6357 * using __initdata causes build failure on some versions of 6369 * using __initdata causes build failure on some versions of
6358 * gcc. Once __initdataconst is implemented, add const to the 6370 * gcc. Once __initdataconst is implemented, add const to the
6359 * following structure. 6371 * following structure.
6360 */ 6372 */
6361 static struct ata_force_param force_tbl[] __initdata = { 6373 static struct ata_force_param force_tbl[] __initdata = {
6362 { "40c", .cbl = ATA_CBL_PATA40 }, 6374 { "40c", .cbl = ATA_CBL_PATA40 },
6363 { "80c", .cbl = ATA_CBL_PATA80 }, 6375 { "80c", .cbl = ATA_CBL_PATA80 },
6364 { "short40c", .cbl = ATA_CBL_PATA40_SHORT }, 6376 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
6365 { "unk", .cbl = ATA_CBL_PATA_UNK }, 6377 { "unk", .cbl = ATA_CBL_PATA_UNK },
6366 { "ign", .cbl = ATA_CBL_PATA_IGN }, 6378 { "ign", .cbl = ATA_CBL_PATA_IGN },
6367 { "sata", .cbl = ATA_CBL_SATA }, 6379 { "sata", .cbl = ATA_CBL_SATA },
6368 { "1.5Gbps", .spd_limit = 1 }, 6380 { "1.5Gbps", .spd_limit = 1 },
6369 { "3.0Gbps", .spd_limit = 2 }, 6381 { "3.0Gbps", .spd_limit = 2 },
6370 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ }, 6382 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
6371 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ }, 6383 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
6372 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID }, 6384 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
6373 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) }, 6385 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
6374 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) }, 6386 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
6375 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) }, 6387 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
6376 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) }, 6388 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
6377 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) }, 6389 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
6378 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) }, 6390 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
6379 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) }, 6391 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
6380 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) }, 6392 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
6381 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) }, 6393 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
6382 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) }, 6394 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
6383 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) }, 6395 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
6384 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) }, 6396 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
6385 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6397 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6386 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6398 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6387 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6399 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6388 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 6400 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6389 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 6401 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6390 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 6402 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6391 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6403 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6392 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6404 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6393 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6405 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6394 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6406 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6395 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6407 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6396 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6408 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6397 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6409 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6398 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6410 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6399 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6411 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6400 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6412 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6401 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6413 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6402 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6414 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6403 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6415 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6404 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6416 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6405 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6417 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6406 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) }, 6418 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
6407 { "nohrst", .lflags = ATA_LFLAG_NO_HRST }, 6419 { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
6408 { "nosrst", .lflags = ATA_LFLAG_NO_SRST }, 6420 { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
6409 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST }, 6421 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6410 }; 6422 };
6411 char *start = *cur, *p = *cur; 6423 char *start = *cur, *p = *cur;
6412 char *id, *val, *endp; 6424 char *id, *val, *endp;
6413 const struct ata_force_param *match_fp = NULL; 6425 const struct ata_force_param *match_fp = NULL;
6414 int nr_matches = 0, i; 6426 int nr_matches = 0, i;
6415 6427
6416 /* find where this param ends and update *cur */ 6428 /* find where this param ends and update *cur */
6417 while (*p != '\0' && *p != ',') 6429 while (*p != '\0' && *p != ',')
6418 p++; 6430 p++;
6419 6431
6420 if (*p == '\0') 6432 if (*p == '\0')
6421 *cur = p; 6433 *cur = p;
6422 else 6434 else
6423 *cur = p + 1; 6435 *cur = p + 1;
6424 6436
6425 *p = '\0'; 6437 *p = '\0';
6426 6438
6427 /* parse */ 6439 /* parse */
6428 p = strchr(start, ':'); 6440 p = strchr(start, ':');
6429 if (!p) { 6441 if (!p) {
6430 val = strstrip(start); 6442 val = strstrip(start);
6431 goto parse_val; 6443 goto parse_val;
6432 } 6444 }
6433 *p = '\0'; 6445 *p = '\0';
6434 6446
6435 id = strstrip(start); 6447 id = strstrip(start);
6436 val = strstrip(p + 1); 6448 val = strstrip(p + 1);
6437 6449
6438 /* parse id */ 6450 /* parse id */
6439 p = strchr(id, '.'); 6451 p = strchr(id, '.');
6440 if (p) { 6452 if (p) {
6441 *p++ = '\0'; 6453 *p++ = '\0';
6442 force_ent->device = simple_strtoul(p, &endp, 10); 6454 force_ent->device = simple_strtoul(p, &endp, 10);
6443 if (p == endp || *endp != '\0') { 6455 if (p == endp || *endp != '\0') {
6444 *reason = "invalid device"; 6456 *reason = "invalid device";
6445 return -EINVAL; 6457 return -EINVAL;
6446 } 6458 }
6447 } 6459 }
6448 6460
6449 force_ent->port = simple_strtoul(id, &endp, 10); 6461 force_ent->port = simple_strtoul(id, &endp, 10);
6450 	if (id == endp || *endp != '\0') { 6462 	if (id == endp || *endp != '\0') {
6451 *reason = "invalid port/link"; 6463 *reason = "invalid port/link";
6452 return -EINVAL; 6464 return -EINVAL;
6453 } 6465 }
6454 6466
6455 parse_val: 6467 parse_val:
6456 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */ 6468 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6457 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) { 6469 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6458 const struct ata_force_param *fp = &force_tbl[i]; 6470 const struct ata_force_param *fp = &force_tbl[i];
6459 6471
6460 if (strncasecmp(val, fp->name, strlen(val))) 6472 if (strncasecmp(val, fp->name, strlen(val)))
6461 continue; 6473 continue;
6462 6474
6463 nr_matches++; 6475 nr_matches++;
6464 match_fp = fp; 6476 match_fp = fp;
6465 6477
6466 if (strcasecmp(val, fp->name) == 0) { 6478 if (strcasecmp(val, fp->name) == 0) {
6467 nr_matches = 1; 6479 nr_matches = 1;
6468 break; 6480 break;
6469 } 6481 }
6470 } 6482 }
6471 6483
6472 if (!nr_matches) { 6484 if (!nr_matches) {
6473 *reason = "unknown value"; 6485 *reason = "unknown value";
6474 return -EINVAL; 6486 return -EINVAL;
6475 } 6487 }
6476 if (nr_matches > 1) { 6488 if (nr_matches > 1) {
6477 		*reason = "ambiguous value"; 6489 		*reason = "ambiguous value";
6478 return -EINVAL; 6490 return -EINVAL;
6479 } 6491 }
6480 6492
6481 force_ent->param = *match_fp; 6493 force_ent->param = *match_fp;
6482 6494
6483 return 0; 6495 return 0;
6484 } 6496 }
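/*
 * Illustrative examples (not part of this diff) of the syntax the
 * parser above accepts; each comma-separated entry of the
 * libata.force= module parameter is [PORT[.DEVICE]:]VAL:
 *
 *	libata.force=3.0Gbps			limit all links to 3.0Gbps
 *	libata.force=1:noncq			disable NCQ on port 1
 *	libata.force=1.00:udma4,2:norst		per-device and per-port forcing
 *
 * VAL may be abbreviated as long as it stays unambiguous, so
 * "1.5" matches "1.5Gbps".
 */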
6485 6497
6486 static void __init ata_parse_force_param(void) 6498 static void __init ata_parse_force_param(void)
6487 { 6499 {
6488 int idx = 0, size = 1; 6500 int idx = 0, size = 1;
6489 int last_port = -1, last_device = -1; 6501 int last_port = -1, last_device = -1;
6490 char *p, *cur, *next; 6502 char *p, *cur, *next;
6491 6503
6492 /* calculate maximum number of params and allocate force_tbl */ 6504 /* calculate maximum number of params and allocate force_tbl */
6493 for (p = ata_force_param_buf; *p; p++) 6505 for (p = ata_force_param_buf; *p; p++)
6494 if (*p == ',') 6506 if (*p == ',')
6495 size++; 6507 size++;
6496 6508
6497 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL); 6509 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6498 if (!ata_force_tbl) { 6510 if (!ata_force_tbl) {
6499 printk(KERN_WARNING "ata: failed to extend force table, " 6511 printk(KERN_WARNING "ata: failed to extend force table, "
6500 "libata.force ignored\n"); 6512 "libata.force ignored\n");
6501 return; 6513 return;
6502 } 6514 }
6503 6515
6504 /* parse and populate the table */ 6516 /* parse and populate the table */
6505 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) { 6517 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6506 const char *reason = ""; 6518 const char *reason = "";
6507 struct ata_force_ent te = { .port = -1, .device = -1 }; 6519 struct ata_force_ent te = { .port = -1, .device = -1 };
6508 6520
6509 next = cur; 6521 next = cur;
6510 if (ata_parse_force_one(&next, &te, &reason)) { 6522 if (ata_parse_force_one(&next, &te, &reason)) {
6511 printk(KERN_WARNING "ata: failed to parse force " 6523 printk(KERN_WARNING "ata: failed to parse force "
6512 "parameter \"%s\" (%s)\n", 6524 "parameter \"%s\" (%s)\n",
6513 cur, reason); 6525 cur, reason);
6514 continue; 6526 continue;
6515 } 6527 }
6516 6528
6517 if (te.port == -1) { 6529 if (te.port == -1) {
6518 te.port = last_port; 6530 te.port = last_port;
6519 te.device = last_device; 6531 te.device = last_device;
6520 } 6532 }
6521 6533
6522 ata_force_tbl[idx++] = te; 6534 ata_force_tbl[idx++] = te;
6523 6535
6524 last_port = te.port; 6536 last_port = te.port;
6525 last_device = te.device; 6537 last_device = te.device;
6526 } 6538 }
6527 6539
6528 ata_force_tbl_size = idx; 6540 ata_force_tbl_size = idx;
6529 } 6541 }
6530 6542
6531 static int __init ata_init(void) 6543 static int __init ata_init(void)
6532 { 6544 {
6533 int rc = -ENOMEM; 6545 int rc = -ENOMEM;
6534 6546
6535 ata_parse_force_param(); 6547 ata_parse_force_param();
6536 6548
6537 rc = ata_sff_init(); 6549 rc = ata_sff_init();
6538 if (rc) { 6550 if (rc) {
6539 kfree(ata_force_tbl); 6551 kfree(ata_force_tbl);
6540 return rc; 6552 return rc;
6541 } 6553 }
6542 6554
6543 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); 6555 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6544 return 0; 6556 return 0;
6545 } 6557 }
6546 6558
6547 static void __exit ata_exit(void) 6559 static void __exit ata_exit(void)
6548 { 6560 {
6549 ata_sff_exit(); 6561 ata_sff_exit();
6550 kfree(ata_force_tbl); 6562 kfree(ata_force_tbl);
6551 } 6563 }
6552 6564
6553 subsys_initcall(ata_init); 6565 subsys_initcall(ata_init);
6554 module_exit(ata_exit); 6566 module_exit(ata_exit);
6555 6567
6556 static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1); 6568 static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
6557 6569
6558 int ata_ratelimit(void) 6570 int ata_ratelimit(void)
6559 { 6571 {
6560 return __ratelimit(&ratelimit); 6572 return __ratelimit(&ratelimit);
6561 } 6573 }
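/*
 * Illustrative sketch (not part of this diff): ata_ratelimit()
 * gates messages that could flood the log from interrupt context.
 * With the state defined above (interval HZ/5, burst 1) at most
 * one message gets through per 200ms, e.g.:
 *
 *	if (unlikely(status == 0xff) && ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious interrupt, status 0xff\n");
 */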
6562 6574
6563 /** 6575 /**
6564 * ata_wait_register - wait until register value changes 6576 * ata_wait_register - wait until register value changes
6565 * @reg: IO-mapped register 6577 * @reg: IO-mapped register
6566 * @mask: Mask to apply to read register value 6578 * @mask: Mask to apply to read register value
6567 * @val: Wait condition 6579 * @val: Wait condition
6568 * @interval: polling interval in milliseconds 6580 * @interval: polling interval in milliseconds
6569 * @timeout: timeout in milliseconds 6581 * @timeout: timeout in milliseconds
6570 * 6582 *
6571 * Waiting for some bits of register to change is a common 6583 * Waiting for some bits of register to change is a common
6572 * operation for ATA controllers. This function reads 32bit LE 6584 * operation for ATA controllers. This function reads 32bit LE
6573 * IO-mapped register @reg and tests for the following condition. 6585 * IO-mapped register @reg and tests for the following condition.
6574 * 6586 *
6575 * (*@reg & mask) != val 6587 * (*@reg & mask) != val
6576 * 6588 *
6577 * If the condition is met, it returns; otherwise, the process is 6589 * If the condition is met, it returns; otherwise, the process is
6578 * repeated after @interval until timeout. 6590 * repeated after @interval until timeout.
6579 * 6591 *
6580 * LOCKING: 6592 * LOCKING:
6581 * Kernel thread context (may sleep) 6593 * Kernel thread context (may sleep)
6582 * 6594 *
6583 * RETURNS: 6595 * RETURNS:
6584 * The final register value. 6596 * The final register value.
6585 */ 6597 */
6586 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, 6598 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6587 unsigned long interval, unsigned long timeout) 6599 unsigned long interval, unsigned long timeout)
6588 { 6600 {
6589 unsigned long deadline; 6601 unsigned long deadline;
6590 u32 tmp; 6602 u32 tmp;
6591 6603
6592 tmp = ioread32(reg); 6604 tmp = ioread32(reg);
6593 6605
6594 /* Calculate timeout _after_ the first read to make sure 6606 /* Calculate timeout _after_ the first read to make sure
6595 * preceding writes reach the controller before starting to 6607 * preceding writes reach the controller before starting to
6596 * eat away the timeout. 6608 * eat away the timeout.
6597 */ 6609 */
6598 deadline = ata_deadline(jiffies, timeout); 6610 deadline = ata_deadline(jiffies, timeout);
6599 6611
6600 while ((tmp & mask) == val && time_before(jiffies, deadline)) { 6612 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6601 msleep(interval); 6613 msleep(interval);
6602 tmp = ioread32(reg); 6614 tmp = ioread32(reg);
6603 } 6615 }
6604 6616
6605 return tmp; 6617 return tmp;
6606 } 6618 }
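/*
 * Illustrative sketch (not part of this diff), loosely modeled on
 * the AHCI global-reset sequence in ahci.c:
 *
 *	writel(readl(mmio + HOST_CTL) | HOST_RESET, mmio + HOST_CTL);
 *	readl(mmio + HOST_CTL);			flush the write
 *	tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
 *				HOST_RESET, 1, 1000);
 *	if (tmp & HOST_RESET)			still set: timed out
 *		return -EIO;
 *
 * This re-reads the register every 1ms while (reg & HOST_RESET)
 * stays equal to HOST_RESET, for at most 1000ms, and returns the
 * last value read.
 */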
6607 6619
6608 /* 6620 /*
6609 * Dummy port_ops 6621 * Dummy port_ops
6610 */ 6622 */
6611 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc) 6623 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6612 { 6624 {
6613 return AC_ERR_SYSTEM; 6625 return AC_ERR_SYSTEM;
6614 } 6626 }
6615 6627
6616 static void ata_dummy_error_handler(struct ata_port *ap) 6628 static void ata_dummy_error_handler(struct ata_port *ap)
6617 { 6629 {
6618 /* truly dummy */ 6630 /* truly dummy */
6619 } 6631 }
6620 6632
6621 struct ata_port_operations ata_dummy_port_ops = { 6633 struct ata_port_operations ata_dummy_port_ops = {
6622 .qc_prep = ata_noop_qc_prep, 6634 .qc_prep = ata_noop_qc_prep,
6623 .qc_issue = ata_dummy_qc_issue, 6635 .qc_issue = ata_dummy_qc_issue,
6624 .error_handler = ata_dummy_error_handler, 6636 .error_handler = ata_dummy_error_handler,
6625 }; 6637 };
6626 6638
6627 const struct ata_port_info ata_dummy_port_info = { 6639 const struct ata_port_info ata_dummy_port_info = {
6628 .port_ops = &ata_dummy_port_ops, 6640 .port_ops = &ata_dummy_port_ops,
6629 }; 6641 };
6630 6642
6631 /* 6643 /*
6632 * libata is essentially a library of internal helper functions for 6644 * libata is essentially a library of internal helper functions for
6633 * low-level ATA host controller drivers. As such, the API/ABI is 6645 * low-level ATA host controller drivers. As such, the API/ABI is
6634 * likely to change as new drivers are added and updated. 6646 * likely to change as new drivers are added and updated.
6635 * Do not depend on ABI/API stability. 6647 * Do not depend on ABI/API stability.
6636 */ 6648 */
6637 EXPORT_SYMBOL_GPL(sata_deb_timing_normal); 6649 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6638 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug); 6650 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6639 EXPORT_SYMBOL_GPL(sata_deb_timing_long); 6651 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6640 EXPORT_SYMBOL_GPL(ata_base_port_ops); 6652 EXPORT_SYMBOL_GPL(ata_base_port_ops);
6641 EXPORT_SYMBOL_GPL(sata_port_ops); 6653 EXPORT_SYMBOL_GPL(sata_port_ops);
6642 EXPORT_SYMBOL_GPL(ata_dummy_port_ops); 6654 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6643 EXPORT_SYMBOL_GPL(ata_dummy_port_info); 6655 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6644 EXPORT_SYMBOL_GPL(ata_link_next); 6656 EXPORT_SYMBOL_GPL(ata_link_next);
6645 EXPORT_SYMBOL_GPL(ata_dev_next); 6657 EXPORT_SYMBOL_GPL(ata_dev_next);
6646 EXPORT_SYMBOL_GPL(ata_std_bios_param); 6658 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6647 EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity); 6659 EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
6648 EXPORT_SYMBOL_GPL(ata_host_init); 6660 EXPORT_SYMBOL_GPL(ata_host_init);
6649 EXPORT_SYMBOL_GPL(ata_host_alloc); 6661 EXPORT_SYMBOL_GPL(ata_host_alloc);
6650 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo); 6662 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6651 EXPORT_SYMBOL_GPL(ata_slave_link_init); 6663 EXPORT_SYMBOL_GPL(ata_slave_link_init);
6652 EXPORT_SYMBOL_GPL(ata_host_start); 6664 EXPORT_SYMBOL_GPL(ata_host_start);
6653 EXPORT_SYMBOL_GPL(ata_host_register); 6665 EXPORT_SYMBOL_GPL(ata_host_register);
6654 EXPORT_SYMBOL_GPL(ata_host_activate); 6666 EXPORT_SYMBOL_GPL(ata_host_activate);
6655 EXPORT_SYMBOL_GPL(ata_host_detach); 6667 EXPORT_SYMBOL_GPL(ata_host_detach);
6656 EXPORT_SYMBOL_GPL(ata_sg_init); 6668 EXPORT_SYMBOL_GPL(ata_sg_init);
6657 EXPORT_SYMBOL_GPL(ata_qc_complete); 6669 EXPORT_SYMBOL_GPL(ata_qc_complete);
6658 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple); 6670 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6659 EXPORT_SYMBOL_GPL(atapi_cmd_type); 6671 EXPORT_SYMBOL_GPL(atapi_cmd_type);
6660 EXPORT_SYMBOL_GPL(ata_tf_to_fis); 6672 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6661 EXPORT_SYMBOL_GPL(ata_tf_from_fis); 6673 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6662 EXPORT_SYMBOL_GPL(ata_pack_xfermask); 6674 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6663 EXPORT_SYMBOL_GPL(ata_unpack_xfermask); 6675 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6664 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode); 6676 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6665 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask); 6677 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6666 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift); 6678 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6667 EXPORT_SYMBOL_GPL(ata_mode_string); 6679 EXPORT_SYMBOL_GPL(ata_mode_string);
6668 EXPORT_SYMBOL_GPL(ata_id_xfermask); 6680 EXPORT_SYMBOL_GPL(ata_id_xfermask);
6669 EXPORT_SYMBOL_GPL(ata_do_set_mode); 6681 EXPORT_SYMBOL_GPL(ata_do_set_mode);
6670 EXPORT_SYMBOL_GPL(ata_std_qc_defer); 6682 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6671 EXPORT_SYMBOL_GPL(ata_noop_qc_prep); 6683 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6672 EXPORT_SYMBOL_GPL(ata_dev_disable); 6684 EXPORT_SYMBOL_GPL(ata_dev_disable);
6673 EXPORT_SYMBOL_GPL(sata_set_spd); 6685 EXPORT_SYMBOL_GPL(sata_set_spd);
6674 EXPORT_SYMBOL_GPL(ata_wait_after_reset); 6686 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6675 EXPORT_SYMBOL_GPL(sata_link_debounce); 6687 EXPORT_SYMBOL_GPL(sata_link_debounce);
6676 EXPORT_SYMBOL_GPL(sata_link_resume); 6688 EXPORT_SYMBOL_GPL(sata_link_resume);
6677 EXPORT_SYMBOL_GPL(ata_std_prereset); 6689 EXPORT_SYMBOL_GPL(ata_std_prereset);
6678 EXPORT_SYMBOL_GPL(sata_link_hardreset); 6690 EXPORT_SYMBOL_GPL(sata_link_hardreset);
6679 EXPORT_SYMBOL_GPL(sata_std_hardreset); 6691 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6680 EXPORT_SYMBOL_GPL(ata_std_postreset); 6692 EXPORT_SYMBOL_GPL(ata_std_postreset);
6681 EXPORT_SYMBOL_GPL(ata_dev_classify); 6693 EXPORT_SYMBOL_GPL(ata_dev_classify);
6682 EXPORT_SYMBOL_GPL(ata_dev_pair); 6694 EXPORT_SYMBOL_GPL(ata_dev_pair);
6683 EXPORT_SYMBOL_GPL(ata_ratelimit); 6695 EXPORT_SYMBOL_GPL(ata_ratelimit);
6684 EXPORT_SYMBOL_GPL(ata_wait_register); 6696 EXPORT_SYMBOL_GPL(ata_wait_register);
6685 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 6697 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6686 EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 6698 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6687 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy); 6699 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6688 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth); 6700 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6689 EXPORT_SYMBOL_GPL(sata_scr_valid); 6701 EXPORT_SYMBOL_GPL(sata_scr_valid);
6690 EXPORT_SYMBOL_GPL(sata_scr_read); 6702 EXPORT_SYMBOL_GPL(sata_scr_read);
6691 EXPORT_SYMBOL_GPL(sata_scr_write); 6703 EXPORT_SYMBOL_GPL(sata_scr_write);
6692 EXPORT_SYMBOL_GPL(sata_scr_write_flush); 6704 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6693 EXPORT_SYMBOL_GPL(ata_link_online); 6705 EXPORT_SYMBOL_GPL(ata_link_online);
6694 EXPORT_SYMBOL_GPL(ata_link_offline); 6706 EXPORT_SYMBOL_GPL(ata_link_offline);
6695 #ifdef CONFIG_PM 6707 #ifdef CONFIG_PM
6696 EXPORT_SYMBOL_GPL(ata_host_suspend); 6708 EXPORT_SYMBOL_GPL(ata_host_suspend);
6697 EXPORT_SYMBOL_GPL(ata_host_resume); 6709 EXPORT_SYMBOL_GPL(ata_host_resume);
6698 #endif /* CONFIG_PM */ 6710 #endif /* CONFIG_PM */
6699 EXPORT_SYMBOL_GPL(ata_id_string); 6711 EXPORT_SYMBOL_GPL(ata_id_string);
6700 EXPORT_SYMBOL_GPL(ata_id_c_string); 6712 EXPORT_SYMBOL_GPL(ata_id_c_string);
6701 EXPORT_SYMBOL_GPL(ata_do_dev_read_id); 6713 EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
6702 EXPORT_SYMBOL_GPL(ata_scsi_simulate); 6714 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6703 6715
6704 EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 6716 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6705 EXPORT_SYMBOL_GPL(ata_timing_find_mode); 6717 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6706 EXPORT_SYMBOL_GPL(ata_timing_compute); 6718 EXPORT_SYMBOL_GPL(ata_timing_compute);
6707 EXPORT_SYMBOL_GPL(ata_timing_merge); 6719 EXPORT_SYMBOL_GPL(ata_timing_merge);
6708 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode); 6720 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
6709 6721
6710 #ifdef CONFIG_PCI 6722 #ifdef CONFIG_PCI
6711 EXPORT_SYMBOL_GPL(pci_test_config_bits); 6723 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6712 EXPORT_SYMBOL_GPL(ata_pci_remove_one); 6724 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6713 #ifdef CONFIG_PM 6725 #ifdef CONFIG_PM
6714 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend); 6726 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6715 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume); 6727 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6716 EXPORT_SYMBOL_GPL(ata_pci_device_suspend); 6728 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6717 EXPORT_SYMBOL_GPL(ata_pci_device_resume); 6729 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6718 #endif /* CONFIG_PM */ 6730 #endif /* CONFIG_PM */
6719 #endif /* CONFIG_PCI */ 6731 #endif /* CONFIG_PCI */
6720 6732
6721 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc); 6733 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6722 EXPORT_SYMBOL_GPL(ata_ehi_push_desc); 6734 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6723 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc); 6735 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
6724 EXPORT_SYMBOL_GPL(ata_port_desc); 6736 EXPORT_SYMBOL_GPL(ata_port_desc);
6725 #ifdef CONFIG_PCI 6737 #ifdef CONFIG_PCI
6726 EXPORT_SYMBOL_GPL(ata_port_pbar_desc); 6738 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6727 #endif /* CONFIG_PCI */ 6739 #endif /* CONFIG_PCI */
6728 EXPORT_SYMBOL_GPL(ata_port_schedule_eh); 6740 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6729 EXPORT_SYMBOL_GPL(ata_link_abort); 6741 EXPORT_SYMBOL_GPL(ata_link_abort);
6730 EXPORT_SYMBOL_GPL(ata_port_abort); 6742 EXPORT_SYMBOL_GPL(ata_port_abort);
6731 EXPORT_SYMBOL_GPL(ata_port_freeze); 6743 EXPORT_SYMBOL_GPL(ata_port_freeze);
6732 EXPORT_SYMBOL_GPL(sata_async_notification); 6744 EXPORT_SYMBOL_GPL(sata_async_notification);
6733 EXPORT_SYMBOL_GPL(ata_eh_freeze_port); 6745 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6734 EXPORT_SYMBOL_GPL(ata_eh_thaw_port); 6746 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6735 EXPORT_SYMBOL_GPL(ata_eh_qc_complete); 6747 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6736 EXPORT_SYMBOL_GPL(ata_eh_qc_retry); 6748 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6737 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error); 6749 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
6738 EXPORT_SYMBOL_GPL(ata_do_eh); 6750 EXPORT_SYMBOL_GPL(ata_do_eh);
6739 EXPORT_SYMBOL_GPL(ata_std_error_handler); 6751 EXPORT_SYMBOL_GPL(ata_std_error_handler);
6740 6752
6741 EXPORT_SYMBOL_GPL(ata_cable_40wire); 6753 EXPORT_SYMBOL_GPL(ata_cable_40wire);
6742 EXPORT_SYMBOL_GPL(ata_cable_80wire); 6754 EXPORT_SYMBOL_GPL(ata_cable_80wire);
6743 EXPORT_SYMBOL_GPL(ata_cable_unknown); 6755 EXPORT_SYMBOL_GPL(ata_cable_unknown);
6744 EXPORT_SYMBOL_GPL(ata_cable_ignore); 6756 EXPORT_SYMBOL_GPL(ata_cable_ignore);
6745 EXPORT_SYMBOL_GPL(ata_cable_sata); 6757 EXPORT_SYMBOL_GPL(ata_cable_sata);
6746 6758
drivers/ata/libata-eh.c
1 /* 1 /*
2 * libata-eh.c - libata error handling 2 * libata-eh.c - libata error handling
3 * 3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org 5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails. 6 * on emails.
7 * 7 *
8 * Copyright 2006 Tejun Heo <htejun@gmail.com> 8 * Copyright 2006 Tejun Heo <htejun@gmail.com>
9 * 9 *
10 * 10 *
11 * This program is free software; you can redistribute it and/or 11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as 12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; either version 2, or 13 * published by the Free Software Foundation; either version 2, or
14 * (at your option) any later version. 14 * (at your option) any later version.
15 * 15 *
16 * This program is distributed in the hope that it will be useful, 16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details. 19 * General Public License for more details.
20 * 20 *
21 * You should have received a copy of the GNU General Public License 21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to 22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, 23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
24 * USA. 24 * USA.
25 * 25 *
26 * 26 *
27 * libata documentation is available via 'make {ps|pdf}docs', 27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.* 28 * as Documentation/DocBook/libata.*
29 * 29 *
30 * Hardware documentation available from http://www.t13.org/ and 30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/ 31 * http://www.sata-io.org/
32 * 32 *
33 */ 33 */
34 34
35 #include <linux/kernel.h> 35 #include <linux/kernel.h>
36 #include <linux/blkdev.h> 36 #include <linux/blkdev.h>
37 #include <linux/pci.h> 37 #include <linux/pci.h>
38 #include <scsi/scsi.h> 38 #include <scsi/scsi.h>
39 #include <scsi/scsi_host.h> 39 #include <scsi/scsi_host.h>
40 #include <scsi/scsi_eh.h> 40 #include <scsi/scsi_eh.h>
41 #include <scsi/scsi_device.h> 41 #include <scsi/scsi_device.h>
42 #include <scsi/scsi_cmnd.h> 42 #include <scsi/scsi_cmnd.h>
43 #include <scsi/scsi_dbg.h> 43 #include <scsi/scsi_dbg.h>
44 #include "../scsi/scsi_transport_api.h" 44 #include "../scsi/scsi_transport_api.h"
45 45
46 #include <linux/libata.h> 46 #include <linux/libata.h>
47 47
48 #include "libata.h" 48 #include "libata.h"
49 49
50 enum { 50 enum {
51 /* speed down verdicts */ 51 /* speed down verdicts */
52 ATA_EH_SPDN_NCQ_OFF = (1 << 0), 52 ATA_EH_SPDN_NCQ_OFF = (1 << 0),
53 ATA_EH_SPDN_SPEED_DOWN = (1 << 1), 53 ATA_EH_SPDN_SPEED_DOWN = (1 << 1),
54 ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2), 54 ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2),
55 ATA_EH_SPDN_KEEP_ERRORS = (1 << 3), 55 ATA_EH_SPDN_KEEP_ERRORS = (1 << 3),
56 56
57 /* error flags */ 57 /* error flags */
58 ATA_EFLAG_IS_IO = (1 << 0), 58 ATA_EFLAG_IS_IO = (1 << 0),
59 ATA_EFLAG_DUBIOUS_XFER = (1 << 1), 59 ATA_EFLAG_DUBIOUS_XFER = (1 << 1),
60 60
61 /* error categories */ 61 /* error categories */
62 ATA_ECAT_NONE = 0, 62 ATA_ECAT_NONE = 0,
63 ATA_ECAT_ATA_BUS = 1, 63 ATA_ECAT_ATA_BUS = 1,
64 ATA_ECAT_TOUT_HSM = 2, 64 ATA_ECAT_TOUT_HSM = 2,
65 ATA_ECAT_UNK_DEV = 3, 65 ATA_ECAT_UNK_DEV = 3,
66 ATA_ECAT_DUBIOUS_NONE = 4, 66 ATA_ECAT_DUBIOUS_NONE = 4,
67 ATA_ECAT_DUBIOUS_ATA_BUS = 5, 67 ATA_ECAT_DUBIOUS_ATA_BUS = 5,
68 ATA_ECAT_DUBIOUS_TOUT_HSM = 6, 68 ATA_ECAT_DUBIOUS_TOUT_HSM = 6,
69 ATA_ECAT_DUBIOUS_UNK_DEV = 7, 69 ATA_ECAT_DUBIOUS_UNK_DEV = 7,
70 ATA_ECAT_NR = 8, 70 ATA_ECAT_NR = 8,
71 71
72 ATA_EH_CMD_DFL_TIMEOUT = 5000, 72 ATA_EH_CMD_DFL_TIMEOUT = 5000,
73 73
74 /* always put at least this amount of time between resets */ 74 /* always put at least this amount of time between resets */
75 ATA_EH_RESET_COOL_DOWN = 5000, 75 ATA_EH_RESET_COOL_DOWN = 5000,
76 76
77 /* Waiting in ->prereset can never be reliable. It's 77 /* Waiting in ->prereset can never be reliable. It's
78 * sometimes nice to wait there but it can't be depended upon; 78 * sometimes nice to wait there but it can't be depended upon;
79 * otherwise, we wouldn't be resetting. Just give it enough 79 * otherwise, we wouldn't be resetting. Just give it enough
80 * time for most drives to spin up. 80 * time for most drives to spin up.
81 */ 81 */
82 ATA_EH_PRERESET_TIMEOUT = 10000, 82 ATA_EH_PRERESET_TIMEOUT = 10000,
83 ATA_EH_FASTDRAIN_INTERVAL = 3000, 83 ATA_EH_FASTDRAIN_INTERVAL = 3000,
84 84
85 ATA_EH_UA_TRIES = 5, 85 ATA_EH_UA_TRIES = 5,
86 86
87 /* probe speed down parameters, see ata_eh_schedule_probe() */ 87 /* probe speed down parameters, see ata_eh_schedule_probe() */
88 ATA_EH_PROBE_TRIAL_INTERVAL = 60000, /* 1 min */ 88 ATA_EH_PROBE_TRIAL_INTERVAL = 60000, /* 1 min */
89 ATA_EH_PROBE_TRIALS = 2, 89 ATA_EH_PROBE_TRIALS = 2,
90 }; 90 };
91 91
92 /* The following table determines how we sequence resets. Each entry 92 /* The following table determines how we sequence resets. Each entry
93 * represents timeout for that try. The first try can be soft or 93 * represents timeout for that try. The first try can be soft or
94 * hardreset. All others are hardreset if available. In most cases 94 * hardreset. All others are hardreset if available. In most cases
95 * the first reset w/ 10sec timeout should succeed. Following entries 95 * the first reset w/ 10sec timeout should succeed. Following entries
96 * are mostly for error handling, hotplug and retarded devices. 96 * are mostly for error handling, hotplug and retarded devices.
97 */ 97 */
98 static const unsigned long ata_eh_reset_timeouts[] = { 98 static const unsigned long ata_eh_reset_timeouts[] = {
99 10000, /* most drives spin up by 10sec */ 99 10000, /* most drives spin up by 10sec */
100 10000, /* > 99% working drives spin up before 20sec */ 100 10000, /* > 99% working drives spin up before 20sec */
101 35000, /* give > 30 secs of idleness for retarded devices */ 101 35000, /* give > 30 secs of idleness for retarded devices */
102 5000, /* and sweet one last chance */ 102 5000, /* and sweet one last chance */
103 ULONG_MAX, /* > 1 min has elapsed, give up */ 103 ULONG_MAX, /* > 1 min has elapsed, give up */
104 }; 104 };
105 105
106 static const unsigned long ata_eh_identify_timeouts[] = { 106 static const unsigned long ata_eh_identify_timeouts[] = {
107 5000, /* covers > 99% of successes and not too boring on failures */ 107 5000, /* covers > 99% of successes and not too boring on failures */
108 10000, /* combined time till here is enough even for media access */ 108 10000, /* combined time till here is enough even for media access */
109 30000, /* for true idiots */ 109 30000, /* for true idiots */
110 ULONG_MAX, 110 ULONG_MAX,
111 }; 111 };
112 112
113 static const unsigned long ata_eh_flush_timeouts[] = { 113 static const unsigned long ata_eh_flush_timeouts[] = {
114 15000, /* be generous with flush */ 114 15000, /* be generous with flush */
115 15000, /* ditto */ 115 15000, /* ditto */
116 30000, /* and even more generous */ 116 30000, /* and even more generous */
117 ULONG_MAX, 117 ULONG_MAX,
118 }; 118 };
119 119
120 static const unsigned long ata_eh_other_timeouts[] = { 120 static const unsigned long ata_eh_other_timeouts[] = {
121 5000, /* same rationale as identify timeout */ 121 5000, /* same rationale as identify timeout */
122 10000, /* ditto */ 122 10000, /* ditto */
123 /* but no merciful 30sec for other commands, it just isn't worth it */ 123 /* but no merciful 30sec for other commands, it just isn't worth it */
124 ULONG_MAX, 124 ULONG_MAX,
125 }; 125 };
126 126
127 struct ata_eh_cmd_timeout_ent { 127 struct ata_eh_cmd_timeout_ent {
128 const u8 *commands; 128 const u8 *commands;
129 const unsigned long *timeouts; 129 const unsigned long *timeouts;
130 }; 130 };
131 131
132 /* The following table determines timeouts to use for EH internal 132 /* The following table determines timeouts to use for EH internal
133 * commands. Each table entry is a command class and matches the 133 * commands. Each table entry is a command class and matches the
134 * commands the entry applies to and the timeout table to use. 134 * commands the entry applies to and the timeout table to use.
135 * 135 *
136 * On the retry after a command timed out, the next timeout value from 136 * On the retry after a command timed out, the next timeout value from
137 * the table is used. If the table doesn't contain further entries, 137 * the table is used. If the table doesn't contain further entries,
138 * the last value is used. 138 * the last value is used.
139 * 139 *
140 * ehc->cmd_timeout_idx keeps track of which timeout to use per 140 * ehc->cmd_timeout_idx keeps track of which timeout to use per
141 * command class, so if SET_FEATURES times out on the first try, the 141 * command class, so if SET_FEATURES times out on the first try, the
142 * next try will use the second timeout value only for that class. 142 * next try will use the second timeout value only for that class.
143 */ 143 */
144 #define CMDS(cmds...) (const u8 []){ cmds, 0 } 144 #define CMDS(cmds...) (const u8 []){ cmds, 0 }
145 static const struct ata_eh_cmd_timeout_ent 145 static const struct ata_eh_cmd_timeout_ent
146 ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = { 146 ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
147 { .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI), 147 { .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
148 .timeouts = ata_eh_identify_timeouts, }, 148 .timeouts = ata_eh_identify_timeouts, },
149 { .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT), 149 { .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
150 .timeouts = ata_eh_other_timeouts, }, 150 .timeouts = ata_eh_other_timeouts, },
151 { .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT), 151 { .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
152 .timeouts = ata_eh_other_timeouts, }, 152 .timeouts = ata_eh_other_timeouts, },
153 { .commands = CMDS(ATA_CMD_SET_FEATURES), 153 { .commands = CMDS(ATA_CMD_SET_FEATURES),
154 .timeouts = ata_eh_other_timeouts, }, 154 .timeouts = ata_eh_other_timeouts, },
155 { .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS), 155 { .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
156 .timeouts = ata_eh_other_timeouts, }, 156 .timeouts = ata_eh_other_timeouts, },
157 { .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT), 157 { .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
158 .timeouts = ata_eh_flush_timeouts }, 158 .timeouts = ata_eh_flush_timeouts },
159 }; 159 };
160 #undef CMDS 160 #undef CMDS
161 161
162 static void __ata_port_freeze(struct ata_port *ap); 162 static void __ata_port_freeze(struct ata_port *ap);
163 #ifdef CONFIG_PM 163 #ifdef CONFIG_PM
164 static void ata_eh_handle_port_suspend(struct ata_port *ap); 164 static void ata_eh_handle_port_suspend(struct ata_port *ap);
165 static void ata_eh_handle_port_resume(struct ata_port *ap); 165 static void ata_eh_handle_port_resume(struct ata_port *ap);
166 #else /* CONFIG_PM */ 166 #else /* CONFIG_PM */
167 static void ata_eh_handle_port_suspend(struct ata_port *ap) 167 static void ata_eh_handle_port_suspend(struct ata_port *ap)
168 { } 168 { }
169 169
170 static void ata_eh_handle_port_resume(struct ata_port *ap) 170 static void ata_eh_handle_port_resume(struct ata_port *ap)
171 { } 171 { }
172 #endif /* CONFIG_PM */ 172 #endif /* CONFIG_PM */
173 173
174 static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt, 174 static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
175 va_list args) 175 va_list args)
176 { 176 {
177 ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len, 177 ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
178 ATA_EH_DESC_LEN - ehi->desc_len, 178 ATA_EH_DESC_LEN - ehi->desc_len,
179 fmt, args); 179 fmt, args);
180 } 180 }
181 181
182 /** 182 /**
183 * __ata_ehi_push_desc - push error description without adding separator 183 * __ata_ehi_push_desc - push error description without adding separator
184 * @ehi: target EHI 184 * @ehi: target EHI
185 * @fmt: printf format string 185 * @fmt: printf format string
186 * 186 *
187 * Format string according to @fmt and append it to @ehi->desc. 187 * Format string according to @fmt and append it to @ehi->desc.
188 * 188 *
189 * LOCKING: 189 * LOCKING:
190 * spin_lock_irqsave(host lock) 190 * spin_lock_irqsave(host lock)
191 */ 191 */
192 void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) 192 void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
193 { 193 {
194 va_list args; 194 va_list args;
195 195
196 va_start(args, fmt); 196 va_start(args, fmt);
197 __ata_ehi_pushv_desc(ehi, fmt, args); 197 __ata_ehi_pushv_desc(ehi, fmt, args);
198 va_end(args); 198 va_end(args);
199 } 199 }
200 200
201 /** 201 /**
202 * ata_ehi_push_desc - push error description with separator 202 * ata_ehi_push_desc - push error description with separator
203 * @ehi: target EHI 203 * @ehi: target EHI
204 * @fmt: printf format string 204 * @fmt: printf format string
205 * 205 *
206 * Format string according to @fmt and append it to @ehi->desc. 206 * Format string according to @fmt and append it to @ehi->desc.
207 * If @ehi->desc is not empty, ", " is added in-between. 207 * If @ehi->desc is not empty, ", " is added in-between.
208 * 208 *
209 * LOCKING: 209 * LOCKING:
210 * spin_lock_irqsave(host lock) 210 * spin_lock_irqsave(host lock)
211 */ 211 */
212 void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) 212 void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
213 { 213 {
214 va_list args; 214 va_list args;
215 215
216 if (ehi->desc_len) 216 if (ehi->desc_len)
217 __ata_ehi_push_desc(ehi, ", "); 217 __ata_ehi_push_desc(ehi, ", ");
218 218
219 va_start(args, fmt); 219 va_start(args, fmt);
220 __ata_ehi_pushv_desc(ehi, fmt, args); 220 __ata_ehi_pushv_desc(ehi, fmt, args);
221 va_end(args); 221 va_end(args);
222 } 222 }
223 223
224 /** 224 /**
225 * ata_ehi_clear_desc - clean error description 225 * ata_ehi_clear_desc - clean error description
226 * @ehi: target EHI 226 * @ehi: target EHI
227 * 227 *
228 * Clear @ehi->desc. 228 * Clear @ehi->desc.
229 * 229 *
230 * LOCKING: 230 * LOCKING:
231 * spin_lock_irqsave(host lock) 231 * spin_lock_irqsave(host lock)
232 */ 232 */
233 void ata_ehi_clear_desc(struct ata_eh_info *ehi) 233 void ata_ehi_clear_desc(struct ata_eh_info *ehi)
234 { 234 {
235 ehi->desc[0] = '\0'; 235 ehi->desc[0] = '\0';
236 ehi->desc_len = 0; 236 ehi->desc_len = 0;
237 } 237 }
238 238
239 /** 239 /**
240 * ata_port_desc - append port description 240 * ata_port_desc - append port description
241 * @ap: target ATA port 241 * @ap: target ATA port
242 * @fmt: printf format string 242 * @fmt: printf format string
243 * 243 *
244 * Format string according to @fmt and append it to port 244 * Format string according to @fmt and append it to port
245 * description. If port description is not empty, " " is added 245 * description. If port description is not empty, " " is added
246 * in-between. This function is to be used while initializing 246 * in-between. This function is to be used while initializing
247 * ata_host. The description is printed on host registration. 247 * ata_host. The description is printed on host registration.
248 * 248 *
249 * LOCKING: 249 * LOCKING:
250 * None. 250 * None.
251 */ 251 */
252 void ata_port_desc(struct ata_port *ap, const char *fmt, ...) 252 void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
253 { 253 {
254 va_list args; 254 va_list args;
255 255
256 WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING)); 256 WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));
257 257
258 if (ap->link.eh_info.desc_len) 258 if (ap->link.eh_info.desc_len)
259 __ata_ehi_push_desc(&ap->link.eh_info, " "); 259 __ata_ehi_push_desc(&ap->link.eh_info, " ");
260 260
261 va_start(args, fmt); 261 va_start(args, fmt);
262 __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args); 262 __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
263 va_end(args); 263 va_end(args);
264 } 264 }
265 265
266 #ifdef CONFIG_PCI 266 #ifdef CONFIG_PCI
267 267
268 /** 268 /**
269 * ata_port_pbar_desc - append PCI BAR description 269 * ata_port_pbar_desc - append PCI BAR description
270 * @ap: target ATA port 270 * @ap: target ATA port
271 * @bar: target PCI BAR 271 * @bar: target PCI BAR
272 * @offset: offset into PCI BAR 272 * @offset: offset into PCI BAR
273 * @name: name of the area 273 * @name: name of the area
274 * 274 *
275 * If @offset is negative, this function formats a string which 275 * If @offset is negative, this function formats a string which
276 * contains the name, address, size and type of the BAR and 276 * contains the name, address, size and type of the BAR and
277 * appends it to the port description. If @offset is zero or 277 * appends it to the port description. If @offset is zero or
278 * positive, only the name and the offset address are appended. 278 * positive, only the name and the offset address are appended.
279 * 279 *
280 * LOCKING: 280 * LOCKING:
281 * None. 281 * None.
282 */ 282 */
283 void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset, 283 void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
284 const char *name) 284 const char *name)
285 { 285 {
286 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 286 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
287 char *type = ""; 287 char *type = "";
288 unsigned long long start, len; 288 unsigned long long start, len;
289 289
290 if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) 290 if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
291 type = "m"; 291 type = "m";
292 else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) 292 else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
293 type = "i"; 293 type = "i";
294 294
295 start = (unsigned long long)pci_resource_start(pdev, bar); 295 start = (unsigned long long)pci_resource_start(pdev, bar);
296 len = (unsigned long long)pci_resource_len(pdev, bar); 296 len = (unsigned long long)pci_resource_len(pdev, bar);
297 297
298 if (offset < 0) 298 if (offset < 0)
299 ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start); 299 ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
300 else 300 else
301 ata_port_desc(ap, "%s 0x%llx", name, 301 ata_port_desc(ap, "%s 0x%llx", name,
302 start + (unsigned long long)offset); 302 start + (unsigned long long)offset);
303 } 303 }
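/*
 * Illustrative usage (not part of this diff), as in ahci.c: the
 * first call describes the whole BAR (@offset < 0), the second a
 * per-port region at a positive offset within it:
 *
 *	ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
 *	ata_port_pbar_desc(ap, AHCI_PCI_BAR,
 *			   0x100 + ap->port_no * 0x80, "port");
 */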
304 304
305 #endif /* CONFIG_PCI */ 305 #endif /* CONFIG_PCI */
306 306
307 static int ata_lookup_timeout_table(u8 cmd) 307 static int ata_lookup_timeout_table(u8 cmd)
308 { 308 {
309 int i; 309 int i;
310 310
311 for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) { 311 for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
312 const u8 *cur; 312 const u8 *cur;
313 313
314 for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++) 314 for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
315 if (*cur == cmd) 315 if (*cur == cmd)
316 return i; 316 return i;
317 } 317 }
318 318
319 return -1; 319 return -1;
320 } 320 }
321 321
322 /** 322 /**
323 * ata_internal_cmd_timeout - determine timeout for an internal command 323 * ata_internal_cmd_timeout - determine timeout for an internal command
324 * @dev: target device 324 * @dev: target device
325 * @cmd: internal command to be issued 325 * @cmd: internal command to be issued
326 * 326 *
327 * Determine timeout for internal command @cmd for @dev. 327 * Determine timeout for internal command @cmd for @dev.
328 * 328 *
329 * LOCKING: 329 * LOCKING:
330 * EH context. 330 * EH context.
331 * 331 *
332 * RETURNS: 332 * RETURNS:
333 * Determined timeout. 333 * Determined timeout.
334 */ 334 */
335 unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd) 335 unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
336 { 336 {
337 struct ata_eh_context *ehc = &dev->link->eh_context; 337 struct ata_eh_context *ehc = &dev->link->eh_context;
338 int ent = ata_lookup_timeout_table(cmd); 338 int ent = ata_lookup_timeout_table(cmd);
339 int idx; 339 int idx;
340 340
341 if (ent < 0) 341 if (ent < 0)
342 return ATA_EH_CMD_DFL_TIMEOUT; 342 return ATA_EH_CMD_DFL_TIMEOUT;
343 343
344 idx = ehc->cmd_timeout_idx[dev->devno][ent]; 344 idx = ehc->cmd_timeout_idx[dev->devno][ent];
345 return ata_eh_cmd_timeout_table[ent].timeouts[idx]; 345 return ata_eh_cmd_timeout_table[ent].timeouts[idx];
346 } 346 }
347 347
348 /** 348 /**
349 * ata_internal_cmd_timed_out - notification for internal command timeout 349 * ata_internal_cmd_timed_out - notification for internal command timeout
350 * @dev: target device 350 * @dev: target device
351 * @cmd: internal command which timed out 351 * @cmd: internal command which timed out
352 * 352 *
353 * Notify EH that internal command @cmd for @dev timed out. This 353 * Notify EH that internal command @cmd for @dev timed out. This
354 * function should be called only for commands whose timeouts are 354 * function should be called only for commands whose timeouts are
355 * determined using ata_internal_cmd_timeout(). 355 * determined using ata_internal_cmd_timeout().
356 * 356 *
357 * LOCKING: 357 * LOCKING:
358 * EH context. 358 * EH context.
359 */ 359 */
360 void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd) 360 void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
361 { 361 {
362 struct ata_eh_context *ehc = &dev->link->eh_context; 362 struct ata_eh_context *ehc = &dev->link->eh_context;
363 int ent = ata_lookup_timeout_table(cmd); 363 int ent = ata_lookup_timeout_table(cmd);
364 int idx; 364 int idx;
365 365
366 if (ent < 0) 366 if (ent < 0)
367 return; 367 return;
368 368
369 idx = ehc->cmd_timeout_idx[dev->devno][ent]; 369 idx = ehc->cmd_timeout_idx[dev->devno][ent];
370 if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX) 370 if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
371 ehc->cmd_timeout_idx[dev->devno][ent]++; 371 ehc->cmd_timeout_idx[dev->devno][ent]++;
372 } 372 }
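/*
 * Illustrative sketch (not part of this diff), loosely following
 * ata_exec_internal_sg(): the two helpers above cooperate so that
 * the timeout for a retried internal command escalates one table
 * step per failure.
 *
 *	if (!timeout)
 *		timeout = ata_internal_cmd_timeout(dev, command);
 *	...issue qc and wait up to msecs_to_jiffies(timeout)...
 *	if (qc->err_mask & AC_ERR_TIMEOUT)
 *		ata_internal_cmd_timed_out(dev, command);
 *
 * So an IDENTIFY that times out at 5s is retried with a 10s and
 * then a 30s timeout, per ata_eh_identify_timeouts above.
 */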
373 373
374 static void ata_ering_record(struct ata_ering *ering, unsigned int eflags, 374 static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
375 unsigned int err_mask) 375 unsigned int err_mask)
376 { 376 {
377 struct ata_ering_entry *ent; 377 struct ata_ering_entry *ent;
378 378
379 WARN_ON(!err_mask); 379 WARN_ON(!err_mask);
380 380
381 ering->cursor++; 381 ering->cursor++;
382 ering->cursor %= ATA_ERING_SIZE; 382 ering->cursor %= ATA_ERING_SIZE;
383 383
384 ent = &ering->ring[ering->cursor]; 384 ent = &ering->ring[ering->cursor];
385 ent->eflags = eflags; 385 ent->eflags = eflags;
386 ent->err_mask = err_mask; 386 ent->err_mask = err_mask;
387 ent->timestamp = get_jiffies_64(); 387 ent->timestamp = get_jiffies_64();
388 } 388 }
389 389
390 static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering) 390 static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
391 { 391 {
392 struct ata_ering_entry *ent = &ering->ring[ering->cursor]; 392 struct ata_ering_entry *ent = &ering->ring[ering->cursor];
393 393
394 if (ent->err_mask) 394 if (ent->err_mask)
395 return ent; 395 return ent;
396 return NULL; 396 return NULL;
397 } 397 }
398 398
399 static void ata_ering_clear(struct ata_ering *ering) 399 static void ata_ering_clear(struct ata_ering *ering)
400 { 400 {
401 memset(ering, 0, sizeof(*ering)); 401 memset(ering, 0, sizeof(*ering));
402 } 402 }
403 403
404 static int ata_ering_map(struct ata_ering *ering, 404 static int ata_ering_map(struct ata_ering *ering,
405 int (*map_fn)(struct ata_ering_entry *, void *), 405 int (*map_fn)(struct ata_ering_entry *, void *),
406 void *arg) 406 void *arg)
407 { 407 {
408 int idx, rc = 0; 408 int idx, rc = 0;
409 struct ata_ering_entry *ent; 409 struct ata_ering_entry *ent;
410 410
411 idx = ering->cursor; 411 idx = ering->cursor;
412 do { 412 do {
413 ent = &ering->ring[idx]; 413 ent = &ering->ring[idx];
414 if (!ent->err_mask) 414 if (!ent->err_mask)
415 break; 415 break;
416 rc = map_fn(ent, arg); 416 rc = map_fn(ent, arg);
417 if (rc) 417 if (rc)
418 break; 418 break;
419 idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE; 419 idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
420 } while (idx != ering->cursor); 420 } while (idx != ering->cursor);
421 421
422 return rc; 422 return rc;
423 } 423 }
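/*
 * Illustrative sketch (not part of this diff): ata_ering_map()
 * walks entries newest-first, stopping at the first empty slot or
 * when map_fn returns non-zero.  A hypothetical callback counting
 * recorded I/O errors could look like the one below, invoked as
 * ata_ering_map(&dev->ering, example_count_io_errors, &count).
 */
static int example_count_io_errors(struct ata_ering_entry *ent, void *arg)
{
	int *count = arg;

	if (ent->eflags & ATA_EFLAG_IS_IO)
		(*count)++;
	return 0;	/* 0 means keep walking the whole ring */
}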

static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	return ehc->i.action | ehc->i.dev_action[dev->devno];
}

static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
				struct ata_eh_info *ehi, unsigned int action)
{
	struct ata_device *tdev;

	if (!dev) {
		ehi->action &= ~action;
		ata_for_each_dev(tdev, link, ALL)
			ehi->dev_action[tdev->devno] &= ~action;
	} else {
		/* doesn't make sense for port-wide EH actions */
		WARN_ON(!(action & ATA_EH_PERDEV_MASK));

		/* break ehi->action into ehi->dev_action */
		if (ehi->action & action) {
			ata_for_each_dev(tdev, link, ALL)
				ehi->dev_action[tdev->devno] |=
					ehi->action & action;
			ehi->action &= ~action;
		}

		/* turn off the specified per-dev action */
		ehi->dev_action[dev->devno] &= ~action;
	}
}
/**
 * ata_scsi_timed_out - SCSI layer time out callback
 * @cmd: timed out SCSI command
 *
 * Handles SCSI layer timeout.  We race with normal completion of
 * the qc for @cmd.  If the qc is already gone, we lose and let
 * the scsi command finish (BLK_EH_HANDLED).  Otherwise, the qc has
 * timed out and EH should be invoked.  Prevent ata_qc_complete()
 * from finishing it by setting ATA_QCFLAG_EH_SCHEDULED and return
 * BLK_EH_NOT_HANDLED.
 *
 * TODO: kill this function once old EH is gone.
 *
 * LOCKING:
 * Called from timer context.
 *
 * RETURNS:
 * BLK_EH_HANDLED or BLK_EH_NOT_HANDLED
 */
enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	struct ata_queued_cmd *qc;
	enum blk_eh_timer_return ret;

	DPRINTK("ENTER\n");

	if (ap->ops->error_handler) {
		ret = BLK_EH_NOT_HANDLED;
		goto out;
	}

	ret = BLK_EH_HANDLED;
	spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
		ret = BLK_EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(ap->lock, flags);

 out:
	DPRINTK("EXIT, ret=%d\n", ret);
	return ret;
}

static void ata_eh_unload(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/* Restore SControl IPM and SPD for the next driver and
	 * disable attached devices.
	 */
	ata_for_each_link(link, ap, PMP_FIRST) {
		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
		ata_for_each_dev(dev, link, ALL)
			ata_dev_disable(dev);
	}

	/* freeze and set UNLOADED */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_freeze(ap);			/* won't be thawed */
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
	ap->pflags |= ATA_PFLAG_UNLOADED;

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_scsi_error - SCSI layer error handler callback
 * @host: SCSI host on which error occurred
 *
 * Handles SCSI-layer-thrown error events.
 *
 * LOCKING:
 * Inherited from SCSI layer (none, can sleep)
 *
 * RETURNS:
 * Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	int i;
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* make sure sff pio task is not running */
	ata_sff_flush_pio_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		spin_lock_irqsave(ap->lock, flags);

		/* This must occur under the ap->lock as we don't want
		 * a polled recovery to race the real interrupt handler.
		 *
		 * The lost_interrupt handler checks for any completed but
		 * non-notified command and completes much like an IRQ handler.
		 *
		 * We then fall into the error recovery code which will treat
		 * this as if normal completion won the race.
		 */
		if (ap->ops->lost_interrupt)
			ap->ops->lost_interrupt(ap);

		list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
			struct ata_queued_cmd *qc;

			for (i = 0; i < ATA_MAX_QUEUE; i++) {
				qc = __ata_qc_from_tag(ap, i);
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs, they belong to EH from
		 * this point on, but the state of the controller is
		 * unknown.  Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* initialize eh_tries */
		ap->eh_tries = ATA_EH_MAX_TRIES;
	} else
		spin_unlock_wait(ap->lock);

	/* If we merely raced normal completion and there is nothing to
	 * recover (nr_timedout == 0), why exactly are we doing error
	 * recovery here?
	 */

 repeat:
	/* invoke error handler */
	if (ap->ops->error_handler) {
		struct ata_link *link;

		/* kill fast drain timer */
		del_timer_sync(&ap->fastdrain_timer);

		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		ata_for_each_link(link, ap, HOST_FIRST) {
			struct ata_eh_context *ehc = &link->eh_context;
			struct ata_device *dev;

			memset(&link->eh_context, 0, sizeof(link->eh_context));
			link->eh_context.i = link->eh_info;
			memset(&link->eh_info, 0, sizeof(link->eh_info));

			ata_for_each_dev(dev, link, ENABLED) {
				int devno = dev->devno;

				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
				if (ata_ncq_enabled(dev))
					ehc->saved_ncq_enabled |= 1 << devno;
			}
		}

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		ap->excl_link = NULL;	/* don't maintain exclusion over EH */

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else {
			/* if unloading, commence suicide */
			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
			    !(ap->pflags & ATA_PFLAG_UNLOADED))
				ata_eh_unload(ap);
			ata_eh_finish(ap);
		}

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* An exception might have happened after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such a case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--ap->eh_tries) {
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
					"tries, giving up\n", ATA_EH_MAX_TRIES);
			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		}

		/* this run is complete, make sure EH info is clear */
		ata_for_each_link(link, ap, HOST_FIRST)
			memset(&link->eh_info, 0, sizeof(link->eh_info));

		/* Clear host_eh_scheduled while holding ap->lock such
		 * that if exception occurs after this point but
		 * before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		host->host_eh_scheduled = 0;

		spin_unlock_irqrestore(ap->lock, flags);
	} else {
		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	/* finish or retry handled scmd's and clean up */
	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
		schedule_delayed_work(&ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_printk(ap, KERN_INFO, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("EXIT\n");
}

/**
 * ata_port_wait_eh - Wait for the currently pending EH to complete
 * @ap: Port to wait EH for
 *
 * Wait until the currently pending EH is complete.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		msleep(10);
		goto retry;
	}
}

static int ata_eh_nr_in_flight(struct ata_port *ap)
{
	unsigned int tag;
	int nr = 0;

	/* count only non-internal commands */
	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
		if (ata_qc_from_tag(ap, tag))
			nr++;

	return nr;
}

void ata_eh_fastdrain_timerfn(unsigned long arg)
{
	struct ata_port *ap = (void *)arg;
	unsigned long flags;
	int cnt;

	spin_lock_irqsave(ap->lock, flags);

	cnt = ata_eh_nr_in_flight(ap);

	/* are we done? */
	if (!cnt)
		goto out_unlock;

	if (cnt == ap->fastdrain_cnt) {
		unsigned int tag;

		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
		 */
		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
		add_timer(&ap->fastdrain_timer);
	}

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 * @ap: target ATA port
 * @fastdrain: activate fast drain
 *
 * Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 * is non-zero and EH wasn't pending before.  Fast drain ensures
 * that EH kicks in in a timely manner.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
	int cnt;

	/* already scheduled? */
	if (ap->pflags & ATA_PFLAG_EH_PENDING)
		return;

	ap->pflags |= ATA_PFLAG_EH_PENDING;

	if (!fastdrain)
		return;

	/* do we have in-flight qcs? */
	cnt = ata_eh_nr_in_flight(ap);
	if (!cnt)
		return;

	/* activate fast drain */
	ap->fastdrain_cnt = cnt;
	ap->fastdrain_timer.expires =
		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
	add_timer(&ap->fastdrain_timer);
}

/**
 * ata_qc_schedule_eh - schedule qc for error handling
 * @qc: command to schedule error handling for
 *
 * Schedule error handling for @qc.  EH will kick in as soon as
 * other commands are drained.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct request_queue *q = qc->scsicmd->device->request_queue;
	unsigned long flags;

	WARN_ON(!ap->ops->error_handler);

	qc->flags |= ATA_QCFLAG_FAILED;
	ata_eh_set_pending(ap, 1);

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_abort_request(qc->scsicmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/**
 * ata_port_schedule_eh - schedule error handling without a qc
 * @ap: ATA port to schedule EH for
 *
 * Schedule error handling for @ap.  EH will kick in as soon as
 * all commands are drained.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->pflags & ATA_PFLAG_INITIALIZING)
		return;

	ata_eh_set_pending(ap, 1);
	scsi_schedule_eh(ap->scsi_host);

	DPRINTK("port EH scheduled\n");
}
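
/* Together with ata_port_wait_eh() above, this gives drivers a
 * synchronous "run EH now" primitive.  A minimal sketch honoring the
 * LOCKING rules (schedule under the host lock, wait in sleepable
 * context); example_run_eh() is hypothetical.
 */
static void example_run_eh(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	ata_port_schedule_eh(ap);	/* requires host lock */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);		/* may sleep */
}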

static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
	int tag, nr_aborted = 0;

	WARN_ON(!ap->ops->error_handler);

	/* we're gonna abort all commands, no need for fast drain */
	ata_eh_set_pending(ap, 0);

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

		if (qc && (!link || qc->dev->link == link)) {
			qc->flags |= ATA_QCFLAG_FAILED;
			ata_qc_complete(qc);
			nr_aborted++;
		}
	}

	if (!nr_aborted)
		ata_port_schedule_eh(ap);

	return nr_aborted;
}

/**
 * ata_link_abort - abort all qc's on the link
 * @link: ATA link to abort qc's for
 *
 * Abort all active qc's on @link and schedule EH.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
	return ata_do_link_abort(link->ap, link);
}

/**
 * ata_port_abort - abort all qc's on the port
 * @ap: ATA port to abort qc's for
 *
 * Abort all active qc's of @ap and schedule EH.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
	return ata_do_link_abort(ap, NULL);
}

/**
 * __ata_port_freeze - freeze port
 * @ap: ATA port to freeze
 *
 * This function is called when an HSM violation or some other
 * condition disrupts normal operation of the port.  A frozen port
 * is not allowed to perform any operation until the port is
 * thawed, which usually follows a successful reset.
 *
 * The ap->ops->freeze() callback can be used for freezing the port
 * hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 * port cannot be frozen hardware-wise, the interrupt handler
 * must ack and clear interrupts unconditionally while the port
 * is frozen.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;

	DPRINTK("ata%u port frozen\n", ap->print_id);
}

/**
 * ata_port_freeze - abort & freeze port
 * @ap: ATA port to freeze
 *
 * Abort and freeze @ap.  The freeze operation must be called
 * first, because some hardware requires special operations
 * before the taskfile registers are accessible.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
	int nr_aborted;

	WARN_ON(!ap->ops->error_handler);

	__ata_port_freeze(ap);
	nr_aborted = ata_port_abort(ap);

	return nr_aborted;
}
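
/* A typical caller is an interrupt handler that has detected a fatal
 * condition.  A hypothetical fragment, assuming the host lock is
 * already held as ata_port_freeze() requires; the HSM-violation case
 * is illustrative.
 */
static void example_freeze_on_hsm_violation(struct ata_port *ap,
					    struct ata_queued_cmd *qc)
{
	qc->err_mask |= AC_ERR_HSM;
	ata_port_freeze(ap);	/* aborts all active qcs, including @qc */
}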

/**
 * sata_async_notification - SATA async notification handler
 * @ap: ATA port where async notification is received
 *
 * Handler to be called when async notification via SDB FIS is
 * received.  This function schedules EH if necessary.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * 1 if EH is scheduled, 0 otherwise.
 */
int sata_async_notification(struct ata_port *ap)
{
	u32 sntf;
	int rc;

	if (!(ap->flags & ATA_FLAG_AN))
		return 0;

	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
	if (rc == 0)
		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);

	if (!sata_pmp_attached(ap) || rc) {
		/* PMP is not attached or SNTF is not available */
		if (!sata_pmp_attached(ap)) {
			/* PMP is not attached.  Check whether ATAPI
			 * AN is configured.  If so, notify media
			 * change.
			 */
			struct ata_device *dev = ap->link.device;

			if ((dev->class == ATA_DEV_ATAPI) &&
			    (dev->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(dev);
			return 0;
		} else {
			/* PMP is attached but SNTF is not available.
			 * ATAPI async media change notification is
			 * not used.  The PMP must be reporting a PHY
			 * status change; schedule EH.
			 */
			ata_port_schedule_eh(ap);
			return 1;
		}
	} else {
		/* PMP is attached and SNTF is available */
		struct ata_link *link;

		/* check and notify ATAPI AN */
		ata_for_each_link(link, ap, EDGE) {
			if (!(sntf & (1 << link->pmp)))
				continue;

			if ((link->device->class == ATA_DEV_ATAPI) &&
			    (link->device->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(link->device);
		}

		/* If PMP is reporting that PHY status of some
		 * downstream ports has changed, schedule EH.
		 */
		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
			ata_port_schedule_eh(ap);
			return 1;
		}

		return 0;
	}
}
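
/* A minimal sketch of an interrupt-path caller, under the host lock as
 * required.  sdb_fis_notify_seen() is a hypothetical stand-in for
 * however the controller reports an SDB FIS with the notification bit
 * set; it is not a libata helper.
 */
static void example_an_irq_fragment(struct ata_port *ap)
{
	if (sdb_fis_notify_seen(ap))	/* hypothetical, driver-specific */
		sata_async_notification(ap);
}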

/**
 * ata_eh_freeze_port - EH helper to freeze port
 * @ap: ATA port to freeze
 *
 * Freeze @ap.
 *
 * LOCKING:
 * None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	__ata_port_freeze(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_eh_thaw_port - EH helper to thaw port
 * @ap: ATA port to thaw
 *
 * Thaw frozen port @ap.
 *
 * LOCKING:
 * None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_FROZEN;

	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("ata%u port thawed\n", ap->print_id);
}

static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}

static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}

/**
 * ata_eh_qc_complete - Complete an active ATA command from EH
 * @qc: Command to complete
 *
 * Indicate to the mid and upper layers that an ATA command has
 * completed.  To be used from EH.
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	scmd->retries = scmd->allowed;
	__ata_eh_qc_complete(qc);
}

/**
 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
 * @qc: Command to retry
 *
 * Indicate to the mid and upper layers that an ATA command
 * should be retried.  To be used from EH.
 *
 * SCSI midlayer limits the number of retries to scmd->allowed.
 * scmd->retries is decremented for commands which get retried
 * due to unrelated failures (qc->err_mask is zero).
 */
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	if (!qc->err_mask && scmd->retries)
		scmd->retries--;
	__ata_eh_qc_complete(qc);
}
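
/* A minimal sketch of an EH tail using the pair above: complete failed
 * qcs, but let commands that failed through no fault of their own
 * (err_mask == 0) retry without consuming one of scmd's allowed
 * retries.  example_finish_qcs() is hypothetical, loosely modeled on
 * libata's own EH-finish loop.
 */
static void example_finish_qcs(struct ata_port *ap)
{
	int tag;

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;
		if (qc->err_mask)
			ata_eh_qc_complete(qc);
		else
			ata_eh_qc_retry(qc);
	}
}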

/**
 * ata_dev_disable - disable ATA device
 * @dev: ATA device to disable
 *
 * Disable @dev.
 *
 * LOCKING:
 * EH context.
 */
void ata_dev_disable(struct ata_device *dev)
{
	if (!ata_dev_enabled(dev))
		return;

	if (ata_msg_drv(dev->link->ap))
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
	ata_acpi_on_disable(dev);
	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
	dev->class++;

	/* From now till the next successful probe, ering is used to
	 * track probe failures.  Clear accumulated device error info.
	 */
	ata_ering_clear(&dev->ering);
}

/**
 * ata_eh_detach_dev - detach ATA device
 * @dev: ATA device to detach
 *
 * Detach @dev.
 *
 * LOCKING:
 * None.
 */
void ata_eh_detach_dev(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	ata_dev_disable(dev);

	spin_lock_irqsave(ap->lock, flags);

	dev->flags &= ~ATA_DFLAG_DETACH;

	if (ata_scsi_offline_dev(dev)) {
		dev->flags |= ATA_DFLAG_DETACHED;
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
	}

	/* clear per-dev EH info */
	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_eh_about_to_do - about to perform eh_action
 * @link: target ATA link
 * @dev: target ATA dev for per-dev action (can be NULL)
 * @action: action about to be performed
 *
 * Called just before performing EH actions to clear related bits
 * in @link->eh_info such that eh actions are not unnecessarily
 * repeated.
 *
 * LOCKING:
 * None.
 */
void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
			unsigned int action)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_info *ehi = &link->eh_info;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);

	ata_eh_clear_action(link, dev, ehi, action);

	/* About to take EH action, set RECOVERED.  Ignore actions on
	 * slave links as master will do them again.
	 */
	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
		ap->pflags |= ATA_PFLAG_RECOVERED;

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_eh_done - EH action complete
 * @link: target ATA link
 * @dev: target ATA dev for per-dev action (can be NULL)
 * @action: action just completed
 *
 * Called right after performing EH actions to clear related bits
 * in @link->eh_context.
 *
 * LOCKING:
 * None.
 */
void ata_eh_done(struct ata_link *link, struct ata_device *dev,
		 unsigned int action)
{
	struct ata_eh_context *ehc = &link->eh_context;

	ata_eh_clear_action(link, dev, &ehc->i, action);
}
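
/* ata_eh_about_to_do() and ata_eh_done() are meant to bracket each EH
 * action.  A minimal sketch; do_example_reset() is a hypothetical
 * stand-in for the actual recovery step.
 */
static int example_reset_bracket(struct ata_link *link)
{
	int rc;

	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);	/* clear eh_info bits */
	rc = do_example_reset(link);			/* hypothetical */
	if (rc == 0)
		ata_eh_done(link, NULL, ATA_EH_RESET);	/* clear eh_context bits */
	return rc;
}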

/**
 * ata_err_string - convert err_mask to descriptive string
 * @err_mask: error mask to convert to string
 *
 * Convert @err_mask to descriptive string.  Errors are
 * prioritized according to severity and only the most severe
 * error is reported.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Descriptive string for @err_mask
 */
static const char *ata_err_string(unsigned int err_mask)
{
	if (err_mask & AC_ERR_HOST_BUS)
		return "host bus error";
	if (err_mask & AC_ERR_ATA_BUS)
		return "ATA bus error";
	if (err_mask & AC_ERR_TIMEOUT)
		return "timeout";
	if (err_mask & AC_ERR_HSM)
		return "HSM violation";
	if (err_mask & AC_ERR_SYSTEM)
		return "internal error";
	if (err_mask & AC_ERR_MEDIA)
		return "media error";
	if (err_mask & AC_ERR_INVALID)
		return "invalid argument";
	if (err_mask & AC_ERR_DEV)
		return "device error";
	return "unknown error";
}

/**
 * ata_read_log_page - read a specific log page
 * @dev: target device
 * @page: page to read
 * @buf: buffer to store read page
 * @sectors: number of sectors to read
 *
 * Read log page using READ_LOG_EXT command.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_read_log_page(struct ata_device *dev,
				      u8 page, void *buf, unsigned int sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	DPRINTK("read log page - page %d\n", page);

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_READ_LOG_EXT;
	tf.lbal = page;
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}

/**
 * ata_eh_read_log_10h - Read log page 10h for NCQ error details
 * @dev: Device to read log page 10h from
 * @tag: Resulting tag of the failed command
 * @tf: Resulting taskfile registers of the failed command
 *
 * Read log page 10h to obtain NCQ error details and clear error
 * condition.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
			       int *tag, struct ata_taskfile *tf)
{
	u8 *buf = dev->link->ap->sector_buf;
	unsigned int err_mask;
	u8 csum;
	int i;

	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
	if (err_mask)
		return -EIO;

	csum = 0;
	for (i = 0; i < ATA_SECT_SIZE; i++)
		csum += buf[i];
	if (csum)
		ata_dev_printk(dev, KERN_WARNING,
			"invalid checksum 0x%x on log page 10h\n", csum);

	if (buf[0] & 0x80)
		return -ENOENT;

	*tag = buf[0] & 0x1f;

	tf->command = buf[2];
	tf->feature = buf[3];
	tf->lbal = buf[4];
	tf->lbam = buf[5];
	tf->lbah = buf[6];
	tf->device = buf[7];
	tf->hob_lbal = buf[8];
	tf->hob_lbam = buf[9];
	tf->hob_lbah = buf[10];
	tf->nsect = buf[12];
	tf->hob_nsect = buf[13];

	return 0;
}
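
/* A minimal sketch of how NCQ error analysis might consume the helper
 * above: read the log page, map the reported tag back to its qc and
 * attribute the device error there.  example_analyze_ncq_error() is
 * hypothetical; it mirrors the shape, not the letter, of libata's NCQ
 * error analysis.
 */
static void example_analyze_ncq_error(struct ata_link *link)
{
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	int tag;

	if (ata_eh_read_log_10h(link->device, &tag, &tf))
		return;

	qc = __ata_qc_from_tag(link->ap, tag);
	qc->result_tf = tf;		/* taskfile the device reported */
	qc->err_mask |= AC_ERR_DEV;
}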
1455 1455
1456 /** 1456 /**
1457 * atapi_eh_tur - perform ATAPI TEST_UNIT_READY 1457 * atapi_eh_tur - perform ATAPI TEST_UNIT_READY
1458 * @dev: target ATAPI device 1458 * @dev: target ATAPI device
1459 * @r_sense_key: out parameter for sense_key 1459 * @r_sense_key: out parameter for sense_key
1460 * 1460 *
1461 * Perform ATAPI TEST_UNIT_READY. 1461 * Perform ATAPI TEST_UNIT_READY.
1462 * 1462 *
1463 * LOCKING: 1463 * LOCKING:
1464 * EH context (may sleep). 1464 * EH context (may sleep).
1465 * 1465 *
1466 * RETURNS: 1466 * RETURNS:
1467 * 0 on success, AC_ERR_* mask on failure. 1467 * 0 on success, AC_ERR_* mask on failure.
1468 */ 1468 */
1469 static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key) 1469 static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
1470 { 1470 {
1471 u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 }; 1471 u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
1472 struct ata_taskfile tf; 1472 struct ata_taskfile tf;
1473 unsigned int err_mask; 1473 unsigned int err_mask;
1474 1474
1475 ata_tf_init(dev, &tf); 1475 ata_tf_init(dev, &tf);
1476 1476
1477 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1477 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1478 tf.command = ATA_CMD_PACKET; 1478 tf.command = ATA_CMD_PACKET;
1479 tf.protocol = ATAPI_PROT_NODATA; 1479 tf.protocol = ATAPI_PROT_NODATA;
1480 1480
1481 err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0); 1481 err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
1482 if (err_mask == AC_ERR_DEV) 1482 if (err_mask == AC_ERR_DEV)
1483 *r_sense_key = tf.feature >> 4; 1483 *r_sense_key = tf.feature >> 4;
1484 return err_mask; 1484 return err_mask;
1485 } 1485 }
1486 1486
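When the packet command fails with AC_ERR_DEV, the ATAPI sense key comes back in the high nibble of the error register, which libata hands back in tf.feature. A tiny sketch of that extraction, with a made-up register value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint8_t feature = 0x24;         /* hypothetical error register value */
        uint8_t sense_key = feature >> 4;  /* same shift as atapi_eh_tur() */

        printf("sense key %u\n", sense_key);    /* 2 == NOT READY */
        return 0;
}

Sense key 2 (NOT READY) is the usual answer while media is spinning up, which is what makes TEST UNIT READY a cheap polling probe.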
1487 /** 1487 /**
1488 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE 1488 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
1489 * @dev: device to perform REQUEST_SENSE to 1489 * @dev: device to perform REQUEST_SENSE to
1490 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) 1490 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
1491 * @dfl_sense_key: default sense key to use 1491 * @dfl_sense_key: default sense key to use
1492 * 1492 *
1493 * Perform ATAPI REQUEST_SENSE after the device reported CHECK 1493 * Perform ATAPI REQUEST_SENSE after the device reported CHECK
1494 * SENSE. This function is an EH helper. 1494 * SENSE. This function is an EH helper.
1495 * 1495 *
1496 * LOCKING: 1496 * LOCKING:
1497 * Kernel thread context (may sleep). 1497 * Kernel thread context (may sleep).
1498 * 1498 *
1499 * RETURNS: 1499 * RETURNS:
1500 * 0 on success, AC_ERR_* mask on failure. 1500 * 0 on success, AC_ERR_* mask on failure.
1501 */ 1501 */
1502 static unsigned int atapi_eh_request_sense(struct ata_device *dev, 1502 static unsigned int atapi_eh_request_sense(struct ata_device *dev,
1503 u8 *sense_buf, u8 dfl_sense_key) 1503 u8 *sense_buf, u8 dfl_sense_key)
1504 { 1504 {
1505 u8 cdb[ATAPI_CDB_LEN] = 1505 u8 cdb[ATAPI_CDB_LEN] =
1506 { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 }; 1506 { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
1507 struct ata_port *ap = dev->link->ap; 1507 struct ata_port *ap = dev->link->ap;
1508 struct ata_taskfile tf; 1508 struct ata_taskfile tf;
1509 1509
1510 DPRINTK("ATAPI request sense\n"); 1510 DPRINTK("ATAPI request sense\n");
1511 1511
1512 /* FIXME: is this needed? */ 1512 /* FIXME: is this needed? */
1513 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); 1513 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
1514 1514
1515 /* initialize sense_buf with the error register, 1515 /* initialize sense_buf with the error register,
1516 * for the case where it is -not- overwritten 1516 * for the case where it is -not- overwritten
1517 */ 1517 */
1518 sense_buf[0] = 0x70; 1518 sense_buf[0] = 0x70;
1519 sense_buf[2] = dfl_sense_key; 1519 sense_buf[2] = dfl_sense_key;
1520 1520
1521 /* some devices time out if garbage left in tf */ 1521 /* some devices time out if garbage left in tf */
1522 ata_tf_init(dev, &tf); 1522 ata_tf_init(dev, &tf);
1523 1523
1524 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1524 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1525 tf.command = ATA_CMD_PACKET; 1525 tf.command = ATA_CMD_PACKET;
1526 1526
1527 /* is it pointless to prefer PIO for "safety reasons"? */ 1527 /* is it pointless to prefer PIO for "safety reasons"? */
1528 if (ap->flags & ATA_FLAG_PIO_DMA) { 1528 if (ap->flags & ATA_FLAG_PIO_DMA) {
1529 tf.protocol = ATAPI_PROT_DMA; 1529 tf.protocol = ATAPI_PROT_DMA;
1530 tf.feature |= ATAPI_PKT_DMA; 1530 tf.feature |= ATAPI_PKT_DMA;
1531 } else { 1531 } else {
1532 tf.protocol = ATAPI_PROT_PIO; 1532 tf.protocol = ATAPI_PROT_PIO;
1533 tf.lbam = SCSI_SENSE_BUFFERSIZE; 1533 tf.lbam = SCSI_SENSE_BUFFERSIZE;
1534 tf.lbah = 0; 1534 tf.lbah = 0;
1535 } 1535 }
1536 1536
1537 return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, 1537 return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
1538 sense_buf, SCSI_SENSE_BUFFERSIZE, 0); 1538 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
1539 } 1539 }
1540 1540
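The pre-fill above guards against devices that fail the REQUEST SENSE itself: byte 0 is set to 0x70 (fixed-format, current error) and byte 2 to the sense key taken from the error register, so the SCSI layer still sees something coherent. A self-contained sketch of that fallback, assuming the 96-byte SCSI_SENSE_BUFFERSIZE of this era (the exact size is incidental):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define SENSE_BUFFERSIZE 96     /* assumed SCSI_SENSE_BUFFERSIZE */

/* pre-fill a fixed-format sense buffer the way atapi_eh_request_sense()
 * does, so the caller sees a sane sense key even if the device never
 * overwrites the buffer */
static void prefill_sense(uint8_t *buf, uint8_t dfl_sense_key)
{
        memset(buf, 0, SENSE_BUFFERSIZE);
        buf[0] = 0x70;                  /* fixed-format, current errors */
        buf[2] = dfl_sense_key;         /* key from the error register */
}

int main(void)
{
        uint8_t sense[SENSE_BUFFERSIZE];

        prefill_sense(sense, 0x05);     /* 5 == ILLEGAL REQUEST (example) */
        printf("response 0x%02x, sense key %u\n", sense[0], sense[2] & 0x0f);
        return 0;
}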
1541 /** 1541 /**
1542 * ata_eh_analyze_serror - analyze SError for a failed port 1542 * ata_eh_analyze_serror - analyze SError for a failed port
1543 * @link: ATA link to analyze SError for 1543 * @link: ATA link to analyze SError for
1544 * 1544 *
1545 * Analyze SError if available and further determine cause of 1545 * Analyze SError if available and further determine cause of
1546 * failure. 1546 * failure.
1547 * 1547 *
1548 * LOCKING: 1548 * LOCKING:
1549 * None. 1549 * None.
1550 */ 1550 */
1551 static void ata_eh_analyze_serror(struct ata_link *link) 1551 static void ata_eh_analyze_serror(struct ata_link *link)
1552 { 1552 {
1553 struct ata_eh_context *ehc = &link->eh_context; 1553 struct ata_eh_context *ehc = &link->eh_context;
1554 u32 serror = ehc->i.serror; 1554 u32 serror = ehc->i.serror;
1555 unsigned int err_mask = 0, action = 0; 1555 unsigned int err_mask = 0, action = 0;
1556 u32 hotplug_mask; 1556 u32 hotplug_mask;
1557 1557
1558 if (serror & (SERR_PERSISTENT | SERR_DATA)) { 1558 if (serror & (SERR_PERSISTENT | SERR_DATA)) {
1559 err_mask |= AC_ERR_ATA_BUS; 1559 err_mask |= AC_ERR_ATA_BUS;
1560 action |= ATA_EH_RESET; 1560 action |= ATA_EH_RESET;
1561 } 1561 }
1562 if (serror & SERR_PROTOCOL) { 1562 if (serror & SERR_PROTOCOL) {
1563 err_mask |= AC_ERR_HSM; 1563 err_mask |= AC_ERR_HSM;
1564 action |= ATA_EH_RESET; 1564 action |= ATA_EH_RESET;
1565 } 1565 }
1566 if (serror & SERR_INTERNAL) { 1566 if (serror & SERR_INTERNAL) {
1567 err_mask |= AC_ERR_SYSTEM; 1567 err_mask |= AC_ERR_SYSTEM;
1568 action |= ATA_EH_RESET; 1568 action |= ATA_EH_RESET;
1569 } 1569 }
1570 1570
1571 /* Determine whether a hotplug event has occurred. Both 1571 /* Determine whether a hotplug event has occurred. Both
1572 * SError.N/X are considered hotplug events for enabled or 1572 * SError.N/X are considered hotplug events for enabled or
1573 * host links. For disabled PMP links, only the N bit is 1573 * host links. For disabled PMP links, only the N bit is
1574 * considered, as the X bit is left at 1 for link plugging. 1574 * considered, as the X bit is left at 1 for link plugging.
1575 */ 1575 */
1576 hotplug_mask = 0; 1576 hotplug_mask = 0;
1577 1577
1578 if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) 1578 if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
1579 hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG; 1579 hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
1580 else 1580 else
1581 hotplug_mask = SERR_PHYRDY_CHG; 1581 hotplug_mask = SERR_PHYRDY_CHG;
1582 1582
1583 if (serror & hotplug_mask) 1583 if (serror & hotplug_mask)
1584 ata_ehi_hotplugged(&ehc->i); 1584 ata_ehi_hotplugged(&ehc->i);
1585 1585
1586 ehc->i.err_mask |= err_mask; 1586 ehc->i.err_mask |= err_mask;
1587 ehc->i.action |= action; 1587 ehc->i.action |= action;
1588 } 1588 }
1589 1589
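The classification above maps bits of the SATA SError register onto err_mask/action pairs. A worked decode of one hypothetical SError value, using bit positions as defined in include/linux/ata.h (ERR field in the low 16 bits, DIAG field in the high 16):

#include <stdio.h>
#include <stdint.h>

#define SERR_DATA       (1U << 8)       /* unrecovered data error */
#define SERR_PERSISTENT (1U << 9)       /* persistent comm/data error */
#define SERR_PROTOCOL   (1U << 10)      /* protocol violation */
#define SERR_INTERNAL   (1U << 11)      /* host internal error */
#define SERR_PHYRDY_CHG (1U << 16)      /* PHY RDY changed */
#define SERR_DEV_XCHG   (1U << 26)      /* device exchanged */

int main(void)
{
        uint32_t serror = SERR_DATA | SERR_PHYRDY_CHG;  /* sample value */

        if (serror & (SERR_PERSISTENT | SERR_DATA))
                printf("-> ATA bus error, reset needed\n");
        if (serror & SERR_PROTOCOL)
                printf("-> HSM violation, reset needed\n");
        if (serror & SERR_INTERNAL)
                printf("-> host internal error, reset needed\n");
        if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG))
                printf("-> hotplug event (enabled/host link rules)\n");
        return 0;
}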
1590 /** 1590 /**
1591 * ata_eh_analyze_ncq_error - analyze NCQ error 1591 * ata_eh_analyze_ncq_error - analyze NCQ error
1592 * @link: ATA link to analyze NCQ error for 1592 * @link: ATA link to analyze NCQ error for
1593 * 1593 *
1594 * Read log page 10h, determine the offending qc and acquire 1594 * Read log page 10h, determine the offending qc and acquire
1595 * error status TF. For NCQ device errors, all LLDDs have to do 1595 * error status TF. For NCQ device errors, all LLDDs have to do
1596 * is set AC_ERR_DEV in ehi->err_mask. This function takes 1596 * is set AC_ERR_DEV in ehi->err_mask. This function takes
1597 * care of the rest. 1597 * care of the rest.
1598 * 1598 *
1599 * LOCKING: 1599 * LOCKING:
1600 * Kernel thread context (may sleep). 1600 * Kernel thread context (may sleep).
1601 */ 1601 */
1602 void ata_eh_analyze_ncq_error(struct ata_link *link) 1602 void ata_eh_analyze_ncq_error(struct ata_link *link)
1603 { 1603 {
1604 struct ata_port *ap = link->ap; 1604 struct ata_port *ap = link->ap;
1605 struct ata_eh_context *ehc = &link->eh_context; 1605 struct ata_eh_context *ehc = &link->eh_context;
1606 struct ata_device *dev = link->device; 1606 struct ata_device *dev = link->device;
1607 struct ata_queued_cmd *qc; 1607 struct ata_queued_cmd *qc;
1608 struct ata_taskfile tf; 1608 struct ata_taskfile tf;
1609 int tag, rc; 1609 int tag, rc;
1610 1610
1611 /* if frozen, we can't do much */ 1611 /* if frozen, we can't do much */
1612 if (ap->pflags & ATA_PFLAG_FROZEN) 1612 if (ap->pflags & ATA_PFLAG_FROZEN)
1613 return; 1613 return;
1614 1614
1615 /* is it NCQ device error? */ 1615 /* is it NCQ device error? */
1616 if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV)) 1616 if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
1617 return; 1617 return;
1618 1618
1619 /* has LLDD analyzed already? */ 1619 /* has LLDD analyzed already? */
1620 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1620 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1621 qc = __ata_qc_from_tag(ap, tag); 1621 qc = __ata_qc_from_tag(ap, tag);
1622 1622
1623 if (!(qc->flags & ATA_QCFLAG_FAILED)) 1623 if (!(qc->flags & ATA_QCFLAG_FAILED))
1624 continue; 1624 continue;
1625 1625
1626 if (qc->err_mask) 1626 if (qc->err_mask)
1627 return; 1627 return;
1628 } 1628 }
1629 1629
1630 /* okay, this error is ours */ 1630 /* okay, this error is ours */
1631 memset(&tf, 0, sizeof(tf)); 1631 memset(&tf, 0, sizeof(tf));
1632 rc = ata_eh_read_log_10h(dev, &tag, &tf); 1632 rc = ata_eh_read_log_10h(dev, &tag, &tf);
1633 if (rc) { 1633 if (rc) {
1634 ata_link_printk(link, KERN_ERR, "failed to read log page 10h " 1634 ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
1635 "(errno=%d)\n", rc); 1635 "(errno=%d)\n", rc);
1636 return; 1636 return;
1637 } 1637 }
1638 1638
1639 if (!(link->sactive & (1 << tag))) { 1639 if (!(link->sactive & (1 << tag))) {
1640 ata_link_printk(link, KERN_ERR, "log page 10h reported " 1640 ata_link_printk(link, KERN_ERR, "log page 10h reported "
1641 "inactive tag %d\n", tag); 1641 "inactive tag %d\n", tag);
1642 return; 1642 return;
1643 } 1643 }
1644 1644
1645 /* we've got the perpetrator, condemn it */ 1645 /* we've got the perpetrator, condemn it */
1646 qc = __ata_qc_from_tag(ap, tag); 1646 qc = __ata_qc_from_tag(ap, tag);
1647 memcpy(&qc->result_tf, &tf, sizeof(tf)); 1647 memcpy(&qc->result_tf, &tf, sizeof(tf));
1648 qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48; 1648 qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1649 qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ; 1649 qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
1650 ehc->i.err_mask &= ~AC_ERR_DEV; 1650 ehc->i.err_mask &= ~AC_ERR_DEV;
1651 } 1651 }
1652 1652
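The sanity check against link->sactive relies on SActive being a bitmap of in-flight NCQ tags, so a tag reported by log page 10h must have its bit set. A small illustration with made-up values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t sactive = (1U << 3) | (1U << 7);  /* tags 3 and 7 in flight */
        int tag = 7;    /* tag reported by log page 10h (hypothetical) */

        if (!(sactive & (1U << tag)))
                printf("log page 10h reported inactive tag %d\n", tag);
        else
                printf("tag %d is outstanding; condemn that qc\n", tag);
        return 0;
}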
1653 /** 1653 /**
1654 * ata_eh_analyze_tf - analyze taskfile of a failed qc 1654 * ata_eh_analyze_tf - analyze taskfile of a failed qc
1655 * @qc: qc to analyze 1655 * @qc: qc to analyze
1656 * @tf: Taskfile registers to analyze 1656 * @tf: Taskfile registers to analyze
1657 * 1657 *
1658 * Analyze taskfile of @qc and further determine cause of 1658 * Analyze taskfile of @qc and further determine cause of
1659 * failure. This function also requests ATAPI sense data if 1659 * failure. This function also requests ATAPI sense data if
1660 * available. 1660 * available.
1661 * 1661 *
1662 * LOCKING: 1662 * LOCKING:
1663 * Kernel thread context (may sleep). 1663 * Kernel thread context (may sleep).
1664 * 1664 *
1665 * RETURNS: 1665 * RETURNS:
1666 * Determined recovery action. 1666 * Determined recovery action.
1667 */ 1667 */
1668 static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, 1668 static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1669 const struct ata_taskfile *tf) 1669 const struct ata_taskfile *tf)
1670 { 1670 {
1671 unsigned int tmp, action = 0; 1671 unsigned int tmp, action = 0;
1672 u8 stat = tf->command, err = tf->feature; 1672 u8 stat = tf->command, err = tf->feature;
1673 1673
1674 if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) { 1674 if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
1675 qc->err_mask |= AC_ERR_HSM; 1675 qc->err_mask |= AC_ERR_HSM;
1676 return ATA_EH_RESET; 1676 return ATA_EH_RESET;
1677 } 1677 }
1678 1678
1679 if (stat & (ATA_ERR | ATA_DF)) 1679 if (stat & (ATA_ERR | ATA_DF))
1680 qc->err_mask |= AC_ERR_DEV; 1680 qc->err_mask |= AC_ERR_DEV;
1681 else 1681 else
1682 return 0; 1682 return 0;
1683 1683
1684 switch (qc->dev->class) { 1684 switch (qc->dev->class) {
1685 case ATA_DEV_ATA: 1685 case ATA_DEV_ATA:
1686 if (err & ATA_ICRC) 1686 if (err & ATA_ICRC)
1687 qc->err_mask |= AC_ERR_ATA_BUS; 1687 qc->err_mask |= AC_ERR_ATA_BUS;
1688 if (err & ATA_UNC) 1688 if (err & ATA_UNC)
1689 qc->err_mask |= AC_ERR_MEDIA; 1689 qc->err_mask |= AC_ERR_MEDIA;
1690 if (err & ATA_IDNF) 1690 if (err & ATA_IDNF)
1691 qc->err_mask |= AC_ERR_INVALID; 1691 qc->err_mask |= AC_ERR_INVALID;
1692 break; 1692 break;
1693 1693
1694 case ATA_DEV_ATAPI: 1694 case ATA_DEV_ATAPI:
1695 if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) { 1695 if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
1696 tmp = atapi_eh_request_sense(qc->dev, 1696 tmp = atapi_eh_request_sense(qc->dev,
1697 qc->scsicmd->sense_buffer, 1697 qc->scsicmd->sense_buffer,
1698 qc->result_tf.feature >> 4); 1698 qc->result_tf.feature >> 4);
1699 if (!tmp) { 1699 if (!tmp) {
1700 /* ATA_QCFLAG_SENSE_VALID is used to 1700 /* ATA_QCFLAG_SENSE_VALID is used to
1701 * tell atapi_qc_complete() that sense 1701 * tell atapi_qc_complete() that sense
1702 * data is already valid. 1702 * data is already valid.
1703 * 1703 *
1704 * TODO: interpret sense data and set 1704 * TODO: interpret sense data and set
1705 * appropriate err_mask. 1705 * appropriate err_mask.
1706 */ 1706 */
1707 qc->flags |= ATA_QCFLAG_SENSE_VALID; 1707 qc->flags |= ATA_QCFLAG_SENSE_VALID;
1708 } else 1708 } else
1709 qc->err_mask |= tmp; 1709 qc->err_mask |= tmp;
1710 } 1710 }
1711 } 1711 }
1712 1712
1713 if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS)) 1713 if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
1714 action |= ATA_EH_RESET; 1714 action |= ATA_EH_RESET;
1715 1715
1716 return action; 1716 return action;
1717 } 1717 }
1718 1718
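The decode above hinges on two registers: a status with anything other than a clean DRDY is a host state machine problem, and once ERR/DF is seen the error register distinguishes bus, media, and addressing failures. A standalone sketch using the standard ATA bit values (BSY 0x80, DRDY 0x40, DF 0x20, DRQ 0x08, ERR 0x01; ICRC 0x80, UNC 0x40, IDNF 0x10):

#include <stdio.h>
#include <stdint.h>

#define ATA_BUSY 0x80
#define ATA_DRDY 0x40
#define ATA_DF   0x20
#define ATA_DRQ  0x08
#define ATA_ERR  0x01

#define ATA_ICRC 0x80   /* interface CRC error */
#define ATA_UNC  0x40   /* uncorrectable media error */
#define ATA_IDNF 0x10   /* ID not found */

int main(void)
{
        uint8_t stat = ATA_DRDY | ATA_ERR;      /* sample failed status */
        uint8_t err = ATA_UNC;                  /* sample error register */

        /* anything but a clean DRDY is a host state machine problem */
        if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
                printf("HSM violation -> reset\n");
                return 0;
        }
        if (stat & (ATA_ERR | ATA_DF)) {
                if (err & ATA_ICRC)
                        printf("bus error (CRC)\n");
                if (err & ATA_UNC)
                        printf("media error\n");
                if (err & ATA_IDNF)
                        printf("invalid address\n");
        }
        return 0;
}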
1719 static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask, 1719 static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
1720 int *xfer_ok) 1720 int *xfer_ok)
1721 { 1721 {
1722 int base = 0; 1722 int base = 0;
1723 1723
1724 if (!(eflags & ATA_EFLAG_DUBIOUS_XFER)) 1724 if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
1725 *xfer_ok = 1; 1725 *xfer_ok = 1;
1726 1726
1727 if (!*xfer_ok) 1727 if (!*xfer_ok)
1728 base = ATA_ECAT_DUBIOUS_NONE; 1728 base = ATA_ECAT_DUBIOUS_NONE;
1729 1729
1730 if (err_mask & AC_ERR_ATA_BUS) 1730 if (err_mask & AC_ERR_ATA_BUS)
1731 return base + ATA_ECAT_ATA_BUS; 1731 return base + ATA_ECAT_ATA_BUS;
1732 1732
1733 if (err_mask & AC_ERR_TIMEOUT) 1733 if (err_mask & AC_ERR_TIMEOUT)
1734 return base + ATA_ECAT_TOUT_HSM; 1734 return base + ATA_ECAT_TOUT_HSM;
1735 1735
1736 if (eflags & ATA_EFLAG_IS_IO) { 1736 if (eflags & ATA_EFLAG_IS_IO) {
1737 if (err_mask & AC_ERR_HSM) 1737 if (err_mask & AC_ERR_HSM)
1738 return base + ATA_ECAT_TOUT_HSM; 1738 return base + ATA_ECAT_TOUT_HSM;
1739 if ((err_mask & 1739 if ((err_mask &
1740 (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV) 1740 (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
1741 return base + ATA_ECAT_UNK_DEV; 1741 return base + ATA_ECAT_UNK_DEV;
1742 } 1742 }
1743 1743
1744 return 0; 1744 return 0;
1745 } 1745 }
1746 1746
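The base-offset trick above works because the DUBIOUS_* categories sit exactly ATA_ECAT_DUBIOUS_NONE above their verified counterparts. A quick check of that arithmetic, with enum values assumed to match the libata.h of this era:

#include <stdio.h>

enum {
        ATA_ECAT_NONE = 0,
        ATA_ECAT_ATA_BUS,
        ATA_ECAT_TOUT_HSM,
        ATA_ECAT_UNK_DEV,
        ATA_ECAT_DUBIOUS_NONE,
        ATA_ECAT_DUBIOUS_ATA_BUS,
        ATA_ECAT_DUBIOUS_TOUT_HSM,
        ATA_ECAT_DUBIOUS_UNK_DEV,
        ATA_ECAT_NR,
};

int main(void)
{
        int base = ATA_ECAT_DUBIOUS_NONE;       /* xfer not yet verified */

        /* an ATA bus error before any verified transfer lands in the
         * corresponding "dubious" bucket */
        printf("%d == %d\n", base + ATA_ECAT_ATA_BUS,
               ATA_ECAT_DUBIOUS_ATA_BUS);
        return 0;
}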
1747 struct speed_down_verdict_arg { 1747 struct speed_down_verdict_arg {
1748 u64 since; 1748 u64 since;
1749 int xfer_ok; 1749 int xfer_ok;
1750 int nr_errors[ATA_ECAT_NR]; 1750 int nr_errors[ATA_ECAT_NR];
1751 }; 1751 };
1752 1752
1753 static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg) 1753 static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
1754 { 1754 {
1755 struct speed_down_verdict_arg *arg = void_arg; 1755 struct speed_down_verdict_arg *arg = void_arg;
1756 int cat; 1756 int cat;
1757 1757
1758 if (ent->timestamp < arg->since) 1758 if (ent->timestamp < arg->since)
1759 return -1; 1759 return -1;
1760 1760
1761 cat = ata_eh_categorize_error(ent->eflags, ent->err_mask, 1761 cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
1762 &arg->xfer_ok); 1762 &arg->xfer_ok);
1763 arg->nr_errors[cat]++; 1763 arg->nr_errors[cat]++;
1764 1764
1765 return 0; 1765 return 0;
1766 } 1766 }
1767 1767
1768 /** 1768 /**
1769 * ata_eh_speed_down_verdict - Determine speed down verdict 1769 * ata_eh_speed_down_verdict - Determine speed down verdict
1770 * @dev: Device of interest 1770 * @dev: Device of interest
1771 * 1771 *
1772 * This function examines the error ring of @dev and determines 1772 * This function examines the error ring of @dev and determines
1773 * whether NCQ needs to be turned off, the transfer speed should 1773 * whether NCQ needs to be turned off, the transfer speed should
1774 * be stepped down, or a fallback to PIO is necessary. 1774 * be stepped down, or a fallback to PIO is necessary.
1775 * 1775 *
1776 * ECAT_ATA_BUS : ATA_BUS error for any command 1776 * ECAT_ATA_BUS : ATA_BUS error for any command
1777 * 1777 *
1778 * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for 1778 * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for
1779 * IO commands 1779 * IO commands
1780 * 1780 *
1781 * ECAT_UNK_DEV : Unknown DEV error for IO commands 1781 * ECAT_UNK_DEV : Unknown DEV error for IO commands
1782 * 1782 *
1783 * ECAT_DUBIOUS_* : Identical to above three but occurred while 1783 * ECAT_DUBIOUS_* : Identical to above three but occurred while
1784 * data transfer hasn't been verified. 1784 * data transfer hasn't been verified.
1785 * 1785 *
1786 * Verdicts are 1786 * Verdicts are
1787 * 1787 *
1788 * NCQ_OFF : Turn off NCQ. 1788 * NCQ_OFF : Turn off NCQ.
1789 * 1789 *
1790 * SPEED_DOWN : Speed down transfer speed but don't fall back 1790 * SPEED_DOWN : Speed down transfer speed but don't fall back
1791 * to PIO. 1791 * to PIO.
1792 * 1792 *
1793 * FALLBACK_TO_PIO : Fall back to PIO. 1793 * FALLBACK_TO_PIO : Fall back to PIO.
1794 * 1794 *
1795 * Even if multiple verdicts are returned, only one action is 1795 * Even if multiple verdicts are returned, only one action is
1796 * taken per error. An action triggered by non-DUBIOUS errors 1796 * taken per error. An action triggered by non-DUBIOUS errors
1797 * clears ering, while one triggered by DUBIOUS_* errors doesn't. 1797 * clears ering, while one triggered by DUBIOUS_* errors doesn't.
1798 * This is to expedite speed down decisions right after a device is 1798 * This is to expedite speed down decisions right after a device is
1799 * initially configured. 1799 * initially configured.
1800 * 1800 *
1801 * The following are speed down rules. #1 and #2 deal with 1801 * The following are speed down rules. #1 and #2 deal with
1802 * DUBIOUS errors. 1802 * DUBIOUS errors.
1803 * 1803 *
1804 * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors 1804 * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
1805 * occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO. 1805 * occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
1806 * 1806 *
1807 * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors 1807 * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
1808 * occurred during last 5 mins, NCQ_OFF. 1808 * occurred during last 5 mins, NCQ_OFF.
1809 * 1809 *
1810 * 3. If more than 6 ATA_BUS, TOUT_HSM or UNK_DEV errors 1810 * 3. If more than 6 ATA_BUS, TOUT_HSM or UNK_DEV errors
1811 * occurred during last 5 mins, FALLBACK_TO_PIO. 1811 * occurred during last 5 mins, FALLBACK_TO_PIO.
1812 * 1812 *
1813 * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred 1813 * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
1814 * during last 10 mins, NCQ_OFF. 1814 * during last 10 mins, NCQ_OFF.
1815 * 1815 *
1816 * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6 1816 * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
1817 * UNK_DEV errors occurred during last 10 mins, SPEED_DOWN. 1817 * UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
1818 * 1818 *
1819 * LOCKING: 1819 * LOCKING:
1820 * Inherited from caller. 1820 * Inherited from caller.
1821 * 1821 *
1822 * RETURNS: 1822 * RETURNS:
1823 * OR of ATA_EH_SPDN_* flags. 1823 * OR of ATA_EH_SPDN_* flags.
1824 */ 1824 */
1825 static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev) 1825 static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
1826 { 1826 {
1827 const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ; 1827 const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
1828 u64 j64 = get_jiffies_64(); 1828 u64 j64 = get_jiffies_64();
1829 struct speed_down_verdict_arg arg; 1829 struct speed_down_verdict_arg arg;
1830 unsigned int verdict = 0; 1830 unsigned int verdict = 0;
1831 1831
1832 /* scan past 5 mins of error history */ 1832 /* scan past 5 mins of error history */
1833 memset(&arg, 0, sizeof(arg)); 1833 memset(&arg, 0, sizeof(arg));
1834 arg.since = j64 - min(j64, j5mins); 1834 arg.since = j64 - min(j64, j5mins);
1835 ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 1835 ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
1836 1836
1837 if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] + 1837 if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
1838 arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1) 1838 arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
1839 verdict |= ATA_EH_SPDN_SPEED_DOWN | 1839 verdict |= ATA_EH_SPDN_SPEED_DOWN |
1840 ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS; 1840 ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;
1841 1841
1842 if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] + 1842 if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
1843 arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1) 1843 arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
1844 verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS; 1844 verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;
1845 1845
1846 if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 1846 if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
1847 arg.nr_errors[ATA_ECAT_TOUT_HSM] + 1847 arg.nr_errors[ATA_ECAT_TOUT_HSM] +
1848 arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 1848 arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
1849 verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO; 1849 verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
1850 1850
1851 /* scan past 10 mins of error history */ 1851 /* scan past 10 mins of error history */
1852 memset(&arg, 0, sizeof(arg)); 1852 memset(&arg, 0, sizeof(arg));
1853 arg.since = j64 - min(j64, j10mins); 1853 arg.since = j64 - min(j64, j10mins);
1854 ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 1854 ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
1855 1855
1856 if (arg.nr_errors[ATA_ECAT_TOUT_HSM] + 1856 if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
1857 arg.nr_errors[ATA_ECAT_UNK_DEV] > 3) 1857 arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
1858 verdict |= ATA_EH_SPDN_NCQ_OFF; 1858 verdict |= ATA_EH_SPDN_NCQ_OFF;
1859 1859
1860 if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 1860 if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
1861 arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 || 1861 arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
1862 arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 1862 arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
1863 verdict |= ATA_EH_SPDN_SPEED_DOWN; 1863 verdict |= ATA_EH_SPDN_SPEED_DOWN;
1864 1864
1865 return verdict; 1865 return verdict;
1866 } 1866 }
1867 1867
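The two history windows are plain jiffies arithmetic; the min() keeps `since` from underflowing on a machine that has been up for less than the window. A sketch with made-up HZ and uptime values:

#include <stdio.h>
#include <stdint.h>

#define HZ 250ULL       /* illustrative; HZ is a build-time constant */

int main(void)
{
        uint64_t j5mins = 5ULL * 60 * HZ;
        uint64_t j64 = 40000;   /* pretend the box booted ~160s ago */
        uint64_t since;

        /* clamp so a freshly booted machine doesn't underflow */
        since = j64 - (j64 < j5mins ? j64 : j5mins);
        printf("count errors with timestamp >= %llu\n",
               (unsigned long long)since);
        return 0;
}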
1868 /** 1868 /**
1869 * ata_eh_speed_down - record error and speed down if necessary 1869 * ata_eh_speed_down - record error and speed down if necessary
1870 * @dev: Failed device 1870 * @dev: Failed device
1871 * @eflags: mask of ATA_EFLAG_* flags 1871 * @eflags: mask of ATA_EFLAG_* flags
1872 * @err_mask: err_mask of the error 1872 * @err_mask: err_mask of the error
1873 * 1873 *
1874 * Record error and examine error history to determine whether 1874 * Record error and examine error history to determine whether
1875 * adjusting transmission speed is necessary. It also sets 1875 * adjusting transmission speed is necessary. It also sets
1876 * transmission limits appropriately if such adjustment is 1876 * transmission limits appropriately if such adjustment is
1877 * necessary. 1877 * necessary.
1878 * 1878 *
1879 * LOCKING: 1879 * LOCKING:
1880 * Kernel thread context (may sleep). 1880 * Kernel thread context (may sleep).
1881 * 1881 *
1882 * RETURNS: 1882 * RETURNS:
1883 * Determined recovery action. 1883 * Determined recovery action.
1884 */ 1884 */
1885 static unsigned int ata_eh_speed_down(struct ata_device *dev, 1885 static unsigned int ata_eh_speed_down(struct ata_device *dev,
1886 unsigned int eflags, unsigned int err_mask) 1886 unsigned int eflags, unsigned int err_mask)
1887 { 1887 {
1888 struct ata_link *link = ata_dev_phys_link(dev); 1888 struct ata_link *link = ata_dev_phys_link(dev);
1889 int xfer_ok = 0; 1889 int xfer_ok = 0;
1890 unsigned int verdict; 1890 unsigned int verdict;
1891 unsigned int action = 0; 1891 unsigned int action = 0;
1892 1892
1893 /* don't bother if Cat-0 error */ 1893 /* don't bother if Cat-0 error */
1894 if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0) 1894 if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
1895 return 0; 1895 return 0;
1896 1896
1897 /* record error and determine whether speed down is necessary */ 1897 /* record error and determine whether speed down is necessary */
1898 ata_ering_record(&dev->ering, eflags, err_mask); 1898 ata_ering_record(&dev->ering, eflags, err_mask);
1899 verdict = ata_eh_speed_down_verdict(dev); 1899 verdict = ata_eh_speed_down_verdict(dev);
1900 1900
1901 /* turn off NCQ? */ 1901 /* turn off NCQ? */
1902 if ((verdict & ATA_EH_SPDN_NCQ_OFF) && 1902 if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
1903 (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ | 1903 (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
1904 ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) { 1904 ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
1905 dev->flags |= ATA_DFLAG_NCQ_OFF; 1905 dev->flags |= ATA_DFLAG_NCQ_OFF;
1906 ata_dev_printk(dev, KERN_WARNING, 1906 ata_dev_printk(dev, KERN_WARNING,
1907 "NCQ disabled due to excessive errors\n"); 1907 "NCQ disabled due to excessive errors\n");
1908 goto done; 1908 goto done;
1909 } 1909 }
1910 1910
1911 /* speed down? */ 1911 /* speed down? */
1912 if (verdict & ATA_EH_SPDN_SPEED_DOWN) { 1912 if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
1913 /* speed down SATA link speed if possible */ 1913 /* speed down SATA link speed if possible */
1914 if (sata_down_spd_limit(link, 0) == 0) { 1914 if (sata_down_spd_limit(link, 0) == 0) {
1915 action |= ATA_EH_RESET; 1915 action |= ATA_EH_RESET;
1916 goto done; 1916 goto done;
1917 } 1917 }
1918 1918
1919 /* lower transfer mode */ 1919 /* lower transfer mode */
1920 if (dev->spdn_cnt < 2) { 1920 if (dev->spdn_cnt < 2) {
1921 static const int dma_dnxfer_sel[] = 1921 static const int dma_dnxfer_sel[] =
1922 { ATA_DNXFER_DMA, ATA_DNXFER_40C }; 1922 { ATA_DNXFER_DMA, ATA_DNXFER_40C };
1923 static const int pio_dnxfer_sel[] = 1923 static const int pio_dnxfer_sel[] =
1924 { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 }; 1924 { ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
1925 int sel; 1925 int sel;
1926 1926
1927 if (dev->xfer_shift != ATA_SHIFT_PIO) 1927 if (dev->xfer_shift != ATA_SHIFT_PIO)
1928 sel = dma_dnxfer_sel[dev->spdn_cnt]; 1928 sel = dma_dnxfer_sel[dev->spdn_cnt];
1929 else 1929 else
1930 sel = pio_dnxfer_sel[dev->spdn_cnt]; 1930 sel = pio_dnxfer_sel[dev->spdn_cnt];
1931 1931
1932 dev->spdn_cnt++; 1932 dev->spdn_cnt++;
1933 1933
1934 if (ata_down_xfermask_limit(dev, sel) == 0) { 1934 if (ata_down_xfermask_limit(dev, sel) == 0) {
1935 action |= ATA_EH_RESET; 1935 action |= ATA_EH_RESET;
1936 goto done; 1936 goto done;
1937 } 1937 }
1938 } 1938 }
1939 } 1939 }
1940 1940
1941 /* Fall back to PIO? Slowing down to PIO is meaningless for 1941 /* Fall back to PIO? Slowing down to PIO is meaningless for
1942 * SATA ATA devices. Consider it only for PATA and SATAPI. 1942 * SATA ATA devices. Consider it only for PATA and SATAPI.
1943 */ 1943 */
1944 if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) && 1944 if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
1945 (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) && 1945 (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
1946 (dev->xfer_shift != ATA_SHIFT_PIO)) { 1946 (dev->xfer_shift != ATA_SHIFT_PIO)) {
1947 if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) { 1947 if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
1948 dev->spdn_cnt = 0; 1948 dev->spdn_cnt = 0;
1949 action |= ATA_EH_RESET; 1949 action |= ATA_EH_RESET;
1950 goto done; 1950 goto done;
1951 } 1951 }
1952 } 1952 }
1953 1953
1954 return 0; 1954 return 0;
1955 done: 1955 done:
1956 /* device has been slowed down, blow error history */ 1956 /* device has been slowed down, blow error history */
1957 if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS)) 1957 if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
1958 ata_ering_clear(&dev->ering); 1958 ata_ering_clear(&dev->ering);
1959 return action; 1959 return action;
1960 } 1960 }
1961 1961
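The ladder indexed by spdn_cnt means the first speed-down lowers the DMA (or PIO) mode and the second clamps harder, before FALLBACK_TO_PIO can kick in at spdn_cnt >= 2. A sketch of just the selection step; the enum values and names[] strings here are descriptive glosses, not the kernel's ATA_DNXFER_* definitions:

#include <stdio.h>

enum { DNXFER_DMA, DNXFER_40C, DNXFER_PIO, DNXFER_FORCE_PIO0 };

static const char *names[] = {
        "lower DMA mode", "cap at 40-wire limits",
        "lower PIO mode", "force PIO0",
};

int main(void)
{
        static const int dma_sel[] = { DNXFER_DMA, DNXFER_40C };
        static const int pio_sel[] = { DNXFER_PIO, DNXFER_FORCE_PIO0 };
        int spdn_cnt, using_pio = 0;    /* pretend a DMA-capable device */

        for (spdn_cnt = 0; spdn_cnt < 2; spdn_cnt++) {
                int sel = using_pio ? pio_sel[spdn_cnt] : dma_sel[spdn_cnt];
                printf("speed-down #%d: %s\n", spdn_cnt + 1, names[sel]);
        }
        return 0;
}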
1962 /** 1962 /**
1963 * ata_eh_link_autopsy - analyze error and determine recovery action 1963 * ata_eh_link_autopsy - analyze error and determine recovery action
1964 * @link: host link to perform autopsy on 1964 * @link: host link to perform autopsy on
1965 * 1965 *
1966 * Analyze why @link failed and determine which recovery actions 1966 * Analyze why @link failed and determine which recovery actions
1967 * are needed. This function also sets more detailed AC_ERR_* 1967 * are needed. This function also sets more detailed AC_ERR_*
1968 * values and fills sense data for ATAPI CHECK SENSE. 1968 * values and fills sense data for ATAPI CHECK SENSE.
1969 * 1969 *
1970 * LOCKING: 1970 * LOCKING:
1971 * Kernel thread context (may sleep). 1971 * Kernel thread context (may sleep).
1972 */ 1972 */
1973 static void ata_eh_link_autopsy(struct ata_link *link) 1973 static void ata_eh_link_autopsy(struct ata_link *link)
1974 { 1974 {
1975 struct ata_port *ap = link->ap; 1975 struct ata_port *ap = link->ap;
1976 struct ata_eh_context *ehc = &link->eh_context; 1976 struct ata_eh_context *ehc = &link->eh_context;
1977 struct ata_device *dev; 1977 struct ata_device *dev;
1978 unsigned int all_err_mask = 0, eflags = 0; 1978 unsigned int all_err_mask = 0, eflags = 0;
1979 int tag; 1979 int tag;
1980 u32 serror; 1980 u32 serror;
1981 int rc; 1981 int rc;
1982 1982
1983 DPRINTK("ENTER\n"); 1983 DPRINTK("ENTER\n");
1984 1984
1985 if (ehc->i.flags & ATA_EHI_NO_AUTOPSY) 1985 if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
1986 return; 1986 return;
1987 1987
1988 /* obtain and analyze SError */ 1988 /* obtain and analyze SError */
1989 rc = sata_scr_read(link, SCR_ERROR, &serror); 1989 rc = sata_scr_read(link, SCR_ERROR, &serror);
1990 if (rc == 0) { 1990 if (rc == 0) {
1991 ehc->i.serror |= serror; 1991 ehc->i.serror |= serror;
1992 ata_eh_analyze_serror(link); 1992 ata_eh_analyze_serror(link);
1993 } else if (rc != -EOPNOTSUPP) { 1993 } else if (rc != -EOPNOTSUPP) {
1994 /* SError read failed, force reset and probing */ 1994 /* SError read failed, force reset and probing */
1995 ehc->i.probe_mask |= ATA_ALL_DEVICES; 1995 ehc->i.probe_mask |= ATA_ALL_DEVICES;
1996 ehc->i.action |= ATA_EH_RESET; 1996 ehc->i.action |= ATA_EH_RESET;
1997 ehc->i.err_mask |= AC_ERR_OTHER; 1997 ehc->i.err_mask |= AC_ERR_OTHER;
1998 } 1998 }
1999 1999
2000 /* analyze NCQ failure */ 2000 /* analyze NCQ failure */
2001 ata_eh_analyze_ncq_error(link); 2001 ata_eh_analyze_ncq_error(link);
2002 2002
2003 /* any real error trumps AC_ERR_OTHER */ 2003 /* any real error trumps AC_ERR_OTHER */
2004 if (ehc->i.err_mask & ~AC_ERR_OTHER) 2004 if (ehc->i.err_mask & ~AC_ERR_OTHER)
2005 ehc->i.err_mask &= ~AC_ERR_OTHER; 2005 ehc->i.err_mask &= ~AC_ERR_OTHER;
2006 2006
2007 all_err_mask |= ehc->i.err_mask; 2007 all_err_mask |= ehc->i.err_mask;
2008 2008
2009 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2009 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2010 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2010 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2011 2011
2012 if (!(qc->flags & ATA_QCFLAG_FAILED) || 2012 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2013 ata_dev_phys_link(qc->dev) != link) 2013 ata_dev_phys_link(qc->dev) != link)
2014 continue; 2014 continue;
2015 2015
2016 /* inherit upper level err_mask */ 2016 /* inherit upper level err_mask */
2017 qc->err_mask |= ehc->i.err_mask; 2017 qc->err_mask |= ehc->i.err_mask;
2018 2018
2019 /* analyze TF */ 2019 /* analyze TF */
2020 ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf); 2020 ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
2021 2021
2022 /* DEV errors are probably spurious in the case of an ATA_BUS error */ 2022 /* DEV errors are probably spurious in the case of an ATA_BUS error */
2023 if (qc->err_mask & AC_ERR_ATA_BUS) 2023 if (qc->err_mask & AC_ERR_ATA_BUS)
2024 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA | 2024 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
2025 AC_ERR_INVALID); 2025 AC_ERR_INVALID);
2026 2026
2027 /* any real error trumps unknown error */ 2027 /* any real error trumps unknown error */
2028 if (qc->err_mask & ~AC_ERR_OTHER) 2028 if (qc->err_mask & ~AC_ERR_OTHER)
2029 qc->err_mask &= ~AC_ERR_OTHER; 2029 qc->err_mask &= ~AC_ERR_OTHER;
2030 2030
2031 /* SENSE_VALID trumps dev/unknown error and revalidation */ 2031 /* SENSE_VALID trumps dev/unknown error and revalidation */
2032 if (qc->flags & ATA_QCFLAG_SENSE_VALID) 2032 if (qc->flags & ATA_QCFLAG_SENSE_VALID)
2033 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); 2033 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
2034 2034
2035 /* determine whether the command is worth retrying */ 2035 /* determine whether the command is worth retrying */
2036 if (qc->flags & ATA_QCFLAG_IO || 2036 if (qc->flags & ATA_QCFLAG_IO ||
2037 (!(qc->err_mask & AC_ERR_INVALID) && 2037 (!(qc->err_mask & AC_ERR_INVALID) &&
2038 qc->err_mask != AC_ERR_DEV)) 2038 qc->err_mask != AC_ERR_DEV))
2039 qc->flags |= ATA_QCFLAG_RETRY; 2039 qc->flags |= ATA_QCFLAG_RETRY;
2040 2040
2041 /* accumulate error info */ 2041 /* accumulate error info */
2042 ehc->i.dev = qc->dev; 2042 ehc->i.dev = qc->dev;
2043 all_err_mask |= qc->err_mask; 2043 all_err_mask |= qc->err_mask;
2044 if (qc->flags & ATA_QCFLAG_IO) 2044 if (qc->flags & ATA_QCFLAG_IO)
2045 eflags |= ATA_EFLAG_IS_IO; 2045 eflags |= ATA_EFLAG_IS_IO;
2046 } 2046 }
2047 2047
2048 /* enforce default EH actions */ 2048 /* enforce default EH actions */
2049 if (ap->pflags & ATA_PFLAG_FROZEN || 2049 if (ap->pflags & ATA_PFLAG_FROZEN ||
2050 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) 2050 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
2051 ehc->i.action |= ATA_EH_RESET; 2051 ehc->i.action |= ATA_EH_RESET;
2052 else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) || 2052 else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
2053 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV))) 2053 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
2054 ehc->i.action |= ATA_EH_REVALIDATE; 2054 ehc->i.action |= ATA_EH_REVALIDATE;
2055 2055
2056 /* If we have offending qcs and the associated failed device, 2056 /* If we have offending qcs and the associated failed device,
2057 * perform per-dev EH action only on the offending device. 2057 * perform per-dev EH action only on the offending device.
2058 */ 2058 */
2059 if (ehc->i.dev) { 2059 if (ehc->i.dev) {
2060 ehc->i.dev_action[ehc->i.dev->devno] |= 2060 ehc->i.dev_action[ehc->i.dev->devno] |=
2061 ehc->i.action & ATA_EH_PERDEV_MASK; 2061 ehc->i.action & ATA_EH_PERDEV_MASK;
2062 ehc->i.action &= ~ATA_EH_PERDEV_MASK; 2062 ehc->i.action &= ~ATA_EH_PERDEV_MASK;
2063 } 2063 }
2064 2064
2065 /* propagate timeout to host link */ 2065 /* propagate timeout to host link */
2066 if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link)) 2066 if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
2067 ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT; 2067 ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;
2068 2068
2069 /* record error and consider speeding down */ 2069 /* record error and consider speeding down */
2070 dev = ehc->i.dev; 2070 dev = ehc->i.dev;
2071 if (!dev && ((ata_link_max_devices(link) == 1 && 2071 if (!dev && ((ata_link_max_devices(link) == 1 &&
2072 ata_dev_enabled(link->device)))) 2072 ata_dev_enabled(link->device))))
2073 dev = link->device; 2073 dev = link->device;
2074 2074
2075 if (dev) { 2075 if (dev) {
2076 if (dev->flags & ATA_DFLAG_DUBIOUS_XFER) 2076 if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
2077 eflags |= ATA_EFLAG_DUBIOUS_XFER; 2077 eflags |= ATA_EFLAG_DUBIOUS_XFER;
2078 ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); 2078 ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
2079 } 2079 }
2080 2080
2081 DPRINTK("EXIT\n"); 2081 DPRINTK("EXIT\n");
2082 } 2082 }
2083 2083
2084 /** 2084 /**
2085 * ata_eh_autopsy - analyze error and determine recovery action 2085 * ata_eh_autopsy - analyze error and determine recovery action
2086 * @ap: host port to perform autopsy on 2086 * @ap: host port to perform autopsy on
2087 * 2087 *
2088 * Analyze all links of @ap and determine why they failed and 2088 * Analyze all links of @ap and determine why they failed and
2089 * which recovery actions are needed. 2089 * which recovery actions are needed.
2090 * 2090 *
2091 * LOCKING: 2091 * LOCKING:
2092 * Kernel thread context (may sleep). 2092 * Kernel thread context (may sleep).
2093 */ 2093 */
2094 void ata_eh_autopsy(struct ata_port *ap) 2094 void ata_eh_autopsy(struct ata_port *ap)
2095 { 2095 {
2096 struct ata_link *link; 2096 struct ata_link *link;
2097 2097
2098 ata_for_each_link(link, ap, EDGE) 2098 ata_for_each_link(link, ap, EDGE)
2099 ata_eh_link_autopsy(link); 2099 ata_eh_link_autopsy(link);
2100 2100
2101 /* Handle the frigging slave link. Autopsy is done similarly 2101 /* Handle the frigging slave link. Autopsy is done similarly
2102 * but actions and flags are transferred over to the master 2102 * but actions and flags are transferred over to the master
2103 * link and handled from there. 2103 * link and handled from there.
2104 */ 2104 */
2105 if (ap->slave_link) { 2105 if (ap->slave_link) {
2106 struct ata_eh_context *mehc = &ap->link.eh_context; 2106 struct ata_eh_context *mehc = &ap->link.eh_context;
2107 struct ata_eh_context *sehc = &ap->slave_link->eh_context; 2107 struct ata_eh_context *sehc = &ap->slave_link->eh_context;
2108 2108
2109 /* transfer control flags from master to slave */ 2109 /* transfer control flags from master to slave */
2110 sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK; 2110 sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
2111 2111
2112 /* perform autopsy on the slave link */ 2112 /* perform autopsy on the slave link */
2113 ata_eh_link_autopsy(ap->slave_link); 2113 ata_eh_link_autopsy(ap->slave_link);
2114 2114
2115 /* transfer actions from slave to master and clear slave */ 2115 /* transfer actions from slave to master and clear slave */
2116 ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2116 ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2117 mehc->i.action |= sehc->i.action; 2117 mehc->i.action |= sehc->i.action;
2118 mehc->i.dev_action[1] |= sehc->i.dev_action[1]; 2118 mehc->i.dev_action[1] |= sehc->i.dev_action[1];
2119 mehc->i.flags |= sehc->i.flags; 2119 mehc->i.flags |= sehc->i.flags;
2120 ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2120 ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2121 } 2121 }
2122 2122
2123 /* Autopsy of fanout ports can affect host link autopsy. 2123 /* Autopsy of fanout ports can affect host link autopsy.
2124 * Perform host link autopsy last. 2124 * Perform host link autopsy last.
2125 */ 2125 */
2126 if (sata_pmp_attached(ap)) 2126 if (sata_pmp_attached(ap))
2127 ata_eh_link_autopsy(&ap->link); 2127 ata_eh_link_autopsy(&ap->link);
2128 } 2128 }
2129 2129
2130 /** 2130 /**
2131 * ata_get_cmd_descript - get description for ATA command 2131 * ata_get_cmd_descript - get description for ATA command
2132 * @command: ATA command code to get description for 2132 * @command: ATA command code to get description for
2133 * 2133 *
2134 * Return a textual description of the given command, or NULL if the 2134 * Return a textual description of the given command, or NULL if the
2135 * command is not known. 2135 * command is not known.
2136 * 2136 *
2137 * LOCKING: 2137 * LOCKING:
2138 * None 2138 * None
2139 */ 2139 */
2140 const char *ata_get_cmd_descript(u8 command) 2140 const char *ata_get_cmd_descript(u8 command)
2141 { 2141 {
2142 #ifdef CONFIG_ATA_VERBOSE_ERROR 2142 #ifdef CONFIG_ATA_VERBOSE_ERROR
2143 static const struct 2143 static const struct
2144 { 2144 {
2145 u8 command; 2145 u8 command;
2146 const char *text; 2146 const char *text;
2147 } cmd_descr[] = { 2147 } cmd_descr[] = {
2148 { ATA_CMD_DEV_RESET, "DEVICE RESET" }, 2148 { ATA_CMD_DEV_RESET, "DEVICE RESET" },
2149 { ATA_CMD_CHK_POWER, "CHECK POWER MODE" }, 2149 { ATA_CMD_CHK_POWER, "CHECK POWER MODE" },
2150 { ATA_CMD_STANDBY, "STANDBY" }, 2150 { ATA_CMD_STANDBY, "STANDBY" },
2151 { ATA_CMD_IDLE, "IDLE" }, 2151 { ATA_CMD_IDLE, "IDLE" },
2152 { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" }, 2152 { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" },
2153 { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" }, 2153 { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" },
2154 { ATA_CMD_NOP, "NOP" }, 2154 { ATA_CMD_NOP, "NOP" },
2155 { ATA_CMD_FLUSH, "FLUSH CACHE" }, 2155 { ATA_CMD_FLUSH, "FLUSH CACHE" },
2156 { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" }, 2156 { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" },
2157 { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" }, 2157 { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" },
2158 { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" }, 2158 { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" },
2159 { ATA_CMD_SERVICE, "SERVICE" }, 2159 { ATA_CMD_SERVICE, "SERVICE" },
2160 { ATA_CMD_READ, "READ DMA" }, 2160 { ATA_CMD_READ, "READ DMA" },
2161 { ATA_CMD_READ_EXT, "READ DMA EXT" }, 2161 { ATA_CMD_READ_EXT, "READ DMA EXT" },
2162 { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" }, 2162 { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" },
2163 { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" }, 2163 { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" },
2164 { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" }, 2164 { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" },
2165 { ATA_CMD_WRITE, "WRITE DMA" }, 2165 { ATA_CMD_WRITE, "WRITE DMA" },
2166 { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" }, 2166 { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" },
2167 { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" }, 2167 { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" },
2168 { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" }, 2168 { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" },
2169 { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" }, 2169 { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
2170 { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" }, 2170 { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" },
2171 { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" }, 2171 { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
2172 { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" }, 2172 { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
2173 { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" }, 2173 { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
2174 { ATA_CMD_PIO_READ, "READ SECTOR(S)" }, 2174 { ATA_CMD_PIO_READ, "READ SECTOR(S)" },
2175 { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" }, 2175 { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" },
2176 { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" }, 2176 { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" },
2177 { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" }, 2177 { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" },
2178 { ATA_CMD_READ_MULTI, "READ MULTIPLE" }, 2178 { ATA_CMD_READ_MULTI, "READ MULTIPLE" },
2179 { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" }, 2179 { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" },
2180 { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" }, 2180 { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" },
2181 { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" }, 2181 { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" },
2182 { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" }, 2182 { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" },
2183 { ATA_CMD_SET_FEATURES, "SET FEATURES" }, 2183 { ATA_CMD_SET_FEATURES, "SET FEATURES" },
2184 { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" }, 2184 { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" },
2185 { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" }, 2185 { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" },
2186 { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" }, 2186 { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" },
2187 { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" }, 2187 { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" },
2188 { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" }, 2188 { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" },
2189 { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" }, 2189 { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" },
2190 { ATA_CMD_SLEEP, "SLEEP" }, 2190 { ATA_CMD_SLEEP, "SLEEP" },
2191 { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" }, 2191 { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" },
2192 { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" }, 2192 { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" },
2193 { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" }, 2193 { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" },
2194 { ATA_CMD_SET_MAX, "SET MAX ADDRESS" }, 2194 { ATA_CMD_SET_MAX, "SET MAX ADDRESS" },
2195 { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" }, 2195 { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" },
2196 { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" }, 2196 { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" },
2197 { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" }, 2197 { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" },
2198 { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" }, 2198 { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" },
2199 { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" }, 2199 { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" },
2200 { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" }, 2200 { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" },
2201 { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" }, 2201 { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" },
2202 { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" }, 2202 { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" },
2203 { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" }, 2203 { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" },
2204 { ATA_CMD_PMP_READ, "READ BUFFER" }, 2204 { ATA_CMD_PMP_READ, "READ BUFFER" },
2205 { ATA_CMD_PMP_WRITE, "WRITE BUFFER" }, 2205 { ATA_CMD_PMP_WRITE, "WRITE BUFFER" },
2206 { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" }, 2206 { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" },
2207 { ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" }, 2207 { ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" },
2208 { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" }, 2208 { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" },
2209 { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" }, 2209 { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" },
2210 { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" }, 2210 { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" },
2211 { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" }, 2211 { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" },
2212 { ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" }, 2212 { ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" },
2213 { ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" }, 2213 { ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" },
2214 { ATA_CMD_SMART, "SMART" }, 2214 { ATA_CMD_SMART, "SMART" },
2215 { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" }, 2215 { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" },
2216 { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" }, 2216 { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" },
2217 { ATA_CMD_DSM, "DATA SET MANAGEMENT" }, 2217 { ATA_CMD_DSM, "DATA SET MANAGEMENT" },
2218 { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" }, 2218 { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" },
2219 { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" }, 2219 { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" },
2220 { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" }, 2220 { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" },
2221 { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" }, 2221 { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" },
2222 { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" }, 2222 { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" },
2223 { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" }, 2223 { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" },
2224 { ATA_CMD_READ_LONG, "READ LONG (with retries)" }, 2224 { ATA_CMD_READ_LONG, "READ LONG (with retries)" },
2225 { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" }, 2225 { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" },
2226 { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" }, 2226 { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" },
2227 { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" }, 2227 { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" },
2228 { ATA_CMD_RESTORE, "RECALIBRATE" }, 2228 { ATA_CMD_RESTORE, "RECALIBRATE" },
2229 { 0, NULL } /* terminate list */ 2229 { 0, NULL } /* terminate list */
2230 }; 2230 };
2231 2231
2232 unsigned int i; 2232 unsigned int i;
2233 for (i = 0; cmd_descr[i].text; i++) 2233 for (i = 0; cmd_descr[i].text; i++)
2234 if (cmd_descr[i].command == command) 2234 if (cmd_descr[i].command == command)
2235 return cmd_descr[i].text; 2235 return cmd_descr[i].text;
2236 #endif 2236 #endif
2237 2237
2238 return NULL; 2238 return NULL;
2239 } 2239 }
2240 2240
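cmd_descr[] is a NULL-sentinel-terminated table scanned linearly, which keeps additions cheap at the cost of an O(n) lookup on an error path where that hardly matters. A reduced standalone version of the same pattern, with two real opcodes (READ DMA 0xc8, WRITE DMA 0xca):

#include <stdio.h>
#include <stdint.h>

struct cmd_descr {
        uint8_t command;
        const char *text;
};

/* two entries plus the NULL sentinel, mirroring cmd_descr[] above */
static const struct cmd_descr table[] = {
        { 0xc8, "READ DMA" },
        { 0xca, "WRITE DMA" },
        { 0, NULL },            /* terminate list */
};

static const char *describe(uint8_t command)
{
        int i;

        for (i = 0; table[i].text; i++)
                if (table[i].command == command)
                        return table[i].text;
        return NULL;
}

int main(void)
{
        const char *d = describe(0xca);

        printf("%s\n", d ? d : "(unknown)");
        return 0;
}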
2241 /** 2241 /**
2242 * ata_eh_link_report - report error handling to user 2242 * ata_eh_link_report - report error handling to user
2243 * @link: ATA link EH is going on 2243 * @link: ATA link EH is going on
2244 * 2244 *
2245 * Report EH to user. 2245 * Report EH to user.
2246 * 2246 *
2247 * LOCKING: 2247 * LOCKING:
2248 * None. 2248 * None.
2249 */ 2249 */
2250 static void ata_eh_link_report(struct ata_link *link) 2250 static void ata_eh_link_report(struct ata_link *link)
2251 { 2251 {
2252 struct ata_port *ap = link->ap; 2252 struct ata_port *ap = link->ap;
2253 struct ata_eh_context *ehc = &link->eh_context; 2253 struct ata_eh_context *ehc = &link->eh_context;
2254 const char *frozen, *desc; 2254 const char *frozen, *desc;
2255 char tries_buf[6]; 2255 char tries_buf[6];
2256 int tag, nr_failed = 0; 2256 int tag, nr_failed = 0;
2257 2257
2258 if (ehc->i.flags & ATA_EHI_QUIET) 2258 if (ehc->i.flags & ATA_EHI_QUIET)
2259 return; 2259 return;
2260 2260
2261 desc = NULL; 2261 desc = NULL;
2262 if (ehc->i.desc[0] != '\0') 2262 if (ehc->i.desc[0] != '\0')
2263 desc = ehc->i.desc; 2263 desc = ehc->i.desc;
2264 2264
2265 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2265 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2266 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2266 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2267 2267
2268 if (!(qc->flags & ATA_QCFLAG_FAILED) || 2268 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2269 ata_dev_phys_link(qc->dev) != link || 2269 ata_dev_phys_link(qc->dev) != link ||
2270 ((qc->flags & ATA_QCFLAG_QUIET) && 2270 ((qc->flags & ATA_QCFLAG_QUIET) &&
2271 qc->err_mask == AC_ERR_DEV)) 2271 qc->err_mask == AC_ERR_DEV))
2272 continue; 2272 continue;
2273 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask) 2273 if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
2274 continue; 2274 continue;
2275 2275
2276 nr_failed++; 2276 nr_failed++;
2277 } 2277 }
2278 2278
2279 if (!nr_failed && !ehc->i.err_mask) 2279 if (!nr_failed && !ehc->i.err_mask)
2280 return; 2280 return;
2281 2281
2282 frozen = ""; 2282 frozen = "";
2283 if (ap->pflags & ATA_PFLAG_FROZEN) 2283 if (ap->pflags & ATA_PFLAG_FROZEN)
2284 frozen = " frozen"; 2284 frozen = " frozen";
2285 2285
2286 memset(tries_buf, 0, sizeof(tries_buf)); 2286 memset(tries_buf, 0, sizeof(tries_buf));
2287 if (ap->eh_tries < ATA_EH_MAX_TRIES) 2287 if (ap->eh_tries < ATA_EH_MAX_TRIES)
2288 snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d", 2288 snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d",
2289 ap->eh_tries); 2289 ap->eh_tries);
2290 2290
2291 if (ehc->i.dev) { 2291 if (ehc->i.dev) {
2292 ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x " 2292 ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
2293 "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2293 "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2294 ehc->i.err_mask, link->sactive, ehc->i.serror, 2294 ehc->i.err_mask, link->sactive, ehc->i.serror,
2295 ehc->i.action, frozen, tries_buf); 2295 ehc->i.action, frozen, tries_buf);
2296 if (desc) 2296 if (desc)
2297 ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc); 2297 ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc);
2298 } else { 2298 } else {
2299 ata_link_printk(link, KERN_ERR, "exception Emask 0x%x " 2299 ata_link_printk(link, KERN_ERR, "exception Emask 0x%x "
2300 "SAct 0x%x SErr 0x%x action 0x%x%s%s\n", 2300 "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2301 ehc->i.err_mask, link->sactive, ehc->i.serror, 2301 ehc->i.err_mask, link->sactive, ehc->i.serror,
2302 ehc->i.action, frozen, tries_buf); 2302 ehc->i.action, frozen, tries_buf);
2303 if (desc) 2303 if (desc)
2304 ata_link_printk(link, KERN_ERR, "%s\n", desc); 2304 ata_link_printk(link, KERN_ERR, "%s\n", desc);
2305 } 2305 }
2306 2306
2307 #ifdef CONFIG_ATA_VERBOSE_ERROR 2307 #ifdef CONFIG_ATA_VERBOSE_ERROR
2308 if (ehc->i.serror) 2308 if (ehc->i.serror)
2309 ata_link_printk(link, KERN_ERR, 2309 ata_link_printk(link, KERN_ERR,
2310 "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n", 2310 "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
2311 ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "", 2311 ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
2312 ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "", 2312 ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
2313 ehc->i.serror & SERR_DATA ? "UnrecovData " : "", 2313 ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
2314 ehc->i.serror & SERR_PERSISTENT ? "Persist " : "", 2314 ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
2315 ehc->i.serror & SERR_PROTOCOL ? "Proto " : "", 2315 ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
2316 ehc->i.serror & SERR_INTERNAL ? "HostInt " : "", 2316 ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
2317 ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "", 2317 ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
2318 ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "", 2318 ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
2319 ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "", 2319 ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
2320 ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "", 2320 ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
2321 ehc->i.serror & SERR_DISPARITY ? "Dispar " : "", 2321 ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
2322 ehc->i.serror & SERR_CRC ? "BadCRC " : "", 2322 ehc->i.serror & SERR_CRC ? "BadCRC " : "",
2323 ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "", 2323 ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
2324 ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "", 2324 ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
2325 ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", 2325 ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
2326 ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", 2326 ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
2327 ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); 2327 ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
2328 #endif 2328 #endif
2329 2329
2330 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2330 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2331 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2331 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2332 struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; 2332 struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
2333 const u8 *cdb = qc->cdb; 2333 const u8 *cdb = qc->cdb;
2334 char data_buf[20] = ""; 2334 char data_buf[20] = "";
2335 char cdb_buf[70] = ""; 2335 char cdb_buf[70] = "";
2336 2336
2337 if (!(qc->flags & ATA_QCFLAG_FAILED) || 2337 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2338 ata_dev_phys_link(qc->dev) != link || !qc->err_mask) 2338 ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
2339 continue; 2339 continue;
2340 2340
2341 if (qc->dma_dir != DMA_NONE) { 2341 if (qc->dma_dir != DMA_NONE) {
2342 static const char *dma_str[] = { 2342 static const char *dma_str[] = {
2343 [DMA_BIDIRECTIONAL] = "bidi", 2343 [DMA_BIDIRECTIONAL] = "bidi",
2344 [DMA_TO_DEVICE] = "out", 2344 [DMA_TO_DEVICE] = "out",
2345 [DMA_FROM_DEVICE] = "in", 2345 [DMA_FROM_DEVICE] = "in",
2346 }; 2346 };
2347 static const char *prot_str[] = { 2347 static const char *prot_str[] = {
2348 [ATA_PROT_PIO] = "pio", 2348 [ATA_PROT_PIO] = "pio",
2349 [ATA_PROT_DMA] = "dma", 2349 [ATA_PROT_DMA] = "dma",
2350 [ATA_PROT_NCQ] = "ncq", 2350 [ATA_PROT_NCQ] = "ncq",
2351 [ATAPI_PROT_PIO] = "pio", 2351 [ATAPI_PROT_PIO] = "pio",
2352 [ATAPI_PROT_DMA] = "dma", 2352 [ATAPI_PROT_DMA] = "dma",
2353 }; 2353 };
2354 2354
2355 snprintf(data_buf, sizeof(data_buf), " %s %u %s", 2355 snprintf(data_buf, sizeof(data_buf), " %s %u %s",
2356 prot_str[qc->tf.protocol], qc->nbytes, 2356 prot_str[qc->tf.protocol], qc->nbytes,
2357 dma_str[qc->dma_dir]); 2357 dma_str[qc->dma_dir]);
2358 } 2358 }
2359 2359
2360 if (ata_is_atapi(qc->tf.protocol)) { 2360 if (ata_is_atapi(qc->tf.protocol)) {
2361 if (qc->scsicmd) 2361 if (qc->scsicmd)
2362 scsi_print_command(qc->scsicmd); 2362 scsi_print_command(qc->scsicmd);
2363 else 2363 else
2364 snprintf(cdb_buf, sizeof(cdb_buf), 2364 snprintf(cdb_buf, sizeof(cdb_buf),
2365 "cdb %02x %02x %02x %02x %02x %02x %02x %02x " 2365 "cdb %02x %02x %02x %02x %02x %02x %02x %02x "
2366 "%02x %02x %02x %02x %02x %02x %02x %02x\n ", 2366 "%02x %02x %02x %02x %02x %02x %02x %02x\n ",
2367 cdb[0], cdb[1], cdb[2], cdb[3], 2367 cdb[0], cdb[1], cdb[2], cdb[3],
2368 cdb[4], cdb[5], cdb[6], cdb[7], 2368 cdb[4], cdb[5], cdb[6], cdb[7],
2369 cdb[8], cdb[9], cdb[10], cdb[11], 2369 cdb[8], cdb[9], cdb[10], cdb[11],
2370 cdb[12], cdb[13], cdb[14], cdb[15]); 2370 cdb[12], cdb[13], cdb[14], cdb[15]);
2371 } else { 2371 } else {
2372 const char *descr = ata_get_cmd_descript(cmd->command); 2372 const char *descr = ata_get_cmd_descript(cmd->command);
2373 if (descr) 2373 if (descr)
2374 ata_dev_printk(qc->dev, KERN_ERR, 2374 ata_dev_printk(qc->dev, KERN_ERR,
2375 "failed command: %s\n", descr); 2375 "failed command: %s\n", descr);
2376 } 2376 }
2377 2377
2378 ata_dev_printk(qc->dev, KERN_ERR, 2378 ata_dev_printk(qc->dev, KERN_ERR,
2379 "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 2379 "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2380 "tag %d%s\n %s" 2380 "tag %d%s\n %s"
2381 "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 2381 "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2382 "Emask 0x%x (%s)%s\n", 2382 "Emask 0x%x (%s)%s\n",
2383 cmd->command, cmd->feature, cmd->nsect, 2383 cmd->command, cmd->feature, cmd->nsect,
2384 cmd->lbal, cmd->lbam, cmd->lbah, 2384 cmd->lbal, cmd->lbam, cmd->lbah,
2385 cmd->hob_feature, cmd->hob_nsect, 2385 cmd->hob_feature, cmd->hob_nsect,
2386 cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah, 2386 cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
2387 cmd->device, qc->tag, data_buf, cdb_buf, 2387 cmd->device, qc->tag, data_buf, cdb_buf,
2388 res->command, res->feature, res->nsect, 2388 res->command, res->feature, res->nsect,
2389 res->lbal, res->lbam, res->lbah, 2389 res->lbal, res->lbam, res->lbah,
2390 res->hob_feature, res->hob_nsect, 2390 res->hob_feature, res->hob_nsect,
2391 res->hob_lbal, res->hob_lbam, res->hob_lbah, 2391 res->hob_lbal, res->hob_lbam, res->hob_lbah,
2392 res->device, qc->err_mask, ata_err_string(qc->err_mask), 2392 res->device, qc->err_mask, ata_err_string(qc->err_mask),
2393 qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); 2393 qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
2394 2394
2395 #ifdef CONFIG_ATA_VERBOSE_ERROR 2395 #ifdef CONFIG_ATA_VERBOSE_ERROR
2396 if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | 2396 if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
2397 ATA_ERR)) { 2397 ATA_ERR)) {
2398 if (res->command & ATA_BUSY) 2398 if (res->command & ATA_BUSY)
2399 ata_dev_printk(qc->dev, KERN_ERR, 2399 ata_dev_printk(qc->dev, KERN_ERR,
2400 "status: { Busy }\n"); 2400 "status: { Busy }\n");
2401 else 2401 else
2402 ata_dev_printk(qc->dev, KERN_ERR, 2402 ata_dev_printk(qc->dev, KERN_ERR,
2403 "status: { %s%s%s%s}\n", 2403 "status: { %s%s%s%s}\n",
2404 res->command & ATA_DRDY ? "DRDY " : "", 2404 res->command & ATA_DRDY ? "DRDY " : "",
2405 res->command & ATA_DF ? "DF " : "", 2405 res->command & ATA_DF ? "DF " : "",
2406 res->command & ATA_DRQ ? "DRQ " : "", 2406 res->command & ATA_DRQ ? "DRQ " : "",
2407 res->command & ATA_ERR ? "ERR " : ""); 2407 res->command & ATA_ERR ? "ERR " : "");
2408 } 2408 }
2409 2409
2410 if (cmd->command != ATA_CMD_PACKET && 2410 if (cmd->command != ATA_CMD_PACKET &&
2411 (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF | 2411 (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
2412 ATA_ABORTED))) 2412 ATA_ABORTED)))
2413 ata_dev_printk(qc->dev, KERN_ERR, 2413 ata_dev_printk(qc->dev, KERN_ERR,
2414 "error: { %s%s%s%s}\n", 2414 "error: { %s%s%s%s}\n",
2415 res->feature & ATA_ICRC ? "ICRC " : "", 2415 res->feature & ATA_ICRC ? "ICRC " : "",
2416 res->feature & ATA_UNC ? "UNC " : "", 2416 res->feature & ATA_UNC ? "UNC " : "",
2417 res->feature & ATA_IDNF ? "IDNF " : "", 2417 res->feature & ATA_IDNF ? "IDNF " : "",
2418 res->feature & ATA_ABORTED ? "ABRT " : ""); 2418 res->feature & ATA_ABORTED ? "ABRT " : "");
2419 #endif 2419 #endif
2420 } 2420 }
2421 } 2421 }
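To read the failed-command report printed above: both taskfiles appear as command/feature:nsect:lbal:lbam:lbah, then the same five HOB (high-order byte) fields, then the device register. As a hedged, invented example, a report such as

	cmd 60/08:00:00:00:00/00:00:00:00:00/40 tag 0 ncq 4096 in

would be READ FPDMA QUEUED (command 0x60) with the NCQ sector count of 8 carried in the feature field, matching the 4096-byte "ncq 4096 in" transfer that data_buf prints for an NCQ read.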
2422 2422
2423 /** 2423 /**
2424 * ata_eh_report - report error handling to user 2424 * ata_eh_report - report error handling to user
2425 * @ap: ATA port to report EH about 2425 * @ap: ATA port to report EH about
2426 * 2426 *
2427 * Report EH to user. 2427 * Report EH to user.
2428 * 2428 *
2429 * LOCKING: 2429 * LOCKING:
2430 * None. 2430 * None.
2431 */ 2431 */
2432 void ata_eh_report(struct ata_port *ap) 2432 void ata_eh_report(struct ata_port *ap)
2433 { 2433 {
2434 struct ata_link *link; 2434 struct ata_link *link;
2435 2435
2436 ata_for_each_link(link, ap, HOST_FIRST) 2436 ata_for_each_link(link, ap, HOST_FIRST)
2437 ata_eh_link_report(link); 2437 ata_eh_link_report(link);
2438 } 2438 }
2439 2439
2440 static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset, 2440 static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
2441 unsigned int *classes, unsigned long deadline, 2441 unsigned int *classes, unsigned long deadline,
2442 bool clear_classes) 2442 bool clear_classes)
2443 { 2443 {
2444 struct ata_device *dev; 2444 struct ata_device *dev;
2445 2445
2446 if (clear_classes) 2446 if (clear_classes)
2447 ata_for_each_dev(dev, link, ALL) 2447 ata_for_each_dev(dev, link, ALL)
2448 classes[dev->devno] = ATA_DEV_UNKNOWN; 2448 classes[dev->devno] = ATA_DEV_UNKNOWN;
2449 2449
2450 return reset(link, classes, deadline); 2450 return reset(link, classes, deadline);
2451 } 2451 }
2452 2452
2453 static int ata_eh_followup_srst_needed(struct ata_link *link, 2453 static int ata_eh_followup_srst_needed(struct ata_link *link,
2454 int rc, const unsigned int *classes) 2454 int rc, const unsigned int *classes)
2455 { 2455 {
2456 if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link)) 2456 if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
2457 return 0; 2457 return 0;
2458 if (rc == -EAGAIN) 2458 if (rc == -EAGAIN)
2459 return 1; 2459 return 1;
2460 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) 2460 if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
2461 return 1; 2461 return 1;
2462 return 0; 2462 return 0;
2463 } 2463 }
2464 2464
2465 int ata_eh_reset(struct ata_link *link, int classify, 2465 int ata_eh_reset(struct ata_link *link, int classify,
2466 ata_prereset_fn_t prereset, ata_reset_fn_t softreset, 2466 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2467 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset) 2467 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
2468 { 2468 {
2469 struct ata_port *ap = link->ap; 2469 struct ata_port *ap = link->ap;
2470 struct ata_link *slave = ap->slave_link; 2470 struct ata_link *slave = ap->slave_link;
2471 struct ata_eh_context *ehc = &link->eh_context; 2471 struct ata_eh_context *ehc = &link->eh_context;
2472 struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL; 2472 struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
2473 unsigned int *classes = ehc->classes; 2473 unsigned int *classes = ehc->classes;
2474 unsigned int lflags = link->flags; 2474 unsigned int lflags = link->flags;
2475 int verbose = !(ehc->i.flags & ATA_EHI_QUIET); 2475 int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
2476 int max_tries = 0, try = 0; 2476 int max_tries = 0, try = 0;
2477 struct ata_link *failed_link; 2477 struct ata_link *failed_link;
2478 struct ata_device *dev; 2478 struct ata_device *dev;
2479 unsigned long deadline, now; 2479 unsigned long deadline, now;
2480 ata_reset_fn_t reset; 2480 ata_reset_fn_t reset;
2481 unsigned long flags; 2481 unsigned long flags;
2482 u32 sstatus; 2482 u32 sstatus;
2483 int nr_unknown, rc; 2483 int nr_unknown, rc;
2484 2484
2485 /* 2485 /*
2486 * Prepare to reset 2486 * Prepare to reset
2487 */ 2487 */
2488 while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX) 2488 while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
2489 max_tries++; 2489 max_tries++;
2490 if (link->flags & ATA_LFLAG_NO_HRST) 2490 if (link->flags & ATA_LFLAG_NO_HRST)
2491 hardreset = NULL; 2491 hardreset = NULL;
2492 if (link->flags & ATA_LFLAG_NO_SRST) 2492 if (link->flags & ATA_LFLAG_NO_SRST)
2493 softreset = NULL; 2493 softreset = NULL;
2494 2494
2495 	/* make sure each reset attempt is at least COOL_DOWN apart */ 2495 	/* make sure each reset attempt is at least COOL_DOWN apart */
2496 if (ehc->i.flags & ATA_EHI_DID_RESET) { 2496 if (ehc->i.flags & ATA_EHI_DID_RESET) {
2497 now = jiffies; 2497 now = jiffies;
2498 WARN_ON(time_after(ehc->last_reset, now)); 2498 WARN_ON(time_after(ehc->last_reset, now));
2499 deadline = ata_deadline(ehc->last_reset, 2499 deadline = ata_deadline(ehc->last_reset,
2500 ATA_EH_RESET_COOL_DOWN); 2500 ATA_EH_RESET_COOL_DOWN);
2501 if (time_before(now, deadline)) 2501 if (time_before(now, deadline))
2502 schedule_timeout_uninterruptible(deadline - now); 2502 schedule_timeout_uninterruptible(deadline - now);
2503 } 2503 }
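The cool-down wait above boils down to jiffies arithmetic: the next reset may not begin before last_reset plus the cool-down interval. A hedged sketch, assuming ata_deadline() computes from + msecs_to_jiffies(timeout_msecs) and that ATA_EH_RESET_COOL_DOWN is 5000 ms as in trees of this era:

	/* Sketch of the cool-down arithmetic; the constant and the
	 * ata_deadline() semantics are assumptions, verify against
	 * include/linux/libata.h. */
	static unsigned long reset_cool_down_deadline(unsigned long last_reset)
	{
		return last_reset + msecs_to_jiffies(5000 /* ATA_EH_RESET_COOL_DOWN */);
	}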
2504 2504
2505 spin_lock_irqsave(ap->lock, flags); 2505 spin_lock_irqsave(ap->lock, flags);
2506 ap->pflags |= ATA_PFLAG_RESETTING; 2506 ap->pflags |= ATA_PFLAG_RESETTING;
2507 spin_unlock_irqrestore(ap->lock, flags); 2507 spin_unlock_irqrestore(ap->lock, flags);
2508 2508
2509 ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2509 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2510 2510
2511 ata_for_each_dev(dev, link, ALL) { 2511 ata_for_each_dev(dev, link, ALL) {
2512 /* If we issue an SRST then an ATA drive (not ATAPI) 2512 /* If we issue an SRST then an ATA drive (not ATAPI)
2513 * may change configuration and be in PIO0 timing. If 2513 * may change configuration and be in PIO0 timing. If
2514 * we do a hard reset (or are coming from power on) 2514 * we do a hard reset (or are coming from power on)
2515 * this is true for ATA or ATAPI. Until we've set a 2515 * this is true for ATA or ATAPI. Until we've set a
2516 * suitable controller mode we should not touch the 2516 * suitable controller mode we should not touch the
2517 * bus as we may be talking too fast. 2517 * bus as we may be talking too fast.
2518 */ 2518 */
2519 dev->pio_mode = XFER_PIO_0; 2519 dev->pio_mode = XFER_PIO_0;
2520 2520
2521 /* If the controller has a pio mode setup function 2521 /* If the controller has a pio mode setup function
2522 * then use it to set the chipset to rights. Don't 2522 * then use it to set the chipset to rights. Don't
2523 * touch the DMA setup as that will be dealt with when 2523 * touch the DMA setup as that will be dealt with when
2524 * configuring devices. 2524 * configuring devices.
2525 */ 2525 */
2526 if (ap->ops->set_piomode) 2526 if (ap->ops->set_piomode)
2527 ap->ops->set_piomode(ap, dev); 2527 ap->ops->set_piomode(ap, dev);
2528 } 2528 }
2529 2529
2530 /* prefer hardreset */ 2530 /* prefer hardreset */
2531 reset = NULL; 2531 reset = NULL;
2532 ehc->i.action &= ~ATA_EH_RESET; 2532 ehc->i.action &= ~ATA_EH_RESET;
2533 if (hardreset) { 2533 if (hardreset) {
2534 reset = hardreset; 2534 reset = hardreset;
2535 ehc->i.action |= ATA_EH_HARDRESET; 2535 ehc->i.action |= ATA_EH_HARDRESET;
2536 } else if (softreset) { 2536 } else if (softreset) {
2537 reset = softreset; 2537 reset = softreset;
2538 ehc->i.action |= ATA_EH_SOFTRESET; 2538 ehc->i.action |= ATA_EH_SOFTRESET;
2539 } 2539 }
2540 2540
2541 if (prereset) { 2541 if (prereset) {
2542 unsigned long deadline = ata_deadline(jiffies, 2542 unsigned long deadline = ata_deadline(jiffies,
2543 ATA_EH_PRERESET_TIMEOUT); 2543 ATA_EH_PRERESET_TIMEOUT);
2544 2544
2545 if (slave) { 2545 if (slave) {
2546 sehc->i.action &= ~ATA_EH_RESET; 2546 sehc->i.action &= ~ATA_EH_RESET;
2547 sehc->i.action |= ehc->i.action; 2547 sehc->i.action |= ehc->i.action;
2548 } 2548 }
2549 2549
2550 rc = prereset(link, deadline); 2550 rc = prereset(link, deadline);
2551 2551
2552 /* If present, do prereset on slave link too. Reset 2552 /* If present, do prereset on slave link too. Reset
2553 * is skipped iff both master and slave links report 2553 * is skipped iff both master and slave links report
2554 * -ENOENT or clear ATA_EH_RESET. 2554 * -ENOENT or clear ATA_EH_RESET.
2555 */ 2555 */
2556 if (slave && (rc == 0 || rc == -ENOENT)) { 2556 if (slave && (rc == 0 || rc == -ENOENT)) {
2557 int tmp; 2557 int tmp;
2558 2558
2559 tmp = prereset(slave, deadline); 2559 tmp = prereset(slave, deadline);
2560 if (tmp != -ENOENT) 2560 if (tmp != -ENOENT)
2561 rc = tmp; 2561 rc = tmp;
2562 2562
2563 ehc->i.action |= sehc->i.action; 2563 ehc->i.action |= sehc->i.action;
2564 } 2564 }
2565 2565
2566 if (rc) { 2566 if (rc) {
2567 if (rc == -ENOENT) { 2567 if (rc == -ENOENT) {
2568 ata_link_printk(link, KERN_DEBUG, 2568 ata_link_printk(link, KERN_DEBUG,
2569 "port disabled. ignoring.\n"); 2569 "port disabled. ignoring.\n");
2570 ehc->i.action &= ~ATA_EH_RESET; 2570 ehc->i.action &= ~ATA_EH_RESET;
2571 2571
2572 ata_for_each_dev(dev, link, ALL) 2572 ata_for_each_dev(dev, link, ALL)
2573 classes[dev->devno] = ATA_DEV_NONE; 2573 classes[dev->devno] = ATA_DEV_NONE;
2574 2574
2575 rc = 0; 2575 rc = 0;
2576 } else 2576 } else
2577 ata_link_printk(link, KERN_ERR, 2577 ata_link_printk(link, KERN_ERR,
2578 "prereset failed (errno=%d)\n", rc); 2578 "prereset failed (errno=%d)\n", rc);
2579 goto out; 2579 goto out;
2580 } 2580 }
2581 2581
2582 /* prereset() might have cleared ATA_EH_RESET. If so, 2582 /* prereset() might have cleared ATA_EH_RESET. If so,
2583 * bang classes, thaw and return. 2583 * bang classes, thaw and return.
2584 */ 2584 */
2585 if (reset && !(ehc->i.action & ATA_EH_RESET)) { 2585 if (reset && !(ehc->i.action & ATA_EH_RESET)) {
2586 ata_for_each_dev(dev, link, ALL) 2586 ata_for_each_dev(dev, link, ALL)
2587 classes[dev->devno] = ATA_DEV_NONE; 2587 classes[dev->devno] = ATA_DEV_NONE;
2588 if ((ap->pflags & ATA_PFLAG_FROZEN) && 2588 if ((ap->pflags & ATA_PFLAG_FROZEN) &&
2589 ata_is_host_link(link)) 2589 ata_is_host_link(link))
2590 ata_eh_thaw_port(ap); 2590 ata_eh_thaw_port(ap);
2591 rc = 0; 2591 rc = 0;
2592 goto out; 2592 goto out;
2593 } 2593 }
2594 } 2594 }
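The error handling above implies the prereset() contract: return 0 to go ahead with the reset, -ENOENT to treat the port as disabled (classes become ATA_DEV_NONE and reset is skipped), and any other negative errno to fail EH. A hedged sketch of a trivial implementation that defers to the standard helper:

	/* Sketch only: the ATA_LFLAG_DISABLED check is illustrative;
	 * real drivers normally rely on ata_std_prereset() alone. */
	static int example_prereset(struct ata_link *link, unsigned long deadline)
	{
		if (link->flags & ATA_LFLAG_DISABLED)
			return -ENOENT;	/* port disabled, skip reset */

		return ata_std_prereset(link, deadline);
	}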
2595 2595
2596 retry: 2596 retry:
2597 /* 2597 /*
2598 * Perform reset 2598 * Perform reset
2599 */ 2599 */
2600 if (ata_is_host_link(link)) 2600 if (ata_is_host_link(link))
2601 ata_eh_freeze_port(ap); 2601 ata_eh_freeze_port(ap);
2602 2602
2603 deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]); 2603 deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
2604 2604
2605 if (reset) { 2605 if (reset) {
2606 if (verbose) 2606 if (verbose)
2607 ata_link_printk(link, KERN_INFO, "%s resetting link\n", 2607 ata_link_printk(link, KERN_INFO, "%s resetting link\n",
2608 reset == softreset ? "soft" : "hard"); 2608 reset == softreset ? "soft" : "hard");
2609 2609
2610 /* mark that this EH session started with reset */ 2610 /* mark that this EH session started with reset */
2611 ehc->last_reset = jiffies; 2611 ehc->last_reset = jiffies;
2612 if (reset == hardreset) 2612 if (reset == hardreset)
2613 ehc->i.flags |= ATA_EHI_DID_HARDRESET; 2613 ehc->i.flags |= ATA_EHI_DID_HARDRESET;
2614 else 2614 else
2615 ehc->i.flags |= ATA_EHI_DID_SOFTRESET; 2615 ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
2616 2616
2617 rc = ata_do_reset(link, reset, classes, deadline, true); 2617 rc = ata_do_reset(link, reset, classes, deadline, true);
2618 if (rc && rc != -EAGAIN) { 2618 if (rc && rc != -EAGAIN) {
2619 failed_link = link; 2619 failed_link = link;
2620 goto fail; 2620 goto fail;
2621 } 2621 }
2622 2622
2623 		/* hardreset slave link if it exists */ 2623 		/* hardreset slave link if it exists */
2624 if (slave && reset == hardreset) { 2624 if (slave && reset == hardreset) {
2625 int tmp; 2625 int tmp;
2626 2626
2627 if (verbose) 2627 if (verbose)
2628 ata_link_printk(slave, KERN_INFO, 2628 ata_link_printk(slave, KERN_INFO,
2629 "hard resetting link\n"); 2629 "hard resetting link\n");
2630 2630
2631 ata_eh_about_to_do(slave, NULL, ATA_EH_RESET); 2631 ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2632 tmp = ata_do_reset(slave, reset, classes, deadline, 2632 tmp = ata_do_reset(slave, reset, classes, deadline,
2633 false); 2633 false);
2634 switch (tmp) { 2634 switch (tmp) {
2635 case -EAGAIN: 2635 case -EAGAIN:
2636 				rc = -EAGAIN; /* fall through */ 2636 				rc = -EAGAIN; /* fall through */
2637 case 0: 2637 case 0:
2638 break; 2638 break;
2639 default: 2639 default:
2640 failed_link = slave; 2640 failed_link = slave;
2641 rc = tmp; 2641 rc = tmp;
2642 goto fail; 2642 goto fail;
2643 } 2643 }
2644 } 2644 }
2645 2645
2646 /* perform follow-up SRST if necessary */ 2646 /* perform follow-up SRST if necessary */
2647 if (reset == hardreset && 2647 if (reset == hardreset &&
2648 ata_eh_followup_srst_needed(link, rc, classes)) { 2648 ata_eh_followup_srst_needed(link, rc, classes)) {
2649 reset = softreset; 2649 reset = softreset;
2650 2650
2651 if (!reset) { 2651 if (!reset) {
2652 ata_link_printk(link, KERN_ERR, 2652 ata_link_printk(link, KERN_ERR,
2653 "follow-up softreset required " 2653 "follow-up softreset required "
2654 "but no softreset avaliable\n"); 2654 "but no softreset avaliable\n");
2655 failed_link = link; 2655 failed_link = link;
2656 rc = -EINVAL; 2656 rc = -EINVAL;
2657 goto fail; 2657 goto fail;
2658 } 2658 }
2659 2659
2660 ata_eh_about_to_do(link, NULL, ATA_EH_RESET); 2660 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2661 rc = ata_do_reset(link, reset, classes, deadline, true); 2661 rc = ata_do_reset(link, reset, classes, deadline, true);
2662 if (rc) { 2662 if (rc) {
2663 failed_link = link; 2663 failed_link = link;
2664 goto fail; 2664 goto fail;
2665 } 2665 }
2666 } 2666 }
2667 } else { 2667 } else {
2668 if (verbose) 2668 if (verbose)
2669 ata_link_printk(link, KERN_INFO, "no reset method " 2669 ata_link_printk(link, KERN_INFO, "no reset method "
2670 "available, skipping reset\n"); 2670 "available, skipping reset\n");
2671 if (!(lflags & ATA_LFLAG_ASSUME_CLASS)) 2671 if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2672 lflags |= ATA_LFLAG_ASSUME_ATA; 2672 lflags |= ATA_LFLAG_ASSUME_ATA;
2673 } 2673 }
2674 2674
2675 /* 2675 /*
2676 * Post-reset processing 2676 * Post-reset processing
2677 */ 2677 */
2678 ata_for_each_dev(dev, link, ALL) { 2678 ata_for_each_dev(dev, link, ALL) {
2679 /* After the reset, the device state is PIO 0 and the 2679 /* After the reset, the device state is PIO 0 and the
2680 * controller state is undefined. Reset also wakes up 2680 * controller state is undefined. Reset also wakes up
2681 * drives from sleeping mode. 2681 * drives from sleeping mode.
2682 */ 2682 */
2683 dev->pio_mode = XFER_PIO_0; 2683 dev->pio_mode = XFER_PIO_0;
2684 dev->flags &= ~ATA_DFLAG_SLEEPING; 2684 dev->flags &= ~ATA_DFLAG_SLEEPING;
2685 2685
2686 if (ata_phys_link_offline(ata_dev_phys_link(dev))) 2686 if (ata_phys_link_offline(ata_dev_phys_link(dev)))
2687 continue; 2687 continue;
2688 2688
2689 /* apply class override */ 2689 /* apply class override */
2690 if (lflags & ATA_LFLAG_ASSUME_ATA) 2690 if (lflags & ATA_LFLAG_ASSUME_ATA)
2691 classes[dev->devno] = ATA_DEV_ATA; 2691 classes[dev->devno] = ATA_DEV_ATA;
2692 else if (lflags & ATA_LFLAG_ASSUME_SEMB) 2692 else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2693 classes[dev->devno] = ATA_DEV_SEMB_UNSUP; 2693 classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
2694 } 2694 }
2695 2695
2696 /* record current link speed */ 2696 /* record current link speed */
2697 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0) 2697 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2698 link->sata_spd = (sstatus >> 4) & 0xf; 2698 link->sata_spd = (sstatus >> 4) & 0xf;
2699 if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0) 2699 if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2700 slave->sata_spd = (sstatus >> 4) & 0xf; 2700 slave->sata_spd = (sstatus >> 4) & 0xf;
2701 2701
2702 /* thaw the port */ 2702 /* thaw the port */
2703 if (ata_is_host_link(link)) 2703 if (ata_is_host_link(link))
2704 ata_eh_thaw_port(ap); 2704 ata_eh_thaw_port(ap);
2705 2705
2706 /* postreset() should clear hardware SError. Although SError 2706 /* postreset() should clear hardware SError. Although SError
2707 * is cleared during link resume, clearing SError here is 2707 * is cleared during link resume, clearing SError here is
2708 * necessary as some PHYs raise hotplug events after SRST. 2708 * necessary as some PHYs raise hotplug events after SRST.
2709 	 * This introduces a race condition where hotplug occurs between 2709 	 * This introduces a race condition where hotplug occurs between
2710 	 * reset and here. The race is mitigated by cross-checking 2710 	 * reset and here. The race is mitigated by cross-checking
2711 	 * link onlineness and the classification result later. 2711 	 * link onlineness and the classification result later.
2712 */ 2712 */
2713 if (postreset) { 2713 if (postreset) {
2714 postreset(link, classes); 2714 postreset(link, classes);
2715 if (slave) 2715 if (slave)
2716 postreset(slave, classes); 2716 postreset(slave, classes);
2717 } 2717 }
2718 2718
2719 /* 2719 /*
2720 * Some controllers can't be frozen very well and may set 2720 * Some controllers can't be frozen very well and may set
2721 	 * spurious error conditions during reset. Clear accumulated 2721 	 * spurious error conditions during reset. Clear accumulated
2722 * error information. As reset is the final recovery action, 2722 * error information. As reset is the final recovery action,
2723 * nothing is lost by doing this. 2723 * nothing is lost by doing this.
2724 */ 2724 */
2725 spin_lock_irqsave(link->ap->lock, flags); 2725 spin_lock_irqsave(link->ap->lock, flags);
2726 memset(&link->eh_info, 0, sizeof(link->eh_info)); 2726 memset(&link->eh_info, 0, sizeof(link->eh_info));
2727 if (slave) 2727 if (slave)
2728 memset(&slave->eh_info, 0, sizeof(link->eh_info)); 2728 memset(&slave->eh_info, 0, sizeof(link->eh_info));
2729 ap->pflags &= ~ATA_PFLAG_EH_PENDING; 2729 ap->pflags &= ~ATA_PFLAG_EH_PENDING;
2730 spin_unlock_irqrestore(link->ap->lock, flags); 2730 spin_unlock_irqrestore(link->ap->lock, flags);
2731 2731
2732 /* 2732 /*
2733 * Make sure onlineness and classification result correspond. 2733 * Make sure onlineness and classification result correspond.
2734 * Hotplug could have happened during reset and some 2734 * Hotplug could have happened during reset and some
2735 * controllers fail to wait while a drive is spinning up after 2735 * controllers fail to wait while a drive is spinning up after
2736 	 * being hotplugged, causing misdetection. By cross-checking 2736 	 * being hotplugged, causing misdetection. By cross-checking
2737 * link on/offlineness and classification result, those 2737 * link on/offlineness and classification result, those
2738 * conditions can be reliably detected and retried. 2738 * conditions can be reliably detected and retried.
2739 */ 2739 */
2740 nr_unknown = 0; 2740 nr_unknown = 0;
2741 ata_for_each_dev(dev, link, ALL) { 2741 ata_for_each_dev(dev, link, ALL) {
2742 if (ata_phys_link_online(ata_dev_phys_link(dev))) { 2742 if (ata_phys_link_online(ata_dev_phys_link(dev))) {
2743 if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 2743 if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2744 ata_dev_printk(dev, KERN_DEBUG, "link online " 2744 ata_dev_printk(dev, KERN_DEBUG, "link online "
2745 "but device misclassifed\n"); 2745 "but device misclassifed\n");
2746 classes[dev->devno] = ATA_DEV_NONE; 2746 classes[dev->devno] = ATA_DEV_NONE;
2747 nr_unknown++; 2747 nr_unknown++;
2748 } 2748 }
2749 } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) { 2749 } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2750 if (ata_class_enabled(classes[dev->devno])) 2750 if (ata_class_enabled(classes[dev->devno]))
2751 ata_dev_printk(dev, KERN_DEBUG, "link offline, " 2751 ata_dev_printk(dev, KERN_DEBUG, "link offline, "
2752 "clearing class %d to NONE\n", 2752 "clearing class %d to NONE\n",
2753 classes[dev->devno]); 2753 classes[dev->devno]);
2754 classes[dev->devno] = ATA_DEV_NONE; 2754 classes[dev->devno] = ATA_DEV_NONE;
2755 } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) { 2755 } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2756 ata_dev_printk(dev, KERN_DEBUG, "link status unknown, " 2756 ata_dev_printk(dev, KERN_DEBUG, "link status unknown, "
2757 "clearing UNKNOWN to NONE\n"); 2757 "clearing UNKNOWN to NONE\n");
2758 classes[dev->devno] = ATA_DEV_NONE; 2758 classes[dev->devno] = ATA_DEV_NONE;
2759 } 2759 }
2760 } 2760 }
2761 2761
2762 if (classify && nr_unknown) { 2762 if (classify && nr_unknown) {
2763 if (try < max_tries) { 2763 if (try < max_tries) {
2764 ata_link_printk(link, KERN_WARNING, "link online but " 2764 ata_link_printk(link, KERN_WARNING, "link online but "
2765 "%d devices misclassified, retrying\n", 2765 "%d devices misclassified, retrying\n",
2766 nr_unknown); 2766 nr_unknown);
2767 failed_link = link; 2767 failed_link = link;
2768 rc = -EAGAIN; 2768 rc = -EAGAIN;
2769 goto fail; 2769 goto fail;
2770 } 2770 }
2771 ata_link_printk(link, KERN_WARNING, 2771 ata_link_printk(link, KERN_WARNING,
2772 "link online but %d devices misclassified, " 2772 "link online but %d devices misclassified, "
2773 "device detection might fail\n", nr_unknown); 2773 "device detection might fail\n", nr_unknown);
2774 } 2774 }
2775 2775
2776 /* reset successful, schedule revalidation */ 2776 /* reset successful, schedule revalidation */
2777 ata_eh_done(link, NULL, ATA_EH_RESET); 2777 ata_eh_done(link, NULL, ATA_EH_RESET);
2778 if (slave) 2778 if (slave)
2779 ata_eh_done(slave, NULL, ATA_EH_RESET); 2779 ata_eh_done(slave, NULL, ATA_EH_RESET);
2780 ehc->last_reset = jiffies; /* update to completion time */ 2780 ehc->last_reset = jiffies; /* update to completion time */
2781 ehc->i.action |= ATA_EH_REVALIDATE; 2781 ehc->i.action |= ATA_EH_REVALIDATE;
2782 2782
2783 rc = 0; 2783 rc = 0;
2784 out: 2784 out:
2785 /* clear hotplug flag */ 2785 /* clear hotplug flag */
2786 ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2786 ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2787 if (slave) 2787 if (slave)
2788 sehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2788 sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2789 2789
2790 spin_lock_irqsave(ap->lock, flags); 2790 spin_lock_irqsave(ap->lock, flags);
2791 ap->pflags &= ~ATA_PFLAG_RESETTING; 2791 ap->pflags &= ~ATA_PFLAG_RESETTING;
2792 spin_unlock_irqrestore(ap->lock, flags); 2792 spin_unlock_irqrestore(ap->lock, flags);
2793 2793
2794 return rc; 2794 return rc;
2795 2795
2796 fail: 2796 fail:
2797 /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */ 2797 /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
2798 if (!ata_is_host_link(link) && 2798 if (!ata_is_host_link(link) &&
2799 sata_scr_read(link, SCR_STATUS, &sstatus)) 2799 sata_scr_read(link, SCR_STATUS, &sstatus))
2800 rc = -ERESTART; 2800 rc = -ERESTART;
2801 2801
2802 if (rc == -ERESTART || try >= max_tries) 2802 if (rc == -ERESTART || try >= max_tries)
2803 goto out; 2803 goto out;
2804 2804
2805 now = jiffies; 2805 now = jiffies;
2806 if (time_before(now, deadline)) { 2806 if (time_before(now, deadline)) {
2807 unsigned long delta = deadline - now; 2807 unsigned long delta = deadline - now;
2808 2808
2809 ata_link_printk(failed_link, KERN_WARNING, 2809 ata_link_printk(failed_link, KERN_WARNING,
2810 "reset failed (errno=%d), retrying in %u secs\n", 2810 "reset failed (errno=%d), retrying in %u secs\n",
2811 rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000)); 2811 rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
2812 2812
2813 while (delta) 2813 while (delta)
2814 delta = schedule_timeout_uninterruptible(delta); 2814 delta = schedule_timeout_uninterruptible(delta);
2815 } 2815 }
2816 2816
2817 if (try == max_tries - 1) { 2817 if (try == max_tries - 1) {
2818 sata_down_spd_limit(link, 0); 2818 sata_down_spd_limit(link, 0);
2819 if (slave) 2819 if (slave)
2820 sata_down_spd_limit(slave, 0); 2820 sata_down_spd_limit(slave, 0);
2821 } else if (rc == -EPIPE) 2821 } else if (rc == -EPIPE)
2822 sata_down_spd_limit(failed_link, 0); 2822 sata_down_spd_limit(failed_link, 0);
2823 2823
2824 if (hardreset) 2824 if (hardreset)
2825 reset = hardreset; 2825 reset = hardreset;
2826 goto retry; 2826 goto retry;
2827 } 2827 }
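ata_eh_reset() is not called by drivers directly; a low-level driver's ->error_handler hands its reset methods to the EH core, which invokes ata_eh_reset() during recovery. A hedged sketch, mirroring ata_std_error_handler() minus its sata_scr_valid() guard:

	static void example_error_handler(struct ata_port *ap)
	{
		/* forward the port's standard reset ops to the EH core */
		ata_do_eh(ap, ap->ops->prereset, ap->ops->softreset,
			  ap->ops->hardreset, ap->ops->postreset);
	}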
2828 2828
2829 static inline void ata_eh_pull_park_action(struct ata_port *ap) 2829 static inline void ata_eh_pull_park_action(struct ata_port *ap)
2830 { 2830 {
2831 struct ata_link *link; 2831 struct ata_link *link;
2832 struct ata_device *dev; 2832 struct ata_device *dev;
2833 unsigned long flags; 2833 unsigned long flags;
2834 2834
2835 /* 2835 /*
2836 * This function can be thought of as an extended version of 2836 * This function can be thought of as an extended version of
2837 * ata_eh_about_to_do() specially crafted to accommodate the 2837 * ata_eh_about_to_do() specially crafted to accommodate the
2838 * requirements of ATA_EH_PARK handling. Since the EH thread 2838 * requirements of ATA_EH_PARK handling. Since the EH thread
2839 * does not leave the do {} while () loop in ata_eh_recover as 2839 * does not leave the do {} while () loop in ata_eh_recover as
2840 * long as the timeout for a park request to *one* device on 2840 * long as the timeout for a park request to *one* device on
2841 * the port has not expired, and since we still want to pick 2841 * the port has not expired, and since we still want to pick
2842 * up park requests to other devices on the same port or 2842 * up park requests to other devices on the same port or
2843 * timeout updates for the same device, we have to pull 2843 * timeout updates for the same device, we have to pull
2844 * ATA_EH_PARK actions from eh_info into eh_context.i 2844 * ATA_EH_PARK actions from eh_info into eh_context.i
2845 * ourselves at the beginning of each pass over the loop. 2845 * ourselves at the beginning of each pass over the loop.
2846 * 2846 *
2847 * Additionally, all write accesses to &ap->park_req_pending 2847 * Additionally, all write accesses to &ap->park_req_pending
2848 * through INIT_COMPLETION() (see below) or complete_all() 2848 * through INIT_COMPLETION() (see below) or complete_all()
2849 * (see ata_scsi_park_store()) are protected by the host lock. 2849 * (see ata_scsi_park_store()) are protected by the host lock.
2850 	 * As a result, park_req_pending.done is zero on 2850 	 * As a result, park_req_pending.done is zero on
2851 * exit from this function, i.e. when ATA_EH_PARK actions for 2851 * exit from this function, i.e. when ATA_EH_PARK actions for
2852 * *all* devices on port ap have been pulled into the 2852 * *all* devices on port ap have been pulled into the
2853 * respective eh_context structs. If, and only if, 2853 * respective eh_context structs. If, and only if,
2854 * park_req_pending.done is non-zero by the time we reach 2854 * park_req_pending.done is non-zero by the time we reach
2855 * wait_for_completion_timeout(), another ATA_EH_PARK action 2855 * wait_for_completion_timeout(), another ATA_EH_PARK action
2856 * has been scheduled for at least one of the devices on port 2856 * has been scheduled for at least one of the devices on port
2857 * ap and we have to cycle over the do {} while () loop in 2857 * ap and we have to cycle over the do {} while () loop in
2858 * ata_eh_recover() again. 2858 * ata_eh_recover() again.
2859 */ 2859 */
2860 2860
2861 spin_lock_irqsave(ap->lock, flags); 2861 spin_lock_irqsave(ap->lock, flags);
2862 INIT_COMPLETION(ap->park_req_pending); 2862 INIT_COMPLETION(ap->park_req_pending);
2863 ata_for_each_link(link, ap, EDGE) { 2863 ata_for_each_link(link, ap, EDGE) {
2864 ata_for_each_dev(dev, link, ALL) { 2864 ata_for_each_dev(dev, link, ALL) {
2865 struct ata_eh_info *ehi = &link->eh_info; 2865 struct ata_eh_info *ehi = &link->eh_info;
2866 2866
2867 link->eh_context.i.dev_action[dev->devno] |= 2867 link->eh_context.i.dev_action[dev->devno] |=
2868 ehi->dev_action[dev->devno] & ATA_EH_PARK; 2868 ehi->dev_action[dev->devno] & ATA_EH_PARK;
2869 ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK); 2869 ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
2870 } 2870 }
2871 } 2871 }
2872 spin_unlock_irqrestore(ap->lock, flags); 2872 spin_unlock_irqrestore(ap->lock, flags);
2873 } 2873 }
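The comment above describes the consumer half of the park handshake; the producer half lives in ata_scsi_park_store(). A hedged sketch of its essential steps (illustrative, not a verbatim copy):

	static void example_request_park(struct ata_device *dev)
	{
		struct ata_port *ap = dev->link->ap;
		unsigned long flags;

		spin_lock_irqsave(ap->lock, flags);
		dev->link->eh_info.dev_action[dev->devno] |= ATA_EH_PARK;
		ata_port_schedule_eh(ap);		/* make sure EH is running */
		complete_all(&ap->park_req_pending);	/* re-arm the do {} while loop */
		spin_unlock_irqrestore(ap->lock, flags);
	}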
2874 2874
2875 static void ata_eh_park_issue_cmd(struct ata_device *dev, int park) 2875 static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
2876 { 2876 {
2877 struct ata_eh_context *ehc = &dev->link->eh_context; 2877 struct ata_eh_context *ehc = &dev->link->eh_context;
2878 struct ata_taskfile tf; 2878 struct ata_taskfile tf;
2879 unsigned int err_mask; 2879 unsigned int err_mask;
2880 2880
2881 ata_tf_init(dev, &tf); 2881 ata_tf_init(dev, &tf);
2882 if (park) { 2882 if (park) {
2883 ehc->unloaded_mask |= 1 << dev->devno; 2883 ehc->unloaded_mask |= 1 << dev->devno;
2884 tf.command = ATA_CMD_IDLEIMMEDIATE; 2884 tf.command = ATA_CMD_IDLEIMMEDIATE;
2885 tf.feature = 0x44; 2885 tf.feature = 0x44;
2886 tf.lbal = 0x4c; 2886 tf.lbal = 0x4c;
2887 tf.lbam = 0x4e; 2887 tf.lbam = 0x4e;
2888 tf.lbah = 0x55; 2888 tf.lbah = 0x55;
2889 } else { 2889 } else {
2890 ehc->unloaded_mask &= ~(1 << dev->devno); 2890 ehc->unloaded_mask &= ~(1 << dev->devno);
2891 tf.command = ATA_CMD_CHK_POWER; 2891 tf.command = ATA_CMD_CHK_POWER;
2892 } 2892 }
2893 2893
2894 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 2894 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
2895 tf.protocol |= ATA_PROT_NODATA; 2895 tf.protocol |= ATA_PROT_NODATA;
2896 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 2896 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
2897 if (park && (err_mask || tf.lbal != 0xc4)) { 2897 if (park && (err_mask || tf.lbal != 0xc4)) {
2898 ata_dev_printk(dev, KERN_ERR, "head unload failed!\n"); 2898 ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
2899 ehc->unloaded_mask &= ~(1 << dev->devno); 2899 ehc->unloaded_mask &= ~(1 << dev->devno);
2900 } 2900 }
2901 } 2901 }
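The magic taskfile values above are the IDLE IMMEDIATE with UNLOAD FEATURE signature: feature 0x44 selects the unload subcommand, lbah:lbam:lbal carry 0x55:0x4e:0x4c ('U', 'N', 'L'), and a device that honors the request answers with lbal == 0xc4, which is exactly what the success check tests. A hedged restatement (the constants come from the code itself; the ATA8-ACS reference is an assumption):

	static bool unload_was_accepted(unsigned int err_mask, u8 result_lbal)
	{
		/* mirrors the "park && (err_mask || tf.lbal != 0xc4)" check */
		return !err_mask && result_lbal == 0xc4;
	}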
2902 2902
2903 static int ata_eh_revalidate_and_attach(struct ata_link *link, 2903 static int ata_eh_revalidate_and_attach(struct ata_link *link,
2904 struct ata_device **r_failed_dev) 2904 struct ata_device **r_failed_dev)
2905 { 2905 {
2906 struct ata_port *ap = link->ap; 2906 struct ata_port *ap = link->ap;
2907 struct ata_eh_context *ehc = &link->eh_context; 2907 struct ata_eh_context *ehc = &link->eh_context;
2908 struct ata_device *dev; 2908 struct ata_device *dev;
2909 unsigned int new_mask = 0; 2909 unsigned int new_mask = 0;
2910 unsigned long flags; 2910 unsigned long flags;
2911 int rc = 0; 2911 int rc = 0;
2912 2912
2913 DPRINTK("ENTER\n"); 2913 DPRINTK("ENTER\n");
2914 2914
2915 /* For PATA drive side cable detection to work, IDENTIFY must 2915 /* For PATA drive side cable detection to work, IDENTIFY must
2916 * be done backwards such that PDIAG- is released by the slave 2916 * be done backwards such that PDIAG- is released by the slave
2917 * device before the master device is identified. 2917 * device before the master device is identified.
2918 */ 2918 */
2919 ata_for_each_dev(dev, link, ALL_REVERSE) { 2919 ata_for_each_dev(dev, link, ALL_REVERSE) {
2920 unsigned int action = ata_eh_dev_action(dev); 2920 unsigned int action = ata_eh_dev_action(dev);
2921 unsigned int readid_flags = 0; 2921 unsigned int readid_flags = 0;
2922 2922
2923 if (ehc->i.flags & ATA_EHI_DID_RESET) 2923 if (ehc->i.flags & ATA_EHI_DID_RESET)
2924 readid_flags |= ATA_READID_POSTRESET; 2924 readid_flags |= ATA_READID_POSTRESET;
2925 2925
2926 if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) { 2926 if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
2927 WARN_ON(dev->class == ATA_DEV_PMP); 2927 WARN_ON(dev->class == ATA_DEV_PMP);
2928 2928
2929 if (ata_phys_link_offline(ata_dev_phys_link(dev))) { 2929 if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2930 rc = -EIO; 2930 rc = -EIO;
2931 goto err; 2931 goto err;
2932 } 2932 }
2933 2933
2934 ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE); 2934 ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
2935 rc = ata_dev_revalidate(dev, ehc->classes[dev->devno], 2935 rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
2936 readid_flags); 2936 readid_flags);
2937 if (rc) 2937 if (rc)
2938 goto err; 2938 goto err;
2939 2939
2940 ata_eh_done(link, dev, ATA_EH_REVALIDATE); 2940 ata_eh_done(link, dev, ATA_EH_REVALIDATE);
2941 2941
2942 /* Configuration may have changed, reconfigure 2942 /* Configuration may have changed, reconfigure
2943 * transfer mode. 2943 * transfer mode.
2944 */ 2944 */
2945 ehc->i.flags |= ATA_EHI_SETMODE; 2945 ehc->i.flags |= ATA_EHI_SETMODE;
2946 2946
2947 /* schedule the scsi_rescan_device() here */ 2947 /* schedule the scsi_rescan_device() here */
2948 schedule_work(&(ap->scsi_rescan_task)); 2948 schedule_work(&(ap->scsi_rescan_task));
2949 } else if (dev->class == ATA_DEV_UNKNOWN && 2949 } else if (dev->class == ATA_DEV_UNKNOWN &&
2950 ehc->tries[dev->devno] && 2950 ehc->tries[dev->devno] &&
2951 ata_class_enabled(ehc->classes[dev->devno])) { 2951 ata_class_enabled(ehc->classes[dev->devno])) {
2952 /* Temporarily set dev->class, it will be 2952 /* Temporarily set dev->class, it will be
2953 * permanently set once all configurations are 2953 * permanently set once all configurations are
2954 * complete. This is necessary because new 2954 * complete. This is necessary because new
2955 * device configuration is done in two 2955 * device configuration is done in two
2956 * separate loops. 2956 * separate loops.
2957 */ 2957 */
2958 dev->class = ehc->classes[dev->devno]; 2958 dev->class = ehc->classes[dev->devno];
2959 2959
2960 if (dev->class == ATA_DEV_PMP) 2960 if (dev->class == ATA_DEV_PMP)
2961 rc = sata_pmp_attach(dev); 2961 rc = sata_pmp_attach(dev);
2962 else 2962 else
2963 rc = ata_dev_read_id(dev, &dev->class, 2963 rc = ata_dev_read_id(dev, &dev->class,
2964 readid_flags, dev->id); 2964 readid_flags, dev->id);
2965 2965
2966 /* read_id might have changed class, store and reset */ 2966 /* read_id might have changed class, store and reset */
2967 ehc->classes[dev->devno] = dev->class; 2967 ehc->classes[dev->devno] = dev->class;
2968 dev->class = ATA_DEV_UNKNOWN; 2968 dev->class = ATA_DEV_UNKNOWN;
2969 2969
2970 switch (rc) { 2970 switch (rc) {
2971 case 0: 2971 case 0:
2972 /* clear error info accumulated during probe */ 2972 /* clear error info accumulated during probe */
2973 ata_ering_clear(&dev->ering); 2973 ata_ering_clear(&dev->ering);
2974 new_mask |= 1 << dev->devno; 2974 new_mask |= 1 << dev->devno;
2975 break; 2975 break;
2976 case -ENOENT: 2976 case -ENOENT:
2977 /* IDENTIFY was issued to non-existent 2977 /* IDENTIFY was issued to non-existent
2978 * device. No need to reset. Just 2978 * device. No need to reset. Just
2979 * thaw and ignore the device. 2979 * thaw and ignore the device.
2980 */ 2980 */
2981 ata_eh_thaw_port(ap); 2981 ata_eh_thaw_port(ap);
2982 break; 2982 break;
2983 default: 2983 default:
2984 goto err; 2984 goto err;
2985 } 2985 }
2986 } 2986 }
2987 } 2987 }
2988 2988
2989 /* PDIAG- should have been released, ask cable type if post-reset */ 2989 /* PDIAG- should have been released, ask cable type if post-reset */
2990 if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) { 2990 if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
2991 if (ap->ops->cable_detect) 2991 if (ap->ops->cable_detect)
2992 ap->cbl = ap->ops->cable_detect(ap); 2992 ap->cbl = ap->ops->cable_detect(ap);
2993 ata_force_cbl(ap); 2993 ata_force_cbl(ap);
2994 } 2994 }
2995 2995
2996 /* Configure new devices forward such that user doesn't see 2996 /* Configure new devices forward such that user doesn't see
2997 * device detection messages backwards. 2997 * device detection messages backwards.
2998 */ 2998 */
2999 ata_for_each_dev(dev, link, ALL) { 2999 ata_for_each_dev(dev, link, ALL) {
3000 if (!(new_mask & (1 << dev->devno))) 3000 if (!(new_mask & (1 << dev->devno)))
3001 continue; 3001 continue;
3002 3002
3003 dev->class = ehc->classes[dev->devno]; 3003 dev->class = ehc->classes[dev->devno];
3004 3004
3005 if (dev->class == ATA_DEV_PMP) 3005 if (dev->class == ATA_DEV_PMP)
3006 continue; 3006 continue;
3007 3007
3008 ehc->i.flags |= ATA_EHI_PRINTINFO; 3008 ehc->i.flags |= ATA_EHI_PRINTINFO;
3009 rc = ata_dev_configure(dev); 3009 rc = ata_dev_configure(dev);
3010 ehc->i.flags &= ~ATA_EHI_PRINTINFO; 3010 ehc->i.flags &= ~ATA_EHI_PRINTINFO;
3011 if (rc) { 3011 if (rc) {
3012 dev->class = ATA_DEV_UNKNOWN; 3012 dev->class = ATA_DEV_UNKNOWN;
3013 goto err; 3013 goto err;
3014 } 3014 }
3015 3015
3016 spin_lock_irqsave(ap->lock, flags); 3016 spin_lock_irqsave(ap->lock, flags);
3017 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 3017 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
3018 spin_unlock_irqrestore(ap->lock, flags); 3018 spin_unlock_irqrestore(ap->lock, flags);
3019 3019
3020 /* new device discovered, configure xfermode */ 3020 /* new device discovered, configure xfermode */
3021 ehc->i.flags |= ATA_EHI_SETMODE; 3021 ehc->i.flags |= ATA_EHI_SETMODE;
3022 } 3022 }
3023 3023
3024 return 0; 3024 return 0;
3025 3025
3026 err: 3026 err:
3027 *r_failed_dev = dev; 3027 *r_failed_dev = dev;
3028 DPRINTK("EXIT rc=%d\n", rc); 3028 DPRINTK("EXIT rc=%d\n", rc);
3029 return rc; 3029 return rc;
3030 } 3030 }
3031 3031
3032 /** 3032 /**
3033 * ata_set_mode - Program timings and issue SET FEATURES - XFER 3033 * ata_set_mode - Program timings and issue SET FEATURES - XFER
3034 * @link: link on which timings will be programmed 3034 * @link: link on which timings will be programmed
3035 * @r_failed_dev: out parameter for failed device 3035 * @r_failed_dev: out parameter for failed device
3036 * 3036 *
3037 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If 3037 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3038 * ata_set_mode() fails, pointer to the failing device is 3038 * ata_set_mode() fails, pointer to the failing device is
3039 * returned in @r_failed_dev. 3039 * returned in @r_failed_dev.
3040 * 3040 *
3041 * LOCKING: 3041 * LOCKING:
3042 * PCI/etc. bus probe sem. 3042 * PCI/etc. bus probe sem.
3043 * 3043 *
3044 * RETURNS: 3044 * RETURNS:
3045 * 0 on success, negative errno otherwise 3045 * 0 on success, negative errno otherwise
3046 */ 3046 */
3047 int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 3047 int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3048 { 3048 {
3049 struct ata_port *ap = link->ap; 3049 struct ata_port *ap = link->ap;
3050 struct ata_device *dev; 3050 struct ata_device *dev;
3051 int rc; 3051 int rc;
3052 3052
3053 /* if data transfer is verified, clear DUBIOUS_XFER on ering top */ 3053 /* if data transfer is verified, clear DUBIOUS_XFER on ering top */
3054 ata_for_each_dev(dev, link, ENABLED) { 3054 ata_for_each_dev(dev, link, ENABLED) {
3055 if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) { 3055 if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
3056 struct ata_ering_entry *ent; 3056 struct ata_ering_entry *ent;
3057 3057
3058 ent = ata_ering_top(&dev->ering); 3058 ent = ata_ering_top(&dev->ering);
3059 if (ent) 3059 if (ent)
3060 ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER; 3060 ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
3061 } 3061 }
3062 } 3062 }
3063 3063
3064 /* has private set_mode? */ 3064 /* has private set_mode? */
3065 if (ap->ops->set_mode) 3065 if (ap->ops->set_mode)
3066 rc = ap->ops->set_mode(link, r_failed_dev); 3066 rc = ap->ops->set_mode(link, r_failed_dev);
3067 else 3067 else
3068 rc = ata_do_set_mode(link, r_failed_dev); 3068 rc = ata_do_set_mode(link, r_failed_dev);
3069 3069
3070 /* if transfer mode has changed, set DUBIOUS_XFER on device */ 3070 /* if transfer mode has changed, set DUBIOUS_XFER on device */
3071 ata_for_each_dev(dev, link, ENABLED) { 3071 ata_for_each_dev(dev, link, ENABLED) {
3072 struct ata_eh_context *ehc = &link->eh_context; 3072 struct ata_eh_context *ehc = &link->eh_context;
3073 u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno]; 3073 u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
3074 u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno)); 3074 u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
3075 3075
3076 if (dev->xfer_mode != saved_xfer_mode || 3076 if (dev->xfer_mode != saved_xfer_mode ||
3077 ata_ncq_enabled(dev) != saved_ncq) 3077 ata_ncq_enabled(dev) != saved_ncq)
3078 dev->flags |= ATA_DFLAG_DUBIOUS_XFER; 3078 dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
3079 } 3079 }
3080 3080
3081 return rc; 3081 return rc;
3082 } 3082 }
3083 3083
3084 /** 3084 /**
3085 * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset 3085 * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
3086 * @dev: ATAPI device to clear UA for 3086 * @dev: ATAPI device to clear UA for
3087 * 3087 *
3088 * Resets and other operations can make an ATAPI device raise 3088 * Resets and other operations can make an ATAPI device raise
3089 * UNIT ATTENTION which causes the next operation to fail. This 3089 * UNIT ATTENTION which causes the next operation to fail. This
3090 * function clears UA. 3090 * function clears UA.
3091 * 3091 *
3092 * LOCKING: 3092 * LOCKING:
3093 * EH context (may sleep). 3093 * EH context (may sleep).
3094 * 3094 *
3095 * RETURNS: 3095 * RETURNS:
3096 * 0 on success, -errno on failure. 3096 * 0 on success, -errno on failure.
3097 */ 3097 */
3098 static int atapi_eh_clear_ua(struct ata_device *dev) 3098 static int atapi_eh_clear_ua(struct ata_device *dev)
3099 { 3099 {
3100 int i; 3100 int i;
3101 3101
3102 for (i = 0; i < ATA_EH_UA_TRIES; i++) { 3102 for (i = 0; i < ATA_EH_UA_TRIES; i++) {
3103 u8 *sense_buffer = dev->link->ap->sector_buf; 3103 u8 *sense_buffer = dev->link->ap->sector_buf;
3104 u8 sense_key = 0; 3104 u8 sense_key = 0;
3105 unsigned int err_mask; 3105 unsigned int err_mask;
3106 3106
3107 err_mask = atapi_eh_tur(dev, &sense_key); 3107 err_mask = atapi_eh_tur(dev, &sense_key);
3108 if (err_mask != 0 && err_mask != AC_ERR_DEV) { 3108 if (err_mask != 0 && err_mask != AC_ERR_DEV) {
3109 ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY " 3109 ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY "
3110 "failed (err_mask=0x%x)\n", err_mask); 3110 "failed (err_mask=0x%x)\n", err_mask);
3111 return -EIO; 3111 return -EIO;
3112 } 3112 }
3113 3113
3114 if (!err_mask || sense_key != UNIT_ATTENTION) 3114 if (!err_mask || sense_key != UNIT_ATTENTION)
3115 return 0; 3115 return 0;
3116 3116
3117 err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key); 3117 err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
3118 if (err_mask) { 3118 if (err_mask) {
3119 ata_dev_printk(dev, KERN_WARNING, "failed to clear " 3119 ata_dev_printk(dev, KERN_WARNING, "failed to clear "
3120 "UNIT ATTENTION (err_mask=0x%x)\n", err_mask); 3120 "UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
3121 return -EIO; 3121 return -EIO;
3122 } 3122 }
3123 } 3123 }
3124 3124
3125 ata_dev_printk(dev, KERN_WARNING, 3125 ata_dev_printk(dev, KERN_WARNING,
3126 "UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES); 3126 "UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES);
3127 3127
3128 return 0; 3128 return 0;
3129 } 3129 }
3130 3130
3131 /** 3131 /**
3132 * ata_eh_maybe_retry_flush - Retry FLUSH if necessary 3132 * ata_eh_maybe_retry_flush - Retry FLUSH if necessary
3133 * @dev: ATA device which may need FLUSH retry 3133 * @dev: ATA device which may need FLUSH retry
3134 * 3134 *
3135 * If @dev failed FLUSH, it needs to be reported to the upper layer 3135 * If @dev failed FLUSH, it needs to be reported to the upper layer
3136 * immediately as it means that @dev failed to remap and has already 3136 * immediately as it means that @dev failed to remap and has already
3137 * lost at least a sector, and further FLUSH retries won't make 3137 * lost at least a sector, and further FLUSH retries won't make
3138 * any difference to the lost sector. However, if FLUSH failed 3138 * any difference to the lost sector. However, if FLUSH failed
3139 * for other reasons, for example transmission error, FLUSH needs 3139 * for other reasons, for example transmission error, FLUSH needs
3140 * to be retried. 3140 * to be retried.
3141 * 3141 *
3142 * This function determines whether FLUSH failure retry is 3142 * This function determines whether FLUSH failure retry is
3143 * necessary and performs it if so. 3143 * necessary and performs it if so.
3144 * 3144 *
3145 * RETURNS: 3145 * RETURNS:
3146 * 0 if EH can continue, -errno if EH needs to be repeated. 3146 * 0 if EH can continue, -errno if EH needs to be repeated.
3147 */ 3147 */
3148 static int ata_eh_maybe_retry_flush(struct ata_device *dev) 3148 static int ata_eh_maybe_retry_flush(struct ata_device *dev)
3149 { 3149 {
3150 struct ata_link *link = dev->link; 3150 struct ata_link *link = dev->link;
3151 struct ata_port *ap = link->ap; 3151 struct ata_port *ap = link->ap;
3152 struct ata_queued_cmd *qc; 3152 struct ata_queued_cmd *qc;
3153 struct ata_taskfile tf; 3153 struct ata_taskfile tf;
3154 unsigned int err_mask; 3154 unsigned int err_mask;
3155 int rc = 0; 3155 int rc = 0;
3156 3156
3157 /* did flush fail for this device? */ 3157 /* did flush fail for this device? */
3158 if (!ata_tag_valid(link->active_tag)) 3158 if (!ata_tag_valid(link->active_tag))
3159 return 0; 3159 return 0;
3160 3160
3161 qc = __ata_qc_from_tag(ap, link->active_tag); 3161 qc = __ata_qc_from_tag(ap, link->active_tag);
3162 if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT && 3162 if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
3163 qc->tf.command != ATA_CMD_FLUSH)) 3163 qc->tf.command != ATA_CMD_FLUSH))
3164 return 0; 3164 return 0;
3165 3165
3166 /* if the device failed it, it should be reported to upper layers */ 3166 /* if the device failed it, it should be reported to upper layers */
3167 if (qc->err_mask & AC_ERR_DEV) 3167 if (qc->err_mask & AC_ERR_DEV)
3168 return 0; 3168 return 0;
3169 3169
3170 /* flush failed for some other reason, give it another shot */ 3170 /* flush failed for some other reason, give it another shot */
3171 ata_tf_init(dev, &tf); 3171 ata_tf_init(dev, &tf);
3172 3172
3173 tf.command = qc->tf.command; 3173 tf.command = qc->tf.command;
3174 tf.flags |= ATA_TFLAG_DEVICE; 3174 tf.flags |= ATA_TFLAG_DEVICE;
3175 tf.protocol = ATA_PROT_NODATA; 3175 tf.protocol = ATA_PROT_NODATA;
3176 3176
3177 ata_dev_printk(dev, KERN_WARNING, "retrying FLUSH 0x%x Emask 0x%x\n", 3177 ata_dev_printk(dev, KERN_WARNING, "retrying FLUSH 0x%x Emask 0x%x\n",
3178 tf.command, qc->err_mask); 3178 tf.command, qc->err_mask);
3179 3179
3180 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 3180 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
3181 if (!err_mask) { 3181 if (!err_mask) {
3182 /* 3182 /*
3183 * FLUSH is complete but there's no way to 3183 * FLUSH is complete but there's no way to
3184 * successfully complete a failed command from EH. 3184 * successfully complete a failed command from EH.
3185 * Making sure retry is allowed at least once and 3185 * Making sure retry is allowed at least once and
3186 * retrying it should do the trick - whatever was in 3186 * retrying it should do the trick - whatever was in
3187 * the cache is already on the platter and this won't 3187 * the cache is already on the platter and this won't
3188 		 * cause an infinite loop. 3188 		 * cause an infinite loop.
3189 */ 3189 */
3190 qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1); 3190 qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
3191 } else { 3191 } else {
3192 ata_dev_printk(dev, KERN_WARNING, "FLUSH failed Emask 0x%x\n", 3192 ata_dev_printk(dev, KERN_WARNING, "FLUSH failed Emask 0x%x\n",
3193 err_mask); 3193 err_mask);
3194 rc = -EIO; 3194 rc = -EIO;
3195 3195
3196 /* if device failed it, report it to upper layers */ 3196 /* if device failed it, report it to upper layers */
3197 if (err_mask & AC_ERR_DEV) { 3197 if (err_mask & AC_ERR_DEV) {
3198 qc->err_mask |= AC_ERR_DEV; 3198 qc->err_mask |= AC_ERR_DEV;
3199 qc->result_tf = tf; 3199 qc->result_tf = tf;
3200 if (!(ap->pflags & ATA_PFLAG_FROZEN)) 3200 if (!(ap->pflags & ATA_PFLAG_FROZEN))
3201 rc = 0; 3201 rc = 0;
3202 } 3202 }
3203 } 3203 }
3204 return rc; 3204 return rc;
3205 } 3205 }
3206 3206
3207 static int ata_link_nr_enabled(struct ata_link *link) 3207 static int ata_link_nr_enabled(struct ata_link *link)
3208 { 3208 {
3209 struct ata_device *dev; 3209 struct ata_device *dev;
3210 int cnt = 0; 3210 int cnt = 0;
3211 3211
3212 ata_for_each_dev(dev, link, ENABLED) 3212 ata_for_each_dev(dev, link, ENABLED)
3213 cnt++; 3213 cnt++;
3214 return cnt; 3214 return cnt;
3215 } 3215 }
3216 3216
3217 static int ata_link_nr_vacant(struct ata_link *link) 3217 static int ata_link_nr_vacant(struct ata_link *link)
3218 { 3218 {
3219 struct ata_device *dev; 3219 struct ata_device *dev;
3220 int cnt = 0; 3220 int cnt = 0;
3221 3221
3222 ata_for_each_dev(dev, link, ALL) 3222 ata_for_each_dev(dev, link, ALL)
3223 if (dev->class == ATA_DEV_UNKNOWN) 3223 if (dev->class == ATA_DEV_UNKNOWN)
3224 cnt++; 3224 cnt++;
3225 return cnt; 3225 return cnt;
3226 } 3226 }
3227 3227
3228 static int ata_eh_skip_recovery(struct ata_link *link) 3228 static int ata_eh_skip_recovery(struct ata_link *link)
3229 { 3229 {
3230 struct ata_port *ap = link->ap; 3230 struct ata_port *ap = link->ap;
3231 struct ata_eh_context *ehc = &link->eh_context; 3231 struct ata_eh_context *ehc = &link->eh_context;
3232 struct ata_device *dev; 3232 struct ata_device *dev;
3233 3233
3234 /* skip disabled links */ 3234 /* skip disabled links */
3235 if (link->flags & ATA_LFLAG_DISABLED) 3235 if (link->flags & ATA_LFLAG_DISABLED)
3236 return 1; 3236 return 1;
3237 3237
3238 /* skip if explicitly requested */
3239 if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3240 return 1;
3241
3238 /* thaw frozen port and recover failed devices */ 3242 /* thaw frozen port and recover failed devices */
3239 if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link)) 3243 if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3240 return 0; 3244 return 0;
3241 3245
3242 /* reset at least once if reset is requested */ 3246 /* reset at least once if reset is requested */
3243 if ((ehc->i.action & ATA_EH_RESET) && 3247 if ((ehc->i.action & ATA_EH_RESET) &&
3244 !(ehc->i.flags & ATA_EHI_DID_RESET)) 3248 !(ehc->i.flags & ATA_EHI_DID_RESET))
3245 return 0; 3249 return 0;
3246 3250
3247 /* skip if class codes for all vacant slots are ATA_DEV_NONE */ 3251 /* skip if class codes for all vacant slots are ATA_DEV_NONE */
3248 ata_for_each_dev(dev, link, ALL) { 3252 ata_for_each_dev(dev, link, ALL) {
3249 if (dev->class == ATA_DEV_UNKNOWN && 3253 if (dev->class == ATA_DEV_UNKNOWN &&
3250 ehc->classes[dev->devno] != ATA_DEV_NONE) 3254 ehc->classes[dev->devno] != ATA_DEV_NONE)
3251 return 0; 3255 return 0;
3252 } 3256 }
3253 3257
3254 return 1; 3258 return 1;
3255 } 3259 }
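The ATA_EHI_NO_RECOVERY test added above is the consumer side of this merge's "skip EH autopsy and recovery during suspend" change; the suspend path is expected to arm the flag before EH runs. A hedged sketch of that producer (the exact hook point in the suspend path is an assumption):

	static void example_mark_suspend_eh(struct ata_port *ap)
	{
		struct ata_eh_context *ehc = &ap->link.eh_context;

		/* assumed placement; see the suspend patch in this merge */
		ehc->i.flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
	}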
3256 3260
3257 static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg) 3261 static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3258 { 3262 {
3259 u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL); 3263 u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3260 u64 now = get_jiffies_64(); 3264 u64 now = get_jiffies_64();
3261 int *trials = void_arg; 3265 int *trials = void_arg;
3262 3266
3263 if (ent->timestamp < now - min(now, interval)) 3267 if (ent->timestamp < now - min(now, interval))
3264 return -1; 3268 return -1;
3265 3269
3266 (*trials)++; 3270 (*trials)++;
3267 return 0; 3271 return 0;
3268 } 3272 }
3269 3273
3270 static int ata_eh_schedule_probe(struct ata_device *dev) 3274 static int ata_eh_schedule_probe(struct ata_device *dev)
3271 { 3275 {
3272 struct ata_eh_context *ehc = &dev->link->eh_context; 3276 struct ata_eh_context *ehc = &dev->link->eh_context;
3273 struct ata_link *link = ata_dev_phys_link(dev); 3277 struct ata_link *link = ata_dev_phys_link(dev);
3274 int trials = 0; 3278 int trials = 0;
3275 3279
3276 if (!(ehc->i.probe_mask & (1 << dev->devno)) || 3280 if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
3277 (ehc->did_probe_mask & (1 << dev->devno))) 3281 (ehc->did_probe_mask & (1 << dev->devno)))
3278 return 0; 3282 return 0;
3279 3283
3280 ata_eh_detach_dev(dev); 3284 ata_eh_detach_dev(dev);
3281 ata_dev_init(dev); 3285 ata_dev_init(dev);
3282 ehc->did_probe_mask |= (1 << dev->devno); 3286 ehc->did_probe_mask |= (1 << dev->devno);
3283 ehc->i.action |= ATA_EH_RESET; 3287 ehc->i.action |= ATA_EH_RESET;
3284 ehc->saved_xfer_mode[dev->devno] = 0; 3288 ehc->saved_xfer_mode[dev->devno] = 0;
3285 ehc->saved_ncq_enabled &= ~(1 << dev->devno); 3289 ehc->saved_ncq_enabled &= ~(1 << dev->devno);
3286 3290
3287 /* Record and count probe trials on the ering. The specific 3291 /* Record and count probe trials on the ering. The specific
3288 * error mask used is irrelevant. Because a successful device 3292 * error mask used is irrelevant. Because a successful device
3289 * detection clears the ering, this count accumulates only if 3293 * detection clears the ering, this count accumulates only if
3290 * there are consecutive failed probes. 3294 * there are consecutive failed probes.
3291 * 3295 *
3292 * If the count exceeds ATA_EH_PROBE_TRIALS within the last 3296 * If the count exceeds ATA_EH_PROBE_TRIALS within the last
3293 * ATA_EH_PROBE_TRIAL_INTERVAL, the link speed is 3297 * ATA_EH_PROBE_TRIAL_INTERVAL, the link speed is
3294 * forced to 1.5Gbps. 3298 * forced to 1.5Gbps.
3295 * 3299 *
3296 * This is to work around cases where failed link speed 3300 * This is to work around cases where failed link speed
3297 * negotiation results in device misdetection leading to 3301 * negotiation results in device misdetection leading to
3298 * infinite DEVXCHG or PHRDY CHG events. 3302 * infinite DEVXCHG or PHRDY CHG events.
3299 */ 3303 */
3300 ata_ering_record(&dev->ering, 0, AC_ERR_OTHER); 3304 ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
3301 ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials); 3305 ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
3302 3306
3303 if (trials > ATA_EH_PROBE_TRIALS) 3307 if (trials > ATA_EH_PROBE_TRIALS)
3304 sata_down_spd_limit(link, 1); 3308 sata_down_spd_limit(link, 1);
3305 3309
3306 return 1; 3310 return 1;
3307 } 3311 }
3308 3312
3309 static int ata_eh_handle_dev_fail(struct ata_device *dev, int err) 3313 static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
3310 { 3314 {
3311 struct ata_eh_context *ehc = &dev->link->eh_context; 3315 struct ata_eh_context *ehc = &dev->link->eh_context;
3312 3316
3313 /* -EAGAIN from EH routine indicates retry without prejudice. 3317 /* -EAGAIN from EH routine indicates retry without prejudice.
3314 * The requester is responsible for ensuring forward progress. 3318 * The requester is responsible for ensuring forward progress.
3315 */ 3319 */
3316 if (err != -EAGAIN) 3320 if (err != -EAGAIN)
3317 ehc->tries[dev->devno]--; 3321 ehc->tries[dev->devno]--;
3318 3322
3319 switch (err) { 3323 switch (err) {
3320 case -ENODEV: 3324 case -ENODEV:
3321 /* device missing or wrong IDENTIFY data, schedule probing */ 3325 /* device missing or wrong IDENTIFY data, schedule probing */
3322 ehc->i.probe_mask |= (1 << dev->devno); 3326 ehc->i.probe_mask |= (1 << dev->devno);
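		/* fall through */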
3323 case -EINVAL: 3327 case -EINVAL:
3324 /* give it just one more chance */ 3328 /* give it just one more chance */
3325 ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); 3329 ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
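		/* fall through */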
3326 case -EIO: 3330 case -EIO:
3327 if (ehc->tries[dev->devno] == 1) { 3331 if (ehc->tries[dev->devno] == 1) {
3328 /* This is the last chance, better to slow 3332 /* This is the last chance, better to slow
3329 * down than lose it. 3333 * down than lose it.
3330 */ 3334 */
3331 sata_down_spd_limit(ata_dev_phys_link(dev), 0); 3335 sata_down_spd_limit(ata_dev_phys_link(dev), 0);
3332 if (dev->pio_mode > XFER_PIO_0) 3336 if (dev->pio_mode > XFER_PIO_0)
3333 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 3337 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
3334 } 3338 }
3335 } 3339 }
3336 3340
3337 if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) { 3341 if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
3338 /* disable device if it has used up all its chances */ 3342 /* disable device if it has used up all its chances */
3339 ata_dev_disable(dev); 3343 ata_dev_disable(dev);
3340 3344
3341 /* detach if offline */ 3345 /* detach if offline */
3342 if (ata_phys_link_offline(ata_dev_phys_link(dev))) 3346 if (ata_phys_link_offline(ata_dev_phys_link(dev)))
3343 ata_eh_detach_dev(dev); 3347 ata_eh_detach_dev(dev);
3344 3348
3345 /* schedule probe if necessary */ 3349 /* schedule probe if necessary */
3346 if (ata_eh_schedule_probe(dev)) { 3350 if (ata_eh_schedule_probe(dev)) {
3347 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 3351 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3348 memset(ehc->cmd_timeout_idx[dev->devno], 0, 3352 memset(ehc->cmd_timeout_idx[dev->devno], 0,
3349 sizeof(ehc->cmd_timeout_idx[dev->devno])); 3353 sizeof(ehc->cmd_timeout_idx[dev->devno]));
3350 } 3354 }
3351 3355
3352 return 1; 3356 return 1;
3353 } else { 3357 } else {
3354 ehc->i.action |= ATA_EH_RESET; 3358 ehc->i.action |= ATA_EH_RESET;
3355 return 0; 3359 return 0;
3356 } 3360 }
3357 } 3361 }
3358 3362
3359 /** 3363 /**
3360 * ata_eh_recover - recover host port after error 3364 * ata_eh_recover - recover host port after error
3361 * @ap: host port to recover 3365 * @ap: host port to recover
3362 * @prereset: prereset method (can be NULL) 3366 * @prereset: prereset method (can be NULL)
3363 * @softreset: softreset method (can be NULL) 3367 * @softreset: softreset method (can be NULL)
3364 * @hardreset: hardreset method (can be NULL) 3368 * @hardreset: hardreset method (can be NULL)
3365 * @postreset: postreset method (can be NULL) 3369 * @postreset: postreset method (can be NULL)
3366 * @r_failed_link: out parameter for failed link 3370 * @r_failed_link: out parameter for failed link
3367 * 3371 *
3368 * This is the alpha and omega, yin and yang, heart and soul of 3372 * This is the alpha and omega, yin and yang, heart and soul of
3369 * libata exception handling. On entry, actions required to 3373 * libata exception handling. On entry, actions required to
3370 * recover each link and hotplug requests are recorded in the 3374 * recover each link and hotplug requests are recorded in the
3371 * link's eh_context. This function executes all the operations 3375 * link's eh_context. This function executes all the operations
3372 * with appropriate retries and fallbacks to resurrect failed 3376 * with appropriate retries and fallbacks to resurrect failed
3373 * devices, detach goners and greet newcomers. 3377 * devices, detach goners and greet newcomers.
3374 * 3378 *
3375 * LOCKING: 3379 * LOCKING:
3376 * Kernel thread context (may sleep). 3380 * Kernel thread context (may sleep).
3377 * 3381 *
3378 * RETURNS: 3382 * RETURNS:
3379 * 0 on success, -errno on failure. 3383 * 0 on success, -errno on failure.
3380 */ 3384 */
3381 int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, 3385 int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3382 ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 3386 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3383 ata_postreset_fn_t postreset, 3387 ata_postreset_fn_t postreset,
3384 struct ata_link **r_failed_link) 3388 struct ata_link **r_failed_link)
3385 { 3389 {
3386 struct ata_link *link; 3390 struct ata_link *link;
3387 struct ata_device *dev; 3391 struct ata_device *dev;
3388 int nr_failed_devs; 3392 int nr_failed_devs;
3389 int rc; 3393 int rc;
3390 unsigned long flags, deadline; 3394 unsigned long flags, deadline;
3391 3395
3392 DPRINTK("ENTER\n"); 3396 DPRINTK("ENTER\n");
3393 3397
3394 /* prep for recovery */ 3398 /* prep for recovery */
3395 ata_for_each_link(link, ap, EDGE) { 3399 ata_for_each_link(link, ap, EDGE) {
3396 struct ata_eh_context *ehc = &link->eh_context; 3400 struct ata_eh_context *ehc = &link->eh_context;
3397 3401
3398 /* re-enable link? */ 3402 /* re-enable link? */
3399 if (ehc->i.action & ATA_EH_ENABLE_LINK) { 3403 if (ehc->i.action & ATA_EH_ENABLE_LINK) {
3400 ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK); 3404 ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
3401 spin_lock_irqsave(ap->lock, flags); 3405 spin_lock_irqsave(ap->lock, flags);
3402 link->flags &= ~ATA_LFLAG_DISABLED; 3406 link->flags &= ~ATA_LFLAG_DISABLED;
3403 spin_unlock_irqrestore(ap->lock, flags); 3407 spin_unlock_irqrestore(ap->lock, flags);
3404 ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK); 3408 ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
3405 } 3409 }
3406 3410
3407 ata_for_each_dev(dev, link, ALL) { 3411 ata_for_each_dev(dev, link, ALL) {
3408 if (link->flags & ATA_LFLAG_NO_RETRY) 3412 if (link->flags & ATA_LFLAG_NO_RETRY)
3409 ehc->tries[dev->devno] = 1; 3413 ehc->tries[dev->devno] = 1;
3410 else 3414 else
3411 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 3415 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3412 3416
3413 /* collect port action mask recorded in dev actions */ 3417 /* collect port action mask recorded in dev actions */
3414 ehc->i.action |= ehc->i.dev_action[dev->devno] & 3418 ehc->i.action |= ehc->i.dev_action[dev->devno] &
3415 ~ATA_EH_PERDEV_MASK; 3419 ~ATA_EH_PERDEV_MASK;
3416 ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK; 3420 ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;
3417 3421
3418 /* process hotplug request */ 3422 /* process hotplug request */
3419 if (dev->flags & ATA_DFLAG_DETACH) 3423 if (dev->flags & ATA_DFLAG_DETACH)
3420 ata_eh_detach_dev(dev); 3424 ata_eh_detach_dev(dev);
3421 3425
3422 /* schedule probe if necessary */ 3426 /* schedule probe if necessary */
3423 if (!ata_dev_enabled(dev)) 3427 if (!ata_dev_enabled(dev))
3424 ata_eh_schedule_probe(dev); 3428 ata_eh_schedule_probe(dev);
3425 } 3429 }
3426 } 3430 }
3427 3431
3428 retry: 3432 retry:
3429 rc = 0; 3433 rc = 0;
3430 nr_failed_devs = 0; 3434 nr_failed_devs = 0;
3431 3435
3432 /* if UNLOADING, finish immediately */ 3436 /* if UNLOADING, finish immediately */
3433 if (ap->pflags & ATA_PFLAG_UNLOADING) 3437 if (ap->pflags & ATA_PFLAG_UNLOADING)
3434 goto out; 3438 goto out;
3435 3439
3436 /* prep for EH */ 3440 /* prep for EH */
3437 ata_for_each_link(link, ap, EDGE) { 3441 ata_for_each_link(link, ap, EDGE) {
3438 struct ata_eh_context *ehc = &link->eh_context; 3442 struct ata_eh_context *ehc = &link->eh_context;
3439 3443
3440 /* skip EH if possible. */ 3444 /* skip EH if possible. */
3441 if (ata_eh_skip_recovery(link)) 3445 if (ata_eh_skip_recovery(link))
3442 ehc->i.action = 0; 3446 ehc->i.action = 0;
3443 3447
3444 ata_for_each_dev(dev, link, ALL) 3448 ata_for_each_dev(dev, link, ALL)
3445 ehc->classes[dev->devno] = ATA_DEV_UNKNOWN; 3449 ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
3446 } 3450 }
3447 3451
3448 /* reset */ 3452 /* reset */
3449 ata_for_each_link(link, ap, EDGE) { 3453 ata_for_each_link(link, ap, EDGE) {
3450 struct ata_eh_context *ehc = &link->eh_context; 3454 struct ata_eh_context *ehc = &link->eh_context;
3451 3455
3452 if (!(ehc->i.action & ATA_EH_RESET)) 3456 if (!(ehc->i.action & ATA_EH_RESET))
3453 continue; 3457 continue;
3454 3458
3455 rc = ata_eh_reset(link, ata_link_nr_vacant(link), 3459 rc = ata_eh_reset(link, ata_link_nr_vacant(link),
3456 prereset, softreset, hardreset, postreset); 3460 prereset, softreset, hardreset, postreset);
3457 if (rc) { 3461 if (rc) {
3458 ata_link_printk(link, KERN_ERR, 3462 ata_link_printk(link, KERN_ERR,
3459 "reset failed, giving up\n"); 3463 "reset failed, giving up\n");
3460 goto out; 3464 goto out;
3461 } 3465 }
3462 } 3466 }
3463 3467
3464 do { 3468 do {
3465 unsigned long now; 3469 unsigned long now;
3466 3470
3467 /* 3471 /*
3468 * clears ATA_EH_PARK in eh_info and resets 3472 * clears ATA_EH_PARK in eh_info and resets
3469 * ap->park_req_pending 3473 * ap->park_req_pending
3470 */ 3474 */
3471 ata_eh_pull_park_action(ap); 3475 ata_eh_pull_park_action(ap);
3472 3476
3473 deadline = jiffies; 3477 deadline = jiffies;
3474 ata_for_each_link(link, ap, EDGE) { 3478 ata_for_each_link(link, ap, EDGE) {
3475 ata_for_each_dev(dev, link, ALL) { 3479 ata_for_each_dev(dev, link, ALL) {
3476 struct ata_eh_context *ehc = &link->eh_context; 3480 struct ata_eh_context *ehc = &link->eh_context;
3477 unsigned long tmp; 3481 unsigned long tmp;
3478 3482
3479 if (dev->class != ATA_DEV_ATA) 3483 if (dev->class != ATA_DEV_ATA)
3480 continue; 3484 continue;
3481 if (!(ehc->i.dev_action[dev->devno] & 3485 if (!(ehc->i.dev_action[dev->devno] &
3482 ATA_EH_PARK)) 3486 ATA_EH_PARK))
3483 continue; 3487 continue;
3484 tmp = dev->unpark_deadline; 3488 tmp = dev->unpark_deadline;
3485 if (time_before(deadline, tmp)) 3489 if (time_before(deadline, tmp))
3486 deadline = tmp; 3490 deadline = tmp;
3487 else if (time_before_eq(tmp, jiffies)) 3491 else if (time_before_eq(tmp, jiffies))
3488 continue; 3492 continue;
3489 if (ehc->unloaded_mask & (1 << dev->devno)) 3493 if (ehc->unloaded_mask & (1 << dev->devno))
3490 continue; 3494 continue;
3491 3495
3492 ata_eh_park_issue_cmd(dev, 1); 3496 ata_eh_park_issue_cmd(dev, 1);
3493 } 3497 }
3494 } 3498 }
3495 3499
3496 now = jiffies; 3500 now = jiffies;
3497 if (time_before_eq(deadline, now)) 3501 if (time_before_eq(deadline, now))
3498 break; 3502 break;
3499 3503
3500 deadline = wait_for_completion_timeout(&ap->park_req_pending, 3504 deadline = wait_for_completion_timeout(&ap->park_req_pending,
3501 deadline - now); 3505 deadline - now);
3502 } while (deadline); 3506 } while (deadline);
3503 ata_for_each_link(link, ap, EDGE) { 3507 ata_for_each_link(link, ap, EDGE) {
3504 ata_for_each_dev(dev, link, ALL) { 3508 ata_for_each_dev(dev, link, ALL) {
3505 if (!(link->eh_context.unloaded_mask & 3509 if (!(link->eh_context.unloaded_mask &
3506 (1 << dev->devno))) 3510 (1 << dev->devno)))
3507 continue; 3511 continue;
3508 3512
3509 ata_eh_park_issue_cmd(dev, 0); 3513 ata_eh_park_issue_cmd(dev, 0);
3510 ata_eh_done(link, dev, ATA_EH_PARK); 3514 ata_eh_done(link, dev, ATA_EH_PARK);
3511 } 3515 }
3512 } 3516 }
3513 3517
3514 /* the rest */ 3518 /* the rest */
3515 ata_for_each_link(link, ap, EDGE) { 3519 ata_for_each_link(link, ap, EDGE) {
3516 struct ata_eh_context *ehc = &link->eh_context; 3520 struct ata_eh_context *ehc = &link->eh_context;
3517 3521
3518 /* revalidate existing devices and attach new ones */ 3522 /* revalidate existing devices and attach new ones */
3519 rc = ata_eh_revalidate_and_attach(link, &dev); 3523 rc = ata_eh_revalidate_and_attach(link, &dev);
3520 if (rc) 3524 if (rc)
3521 goto dev_fail; 3525 goto dev_fail;
3522 3526
3523 /* if PMP got attached, return, pmp EH will take care of it */ 3527 /* if PMP got attached, return, pmp EH will take care of it */
3524 if (link->device->class == ATA_DEV_PMP) { 3528 if (link->device->class == ATA_DEV_PMP) {
3525 ehc->i.action = 0; 3529 ehc->i.action = 0;
3526 return 0; 3530 return 0;
3527 } 3531 }
3528 3532
3529 /* configure transfer mode if necessary */ 3533 /* configure transfer mode if necessary */
3530 if (ehc->i.flags & ATA_EHI_SETMODE) { 3534 if (ehc->i.flags & ATA_EHI_SETMODE) {
3531 rc = ata_set_mode(link, &dev); 3535 rc = ata_set_mode(link, &dev);
3532 if (rc) 3536 if (rc)
3533 goto dev_fail; 3537 goto dev_fail;
3534 ehc->i.flags &= ~ATA_EHI_SETMODE; 3538 ehc->i.flags &= ~ATA_EHI_SETMODE;
3535 } 3539 }
3536 3540
3537 /* If reset has been issued, clear UA to avoid 3541 /* If reset has been issued, clear UA to avoid
3538 * disrupting the current users of the device. 3542 * disrupting the current users of the device.
3539 */ 3543 */
3540 if (ehc->i.flags & ATA_EHI_DID_RESET) { 3544 if (ehc->i.flags & ATA_EHI_DID_RESET) {
3541 ata_for_each_dev(dev, link, ALL) { 3545 ata_for_each_dev(dev, link, ALL) {
3542 if (dev->class != ATA_DEV_ATAPI) 3546 if (dev->class != ATA_DEV_ATAPI)
3543 continue; 3547 continue;
3544 rc = atapi_eh_clear_ua(dev); 3548 rc = atapi_eh_clear_ua(dev);
3545 if (rc) 3549 if (rc)
3546 goto dev_fail; 3550 goto dev_fail;
3547 } 3551 }
3548 } 3552 }
3549 3553
3550 /* retry flush if necessary */ 3554 /* retry flush if necessary */
3551 ata_for_each_dev(dev, link, ALL) { 3555 ata_for_each_dev(dev, link, ALL) {
3552 if (dev->class != ATA_DEV_ATA) 3556 if (dev->class != ATA_DEV_ATA)
3553 continue; 3557 continue;
3554 rc = ata_eh_maybe_retry_flush(dev); 3558 rc = ata_eh_maybe_retry_flush(dev);
3555 if (rc) 3559 if (rc)
3556 goto dev_fail; 3560 goto dev_fail;
3557 } 3561 }
3558 3562
3559 /* configure link power saving */ 3563 /* configure link power saving */
3560 if (ehc->i.action & ATA_EH_LPM) 3564 if (ehc->i.action & ATA_EH_LPM)
3561 ata_for_each_dev(dev, link, ALL) 3565 ata_for_each_dev(dev, link, ALL)
3562 ata_dev_enable_pm(dev, ap->pm_policy); 3566 ata_dev_enable_pm(dev, ap->pm_policy);
3563 3567
3564 /* this link is okay now */ 3568 /* this link is okay now */
3565 ehc->i.flags = 0; 3569 ehc->i.flags = 0;
3566 continue; 3570 continue;
3567 3571
3568 dev_fail: 3572 dev_fail:
3569 nr_failed_devs++; 3573 nr_failed_devs++;
3570 ata_eh_handle_dev_fail(dev, rc); 3574 ata_eh_handle_dev_fail(dev, rc);
3571 3575
3572 if (ap->pflags & ATA_PFLAG_FROZEN) { 3576 if (ap->pflags & ATA_PFLAG_FROZEN) {
3573 /* PMP reset requires working host port. 3577 /* PMP reset requires working host port.
3574 * Can't retry if it's frozen. 3578 * Can't retry if it's frozen.
3575 */ 3579 */
3576 if (sata_pmp_attached(ap)) 3580 if (sata_pmp_attached(ap))
3577 goto out; 3581 goto out;
3578 break; 3582 break;
3579 } 3583 }
3580 } 3584 }
3581 3585
3582 if (nr_failed_devs) 3586 if (nr_failed_devs)
3583 goto retry; 3587 goto retry;
3584 3588
3585 out: 3589 out:
3586 if (rc && r_failed_link) 3590 if (rc && r_failed_link)
3587 *r_failed_link = link; 3591 *r_failed_link = link;
3588 3592
3589 DPRINTK("EXIT, rc=%d\n", rc); 3593 DPRINTK("EXIT, rc=%d\n", rc);
3590 return rc; 3594 return rc;
3591 } 3595 }
3592 3596
3593 /** 3597 /**
3594 * ata_eh_finish - finish up EH 3598 * ata_eh_finish - finish up EH
3595 * @ap: host port to finish EH for 3599 * @ap: host port to finish EH for
3596 * 3600 *
3597 * Recovery is complete. Clean up EH states and retry or finish 3601 * Recovery is complete. Clean up EH states and retry or finish
3598 * failed qcs. 3602 * failed qcs.
3599 * 3603 *
3600 * LOCKING: 3604 * LOCKING:
3601 * None. 3605 * None.
3602 */ 3606 */
3603 void ata_eh_finish(struct ata_port *ap) 3607 void ata_eh_finish(struct ata_port *ap)
3604 { 3608 {
3605 int tag; 3609 int tag;
3606 3610
3607 /* retry or finish qcs */ 3611 /* retry or finish qcs */
3608 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 3612 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
3609 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 3613 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
3610 3614
3611 if (!(qc->flags & ATA_QCFLAG_FAILED)) 3615 if (!(qc->flags & ATA_QCFLAG_FAILED))
3612 continue; 3616 continue;
3613 3617
3614 if (qc->err_mask) { 3618 if (qc->err_mask) {
3615 /* FIXME: Once EH migration is complete, 3619 /* FIXME: Once EH migration is complete,
3616 * generate sense data in this function, 3620 * generate sense data in this function,
3617 * considering both err_mask and tf. 3621 * considering both err_mask and tf.
3618 */ 3622 */
3619 if (qc->flags & ATA_QCFLAG_RETRY) 3623 if (qc->flags & ATA_QCFLAG_RETRY)
3620 ata_eh_qc_retry(qc); 3624 ata_eh_qc_retry(qc);
3621 else 3625 else
3622 ata_eh_qc_complete(qc); 3626 ata_eh_qc_complete(qc);
3623 } else { 3627 } else {
3624 if (qc->flags & ATA_QCFLAG_SENSE_VALID) { 3628 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
3625 ata_eh_qc_complete(qc); 3629 ata_eh_qc_complete(qc);
3626 } else { 3630 } else {
3627 /* feed zero TF to sense generation */ 3631 /* feed zero TF to sense generation */
3628 memset(&qc->result_tf, 0, sizeof(qc->result_tf)); 3632 memset(&qc->result_tf, 0, sizeof(qc->result_tf));
3629 ata_eh_qc_retry(qc); 3633 ata_eh_qc_retry(qc);
3630 } 3634 }
3631 } 3635 }
3632 } 3636 }
3633 3637
3634 /* make sure nr_active_links is zero after EH */ 3638 /* make sure nr_active_links is zero after EH */
3635 WARN_ON(ap->nr_active_links); 3639 WARN_ON(ap->nr_active_links);
3636 ap->nr_active_links = 0; 3640 ap->nr_active_links = 0;
3637 } 3641 }
3638 3642
3639 /** 3643 /**
3640 * ata_do_eh - do standard error handling 3644 * ata_do_eh - do standard error handling
3641 * @ap: host port to handle error for 3645 * @ap: host port to handle error for
3642 * 3646 *
3643 * @prereset: prereset method (can be NULL) 3647 * @prereset: prereset method (can be NULL)
3644 * @softreset: softreset method (can be NULL) 3648 * @softreset: softreset method (can be NULL)
3645 * @hardreset: hardreset method (can be NULL) 3649 * @hardreset: hardreset method (can be NULL)
3646 * @postreset: postreset method (can be NULL) 3650 * @postreset: postreset method (can be NULL)
3647 * 3651 *
3648 * Perform standard error handling sequence. 3652 * Perform standard error handling sequence.
3649 * 3653 *
3650 * LOCKING: 3654 * LOCKING:
3651 * Kernel thread context (may sleep). 3655 * Kernel thread context (may sleep).
3652 */ 3656 */
3653 void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, 3657 void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
3654 ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 3658 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3655 ata_postreset_fn_t postreset) 3659 ata_postreset_fn_t postreset)
3656 { 3660 {
3657 struct ata_device *dev; 3661 struct ata_device *dev;
3658 int rc; 3662 int rc;
3659 3663
3660 ata_eh_autopsy(ap); 3664 ata_eh_autopsy(ap);
3661 ata_eh_report(ap); 3665 ata_eh_report(ap);
3662 3666
3663 rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset, 3667 rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
3664 NULL); 3668 NULL);
3665 if (rc) { 3669 if (rc) {
3666 ata_for_each_dev(dev, &ap->link, ALL) 3670 ata_for_each_dev(dev, &ap->link, ALL)
3667 ata_dev_disable(dev); 3671 ata_dev_disable(dev);
3668 } 3672 }
3669 3673
3670 ata_eh_finish(ap); 3674 ata_eh_finish(ap);
3671 } 3675 }
3672 3676
3673 /** 3677 /**
3674 * ata_std_error_handler - standard error handler 3678 * ata_std_error_handler - standard error handler
3675 * @ap: host port to handle error for 3679 * @ap: host port to handle error for
3676 * 3680 *
3677 * Standard error handler 3681 * Standard error handler
3678 * 3682 *
3679 * LOCKING: 3683 * LOCKING:
3680 * Kernel thread context (may sleep). 3684 * Kernel thread context (may sleep).
3681 */ 3685 */
3682 void ata_std_error_handler(struct ata_port *ap) 3686 void ata_std_error_handler(struct ata_port *ap)
3683 { 3687 {
3684 struct ata_port_operations *ops = ap->ops; 3688 struct ata_port_operations *ops = ap->ops;
3685 ata_reset_fn_t hardreset = ops->hardreset; 3689 ata_reset_fn_t hardreset = ops->hardreset;
3686 3690
3687 /* ignore built-in hardreset if SCR access is not available */ 3691 /* ignore built-in hardreset if SCR access is not available */
3688 if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link)) 3692 if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
3689 hardreset = NULL; 3693 hardreset = NULL;
3690 3694
3691 ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset); 3695 ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
3692 } 3696 }
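A minimal sketch of how a low-level driver might wire this handler into
its port operations (the structure name is illustrative, not taken from
this commit; the referenced ops and helpers are the ones used above):

	static struct ata_port_operations my_port_ops = {
		.inherits	= &ata_base_port_ops,
		.hardreset	= sata_std_hardreset,
		.error_handler	= ata_std_error_handler,
	};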
3693 3697
3694 #ifdef CONFIG_PM 3698 #ifdef CONFIG_PM
3695 /** 3699 /**
3696 * ata_eh_handle_port_suspend - perform port suspend operation 3700 * ata_eh_handle_port_suspend - perform port suspend operation
3697 * @ap: port to suspend 3701 * @ap: port to suspend
3698 * 3702 *
3699 * Suspend @ap. 3703 * Suspend @ap.
3700 * 3704 *
3701 * LOCKING: 3705 * LOCKING:
3702 * Kernel thread context (may sleep). 3706 * Kernel thread context (may sleep).
3703 */ 3707 */
3704 static void ata_eh_handle_port_suspend(struct ata_port *ap) 3708 static void ata_eh_handle_port_suspend(struct ata_port *ap)
3705 { 3709 {
3706 unsigned long flags; 3710 unsigned long flags;
3707 int rc = 0; 3711 int rc = 0;
3708 3712
3709 /* are we suspending? */ 3713 /* are we suspending? */
3710 spin_lock_irqsave(ap->lock, flags); 3714 spin_lock_irqsave(ap->lock, flags);
3711 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || 3715 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
3712 ap->pm_mesg.event == PM_EVENT_ON) { 3716 ap->pm_mesg.event == PM_EVENT_ON) {
3713 spin_unlock_irqrestore(ap->lock, flags); 3717 spin_unlock_irqrestore(ap->lock, flags);
3714 return; 3718 return;
3715 } 3719 }
3716 spin_unlock_irqrestore(ap->lock, flags); 3720 spin_unlock_irqrestore(ap->lock, flags);
3717 3721
3718 WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED); 3722 WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
3719 3723
3720 /* tell ACPI we're suspending */ 3724 /* tell ACPI we're suspending */
3721 rc = ata_acpi_on_suspend(ap); 3725 rc = ata_acpi_on_suspend(ap);
3722 if (rc) 3726 if (rc)
3723 goto out; 3727 goto out;
3724 3728
3725 /* suspend */ 3729 /* suspend */
3726 ata_eh_freeze_port(ap); 3730 ata_eh_freeze_port(ap);
3727 3731
3728 if (ap->ops->port_suspend) 3732 if (ap->ops->port_suspend)
3729 rc = ap->ops->port_suspend(ap, ap->pm_mesg); 3733 rc = ap->ops->port_suspend(ap, ap->pm_mesg);
3730 3734
3731 ata_acpi_set_state(ap, PMSG_SUSPEND); 3735 ata_acpi_set_state(ap, PMSG_SUSPEND);
3732 out: 3736 out:
3733 /* report result */ 3737 /* report result */
3734 spin_lock_irqsave(ap->lock, flags); 3738 spin_lock_irqsave(ap->lock, flags);
3735 3739
3736 ap->pflags &= ~ATA_PFLAG_PM_PENDING; 3740 ap->pflags &= ~ATA_PFLAG_PM_PENDING;
3737 if (rc == 0) 3741 if (rc == 0)
3738 ap->pflags |= ATA_PFLAG_SUSPENDED; 3742 ap->pflags |= ATA_PFLAG_SUSPENDED;
3739 else if (ap->pflags & ATA_PFLAG_FROZEN) 3743 else if (ap->pflags & ATA_PFLAG_FROZEN)
3740 ata_port_schedule_eh(ap); 3744 ata_port_schedule_eh(ap);
3741 3745
3742 if (ap->pm_result) { 3746 if (ap->pm_result) {
3743 *ap->pm_result = rc; 3747 *ap->pm_result = rc;
3744 ap->pm_result = NULL; 3748 ap->pm_result = NULL;
3745 } 3749 }
3746 3750
3747 spin_unlock_irqrestore(ap->lock, flags); 3751 spin_unlock_irqrestore(ap->lock, flags);
3748 3752
3749 return; 3753 return;
3750 } 3754 }
3751 3755
3752 /** 3756 /**
3753 * ata_eh_handle_port_resume - perform port resume operation 3757 * ata_eh_handle_port_resume - perform port resume operation
3754 * @ap: port to resume 3758 * @ap: port to resume
3755 * 3759 *
3756 * Resume @ap. 3760 * Resume @ap.
3757 * 3761 *
3758 * LOCKING: 3762 * LOCKING:
3759 * Kernel thread context (may sleep). 3763 * Kernel thread context (may sleep).
3760 */ 3764 */
3761 static void ata_eh_handle_port_resume(struct ata_port *ap) 3765 static void ata_eh_handle_port_resume(struct ata_port *ap)
3762 { 3766 {
3763 struct ata_link *link; 3767 struct ata_link *link;
3764 struct ata_device *dev; 3768 struct ata_device *dev;
3765 unsigned long flags; 3769 unsigned long flags;
3766 int rc = 0; 3770 int rc = 0;
3767 3771
3768 /* are we resuming? */ 3772 /* are we resuming? */
3769 spin_lock_irqsave(ap->lock, flags); 3773 spin_lock_irqsave(ap->lock, flags);
3770 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) || 3774 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
3771 ap->pm_mesg.event != PM_EVENT_ON) { 3775 ap->pm_mesg.event != PM_EVENT_ON) {
3772 spin_unlock_irqrestore(ap->lock, flags); 3776 spin_unlock_irqrestore(ap->lock, flags);
3773 return; 3777 return;
3774 } 3778 }
3775 spin_unlock_irqrestore(ap->lock, flags); 3779 spin_unlock_irqrestore(ap->lock, flags);
3776 3780
3777 WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED)); 3781 WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
3778 3782
3779 /* 3783 /*
3780 * Error timestamps are in jiffies, which doesn't advance while 3784 * Error timestamps are in jiffies, which doesn't advance while
3781 * suspended, and PHY events during resume aren't too uncommon. 3785 * suspended, and PHY events during resume aren't too uncommon.
3782 * When the two are combined, it can lead to unnecessary speed 3786 * When the two are combined, it can lead to unnecessary speed
3783 * downs if the machine is suspended and resumed repeatedly. 3787 * downs if the machine is suspended and resumed repeatedly.
3784 * Clear error history. 3788 * Clear error history.
3785 */ 3789 */
3786 ata_for_each_link(link, ap, HOST_FIRST) 3790 ata_for_each_link(link, ap, HOST_FIRST)
3787 ata_for_each_dev(dev, link, ALL) 3791 ata_for_each_dev(dev, link, ALL)
3788 ata_ering_clear(&dev->ering); 3792 ata_ering_clear(&dev->ering);
3789 3793
3790 ata_acpi_set_state(ap, PMSG_ON); 3794 ata_acpi_set_state(ap, PMSG_ON);
3791 3795
3792 if (ap->ops->port_resume) 3796 if (ap->ops->port_resume)
3793 rc = ap->ops->port_resume(ap); 3797 rc = ap->ops->port_resume(ap);
3794 3798
3795 /* tell ACPI that we're resuming */ 3799 /* tell ACPI that we're resuming */
3796 ata_acpi_on_resume(ap); 3800 ata_acpi_on_resume(ap);
3797 3801
3798 /* report result */ 3802 /* report result */
3799 spin_lock_irqsave(ap->lock, flags); 3803 spin_lock_irqsave(ap->lock, flags);
3800 ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED); 3804 ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
3801 if (ap->pm_result) { 3805 if (ap->pm_result) {
3802 *ap->pm_result = rc; 3806 *ap->pm_result = rc;
3803 ap->pm_result = NULL; 3807 ap->pm_result = NULL;
3804 } 3808 }
3805 spin_unlock_irqrestore(ap->lock, flags); 3809 spin_unlock_irqrestore(ap->lock, flags);
3806 } 3810 }
3807 #endif /* CONFIG_PM */ 3811 #endif /* CONFIG_PM */
3808 3812
drivers/ata/libata-sff.c
1 /* 1 /*
2 * libata-sff.c - helper library for PCI IDE BMDMA 2 * libata-sff.c - helper library for PCI IDE BMDMA
3 * 3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org 5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails. 6 * on emails.
7 * 7 *
8 * Copyright 2003-2006 Red Hat, Inc. All rights reserved. 8 * Copyright 2003-2006 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2006 Jeff Garzik 9 * Copyright 2003-2006 Jeff Garzik
10 * 10 *
11 * 11 *
12 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by 13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option) 14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version. 15 * any later version.
16 * 16 *
17 * This program is distributed in the hope that it will be useful, 17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details. 20 * GNU General Public License for more details.
21 * 21 *
22 * You should have received a copy of the GNU General Public License 22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to 23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 * 25 *
26 * 26 *
27 * libata documentation is available via 'make {ps|pdf}docs', 27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.* 28 * as Documentation/DocBook/libata.*
29 * 29 *
30 * Hardware documentation available from http://www.t13.org/ and 30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/ 31 * http://www.sata-io.org/
32 * 32 *
33 */ 33 */
34 34
35 #include <linux/kernel.h> 35 #include <linux/kernel.h>
36 #include <linux/gfp.h> 36 #include <linux/gfp.h>
37 #include <linux/pci.h> 37 #include <linux/pci.h>
38 #include <linux/libata.h> 38 #include <linux/libata.h>
39 #include <linux/highmem.h> 39 #include <linux/highmem.h>
40 40
41 #include "libata.h" 41 #include "libata.h"
42 42
43 static struct workqueue_struct *ata_sff_wq; 43 static struct workqueue_struct *ata_sff_wq;
44 44
45 const struct ata_port_operations ata_sff_port_ops = { 45 const struct ata_port_operations ata_sff_port_ops = {
46 .inherits = &ata_base_port_ops, 46 .inherits = &ata_base_port_ops,
47 47
48 .qc_prep = ata_noop_qc_prep, 48 .qc_prep = ata_noop_qc_prep,
49 .qc_issue = ata_sff_qc_issue, 49 .qc_issue = ata_sff_qc_issue,
50 .qc_fill_rtf = ata_sff_qc_fill_rtf, 50 .qc_fill_rtf = ata_sff_qc_fill_rtf,
51 51
52 .freeze = ata_sff_freeze, 52 .freeze = ata_sff_freeze,
53 .thaw = ata_sff_thaw, 53 .thaw = ata_sff_thaw,
54 .prereset = ata_sff_prereset, 54 .prereset = ata_sff_prereset,
55 .softreset = ata_sff_softreset, 55 .softreset = ata_sff_softreset,
56 .hardreset = sata_sff_hardreset, 56 .hardreset = sata_sff_hardreset,
57 .postreset = ata_sff_postreset, 57 .postreset = ata_sff_postreset,
58 .error_handler = ata_sff_error_handler, 58 .error_handler = ata_sff_error_handler,
59 59
60 .sff_dev_select = ata_sff_dev_select, 60 .sff_dev_select = ata_sff_dev_select,
61 .sff_check_status = ata_sff_check_status, 61 .sff_check_status = ata_sff_check_status,
62 .sff_tf_load = ata_sff_tf_load, 62 .sff_tf_load = ata_sff_tf_load,
63 .sff_tf_read = ata_sff_tf_read, 63 .sff_tf_read = ata_sff_tf_read,
64 .sff_exec_command = ata_sff_exec_command, 64 .sff_exec_command = ata_sff_exec_command,
65 .sff_data_xfer = ata_sff_data_xfer, 65 .sff_data_xfer = ata_sff_data_xfer,
66 .sff_drain_fifo = ata_sff_drain_fifo, 66 .sff_drain_fifo = ata_sff_drain_fifo,
67 67
68 .lost_interrupt = ata_sff_lost_interrupt, 68 .lost_interrupt = ata_sff_lost_interrupt,
69 }; 69 };
70 EXPORT_SYMBOL_GPL(ata_sff_port_ops); 70 EXPORT_SYMBOL_GPL(ata_sff_port_ops);
71 71
72 /** 72 /**
73 * ata_sff_check_status - Read device status reg & clear interrupt 73 * ata_sff_check_status - Read device status reg & clear interrupt
74 * @ap: port where the device is 74 * @ap: port where the device is
75 * 75 *
76 * Reads ATA taskfile status register for currently-selected device 76 * Reads ATA taskfile status register for currently-selected device
77 * and return its value. This also clears pending interrupts 77 * and return its value. This also clears pending interrupts
78 * from this device 78 * from this device
79 * 79 *
80 * LOCKING: 80 * LOCKING:
81 * Inherited from caller. 81 * Inherited from caller.
82 */ 82 */
83 u8 ata_sff_check_status(struct ata_port *ap) 83 u8 ata_sff_check_status(struct ata_port *ap)
84 { 84 {
85 return ioread8(ap->ioaddr.status_addr); 85 return ioread8(ap->ioaddr.status_addr);
86 } 86 }
87 EXPORT_SYMBOL_GPL(ata_sff_check_status); 87 EXPORT_SYMBOL_GPL(ata_sff_check_status);
88 88
89 /** 89 /**
90 * ata_sff_altstatus - Read device alternate status reg 90 * ata_sff_altstatus - Read device alternate status reg
91 * @ap: port where the device is 91 * @ap: port where the device is
92 * 92 *
93 * Reads ATA taskfile alternate status register for 93 * Reads ATA taskfile alternate status register for
94 * currently-selected device and return its value. 94 * currently-selected device and return its value.
95 * 95 *
96 * Note: may NOT be used as the check_altstatus() entry in 96 * Note: may NOT be used as the check_altstatus() entry in
97 * ata_port_operations. 97 * ata_port_operations.
98 * 98 *
99 * LOCKING: 99 * LOCKING:
100 * Inherited from caller. 100 * Inherited from caller.
101 */ 101 */
102 static u8 ata_sff_altstatus(struct ata_port *ap) 102 static u8 ata_sff_altstatus(struct ata_port *ap)
103 { 103 {
104 if (ap->ops->sff_check_altstatus) 104 if (ap->ops->sff_check_altstatus)
105 return ap->ops->sff_check_altstatus(ap); 105 return ap->ops->sff_check_altstatus(ap);
106 106
107 return ioread8(ap->ioaddr.altstatus_addr); 107 return ioread8(ap->ioaddr.altstatus_addr);
108 } 108 }
109 109
110 /** 110 /**
111 * ata_sff_irq_status - Check if the device is busy 111 * ata_sff_irq_status - Check if the device is busy
112 * @ap: port where the device is 112 * @ap: port where the device is
113 * 113 *
114 * Determine if the port is currently busy. Uses altstatus 114 * Determine if the port is currently busy. Uses altstatus
115 * if available in order to avoid clearing shared IRQ status 115 * if available in order to avoid clearing shared IRQ status
116 * when finding an IRQ source. Non ctl capable devices don't 116 * when finding an IRQ source. Non ctl capable devices don't
117 * share interrupt lines fortunately for us. 117 * share interrupt lines fortunately for us.
118 * 118 *
119 * LOCKING: 119 * LOCKING:
120 * Inherited from caller. 120 * Inherited from caller.
121 */ 121 */
122 static u8 ata_sff_irq_status(struct ata_port *ap) 122 static u8 ata_sff_irq_status(struct ata_port *ap)
123 { 123 {
124 u8 status; 124 u8 status;
125 125
126 if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) { 126 if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
127 status = ata_sff_altstatus(ap); 127 status = ata_sff_altstatus(ap);
128 /* Not us: We are busy */ 128 /* Not us: We are busy */
129 if (status & ATA_BUSY) 129 if (status & ATA_BUSY)
130 return status; 130 return status;
131 } 131 }
132 /* Clear INTRQ latch */ 132 /* Clear INTRQ latch */
133 status = ap->ops->sff_check_status(ap); 133 status = ap->ops->sff_check_status(ap);
134 return status; 134 return status;
135 } 135 }
136 136
137 /** 137 /**
138 * ata_sff_sync - Flush writes 138 * ata_sff_sync - Flush writes
139 * @ap: Port to wait for. 139 * @ap: Port to wait for.
140 * 140 *
141 * CAUTION: 141 * CAUTION:
142 * If we have an mmio device with no ctl and no altstatus 142 * If we have an mmio device with no ctl and no altstatus
143 * method, this will fail. No such devices are known to exist. 143 * method, this will fail. No such devices are known to exist.
144 * 144 *
145 * LOCKING: 145 * LOCKING:
146 * Inherited from caller. 146 * Inherited from caller.
147 */ 147 */
148 148
149 static void ata_sff_sync(struct ata_port *ap) 149 static void ata_sff_sync(struct ata_port *ap)
150 { 150 {
151 if (ap->ops->sff_check_altstatus) 151 if (ap->ops->sff_check_altstatus)
152 ap->ops->sff_check_altstatus(ap); 152 ap->ops->sff_check_altstatus(ap);
153 else if (ap->ioaddr.altstatus_addr) 153 else if (ap->ioaddr.altstatus_addr)
154 ioread8(ap->ioaddr.altstatus_addr); 154 ioread8(ap->ioaddr.altstatus_addr);
155 } 155 }
156 156
157 /** 157 /**
158 * ata_sff_pause - Flush writes and wait 400ns 158 * ata_sff_pause - Flush writes and wait 400ns
159 * @ap: Port to pause for. 159 * @ap: Port to pause for.
160 * 160 *
161 * CAUTION: 161 * CAUTION:
162 * If we have an mmio device with no ctl and no altstatus 162 * If we have an mmio device with no ctl and no altstatus
163 * method, this will fail. No such devices are known to exist. 163 * method, this will fail. No such devices are known to exist.
164 * 164 *
165 * LOCKING: 165 * LOCKING:
166 * Inherited from caller. 166 * Inherited from caller.
167 */ 167 */
168 168
169 void ata_sff_pause(struct ata_port *ap) 169 void ata_sff_pause(struct ata_port *ap)
170 { 170 {
171 ata_sff_sync(ap); 171 ata_sff_sync(ap);
172 ndelay(400); 172 ndelay(400);
173 } 173 }
174 EXPORT_SYMBOL_GPL(ata_sff_pause); 174 EXPORT_SYMBOL_GPL(ata_sff_pause);
175 175
176 /** 176 /**
177 * ata_sff_dma_pause - Pause before commencing DMA 177 * ata_sff_dma_pause - Pause before commencing DMA
178 * @ap: Port to pause for. 178 * @ap: Port to pause for.
179 * 179 *
180 * Perform I/O fencing and ensure sufficient cycle delays occur 180 * Perform I/O fencing and ensure sufficient cycle delays occur
181 * for the HDMA1:0 transition 181 * for the HDMA1:0 transition
182 */ 182 */
183 183
184 void ata_sff_dma_pause(struct ata_port *ap) 184 void ata_sff_dma_pause(struct ata_port *ap)
185 { 185 {
186 if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) { 186 if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
187 /* An altstatus read will cause the needed delay without 187 /* An altstatus read will cause the needed delay without
188 messing up the IRQ status */ 188 messing up the IRQ status */
189 ata_sff_altstatus(ap); 189 ata_sff_altstatus(ap);
190 return; 190 return;
191 } 191 }
192 /* There are no DMA controllers without ctl. BUG here to ensure 192 /* There are no DMA controllers without ctl. BUG here to ensure
193 we never violate the HDMA1:0 transition timing and risk 193 we never violate the HDMA1:0 transition timing and risk
194 corruption. */ 194 corruption. */
195 BUG(); 195 BUG();
196 } 196 }
197 EXPORT_SYMBOL_GPL(ata_sff_dma_pause); 197 EXPORT_SYMBOL_GPL(ata_sff_dma_pause);
198 198
199 /** 199 /**
200 * ata_sff_busy_sleep - sleep until BSY clears, or timeout 200 * ata_sff_busy_sleep - sleep until BSY clears, or timeout
201 * @ap: port containing status register to be polled 201 * @ap: port containing status register to be polled
202 * @tmout_pat: impatience timeout in msecs 202 * @tmout_pat: impatience timeout in msecs
203 * @tmout: overall timeout in msecs 203 * @tmout: overall timeout in msecs
204 * 204 *
205 * Sleep until ATA Status register bit BSY clears, 205 * Sleep until ATA Status register bit BSY clears,
206 * or a timeout occurs. 206 * or a timeout occurs.
207 * 207 *
208 * LOCKING: 208 * LOCKING:
209 * Kernel thread context (may sleep). 209 * Kernel thread context (may sleep).
210 * 210 *
211 * RETURNS: 211 * RETURNS:
212 * 0 on success, -errno otherwise. 212 * 0 on success, -errno otherwise.
213 */ 213 */
214 int ata_sff_busy_sleep(struct ata_port *ap, 214 int ata_sff_busy_sleep(struct ata_port *ap,
215 unsigned long tmout_pat, unsigned long tmout) 215 unsigned long tmout_pat, unsigned long tmout)
216 { 216 {
217 unsigned long timer_start, timeout; 217 unsigned long timer_start, timeout;
218 u8 status; 218 u8 status;
219 219
220 status = ata_sff_busy_wait(ap, ATA_BUSY, 300); 220 status = ata_sff_busy_wait(ap, ATA_BUSY, 300);
221 timer_start = jiffies; 221 timer_start = jiffies;
222 timeout = ata_deadline(timer_start, tmout_pat); 222 timeout = ata_deadline(timer_start, tmout_pat);
223 while (status != 0xff && (status & ATA_BUSY) && 223 while (status != 0xff && (status & ATA_BUSY) &&
224 time_before(jiffies, timeout)) { 224 time_before(jiffies, timeout)) {
225 msleep(50); 225 msleep(50);
226 status = ata_sff_busy_wait(ap, ATA_BUSY, 3); 226 status = ata_sff_busy_wait(ap, ATA_BUSY, 3);
227 } 227 }
228 228
229 if (status != 0xff && (status & ATA_BUSY)) 229 if (status != 0xff && (status & ATA_BUSY))
230 ata_port_printk(ap, KERN_WARNING, 230 ata_port_printk(ap, KERN_WARNING,
231 "port is slow to respond, please be patient " 231 "port is slow to respond, please be patient "
232 "(Status 0x%x)\n", status); 232 "(Status 0x%x)\n", status);
233 233
234 timeout = ata_deadline(timer_start, tmout); 234 timeout = ata_deadline(timer_start, tmout);
235 while (status != 0xff && (status & ATA_BUSY) && 235 while (status != 0xff && (status & ATA_BUSY) &&
236 time_before(jiffies, timeout)) { 236 time_before(jiffies, timeout)) {
237 msleep(50); 237 msleep(50);
238 status = ap->ops->sff_check_status(ap); 238 status = ap->ops->sff_check_status(ap);
239 } 239 }
240 240
241 if (status == 0xff) 241 if (status == 0xff)
242 return -ENODEV; 242 return -ENODEV;
243 243
244 if (status & ATA_BUSY) { 244 if (status & ATA_BUSY) {
245 ata_port_printk(ap, KERN_ERR, "port failed to respond " 245 ata_port_printk(ap, KERN_ERR, "port failed to respond "
246 "(%lu secs, Status 0x%x)\n", 246 "(%lu secs, Status 0x%x)\n",
247 DIV_ROUND_UP(tmout, 1000), status); 247 DIV_ROUND_UP(tmout, 1000), status);
248 return -EBUSY; 248 return -EBUSY;
249 } 249 }
250 250
251 return 0; 251 return 0;
252 } 252 }
253 EXPORT_SYMBOL_GPL(ata_sff_busy_sleep); 253 EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);
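A hedged caller sketch, assuming the stock ATA_TMOUT_BOOT* millisecond
timeouts from <linux/libata.h> (no such call is added by this commit):

	rc = ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
	if (rc)		/* -ENODEV if status reads 0xff, -EBUSY if BSY stuck */
		return rc;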
254 254
255 static int ata_sff_check_ready(struct ata_link *link) 255 static int ata_sff_check_ready(struct ata_link *link)
256 { 256 {
257 u8 status = link->ap->ops->sff_check_status(link->ap); 257 u8 status = link->ap->ops->sff_check_status(link->ap);
258 258
259 return ata_check_ready(status); 259 return ata_check_ready(status);
260 } 260 }
261 261
262 /** 262 /**
263 * ata_sff_wait_ready - sleep until BSY clears, or timeout 263 * ata_sff_wait_ready - sleep until BSY clears, or timeout
264 * @link: SFF link to wait ready status for 264 * @link: SFF link to wait ready status for
265 * @deadline: deadline jiffies for the operation 265 * @deadline: deadline jiffies for the operation
266 * 266 *
267 * Sleep until ATA Status register bit BSY clears, or timeout 267 * Sleep until ATA Status register bit BSY clears, or timeout
268 * occurs. 268 * occurs.
269 * 269 *
270 * LOCKING: 270 * LOCKING:
271 * Kernel thread context (may sleep). 271 * Kernel thread context (may sleep).
272 * 272 *
273 * RETURNS: 273 * RETURNS:
274 * 0 on success, -errno otherwise. 274 * 0 on success, -errno otherwise.
275 */ 275 */
276 int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline) 276 int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
277 { 277 {
278 return ata_wait_ready(link, deadline, ata_sff_check_ready); 278 return ata_wait_ready(link, deadline, ata_sff_check_ready);
279 } 279 }
280 EXPORT_SYMBOL_GPL(ata_sff_wait_ready); 280 EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
281 281
282 /** 282 /**
283 * ata_sff_set_devctl - Write device control reg 283 * ata_sff_set_devctl - Write device control reg
284 * @ap: port where the device is 284 * @ap: port where the device is
285 * @ctl: value to write 285 * @ctl: value to write
286 * 286 *
287 * Writes ATA taskfile device control register. 287 * Writes ATA taskfile device control register.
288 * 288 *
289 * Note: may NOT be used as the sff_set_devctl() entry in 289 * Note: may NOT be used as the sff_set_devctl() entry in
290 * ata_port_operations. 290 * ata_port_operations.
291 * 291 *
292 * LOCKING: 292 * LOCKING:
293 * Inherited from caller. 293 * Inherited from caller.
294 */ 294 */
295 static void ata_sff_set_devctl(struct ata_port *ap, u8 ctl) 295 static void ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
296 { 296 {
297 if (ap->ops->sff_set_devctl) 297 if (ap->ops->sff_set_devctl)
298 ap->ops->sff_set_devctl(ap, ctl); 298 ap->ops->sff_set_devctl(ap, ctl);
299 else 299 else
300 iowrite8(ctl, ap->ioaddr.ctl_addr); 300 iowrite8(ctl, ap->ioaddr.ctl_addr);
301 } 301 }
302 302
303 /** 303 /**
304 * ata_sff_dev_select - Select device 0/1 on ATA bus 304 * ata_sff_dev_select - Select device 0/1 on ATA bus
305 * @ap: ATA channel to manipulate 305 * @ap: ATA channel to manipulate
306 * @device: ATA device (numbered from zero) to select 306 * @device: ATA device (numbered from zero) to select
307 * 307 *
308 * Use the method defined in the ATA specification to 308 * Use the method defined in the ATA specification to
309 * make either device 0, or device 1, active on the 309 * make either device 0, or device 1, active on the
310 * ATA channel. Works with both PIO and MMIO. 310 * ATA channel. Works with both PIO and MMIO.
311 * 311 *
312 * May be used as the dev_select() entry in ata_port_operations. 312 * May be used as the dev_select() entry in ata_port_operations.
313 * 313 *
314 * LOCKING: 314 * LOCKING:
315 * caller. 315 * caller.
316 */ 316 */
317 void ata_sff_dev_select(struct ata_port *ap, unsigned int device) 317 void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
318 { 318 {
319 u8 tmp; 319 u8 tmp;
320 320
321 if (device == 0) 321 if (device == 0)
322 tmp = ATA_DEVICE_OBS; 322 tmp = ATA_DEVICE_OBS;
323 else 323 else
324 tmp = ATA_DEVICE_OBS | ATA_DEV1; 324 tmp = ATA_DEVICE_OBS | ATA_DEV1;
325 325
326 iowrite8(tmp, ap->ioaddr.device_addr); 326 iowrite8(tmp, ap->ioaddr.device_addr);
327 ata_sff_pause(ap); /* needed; also flushes, for mmio */ 327 ata_sff_pause(ap); /* needed; also flushes, for mmio */
328 } 328 }
329 EXPORT_SYMBOL_GPL(ata_sff_dev_select); 329 EXPORT_SYMBOL_GPL(ata_sff_dev_select);
330 330
331 /** 331 /**
332 * ata_dev_select - Select device 0/1 on ATA bus 332 * ata_dev_select - Select device 0/1 on ATA bus
333 * @ap: ATA channel to manipulate 333 * @ap: ATA channel to manipulate
334 * @device: ATA device (numbered from zero) to select 334 * @device: ATA device (numbered from zero) to select
335 * @wait: non-zero to wait for Status register BSY bit to clear 335 * @wait: non-zero to wait for Status register BSY bit to clear
336 * @can_sleep: non-zero if context allows sleeping 336 * @can_sleep: non-zero if context allows sleeping
337 * 337 *
338 * Use the method defined in the ATA specification to 338 * Use the method defined in the ATA specification to
339 * make either device 0, or device 1, active on the 339 * make either device 0, or device 1, active on the
340 * ATA channel. 340 * ATA channel.
341 * 341 *
342 * This is a high-level version of ata_sff_dev_select(), which 342 * This is a high-level version of ata_sff_dev_select(), which
343 * additionally provides the services of inserting the proper 343 * additionally provides the services of inserting the proper
344 * pauses and status polling, where needed. 344 * pauses and status polling, where needed.
345 * 345 *
346 * LOCKING: 346 * LOCKING:
347 * caller. 347 * caller.
348 */ 348 */
349 static void ata_dev_select(struct ata_port *ap, unsigned int device, 349 static void ata_dev_select(struct ata_port *ap, unsigned int device,
350 unsigned int wait, unsigned int can_sleep) 350 unsigned int wait, unsigned int can_sleep)
351 { 351 {
352 if (ata_msg_probe(ap)) 352 if (ata_msg_probe(ap))
353 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, " 353 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
354 "device %u, wait %u\n", device, wait); 354 "device %u, wait %u\n", device, wait);
355 355
356 if (wait) 356 if (wait)
357 ata_wait_idle(ap); 357 ata_wait_idle(ap);
358 358
359 ap->ops->sff_dev_select(ap, device); 359 ap->ops->sff_dev_select(ap, device);
360 360
361 if (wait) { 361 if (wait) {
362 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI) 362 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
363 msleep(150); 363 msleep(150);
364 ata_wait_idle(ap); 364 ata_wait_idle(ap);
365 } 365 }
366 } 366 }
367 367
368 /** 368 /**
369 * ata_sff_irq_on - Enable interrupts on a port. 369 * ata_sff_irq_on - Enable interrupts on a port.
370 * @ap: Port on which interrupts are enabled. 370 * @ap: Port on which interrupts are enabled.
371 * 371 *
372 * Enable interrupts on a legacy IDE device using MMIO or PIO, 372 * Enable interrupts on a legacy IDE device using MMIO or PIO,
373 * wait for idle, clear any pending interrupts. 373 * wait for idle, clear any pending interrupts.
374 * 374 *
375 * Note: may NOT be used as the sff_irq_on() entry in 375 * Note: may NOT be used as the sff_irq_on() entry in
376 * ata_port_operations. 376 * ata_port_operations.
377 * 377 *
378 * LOCKING: 378 * LOCKING:
379 * Inherited from caller. 379 * Inherited from caller.
380 */ 380 */
381 void ata_sff_irq_on(struct ata_port *ap) 381 void ata_sff_irq_on(struct ata_port *ap)
382 { 382 {
383 struct ata_ioports *ioaddr = &ap->ioaddr; 383 struct ata_ioports *ioaddr = &ap->ioaddr;
384 384
385 if (ap->ops->sff_irq_on) { 385 if (ap->ops->sff_irq_on) {
386 ap->ops->sff_irq_on(ap); 386 ap->ops->sff_irq_on(ap);
387 return; 387 return;
388 } 388 }
389 389
390 ap->ctl &= ~ATA_NIEN; 390 ap->ctl &= ~ATA_NIEN;
391 ap->last_ctl = ap->ctl; 391 ap->last_ctl = ap->ctl;
392 392
393 if (ap->ops->sff_set_devctl || ioaddr->ctl_addr) 393 if (ap->ops->sff_set_devctl || ioaddr->ctl_addr)
394 ata_sff_set_devctl(ap, ap->ctl); 394 ata_sff_set_devctl(ap, ap->ctl);
395 ata_wait_idle(ap); 395 ata_wait_idle(ap);
396 396
397 if (ap->ops->sff_irq_clear) 397 if (ap->ops->sff_irq_clear)
398 ap->ops->sff_irq_clear(ap); 398 ap->ops->sff_irq_clear(ap);
399 } 399 }
400 EXPORT_SYMBOL_GPL(ata_sff_irq_on); 400 EXPORT_SYMBOL_GPL(ata_sff_irq_on);
401 401
402 /** 402 /**
403 * ata_sff_tf_load - send taskfile registers to host controller 403 * ata_sff_tf_load - send taskfile registers to host controller
404 * @ap: Port to which output is sent 404 * @ap: Port to which output is sent
405 * @tf: ATA taskfile register set 405 * @tf: ATA taskfile register set
406 * 406 *
407 * Outputs ATA taskfile to standard ATA host controller. 407 * Outputs ATA taskfile to standard ATA host controller.
408 * 408 *
409 * LOCKING: 409 * LOCKING:
410 * Inherited from caller. 410 * Inherited from caller.
411 */ 411 */
412 void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) 412 void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
413 { 413 {
414 struct ata_ioports *ioaddr = &ap->ioaddr; 414 struct ata_ioports *ioaddr = &ap->ioaddr;
415 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; 415 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
416 416
417 if (tf->ctl != ap->last_ctl) { 417 if (tf->ctl != ap->last_ctl) {
418 if (ioaddr->ctl_addr) 418 if (ioaddr->ctl_addr)
419 iowrite8(tf->ctl, ioaddr->ctl_addr); 419 iowrite8(tf->ctl, ioaddr->ctl_addr);
420 ap->last_ctl = tf->ctl; 420 ap->last_ctl = tf->ctl;
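	/* let the device settle after the ctl change before the
	 * taskfile registers are written */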
421 ata_wait_idle(ap);
421 } 422 }
422 423
423 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { 424 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
424 WARN_ON_ONCE(!ioaddr->ctl_addr); 425 WARN_ON_ONCE(!ioaddr->ctl_addr);
425 iowrite8(tf->hob_feature, ioaddr->feature_addr); 426 iowrite8(tf->hob_feature, ioaddr->feature_addr);
426 iowrite8(tf->hob_nsect, ioaddr->nsect_addr); 427 iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
427 iowrite8(tf->hob_lbal, ioaddr->lbal_addr); 428 iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
428 iowrite8(tf->hob_lbam, ioaddr->lbam_addr); 429 iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
429 iowrite8(tf->hob_lbah, ioaddr->lbah_addr); 430 iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
430 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", 431 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
431 tf->hob_feature, 432 tf->hob_feature,
432 tf->hob_nsect, 433 tf->hob_nsect,
433 tf->hob_lbal, 434 tf->hob_lbal,
434 tf->hob_lbam, 435 tf->hob_lbam,
435 tf->hob_lbah); 436 tf->hob_lbah);
436 } 437 }
437 438
438 if (is_addr) { 439 if (is_addr) {
439 iowrite8(tf->feature, ioaddr->feature_addr); 440 iowrite8(tf->feature, ioaddr->feature_addr);
440 iowrite8(tf->nsect, ioaddr->nsect_addr); 441 iowrite8(tf->nsect, ioaddr->nsect_addr);
441 iowrite8(tf->lbal, ioaddr->lbal_addr); 442 iowrite8(tf->lbal, ioaddr->lbal_addr);
442 iowrite8(tf->lbam, ioaddr->lbam_addr); 443 iowrite8(tf->lbam, ioaddr->lbam_addr);
443 iowrite8(tf->lbah, ioaddr->lbah_addr); 444 iowrite8(tf->lbah, ioaddr->lbah_addr);
444 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", 445 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
445 tf->feature, 446 tf->feature,
446 tf->nsect, 447 tf->nsect,
447 tf->lbal, 448 tf->lbal,
448 tf->lbam, 449 tf->lbam,
449 tf->lbah); 450 tf->lbah);
450 } 451 }
451 452
452 if (tf->flags & ATA_TFLAG_DEVICE) { 453 if (tf->flags & ATA_TFLAG_DEVICE) {
453 iowrite8(tf->device, ioaddr->device_addr); 454 iowrite8(tf->device, ioaddr->device_addr);
454 VPRINTK("device 0x%X\n", tf->device); 455 VPRINTK("device 0x%X\n", tf->device);
455 } 456 }
457
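	/* wait for the device to report !BSY && !DRQ so the caller can
	 * safely issue the command that follows this taskfile */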
458 ata_wait_idle(ap);
456 } 459 }
457 EXPORT_SYMBOL_GPL(ata_sff_tf_load); 460 EXPORT_SYMBOL_GPL(ata_sff_tf_load);
458 461
459 /** 462 /**
460 * ata_sff_tf_read - input device's ATA taskfile shadow registers 463 * ata_sff_tf_read - input device's ATA taskfile shadow registers
461 * @ap: Port from which input is read 464 * @ap: Port from which input is read
462 * @tf: ATA taskfile register set for storing input 465 * @tf: ATA taskfile register set for storing input
463 * 466 *
464 * Reads ATA taskfile registers for currently-selected device 467 * Reads ATA taskfile registers for currently-selected device
465 * into @tf. Assumes the device has a fully SFF compliant task file 468 * into @tf. Assumes the device has a fully SFF compliant task file
466 * layout and behaviour. If your device does not (e.g. has a different 469 * layout and behaviour. If your device does not (e.g. has a different
467 * status method) then you will need to provide a replacement tf_read. 470 * status method) then you will need to provide a replacement tf_read.
468 * 471 *
469 * LOCKING: 472 * LOCKING:
470 * Inherited from caller. 473 * Inherited from caller.
471 */ 474 */
472 void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf) 475 void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
473 { 476 {
474 struct ata_ioports *ioaddr = &ap->ioaddr; 477 struct ata_ioports *ioaddr = &ap->ioaddr;
475 478
476 tf->command = ata_sff_check_status(ap); 479 tf->command = ata_sff_check_status(ap);
477 tf->feature = ioread8(ioaddr->error_addr); 480 tf->feature = ioread8(ioaddr->error_addr);
478 tf->nsect = ioread8(ioaddr->nsect_addr); 481 tf->nsect = ioread8(ioaddr->nsect_addr);
479 tf->lbal = ioread8(ioaddr->lbal_addr); 482 tf->lbal = ioread8(ioaddr->lbal_addr);
480 tf->lbam = ioread8(ioaddr->lbam_addr); 483 tf->lbam = ioread8(ioaddr->lbam_addr);
481 tf->lbah = ioread8(ioaddr->lbah_addr); 484 tf->lbah = ioread8(ioaddr->lbah_addr);
482 tf->device = ioread8(ioaddr->device_addr); 485 tf->device = ioread8(ioaddr->device_addr);
483 486
484 if (tf->flags & ATA_TFLAG_LBA48) { 487 if (tf->flags & ATA_TFLAG_LBA48) {
485 if (likely(ioaddr->ctl_addr)) { 488 if (likely(ioaddr->ctl_addr)) {
486 iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr); 489 iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
487 tf->hob_feature = ioread8(ioaddr->error_addr); 490 tf->hob_feature = ioread8(ioaddr->error_addr);
488 tf->hob_nsect = ioread8(ioaddr->nsect_addr); 491 tf->hob_nsect = ioread8(ioaddr->nsect_addr);
489 tf->hob_lbal = ioread8(ioaddr->lbal_addr); 492 tf->hob_lbal = ioread8(ioaddr->lbal_addr);
490 tf->hob_lbam = ioread8(ioaddr->lbam_addr); 493 tf->hob_lbam = ioread8(ioaddr->lbam_addr);
491 tf->hob_lbah = ioread8(ioaddr->lbah_addr); 494 tf->hob_lbah = ioread8(ioaddr->lbah_addr);
492 iowrite8(tf->ctl, ioaddr->ctl_addr); 495 iowrite8(tf->ctl, ioaddr->ctl_addr);
493 ap->last_ctl = tf->ctl; 496 ap->last_ctl = tf->ctl;
494 } else 497 } else
495 WARN_ON_ONCE(1); 498 WARN_ON_ONCE(1);
496 } 499 }
497 } 500 }
498 EXPORT_SYMBOL_GPL(ata_sff_tf_read); 501 EXPORT_SYMBOL_GPL(ata_sff_tf_read);
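As the kernel-doc above notes, a controller whose status is not readable through the standard shadow-register block must supply its own tf_read. A minimal sketch of such an override, assuming <linux/libata.h> and a hypothetical driver that keeps a private MMIO status mapping in ap->private_data (my_tf_read, my_port_ops and that mapping are illustrative names, not part of libata):

static void my_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
        void __iomem *my_status_reg = ap->private_data; /* assumed mapping */

        /* Reuse the stock helper for the shadow register block... */
        ata_sff_tf_read(ap, tf);

        /* ...then overwrite the status byte from where this hardware
         * actually exposes it.
         */
        tf->command = ioread8(my_status_reg);
}

static struct ata_port_operations my_port_ops = {
        .inherits       = &ata_sff_port_ops,
        .sff_tf_read    = my_tf_read,
};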
499 502
500 /** 503 /**
501 * ata_sff_exec_command - issue ATA command to host controller 504 * ata_sff_exec_command - issue ATA command to host controller
502 * @ap: port to which command is being issued 505 * @ap: port to which command is being issued
503 * @tf: ATA taskfile register set 506 * @tf: ATA taskfile register set
504 * 507 *
505 * Issues ATA command, with proper synchronization with interrupt 508 * Issues ATA command, with proper synchronization with interrupt
506 * handler / other threads. 509 * handler / other threads.
507 * 510 *
508 * LOCKING: 511 * LOCKING:
509 * spin_lock_irqsave(host lock) 512 * spin_lock_irqsave(host lock)
510 */ 513 */
511 void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf) 514 void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
512 { 515 {
513 DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); 516 DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
514 517
515 iowrite8(tf->command, ap->ioaddr.command_addr); 518 iowrite8(tf->command, ap->ioaddr.command_addr);
516 ata_sff_pause(ap); 519 ata_sff_pause(ap);
517 } 520 }
518 EXPORT_SYMBOL_GPL(ata_sff_exec_command); 521 EXPORT_SYMBOL_GPL(ata_sff_exec_command);
519 522
520 /** 523 /**
521 * ata_tf_to_host - issue ATA taskfile to host controller 524 * ata_tf_to_host - issue ATA taskfile to host controller
522 * @ap: port to which command is being issued 525 * @ap: port to which command is being issued
523 * @tf: ATA taskfile register set 526 * @tf: ATA taskfile register set
524 * 527 *
525 * Issues ATA taskfile register set to ATA host controller, 528 * Issues ATA taskfile register set to ATA host controller,
526 * with proper synchronization with interrupt handler and 529 * with proper synchronization with interrupt handler and
527 * other threads. 530 * other threads.
528 * 531 *
529 * LOCKING: 532 * LOCKING:
530 * spin_lock_irqsave(host lock) 533 * spin_lock_irqsave(host lock)
531 */ 534 */
532 static inline void ata_tf_to_host(struct ata_port *ap, 535 static inline void ata_tf_to_host(struct ata_port *ap,
533 const struct ata_taskfile *tf) 536 const struct ata_taskfile *tf)
534 { 537 {
535 ap->ops->sff_tf_load(ap, tf); 538 ap->ops->sff_tf_load(ap, tf);
536 ap->ops->sff_exec_command(ap, tf); 539 ap->ops->sff_exec_command(ap, tf);
537 } 540 }
538 541
539 /** 542 /**
540 * ata_sff_data_xfer - Transfer data by PIO 543 * ata_sff_data_xfer - Transfer data by PIO
541 * @dev: device to target 544 * @dev: device to target
542 * @buf: data buffer 545 * @buf: data buffer
543 * @buflen: buffer length 546 * @buflen: buffer length
544 * @rw: read/write 547 * @rw: read/write
545 * 548 *
546 * Transfer data from/to the device data register by PIO. 549 * Transfer data from/to the device data register by PIO.
547 * 550 *
548 * LOCKING: 551 * LOCKING:
549 * Inherited from caller. 552 * Inherited from caller.
550 * 553 *
551 * RETURNS: 554 * RETURNS:
552 * Bytes consumed. 555 * Bytes consumed.
553 */ 556 */
554 unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf, 557 unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf,
555 unsigned int buflen, int rw) 558 unsigned int buflen, int rw)
556 { 559 {
557 struct ata_port *ap = dev->link->ap; 560 struct ata_port *ap = dev->link->ap;
558 void __iomem *data_addr = ap->ioaddr.data_addr; 561 void __iomem *data_addr = ap->ioaddr.data_addr;
559 unsigned int words = buflen >> 1; 562 unsigned int words = buflen >> 1;
560 563
561 /* Transfer multiple of 2 bytes */ 564 /* Transfer multiple of 2 bytes */
562 if (rw == READ) 565 if (rw == READ)
563 ioread16_rep(data_addr, buf, words); 566 ioread16_rep(data_addr, buf, words);
564 else 567 else
565 iowrite16_rep(data_addr, buf, words); 568 iowrite16_rep(data_addr, buf, words);
566 569
567 /* Transfer trailing byte, if any. */ 570 /* Transfer trailing byte, if any. */
568 if (unlikely(buflen & 0x01)) { 571 if (unlikely(buflen & 0x01)) {
569 unsigned char pad[2]; 572 unsigned char pad[2];
570 573
571 /* Point buf to the tail of buffer */ 574 /* Point buf to the tail of buffer */
572 buf += buflen - 1; 575 buf += buflen - 1;
573 576
574 /* 577 /*
575 * Use io*16_rep() accessors here as well to avoid pointlessly 578 * Use io*16_rep() accessors here as well to avoid pointlessly
576 * swapping bytes to and from on the big endian machines... 579 * swapping bytes to and from on the big endian machines...
577 */ 580 */
578 if (rw == READ) { 581 if (rw == READ) {
579 ioread16_rep(data_addr, pad, 1); 582 ioread16_rep(data_addr, pad, 1);
580 *buf = pad[0]; 583 *buf = pad[0];
581 } else { 584 } else {
582 pad[0] = *buf; 585 pad[0] = *buf;
583 iowrite16_rep(data_addr, pad, 1); 586 iowrite16_rep(data_addr, pad, 1);
584 } 587 }
585 words++; 588 words++;
586 } 589 }
587 590
588 return words << 1; 591 return words << 1;
589 } 592 }
590 EXPORT_SYMBOL_GPL(ata_sff_data_xfer); 593 EXPORT_SYMBOL_GPL(ata_sff_data_xfer);
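Worked example of the return value: a 7-byte read moves three 16-bit words through the _rep accessor, then one more word for the trailing byte, of which only pad[0] is kept, so the function reports (3 + 1) << 1 = 8 bytes consumed even though the caller asked for 7.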
591 594
592 /** 595 /**
593 * ata_sff_data_xfer32 - Transfer data by PIO 596 * ata_sff_data_xfer32 - Transfer data by PIO
594 * @dev: device to target 597 * @dev: device to target
595 * @buf: data buffer 598 * @buf: data buffer
596 * @buflen: buffer length 599 * @buflen: buffer length
597 * @rw: read/write 600 * @rw: read/write
598 * 601 *
599 * Transfer data from/to the device data register by PIO using 32bit 602 * Transfer data from/to the device data register by PIO using 32bit
600 * I/O operations. 603 * I/O operations.
601 * 604 *
602 * LOCKING: 605 * LOCKING:
603 * Inherited from caller. 606 * Inherited from caller.
604 * 607 *
605 * RETURNS: 608 * RETURNS:
606 * Bytes consumed. 609 * Bytes consumed.
607 */ 610 */
608 611
609 unsigned int ata_sff_data_xfer32(struct ata_device *dev, unsigned char *buf, 612 unsigned int ata_sff_data_xfer32(struct ata_device *dev, unsigned char *buf,
610 unsigned int buflen, int rw) 613 unsigned int buflen, int rw)
611 { 614 {
612 struct ata_port *ap = dev->link->ap; 615 struct ata_port *ap = dev->link->ap;
613 void __iomem *data_addr = ap->ioaddr.data_addr; 616 void __iomem *data_addr = ap->ioaddr.data_addr;
614 unsigned int words = buflen >> 2; 617 unsigned int words = buflen >> 2;
615 int slop = buflen & 3; 618 int slop = buflen & 3;
616 619
617 if (!(ap->pflags & ATA_PFLAG_PIO32)) 620 if (!(ap->pflags & ATA_PFLAG_PIO32))
618 return ata_sff_data_xfer(dev, buf, buflen, rw); 621 return ata_sff_data_xfer(dev, buf, buflen, rw);
619 622
620 /* Transfer multiple of 4 bytes */ 623 /* Transfer multiple of 4 bytes */
621 if (rw == READ) 624 if (rw == READ)
622 ioread32_rep(data_addr, buf, words); 625 ioread32_rep(data_addr, buf, words);
623 else 626 else
624 iowrite32_rep(data_addr, buf, words); 627 iowrite32_rep(data_addr, buf, words);
625 628
626 /* Transfer trailing bytes, if any */ 629 /* Transfer trailing bytes, if any */
627 if (unlikely(slop)) { 630 if (unlikely(slop)) {
628 unsigned char pad[4]; 631 unsigned char pad[4];
629 632
630 /* Point buf to the tail of buffer */ 633 /* Point buf to the tail of buffer */
631 buf += buflen - slop; 634 buf += buflen - slop;
632 635
633 /* 636 /*
634 * Use io*_rep() accessors here as well to avoid pointlessly 637 * Use io*_rep() accessors here as well to avoid pointlessly
635 * swapping bytes to and from on the big endian machines... 638 * swapping bytes to and from on the big endian machines...
636 */ 639 */
637 if (rw == READ) { 640 if (rw == READ) {
638 if (slop < 3) 641 if (slop < 3)
639 ioread16_rep(data_addr, pad, 1); 642 ioread16_rep(data_addr, pad, 1);
640 else 643 else
641 ioread32_rep(data_addr, pad, 1); 644 ioread32_rep(data_addr, pad, 1);
642 memcpy(buf, pad, slop); 645 memcpy(buf, pad, slop);
643 } else { 646 } else {
644 memcpy(pad, buf, slop); 647 memcpy(pad, buf, slop);
645 if (slop < 3) 648 if (slop < 3)
646 iowrite16_rep(data_addr, pad, 1); 649 iowrite16_rep(data_addr, pad, 1);
647 else 650 else
648 iowrite32_rep(data_addr, pad, 1); 651 iowrite32_rep(data_addr, pad, 1);
649 } 652 }
650 } 653 }
651 return (buflen + 1) & ~1; 654 return (buflen + 1) & ~1;
652 } 655 }
653 EXPORT_SYMBOL_GPL(ata_sff_data_xfer32); 656 EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
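Worked example of the slop handling: with ATA_PFLAG_PIO32 set and buflen = 5, one 32-bit word covers the first four bytes, slop = 1 takes the 16-bit branch for the tail, and the function reports (5 + 1) & ~1 = 6 bytes consumed; a slop of 3 would use the 32-bit branch for the tail instead. Without ATA_PFLAG_PIO32 the call simply falls through to the 16-bit ata_sff_data_xfer() above.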
654 657
655 /** 658 /**
656 * ata_sff_data_xfer_noirq - Transfer data by PIO 659 * ata_sff_data_xfer_noirq - Transfer data by PIO
657 * @dev: device to target 660 * @dev: device to target
658 * @buf: data buffer 661 * @buf: data buffer
659 * @buflen: buffer length 662 * @buflen: buffer length
660 * @rw: read/write 663 * @rw: read/write
661 * 664 *
662 * Transfer data from/to the device data register by PIO. Do the 665 * Transfer data from/to the device data register by PIO. Do the
663 * transfer with interrupts disabled. 666 * transfer with interrupts disabled.
664 * 667 *
665 * LOCKING: 668 * LOCKING:
666 * Inherited from caller. 669 * Inherited from caller.
667 * 670 *
668 * RETURNS: 671 * RETURNS:
669 * Bytes consumed. 672 * Bytes consumed.
670 */ 673 */
671 unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf, 674 unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
672 unsigned int buflen, int rw) 675 unsigned int buflen, int rw)
673 { 676 {
674 unsigned long flags; 677 unsigned long flags;
675 unsigned int consumed; 678 unsigned int consumed;
676 679
677 local_irq_save(flags); 680 local_irq_save(flags);
678 consumed = ata_sff_data_xfer(dev, buf, buflen, rw); 681 consumed = ata_sff_data_xfer(dev, buf, buflen, rw);
679 local_irq_restore(flags); 682 local_irq_restore(flags);
680 683
681 return consumed; 684 return consumed;
682 } 685 }
683 EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq); 686 EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq);
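All three variants plug into the same ->sff_data_xfer hook, so a driver simply picks one in its port operations. A minimal sketch, assuming a driver that wants the 32-bit path (my_ops is an illustrative name; inheriting ata_sff_port_ops already gives the plain 16-bit ata_sff_data_xfer as the default):

static struct ata_port_operations my_ops = {
        .inherits       = &ata_sff_port_ops,
        .sff_data_xfer  = ata_sff_data_xfer32, /* or ata_sff_data_xfer_noirq */
};

Note that the 32-bit variant only takes effect once the port runs with ATA_PFLAG_PIO32 set, as the fallback check at the top of ata_sff_data_xfer32() shows.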
684 687
685 /** 688 /**
686 * ata_pio_sector - Transfer a sector of data. 689 * ata_pio_sector - Transfer a sector of data.
687 * @qc: Command in progress 690 * @qc: Command in progress
688 * 691 *
689 * Transfer qc->sect_size bytes of data from/to the ATA device. 692 * Transfer qc->sect_size bytes of data from/to the ATA device.
690 * 693 *
691 * LOCKING: 694 * LOCKING:
692 * Inherited from caller. 695 * Inherited from caller.
693 */ 696 */
694 static void ata_pio_sector(struct ata_queued_cmd *qc) 697 static void ata_pio_sector(struct ata_queued_cmd *qc)
695 { 698 {
696 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); 699 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
697 struct ata_port *ap = qc->ap; 700 struct ata_port *ap = qc->ap;
698 struct page *page; 701 struct page *page;
699 unsigned int offset; 702 unsigned int offset;
700 unsigned char *buf; 703 unsigned char *buf;
701 704
702 if (qc->curbytes == qc->nbytes - qc->sect_size) 705 if (qc->curbytes == qc->nbytes - qc->sect_size)
703 ap->hsm_task_state = HSM_ST_LAST; 706 ap->hsm_task_state = HSM_ST_LAST;
704 707
705 page = sg_page(qc->cursg); 708 page = sg_page(qc->cursg);
706 offset = qc->cursg->offset + qc->cursg_ofs; 709 offset = qc->cursg->offset + qc->cursg_ofs;
707 710
708 /* get the current page and offset */ 711 /* get the current page and offset */
709 page = nth_page(page, (offset >> PAGE_SHIFT)); 712 page = nth_page(page, (offset >> PAGE_SHIFT));
710 offset %= PAGE_SIZE; 713 offset %= PAGE_SIZE;
711 714
712 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); 715 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
713 716
714 if (PageHighMem(page)) { 717 if (PageHighMem(page)) {
715 unsigned long flags; 718 unsigned long flags;
716 719
717 /* FIXME: use a bounce buffer */ 720 /* FIXME: use a bounce buffer */
718 local_irq_save(flags); 721 local_irq_save(flags);
719 buf = kmap_atomic(page, KM_IRQ0); 722 buf = kmap_atomic(page, KM_IRQ0);
720 723
721 /* do the actual data transfer */ 724 /* do the actual data transfer */
722 ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, 725 ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
723 do_write); 726 do_write);
724 727
725 kunmap_atomic(buf, KM_IRQ0); 728 kunmap_atomic(buf, KM_IRQ0);
726 local_irq_restore(flags); 729 local_irq_restore(flags);
727 } else { 730 } else {
728 buf = page_address(page); 731 buf = page_address(page);
729 ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, 732 ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
730 do_write); 733 do_write);
731 } 734 }
732 735
733 if (!do_write && !PageSlab(page)) 736 if (!do_write && !PageSlab(page))
734 flush_dcache_page(page); 737 flush_dcache_page(page);
735 738
736 qc->curbytes += qc->sect_size; 739 qc->curbytes += qc->sect_size;
737 qc->cursg_ofs += qc->sect_size; 740 qc->cursg_ofs += qc->sect_size;
738 741
739 if (qc->cursg_ofs == qc->cursg->length) { 742 if (qc->cursg_ofs == qc->cursg->length) {
740 qc->cursg = sg_next(qc->cursg); 743 qc->cursg = sg_next(qc->cursg);
741 qc->cursg_ofs = 0; 744 qc->cursg_ofs = 0;
742 } 745 }
743 } 746 }
744 747
745 /** 748 /**
746 * ata_pio_sectors - Transfer one or many sectors. 749 * ata_pio_sectors - Transfer one or many sectors.
747 * @qc: Command in progress 750 * @qc: Command in progress
748 * 751 *
749 * Transfer one or many sectors of data from/to the 752 * Transfer one or many sectors of data from/to the
750 * ATA device for the DRQ request. 753 * ATA device for the DRQ request.
751 * 754 *
752 * LOCKING: 755 * LOCKING:
753 * Inherited from caller. 756 * Inherited from caller.
754 */ 757 */
755 static void ata_pio_sectors(struct ata_queued_cmd *qc) 758 static void ata_pio_sectors(struct ata_queued_cmd *qc)
756 { 759 {
757 if (is_multi_taskfile(&qc->tf)) { 760 if (is_multi_taskfile(&qc->tf)) {
758 /* READ/WRITE MULTIPLE */ 761 /* READ/WRITE MULTIPLE */
759 unsigned int nsect; 762 unsigned int nsect;
760 763
761 WARN_ON_ONCE(qc->dev->multi_count == 0); 764 WARN_ON_ONCE(qc->dev->multi_count == 0);
762 765
763 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size, 766 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
764 qc->dev->multi_count); 767 qc->dev->multi_count);
765 while (nsect--) 768 while (nsect--)
766 ata_pio_sector(qc); 769 ata_pio_sector(qc);
767 } else 770 } else
768 ata_pio_sector(qc); 771 ata_pio_sector(qc);
769 772
770 ata_sff_sync(qc->ap); /* flush */ 773 ata_sff_sync(qc->ap); /* flush */
771 } 774 }
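For example, a READ MULTIPLE with multi_count = 8 and five sectors left in the command yields nsect = min(5, 8) = 5, so the final DRQ block moves the five remaining sectors in a single burst before the flush.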
772 775
773 /** 776 /**
774 * atapi_send_cdb - Write CDB bytes to hardware 777 * atapi_send_cdb - Write CDB bytes to hardware
775 * @ap: Port to which ATAPI device is attached. 778 * @ap: Port to which ATAPI device is attached.
776 * @qc: Taskfile currently active 779 * @qc: Taskfile currently active
777 * 780 *
778 * When the device has indicated its readiness to accept 781 * When the device has indicated its readiness to accept
779 * a CDB, this function is called. Send the CDB. 782 * a CDB, this function is called. Send the CDB.
780 * 783 *
781 * LOCKING: 784 * LOCKING:
782 * Inherited from caller. 785 * Inherited from caller.
783 */ 786 */
784 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc) 787 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
785 { 788 {
786 /* send SCSI cdb */ 789 /* send SCSI cdb */
787 DPRINTK("send cdb\n"); 790 DPRINTK("send cdb\n");
788 WARN_ON_ONCE(qc->dev->cdb_len < 12); 791 WARN_ON_ONCE(qc->dev->cdb_len < 12);
789 792
790 ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1); 793 ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
791 ata_sff_sync(ap); 794 ata_sff_sync(ap);
792 /* FIXME: If the CDB is for DMA do we need to do the transition delay 795 /* FIXME: If the CDB is for DMA do we need to do the transition delay
793 or is bmdma_start guaranteed to do it ? */ 796 or is bmdma_start guaranteed to do it ? */
794 switch (qc->tf.protocol) { 797 switch (qc->tf.protocol) {
795 case ATAPI_PROT_PIO: 798 case ATAPI_PROT_PIO:
796 ap->hsm_task_state = HSM_ST; 799 ap->hsm_task_state = HSM_ST;
797 break; 800 break;
798 case ATAPI_PROT_NODATA: 801 case ATAPI_PROT_NODATA:
799 ap->hsm_task_state = HSM_ST_LAST; 802 ap->hsm_task_state = HSM_ST_LAST;
800 break; 803 break;
801 #ifdef CONFIG_ATA_BMDMA 804 #ifdef CONFIG_ATA_BMDMA
802 case ATAPI_PROT_DMA: 805 case ATAPI_PROT_DMA:
803 ap->hsm_task_state = HSM_ST_LAST; 806 ap->hsm_task_state = HSM_ST_LAST;
804 /* initiate bmdma */ 807 /* initiate bmdma */
805 ap->ops->bmdma_start(qc); 808 ap->ops->bmdma_start(qc);
806 break; 809 break;
807 #endif /* CONFIG_ATA_BMDMA */ 810 #endif /* CONFIG_ATA_BMDMA */
808 default: 811 default:
809 BUG(); 812 BUG();
810 } 813 }
811 } 814 }
812 815
813 /** 816 /**
814 * __atapi_pio_bytes - Transfer data from/to the ATAPI device. 817 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
815 * @qc: Command in progress 818 * @qc: Command in progress
816 * @bytes: number of bytes 819 * @bytes: number of bytes
817 * 820 *
818 * Transfer data from/to the ATAPI device. 821 * Transfer data from/to the ATAPI device.
819 * 822 *
820 * LOCKING: 823 * LOCKING:
821 * Inherited from caller. 824 * Inherited from caller.
822 * 825 *
823 */ 826 */
824 static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) 827 static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
825 { 828 {
826 int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ; 829 int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
827 struct ata_port *ap = qc->ap; 830 struct ata_port *ap = qc->ap;
828 struct ata_device *dev = qc->dev; 831 struct ata_device *dev = qc->dev;
829 struct ata_eh_info *ehi = &dev->link->eh_info; 832 struct ata_eh_info *ehi = &dev->link->eh_info;
830 struct scatterlist *sg; 833 struct scatterlist *sg;
831 struct page *page; 834 struct page *page;
832 unsigned char *buf; 835 unsigned char *buf;
833 unsigned int offset, count, consumed; 836 unsigned int offset, count, consumed;
834 837
835 next_sg: 838 next_sg:
836 sg = qc->cursg; 839 sg = qc->cursg;
837 if (unlikely(!sg)) { 840 if (unlikely(!sg)) {
838 ata_ehi_push_desc(ehi, "unexpected or too much trailing data " 841 ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
839 "buf=%u cur=%u bytes=%u", 842 "buf=%u cur=%u bytes=%u",
840 qc->nbytes, qc->curbytes, bytes); 843 qc->nbytes, qc->curbytes, bytes);
841 return -1; 844 return -1;
842 } 845 }
843 846
844 page = sg_page(sg); 847 page = sg_page(sg);
845 offset = sg->offset + qc->cursg_ofs; 848 offset = sg->offset + qc->cursg_ofs;
846 849
847 /* get the current page and offset */ 850 /* get the current page and offset */
848 page = nth_page(page, (offset >> PAGE_SHIFT)); 851 page = nth_page(page, (offset >> PAGE_SHIFT));
849 offset %= PAGE_SIZE; 852 offset %= PAGE_SIZE;
850 853
851 /* don't overrun current sg */ 854 /* don't overrun current sg */
852 count = min(sg->length - qc->cursg_ofs, bytes); 855 count = min(sg->length - qc->cursg_ofs, bytes);
853 856
854 /* don't cross page boundaries */ 857 /* don't cross page boundaries */
855 count = min(count, (unsigned int)PAGE_SIZE - offset); 858 count = min(count, (unsigned int)PAGE_SIZE - offset);
856 859
857 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); 860 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
858 861
859 if (PageHighMem(page)) { 862 if (PageHighMem(page)) {
860 unsigned long flags; 863 unsigned long flags;
861 864
862 /* FIXME: use bounce buffer */ 865 /* FIXME: use bounce buffer */
863 local_irq_save(flags); 866 local_irq_save(flags);
864 buf = kmap_atomic(page, KM_IRQ0); 867 buf = kmap_atomic(page, KM_IRQ0);
865 868
866 /* do the actual data transfer */ 869 /* do the actual data transfer */
867 consumed = ap->ops->sff_data_xfer(dev, buf + offset, 870 consumed = ap->ops->sff_data_xfer(dev, buf + offset,
868 count, rw); 871 count, rw);
869 872
870 kunmap_atomic(buf, KM_IRQ0); 873 kunmap_atomic(buf, KM_IRQ0);
871 local_irq_restore(flags); 874 local_irq_restore(flags);
872 } else { 875 } else {
873 buf = page_address(page); 876 buf = page_address(page);
874 consumed = ap->ops->sff_data_xfer(dev, buf + offset, 877 consumed = ap->ops->sff_data_xfer(dev, buf + offset,
875 count, rw); 878 count, rw);
876 } 879 }
877 880
878 bytes -= min(bytes, consumed); 881 bytes -= min(bytes, consumed);
879 qc->curbytes += count; 882 qc->curbytes += count;
880 qc->cursg_ofs += count; 883 qc->cursg_ofs += count;
881 884
882 if (qc->cursg_ofs == sg->length) { 885 if (qc->cursg_ofs == sg->length) {
883 qc->cursg = sg_next(qc->cursg); 886 qc->cursg = sg_next(qc->cursg);
884 qc->cursg_ofs = 0; 887 qc->cursg_ofs = 0;
885 } 888 }
886 889
887 /* 890 /*
888 * There used to be a WARN_ON_ONCE(qc->cursg && count != consumed); 891 * There used to be a WARN_ON_ONCE(qc->cursg && count != consumed);
889 * Unfortunately __atapi_pio_bytes doesn't know enough to do the WARN 892 * Unfortunately __atapi_pio_bytes doesn't know enough to do the WARN
890 * check correctly as it doesn't know if it is the last request being 893 * check correctly as it doesn't know if it is the last request being
891 * made. Somebody should implement a proper sanity check. 894 * made. Somebody should implement a proper sanity check.
892 */ 895 */
893 if (bytes) 896 if (bytes)
894 goto next_sg; 897 goto next_sg;
895 return 0; 898 return 0;
896 } 899 }
897 900
898 /** 901 /**
899 * atapi_pio_bytes - Transfer data from/to the ATAPI device. 902 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
900 * @qc: Command in progress 903 * @qc: Command in progress
901 * 904 *
902 * Transfer data from/to the ATAPI device. 905 * Transfer data from/to the ATAPI device.
903 * 906 *
904 * LOCKING: 907 * LOCKING:
905 * Inherited from caller. 908 * Inherited from caller.
906 */ 909 */
907 static void atapi_pio_bytes(struct ata_queued_cmd *qc) 910 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
908 { 911 {
909 struct ata_port *ap = qc->ap; 912 struct ata_port *ap = qc->ap;
910 struct ata_device *dev = qc->dev; 913 struct ata_device *dev = qc->dev;
911 struct ata_eh_info *ehi = &dev->link->eh_info; 914 struct ata_eh_info *ehi = &dev->link->eh_info;
912 unsigned int ireason, bc_lo, bc_hi, bytes; 915 unsigned int ireason, bc_lo, bc_hi, bytes;
913 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0; 916 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
914 917
915 /* Abuse qc->result_tf for temp storage of intermediate TF 918 /* Abuse qc->result_tf for temp storage of intermediate TF
916 * here to save some kernel stack usage. 919 * here to save some kernel stack usage.
917 * For normal completion, qc->result_tf is not relevant. For 920 * For normal completion, qc->result_tf is not relevant. For
918 * error, qc->result_tf is later overwritten by ata_qc_complete(). 921 * error, qc->result_tf is later overwritten by ata_qc_complete().
919 * So, the correctness of qc->result_tf is not affected. 922 * So, the correctness of qc->result_tf is not affected.
920 */ 923 */
921 ap->ops->sff_tf_read(ap, &qc->result_tf); 924 ap->ops->sff_tf_read(ap, &qc->result_tf);
922 ireason = qc->result_tf.nsect; 925 ireason = qc->result_tf.nsect;
923 bc_lo = qc->result_tf.lbam; 926 bc_lo = qc->result_tf.lbam;
924 bc_hi = qc->result_tf.lbah; 927 bc_hi = qc->result_tf.lbah;
925 bytes = (bc_hi << 8) | bc_lo; 928 bytes = (bc_hi << 8) | bc_lo;
926 929
927 /* shall be cleared to zero, indicating xfer of data */ 930 /* shall be cleared to zero, indicating xfer of data */
928 if (unlikely(ireason & (1 << 0))) 931 if (unlikely(ireason & (1 << 0)))
929 goto atapi_check; 932 goto atapi_check;
930 933
931 /* make sure transfer direction matches expected */ 934 /* make sure transfer direction matches expected */
932 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0; 935 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
933 if (unlikely(do_write != i_write)) 936 if (unlikely(do_write != i_write))
934 goto atapi_check; 937 goto atapi_check;
935 938
936 if (unlikely(!bytes)) 939 if (unlikely(!bytes))
937 goto atapi_check; 940 goto atapi_check;
938 941
939 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes); 942 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
940 943
941 if (unlikely(__atapi_pio_bytes(qc, bytes))) 944 if (unlikely(__atapi_pio_bytes(qc, bytes)))
942 goto err_out; 945 goto err_out;
943 ata_sff_sync(ap); /* flush */ 946 ata_sff_sync(ap); /* flush */
944 947
945 return; 948 return;
946 949
947 atapi_check: 950 atapi_check:
948 ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)", 951 ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
949 ireason, bytes); 952 ireason, bytes);
950 err_out: 953 err_out:
951 qc->err_mask |= AC_ERR_HSM; 954 qc->err_mask |= AC_ERR_HSM;
952 ap->hsm_task_state = HSM_ST_ERR; 955 ap->hsm_task_state = HSM_ST_ERR;
953 } 956 }
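Worked example of the byte-count decode: an ATAPI device advertising a 512-byte DRQ block returns lbam = 0x00 and lbah = 0x02, so bytes = (0x02 << 8) | 0x00 = 512, which is then handed to __atapi_pio_bytes().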
954 957
955 /** 958 /**
956 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue. 959 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
957 * @ap: the target ata_port 960 * @ap: the target ata_port
958 * @qc: qc in progress 961 * @qc: qc in progress
959 * 962 *
960 * RETURNS: 963 * RETURNS:
961 * 1 if ok in workqueue, 0 otherwise. 964 * 1 if ok in workqueue, 0 otherwise.
962 */ 965 */
963 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, 966 static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
964 struct ata_queued_cmd *qc) 967 struct ata_queued_cmd *qc)
965 { 968 {
966 if (qc->tf.flags & ATA_TFLAG_POLLING) 969 if (qc->tf.flags & ATA_TFLAG_POLLING)
967 return 1; 970 return 1;
968 971
969 if (ap->hsm_task_state == HSM_ST_FIRST) { 972 if (ap->hsm_task_state == HSM_ST_FIRST) {
970 if (qc->tf.protocol == ATA_PROT_PIO && 973 if (qc->tf.protocol == ATA_PROT_PIO &&
971 (qc->tf.flags & ATA_TFLAG_WRITE)) 974 (qc->tf.flags & ATA_TFLAG_WRITE))
972 return 1; 975 return 1;
973 976
974 if (ata_is_atapi(qc->tf.protocol) && 977 if (ata_is_atapi(qc->tf.protocol) &&
975 !(qc->dev->flags & ATA_DFLAG_CDB_INTR)) 978 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
976 return 1; 979 return 1;
977 } 980 }
978 981
979 return 0; 982 return 0;
980 } 983 }
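For example, a polled command always stays in the workqueue, and so does the first data block of a PIO write or the CDB of an ATAPI device without ATA_DFLAG_CDB_INTR; an ATAPI device that raises its own CDB interrupt must instead be serviced from the interrupt handler.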
981 984
982 /** 985 /**
983 * ata_hsm_qc_complete - finish a qc running on standard HSM 986 * ata_hsm_qc_complete - finish a qc running on standard HSM
984 * @qc: Command to complete 987 * @qc: Command to complete
985 * @in_wq: 1 if called from workqueue, 0 otherwise 988 * @in_wq: 1 if called from workqueue, 0 otherwise
986 * 989 *
987 * Finish @qc which is running on standard HSM. 990 * Finish @qc which is running on standard HSM.
988 * 991 *
989 * LOCKING: 992 * LOCKING:
990 * If @in_wq is zero, spin_lock_irqsave(host lock). 993 * If @in_wq is zero, spin_lock_irqsave(host lock).
991 * Otherwise, none on entry and grabs host lock. 994 * Otherwise, none on entry and grabs host lock.
992 */ 995 */
993 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) 996 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
994 { 997 {
995 struct ata_port *ap = qc->ap; 998 struct ata_port *ap = qc->ap;
996 unsigned long flags; 999 unsigned long flags;
997 1000
998 if (ap->ops->error_handler) { 1001 if (ap->ops->error_handler) {
999 if (in_wq) { 1002 if (in_wq) {
1000 spin_lock_irqsave(ap->lock, flags); 1003 spin_lock_irqsave(ap->lock, flags);
1001 1004
1002 /* EH might have kicked in while host lock is 1005 /* EH might have kicked in while host lock is
1003 * released. 1006 * released.
1004 */ 1007 */
1005 qc = ata_qc_from_tag(ap, qc->tag); 1008 qc = ata_qc_from_tag(ap, qc->tag);
1006 if (qc) { 1009 if (qc) {
1007 if (likely(!(qc->err_mask & AC_ERR_HSM))) { 1010 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
1008 ata_sff_irq_on(ap); 1011 ata_sff_irq_on(ap);
1009 ata_qc_complete(qc); 1012 ata_qc_complete(qc);
1010 } else 1013 } else
1011 ata_port_freeze(ap); 1014 ata_port_freeze(ap);
1012 } 1015 }
1013 1016
1014 spin_unlock_irqrestore(ap->lock, flags); 1017 spin_unlock_irqrestore(ap->lock, flags);
1015 } else { 1018 } else {
1016 if (likely(!(qc->err_mask & AC_ERR_HSM))) 1019 if (likely(!(qc->err_mask & AC_ERR_HSM)))
1017 ata_qc_complete(qc); 1020 ata_qc_complete(qc);
1018 else 1021 else
1019 ata_port_freeze(ap); 1022 ata_port_freeze(ap);
1020 } 1023 }
1021 } else { 1024 } else {
1022 if (in_wq) { 1025 if (in_wq) {
1023 spin_lock_irqsave(ap->lock, flags); 1026 spin_lock_irqsave(ap->lock, flags);
1024 ata_sff_irq_on(ap); 1027 ata_sff_irq_on(ap);
1025 ata_qc_complete(qc); 1028 ata_qc_complete(qc);
1026 spin_unlock_irqrestore(ap->lock, flags); 1029 spin_unlock_irqrestore(ap->lock, flags);
1027 } else 1030 } else
1028 ata_qc_complete(qc); 1031 ata_qc_complete(qc);
1029 } 1032 }
1030 } 1033 }
1031 1034
1032 /** 1035 /**
1033 * ata_sff_hsm_move - move the HSM to the next state. 1036 * ata_sff_hsm_move - move the HSM to the next state.
1034 * @ap: the target ata_port 1037 * @ap: the target ata_port
1035 * @qc: qc in progress 1038 * @qc: qc in progress
1036 * @status: current device status 1039 * @status: current device status
1037 * @in_wq: 1 if called from workqueue, 0 otherwise 1040 * @in_wq: 1 if called from workqueue, 0 otherwise
1038 * 1041 *
1039 * RETURNS: 1042 * RETURNS:
1040 * 1 when poll next status needed, 0 otherwise. 1043 * 1 when poll next status needed, 0 otherwise.
1041 */ 1044 */
1042 int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, 1045 int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
1043 u8 status, int in_wq) 1046 u8 status, int in_wq)
1044 { 1047 {
1045 struct ata_eh_info *ehi = &ap->link.eh_info; 1048 struct ata_link *link = qc->dev->link;
1049 struct ata_eh_info *ehi = &link->eh_info;
1046 unsigned long flags = 0; 1050 unsigned long flags = 0;
1047 int poll_next; 1051 int poll_next;
1048 1052
1049 WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0); 1053 WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
1050 1054
1051 /* Make sure ata_sff_qc_issue() does not throw things 1055 /* Make sure ata_sff_qc_issue() does not throw things
1052 * like DMA polling into the workqueue. Notice that 1056 * like DMA polling into the workqueue. Notice that
1053 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING). 1057 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
1054 */ 1058 */
1055 WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc)); 1059 WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));
1056 1060
1057 fsm_start: 1061 fsm_start:
1058 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n", 1062 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
1059 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status); 1063 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
1060 1064
1061 switch (ap->hsm_task_state) { 1065 switch (ap->hsm_task_state) {
1062 case HSM_ST_FIRST: 1066 case HSM_ST_FIRST:
1063 /* Send first data block or PACKET CDB */ 1067 /* Send first data block or PACKET CDB */
1064 1068
1065 /* If polling, we will stay in the work queue after 1069 /* If polling, we will stay in the work queue after
1066 * sending the data. Otherwise, interrupt handler 1070 * sending the data. Otherwise, interrupt handler
1067 * takes over after sending the data. 1071 * takes over after sending the data.
1068 */ 1072 */
1069 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING); 1073 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
1070 1074
1071 /* check device status */ 1075 /* check device status */
1072 if (unlikely((status & ATA_DRQ) == 0)) { 1076 if (unlikely((status & ATA_DRQ) == 0)) {
1073 /* handle BSY=0, DRQ=0 as error */ 1077 /* handle BSY=0, DRQ=0 as error */
1074 if (likely(status & (ATA_ERR | ATA_DF))) 1078 if (likely(status & (ATA_ERR | ATA_DF)))
1075 /* device stops HSM for abort/error */ 1079 /* device stops HSM for abort/error */
1076 qc->err_mask |= AC_ERR_DEV; 1080 qc->err_mask |= AC_ERR_DEV;
1077 else { 1081 else {
1078 /* HSM violation. Let EH handle this */ 1082 /* HSM violation. Let EH handle this */
1079 ata_ehi_push_desc(ehi, 1083 ata_ehi_push_desc(ehi,
1080 "ST_FIRST: !(DRQ|ERR|DF)"); 1084 "ST_FIRST: !(DRQ|ERR|DF)");
1081 qc->err_mask |= AC_ERR_HSM; 1085 qc->err_mask |= AC_ERR_HSM;
1082 } 1086 }
1083 1087
1084 ap->hsm_task_state = HSM_ST_ERR; 1088 ap->hsm_task_state = HSM_ST_ERR;
1085 goto fsm_start; 1089 goto fsm_start;
1086 } 1090 }
1087 1091
1088 /* Device should not ask for data transfer (DRQ=1) 1092 /* Device should not ask for data transfer (DRQ=1)
1089 * when it finds something wrong. 1093 * when it finds something wrong.
1090 * We ignore DRQ here and stop the HSM by 1094 * We ignore DRQ here and stop the HSM by
1091 * changing hsm_task_state to HSM_ST_ERR and 1095 * changing hsm_task_state to HSM_ST_ERR and
1092 * let the EH abort the command or reset the device. 1096 * let the EH abort the command or reset the device.
1093 */ 1097 */
1094 if (unlikely(status & (ATA_ERR | ATA_DF))) { 1098 if (unlikely(status & (ATA_ERR | ATA_DF))) {
1095 /* Some ATAPI tape drives forget to clear the ERR bit 1099 /* Some ATAPI tape drives forget to clear the ERR bit
1096 * when doing the next command (mostly request sense). 1100 * when doing the next command (mostly request sense).
1097 * We ignore ERR here as a workaround and proceed sending 1101 * We ignore ERR here as a workaround and proceed sending
1098 * the CDB. 1102 * the CDB.
1099 */ 1103 */
1100 if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) { 1104 if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
1101 ata_ehi_push_desc(ehi, "ST_FIRST: " 1105 ata_ehi_push_desc(ehi, "ST_FIRST: "
1102 "DRQ=1 with device error, " 1106 "DRQ=1 with device error, "
1103 "dev_stat 0x%X", status); 1107 "dev_stat 0x%X", status);
1104 qc->err_mask |= AC_ERR_HSM; 1108 qc->err_mask |= AC_ERR_HSM;
1105 ap->hsm_task_state = HSM_ST_ERR; 1109 ap->hsm_task_state = HSM_ST_ERR;
1106 goto fsm_start; 1110 goto fsm_start;
1107 } 1111 }
1108 } 1112 }
1109 1113
1110 /* Send the CDB (atapi) or the first data block (ata pio out). 1114 /* Send the CDB (atapi) or the first data block (ata pio out).
1111 * During the state transition, interrupt handler shouldn't 1115 * During the state transition, interrupt handler shouldn't
1112 * be invoked before the data transfer is complete and 1116 * be invoked before the data transfer is complete and
1113 * hsm_task_state is changed. Hence, the following locking. 1117 * hsm_task_state is changed. Hence, the following locking.
1114 */ 1118 */
1115 if (in_wq) 1119 if (in_wq)
1116 spin_lock_irqsave(ap->lock, flags); 1120 spin_lock_irqsave(ap->lock, flags);
1117 1121
1118 if (qc->tf.protocol == ATA_PROT_PIO) { 1122 if (qc->tf.protocol == ATA_PROT_PIO) {
1119 /* PIO data out protocol. 1123 /* PIO data out protocol.
1120 * send first data block. 1124 * send first data block.
1121 */ 1125 */
1122 1126
1123 /* ata_pio_sectors() might change the state 1127 /* ata_pio_sectors() might change the state
1124 * to HSM_ST_LAST. so, the state is changed here 1128 * to HSM_ST_LAST. so, the state is changed here
1125 * before ata_pio_sectors(). 1129 * before ata_pio_sectors().
1126 */ 1130 */
1127 ap->hsm_task_state = HSM_ST; 1131 ap->hsm_task_state = HSM_ST;
1128 ata_pio_sectors(qc); 1132 ata_pio_sectors(qc);
1129 } else 1133 } else
1130 /* send CDB */ 1134 /* send CDB */
1131 atapi_send_cdb(ap, qc); 1135 atapi_send_cdb(ap, qc);
1132 1136
1133 if (in_wq) 1137 if (in_wq)
1134 spin_unlock_irqrestore(ap->lock, flags); 1138 spin_unlock_irqrestore(ap->lock, flags);
1135 1139
1136 /* if polling, ata_sff_pio_task() handles the rest. 1140 /* if polling, ata_sff_pio_task() handles the rest.
1137 * otherwise, interrupt handler takes over from here. 1141 * otherwise, interrupt handler takes over from here.
1138 */ 1142 */
1139 break; 1143 break;
1140 1144
1141 case HSM_ST: 1145 case HSM_ST:
1142 /* complete command or read/write the data register */ 1146 /* complete command or read/write the data register */
1143 if (qc->tf.protocol == ATAPI_PROT_PIO) { 1147 if (qc->tf.protocol == ATAPI_PROT_PIO) {
1144 /* ATAPI PIO protocol */ 1148 /* ATAPI PIO protocol */
1145 if ((status & ATA_DRQ) == 0) { 1149 if ((status & ATA_DRQ) == 0) {
1146 /* No more data to transfer or device error. 1150 /* No more data to transfer or device error.
1147 * Device error will be tagged in HSM_ST_LAST. 1151 * Device error will be tagged in HSM_ST_LAST.
1148 */ 1152 */
1149 ap->hsm_task_state = HSM_ST_LAST; 1153 ap->hsm_task_state = HSM_ST_LAST;
1150 goto fsm_start; 1154 goto fsm_start;
1151 } 1155 }
1152 1156
1153 /* Device should not ask for data transfer (DRQ=1) 1157 /* Device should not ask for data transfer (DRQ=1)
1154 * when it finds something wrong. 1158 * when it finds something wrong.
1155 * We ignore DRQ here and stop the HSM by 1159 * We ignore DRQ here and stop the HSM by
1156 * changing hsm_task_state to HSM_ST_ERR and 1160 * changing hsm_task_state to HSM_ST_ERR and
1157 * let the EH abort the command or reset the device. 1161 * let the EH abort the command or reset the device.
1158 */ 1162 */
1159 if (unlikely(status & (ATA_ERR | ATA_DF))) { 1163 if (unlikely(status & (ATA_ERR | ATA_DF))) {
1160 ata_ehi_push_desc(ehi, "ST-ATAPI: " 1164 ata_ehi_push_desc(ehi, "ST-ATAPI: "
1161 "DRQ=1 with device error, " 1165 "DRQ=1 with device error, "
1162 "dev_stat 0x%X", status); 1166 "dev_stat 0x%X", status);
1163 qc->err_mask |= AC_ERR_HSM; 1167 qc->err_mask |= AC_ERR_HSM;
1164 ap->hsm_task_state = HSM_ST_ERR; 1168 ap->hsm_task_state = HSM_ST_ERR;
1165 goto fsm_start; 1169 goto fsm_start;
1166 } 1170 }
1167 1171
1168 atapi_pio_bytes(qc); 1172 atapi_pio_bytes(qc);
1169 1173
1170 if (unlikely(ap->hsm_task_state == HSM_ST_ERR)) 1174 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
1171 /* bad ireason reported by device */ 1175 /* bad ireason reported by device */
1172 goto fsm_start; 1176 goto fsm_start;
1173 1177
1174 } else { 1178 } else {
1175 /* ATA PIO protocol */ 1179 /* ATA PIO protocol */
1176 if (unlikely((status & ATA_DRQ) == 0)) { 1180 if (unlikely((status & ATA_DRQ) == 0)) {
1177 /* handle BSY=0, DRQ=0 as error */ 1181 /* handle BSY=0, DRQ=0 as error */
1178 if (likely(status & (ATA_ERR | ATA_DF))) { 1182 if (likely(status & (ATA_ERR | ATA_DF))) {
1179 /* device stops HSM for abort/error */ 1183 /* device stops HSM for abort/error */
1180 qc->err_mask |= AC_ERR_DEV; 1184 qc->err_mask |= AC_ERR_DEV;
1181 1185
1182 /* If diagnostic failed and this is 1186 /* If diagnostic failed and this is
1183 * IDENTIFY, it's likely a phantom 1187 * IDENTIFY, it's likely a phantom
1184 * device. Mark hint. 1188 * device. Mark hint.
1185 */ 1189 */
1186 if (qc->dev->horkage & 1190 if (qc->dev->horkage &
1187 ATA_HORKAGE_DIAGNOSTIC) 1191 ATA_HORKAGE_DIAGNOSTIC)
1188 qc->err_mask |= 1192 qc->err_mask |=
1189 AC_ERR_NODEV_HINT; 1193 AC_ERR_NODEV_HINT;
1190 } else { 1194 } else {
1191 /* HSM violation. Let EH handle this. 1195 /* HSM violation. Let EH handle this.
1192 * Phantom devices also trigger this 1196 * Phantom devices also trigger this
1193 * condition. Mark hint. 1197 * condition. Mark hint.
1194 */ 1198 */
1195 ata_ehi_push_desc(ehi, "ST-ATA: " 1199 ata_ehi_push_desc(ehi, "ST-ATA: "
1196 "DRQ=0 without device error, " 1200 "DRQ=0 without device error, "
1197 "dev_stat 0x%X", status); 1201 "dev_stat 0x%X", status);
1198 qc->err_mask |= AC_ERR_HSM | 1202 qc->err_mask |= AC_ERR_HSM |
1199 AC_ERR_NODEV_HINT; 1203 AC_ERR_NODEV_HINT;
1200 } 1204 }
1201 1205
1202 ap->hsm_task_state = HSM_ST_ERR; 1206 ap->hsm_task_state = HSM_ST_ERR;
1203 goto fsm_start; 1207 goto fsm_start;
1204 } 1208 }
1205 1209
1206 /* For PIO reads, some devices may ask for 1210 /* For PIO reads, some devices may ask for
1207 * data transfer (DRQ=1) along with ERR=1. 1211 * data transfer (DRQ=1) along with ERR=1.
1208 * We respect DRQ here and transfer one 1212 * We respect DRQ here and transfer one
1209 * block of junk data before changing the 1213 * block of junk data before changing the
1210 * hsm_task_state to HSM_ST_ERR. 1214 * hsm_task_state to HSM_ST_ERR.
1211 * 1215 *
1212 * For PIO writes, ERR=1 DRQ=1 doesn't make 1216 * For PIO writes, ERR=1 DRQ=1 doesn't make
1213 * sense since the data block has been 1217 * sense since the data block has been
1214 * transferred to the device. 1218 * transferred to the device.
1215 */ 1219 */
1216 if (unlikely(status & (ATA_ERR | ATA_DF))) { 1220 if (unlikely(status & (ATA_ERR | ATA_DF))) {
1217 /* data might be corrupted 1221 /* data might be corrupted
1218 qc->err_mask |= AC_ERR_DEV; 1222 qc->err_mask |= AC_ERR_DEV;
1219 1223
1220 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) { 1224 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
1221 ata_pio_sectors(qc); 1225 ata_pio_sectors(qc);
1222 status = ata_wait_idle(ap); 1226 status = ata_wait_idle(ap);
1223 } 1227 }
1224 1228
1225 if (status & (ATA_BUSY | ATA_DRQ)) { 1229 if (status & (ATA_BUSY | ATA_DRQ)) {
1226 ata_ehi_push_desc(ehi, "ST-ATA: " 1230 ata_ehi_push_desc(ehi, "ST-ATA: "
1227 "BUSY|DRQ persists on ERR|DF, " 1231 "BUSY|DRQ persists on ERR|DF, "
1228 "dev_stat 0x%X", status); 1232 "dev_stat 0x%X", status);
1229 qc->err_mask |= AC_ERR_HSM; 1233 qc->err_mask |= AC_ERR_HSM;
1230 } 1234 }
1231 1235
1232 /* There are oddball controllers with 1236 /* There are oddball controllers with
1233 * status register stuck at 0x7f and 1237 * status register stuck at 0x7f and
1234 * lbal/m/h at zero which makes it 1238 * lbal/m/h at zero which makes it
1235 * pass all other presence detection 1239 * pass all other presence detection
1236 * mechanisms we have. Set NODEV_HINT 1240 * mechanisms we have. Set NODEV_HINT
1237 * for it. Kernel bz#7241. 1241 * for it. Kernel bz#7241.
1238 */ 1242 */
1239 if (status == 0x7f) 1243 if (status == 0x7f)
1240 qc->err_mask |= AC_ERR_NODEV_HINT; 1244 qc->err_mask |= AC_ERR_NODEV_HINT;
1241 1245
1242 /* ata_pio_sectors() might change the 1246 /* ata_pio_sectors() might change the
1243 * state to HSM_ST_LAST. so, the state 1247 * state to HSM_ST_LAST. so, the state
1244 * is changed after ata_pio_sectors(). 1248 * is changed after ata_pio_sectors().
1245 */ 1249 */
1246 ap->hsm_task_state = HSM_ST_ERR; 1250 ap->hsm_task_state = HSM_ST_ERR;
1247 goto fsm_start; 1251 goto fsm_start;
1248 } 1252 }
1249 1253
1250 ata_pio_sectors(qc); 1254 ata_pio_sectors(qc);
1251 1255
1252 if (ap->hsm_task_state == HSM_ST_LAST && 1256 if (ap->hsm_task_state == HSM_ST_LAST &&
1253 (!(qc->tf.flags & ATA_TFLAG_WRITE))) { 1257 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
1254 /* all data read */ 1258 /* all data read */
1255 status = ata_wait_idle(ap); 1259 status = ata_wait_idle(ap);
1256 goto fsm_start; 1260 goto fsm_start;
1257 } 1261 }
1258 } 1262 }
1259 1263
1260 poll_next = 1; 1264 poll_next = 1;
1261 break; 1265 break;
1262 1266
1263 case HSM_ST_LAST: 1267 case HSM_ST_LAST:
1264 if (unlikely(!ata_ok(status))) { 1268 if (unlikely(!ata_ok(status))) {
1265 qc->err_mask |= __ac_err_mask(status); 1269 qc->err_mask |= __ac_err_mask(status);
1266 ap->hsm_task_state = HSM_ST_ERR; 1270 ap->hsm_task_state = HSM_ST_ERR;
1267 goto fsm_start; 1271 goto fsm_start;
1268 } 1272 }
1269 1273
1270 /* no more data to transfer */ 1274 /* no more data to transfer */
1271 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n", 1275 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
1272 ap->print_id, qc->dev->devno, status); 1276 ap->print_id, qc->dev->devno, status);
1273 1277
1274 WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM)); 1278 WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));
1275 1279
1276 ap->hsm_task_state = HSM_ST_IDLE; 1280 ap->hsm_task_state = HSM_ST_IDLE;
1277 1281
1278 /* complete taskfile transaction */ 1282 /* complete taskfile transaction */
1279 ata_hsm_qc_complete(qc, in_wq); 1283 ata_hsm_qc_complete(qc, in_wq);
1280 1284
1281 poll_next = 0; 1285 poll_next = 0;
1282 break; 1286 break;
1283 1287
1284 case HSM_ST_ERR: 1288 case HSM_ST_ERR:
1285 ap->hsm_task_state = HSM_ST_IDLE; 1289 ap->hsm_task_state = HSM_ST_IDLE;
1286 1290
1287 /* complete taskfile transaction */ 1291 /* complete taskfile transaction */
1288 ata_hsm_qc_complete(qc, in_wq); 1292 ata_hsm_qc_complete(qc, in_wq);
1289 1293
1290 poll_next = 0; 1294 poll_next = 0;
1291 break; 1295 break;
1292 default: 1296 default:
1293 poll_next = 0; 1297 poll_next = 0;
1294 BUG(); 1298 BUG();
1295 } 1299 }
1296 1300
1297 return poll_next; 1301 return poll_next;
1298 } 1302 }
1299 EXPORT_SYMBOL_GPL(ata_sff_hsm_move); 1303 EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
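The return value encodes the caller contract: 1 means poll again, 0 means the HSM has gone idle or will be driven by interrupts from here on. A minimal sketch of a polling caller under that contract, assuming workqueue context (my_poll_hsm is an illustrative name; ata_sff_pio_task() below is the real in-tree caller):

static void my_poll_hsm(struct ata_port *ap, struct ata_queued_cmd *qc)
{
        int poll_next;

        do {
                /* wait for BSY to clear, then feed the status to the HSM */
                u8 status = ata_sff_busy_wait(ap, ATA_BUSY, 1000);

                poll_next = ata_sff_hsm_move(ap, qc, status, 1 /* in_wq */);
        } while (poll_next);
}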
1300 1304
1301 void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay) 1305 void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
1302 { 1306 {
1307 struct ata_port *ap = link->ap;
1308
1309 WARN_ON((ap->sff_pio_task_link != NULL) &&
1310 (ap->sff_pio_task_link != link));
1311 ap->sff_pio_task_link = link;
1312
1303 /* may fail if ata_sff_flush_pio_task() in progress */ 1313 /* may fail if ata_sff_flush_pio_task() in progress */
1304 queue_delayed_work(ata_sff_wq, &ap->sff_pio_task, 1314 queue_delayed_work(ata_sff_wq, &ap->sff_pio_task,
1305 msecs_to_jiffies(delay)); 1315 msecs_to_jiffies(delay));
1306 } 1316 }
1307 EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task); 1317 EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);
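The queueing API now takes the ata_link rather than the port and records it in ap->sff_pio_task_link, so ata_sff_pio_task() below knows which link's active_tag to service; callers pass a delay of 0 to run immediately, or ATA_SHORT_PAUSE to back off while the drive is still busy, as the call sites further down show.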
1308 1318
1309 void ata_sff_flush_pio_task(struct ata_port *ap) 1319 void ata_sff_flush_pio_task(struct ata_port *ap)
1310 { 1320 {
1311 DPRINTK("ENTER\n"); 1321 DPRINTK("ENTER\n");
1312 1322
1313 cancel_rearming_delayed_work(&ap->sff_pio_task); 1323 cancel_rearming_delayed_work(&ap->sff_pio_task);
1314 ap->hsm_task_state = HSM_ST_IDLE; 1324 ap->hsm_task_state = HSM_ST_IDLE;
1315 1325
1316 if (ata_msg_ctl(ap)) 1326 if (ata_msg_ctl(ap))
1317 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__); 1327 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
1318 } 1328 }
1319 1329
1320 static void ata_sff_pio_task(struct work_struct *work) 1330 static void ata_sff_pio_task(struct work_struct *work)
1321 { 1331 {
1322 struct ata_port *ap = 1332 struct ata_port *ap =
1323 container_of(work, struct ata_port, sff_pio_task.work); 1333 container_of(work, struct ata_port, sff_pio_task.work);
1334 struct ata_link *link = ap->sff_pio_task_link;
1324 struct ata_queued_cmd *qc; 1335 struct ata_queued_cmd *qc;
1325 u8 status; 1336 u8 status;
1326 int poll_next; 1337 int poll_next;
1327 1338
1339 BUG_ON(ap->sff_pio_task_link == NULL);
1328 /* qc can be NULL if timeout occurred */ 1340 /* qc can be NULL if timeout occurred */
1329 qc = ata_qc_from_tag(ap, ap->link.active_tag); 1341 qc = ata_qc_from_tag(ap, link->active_tag);
1330 if (!qc) 1342 if (!qc) {
1343 ap->sff_pio_task_link = NULL;
1331 return; 1344 return;
1345 }
1332 1346
1333 fsm_start: 1347 fsm_start:
1334 WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE); 1348 WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
1335 1349
1336 /* 1350 /*
1337 * This is purely heuristic. This is a fast path. 1351 * This is purely heuristic. This is a fast path.
1338 * Sometimes when we enter, BSY will be cleared in 1352 * Sometimes when we enter, BSY will be cleared in
1339 * a chk-status or two. If not, the drive is probably seeking 1353 * a chk-status or two. If not, the drive is probably seeking
1340 * or something. Snooze for a couple msecs, then 1354 * or something. Snooze for a couple msecs, then
1341 * chk-status again. If still busy, queue delayed work. 1355 * chk-status again. If still busy, queue delayed work.
1342 */ 1356 */
1343 status = ata_sff_busy_wait(ap, ATA_BUSY, 5); 1357 status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
1344 if (status & ATA_BUSY) { 1358 if (status & ATA_BUSY) {
1345 msleep(2); 1359 msleep(2);
1346 status = ata_sff_busy_wait(ap, ATA_BUSY, 10); 1360 status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
1347 if (status & ATA_BUSY) { 1361 if (status & ATA_BUSY) {
1348 ata_sff_queue_pio_task(ap, ATA_SHORT_PAUSE); 1362 ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
1349 return; 1363 return;
1350 } 1364 }
1351 } 1365 }
1352 1366
1367 /*
1368 * hsm_move() may trigger another command to be processed.
1369 * Clear the link beforehand.
1370 */
1371 ap->sff_pio_task_link = NULL;
1353 /* move the HSM */ 1372 /* move the HSM */
1354 poll_next = ata_sff_hsm_move(ap, qc, status, 1); 1373 poll_next = ata_sff_hsm_move(ap, qc, status, 1);
1355 1374
1356 /* another command or interrupt handler 1375 /* another command or interrupt handler
1357 * may be running at this point. 1376 * may be running at this point.
1358 */ 1377 */
1359 if (poll_next) 1378 if (poll_next)
1360 goto fsm_start; 1379 goto fsm_start;
1361 } 1380 }
1362 1381
1363 /** 1382 /**
1364 * ata_sff_qc_issue - issue taskfile to a SFF controller 1383 * ata_sff_qc_issue - issue taskfile to a SFF controller
1365 * @qc: command to issue to device 1384 * @qc: command to issue to device
1366 * 1385 *
1367 * This function issues a PIO or NODATA command to a SFF 1386 * This function issues a PIO or NODATA command to a SFF
1368 * controller. 1387 * controller.
1369 * 1388 *
1370 * LOCKING: 1389 * LOCKING:
1371 * spin_lock_irqsave(host lock) 1390 * spin_lock_irqsave(host lock)
1372 * 1391 *
1373 * RETURNS: 1392 * RETURNS:
1374 * Zero on success, AC_ERR_* mask on failure 1393 * Zero on success, AC_ERR_* mask on failure
1375 */ 1394 */
1376 unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) 1395 unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1377 { 1396 {
1378 struct ata_port *ap = qc->ap; 1397 struct ata_port *ap = qc->ap;
1398 struct ata_link *link = qc->dev->link;
1379 1399
1380 /* Use polling pio if the LLD doesn't handle 1400 /* Use polling pio if the LLD doesn't handle
1381 * interrupt driven pio and atapi CDB interrupt. 1401 * interrupt driven pio and atapi CDB interrupt.
1382 */ 1402 */
1383 if (ap->flags & ATA_FLAG_PIO_POLLING) 1403 if (ap->flags & ATA_FLAG_PIO_POLLING)
1384 qc->tf.flags |= ATA_TFLAG_POLLING; 1404 qc->tf.flags |= ATA_TFLAG_POLLING;
1385 1405
1386 /* select the device */ 1406 /* select the device */
1387 ata_dev_select(ap, qc->dev->devno, 1, 0); 1407 ata_dev_select(ap, qc->dev->devno, 1, 0);
1388 1408
1389 /* start the command */ 1409 /* start the command */
1390 switch (qc->tf.protocol) { 1410 switch (qc->tf.protocol) {
1391 case ATA_PROT_NODATA: 1411 case ATA_PROT_NODATA:
1392 if (qc->tf.flags & ATA_TFLAG_POLLING) 1412 if (qc->tf.flags & ATA_TFLAG_POLLING)
1393 ata_qc_set_polling(qc); 1413 ata_qc_set_polling(qc);
1394 1414
1395 ata_tf_to_host(ap, &qc->tf); 1415 ata_tf_to_host(ap, &qc->tf);
1396 ap->hsm_task_state = HSM_ST_LAST; 1416 ap->hsm_task_state = HSM_ST_LAST;
1397 1417
1398 if (qc->tf.flags & ATA_TFLAG_POLLING) 1418 if (qc->tf.flags & ATA_TFLAG_POLLING)
1399 ata_sff_queue_pio_task(ap, 0); 1419 ata_sff_queue_pio_task(link, 0);
1400 1420
1401 break; 1421 break;
1402 1422
1403 case ATA_PROT_PIO: 1423 case ATA_PROT_PIO:
1404 if (qc->tf.flags & ATA_TFLAG_POLLING) 1424 if (qc->tf.flags & ATA_TFLAG_POLLING)
1405 ata_qc_set_polling(qc); 1425 ata_qc_set_polling(qc);
1406 1426
1407 ata_tf_to_host(ap, &qc->tf); 1427 ata_tf_to_host(ap, &qc->tf);
1408 1428
1409 if (qc->tf.flags & ATA_TFLAG_WRITE) { 1429 if (qc->tf.flags & ATA_TFLAG_WRITE) {
1410 /* PIO data out protocol */ 1430 /* PIO data out protocol */
1411 ap->hsm_task_state = HSM_ST_FIRST; 1431 ap->hsm_task_state = HSM_ST_FIRST;
1412 ata_sff_queue_pio_task(ap, 0); 1432 ata_sff_queue_pio_task(link, 0);
1413 1433
1414 /* always send first data block using the 1434 /* always send first data block using the
1415 * ata_sff_pio_task() codepath. 1435 * ata_sff_pio_task() codepath.
1416 */ 1436 */
1417 } else { 1437 } else {
1418 /* PIO data in protocol */ 1438 /* PIO data in protocol */
1419 ap->hsm_task_state = HSM_ST; 1439 ap->hsm_task_state = HSM_ST;
1420 1440
1421 if (qc->tf.flags & ATA_TFLAG_POLLING) 1441 if (qc->tf.flags & ATA_TFLAG_POLLING)
1422 ata_sff_queue_pio_task(ap, 0); 1442 ata_sff_queue_pio_task(link, 0);
1423 1443
1424 /* if polling, ata_sff_pio_task() handles the 1444 /* if polling, ata_sff_pio_task() handles the
1425 * rest. otherwise, interrupt handler takes 1445 * rest. otherwise, interrupt handler takes
1426 * over from here. 1446 * over from here.
1427 */ 1447 */
1428 } 1448 }
1429 1449
1430 break; 1450 break;
1431 1451
1432 case ATAPI_PROT_PIO: 1452 case ATAPI_PROT_PIO:
1433 case ATAPI_PROT_NODATA: 1453 case ATAPI_PROT_NODATA:
1434 if (qc->tf.flags & ATA_TFLAG_POLLING) 1454 if (qc->tf.flags & ATA_TFLAG_POLLING)
1435 ata_qc_set_polling(qc); 1455 ata_qc_set_polling(qc);
1436 1456
1437 ata_tf_to_host(ap, &qc->tf); 1457 ata_tf_to_host(ap, &qc->tf);
1438 1458
1439 ap->hsm_task_state = HSM_ST_FIRST; 1459 ap->hsm_task_state = HSM_ST_FIRST;
1440 1460
1441 /* send cdb by polling if no cdb interrupt */ 1461 /* send cdb by polling if no cdb interrupt */
1442 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || 1462 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
1443 (qc->tf.flags & ATA_TFLAG_POLLING)) 1463 (qc->tf.flags & ATA_TFLAG_POLLING))
1444 ata_sff_queue_pio_task(ap, 0); 1464 ata_sff_queue_pio_task(link, 0);
1445 break; 1465 break;
1446 1466
1447 default: 1467 default:
1448 WARN_ON_ONCE(1); 1468 WARN_ON_ONCE(1);
1449 return AC_ERR_SYSTEM; 1469 return AC_ERR_SYSTEM;
1450 } 1470 }
1451 1471
1452 return 0; 1472 return 0;
1453 } 1473 }
1454 EXPORT_SYMBOL_GPL(ata_sff_qc_issue); 1474 EXPORT_SYMBOL_GPL(ata_sff_qc_issue);
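A SFF driver normally inherits this hook (and the result-TF fill just below) rather than wiring it by hand. A minimal sketch, assuming a hypothetical controller that cannot do interrupt-driven PIO and therefore declares polling via the host flag checked at the top of this function (my_polling_ops and my_port_info are illustrative names):

static struct ata_port_operations my_polling_ops = {
        .inherits       = &ata_sff_port_ops,    /* supplies .qc_issue = ata_sff_qc_issue */
};

static const struct ata_port_info my_port_info = {
        .flags          = ATA_FLAG_PIO_POLLING,
        .pio_mask       = ATA_PIO4,
        .port_ops       = &my_polling_ops,
};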
1455 1475
1456 /** 1476 /**
1457 * ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read 1477 * ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read
1458 * @qc: qc to fill result TF for 1478 * @qc: qc to fill result TF for
1459 * 1479 *
1460 * @qc is finished and result TF needs to be filled. Fill it 1480 * @qc is finished and result TF needs to be filled. Fill it
1461 * using ->sff_tf_read. 1481 * using ->sff_tf_read.
1462 * 1482 *
1463 * LOCKING: 1483 * LOCKING:
1464 * spin_lock_irqsave(host lock) 1484 * spin_lock_irqsave(host lock)
1465 * 1485 *
1466 * RETURNS: 1486 * RETURNS:
1467 * true indicating that result TF is successfully filled. 1487 * true indicating that result TF is successfully filled.
1468 */ 1488 */
1469 bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc) 1489 bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
1470 { 1490 {
1471 qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf); 1491 qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
1472 return true; 1492 return true;
1473 } 1493 }
1474 EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf); 1494 EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);
1475 1495
1476 static unsigned int ata_sff_idle_irq(struct ata_port *ap) 1496 static unsigned int ata_sff_idle_irq(struct ata_port *ap)
1477 { 1497 {
1478 ap->stats.idle_irq++; 1498 ap->stats.idle_irq++;
1479 1499
1480 #ifdef ATA_IRQ_TRAP 1500 #ifdef ATA_IRQ_TRAP
1481 if ((ap->stats.idle_irq % 1000) == 0) { 1501 if ((ap->stats.idle_irq % 1000) == 0) {
1482 ap->ops->sff_check_status(ap); 1502 ap->ops->sff_check_status(ap);
1483 if (ap->ops->sff_irq_clear) 1503 if (ap->ops->sff_irq_clear)
1484 ap->ops->sff_irq_clear(ap); 1504 ap->ops->sff_irq_clear(ap);
1485 ata_port_printk(ap, KERN_WARNING, "irq trap\n"); 1505 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
1486 return 1; 1506 return 1;
1487 } 1507 }
1488 #endif 1508 #endif
1489 return 0; /* irq not handled */ 1509 return 0; /* irq not handled */
1490 } 1510 }
1491 1511
1492 static unsigned int __ata_sff_port_intr(struct ata_port *ap, 1512 static unsigned int __ata_sff_port_intr(struct ata_port *ap,
1493 struct ata_queued_cmd *qc, 1513 struct ata_queued_cmd *qc,
1494 bool hsmv_on_idle) 1514 bool hsmv_on_idle)
1495 { 1515 {
1496 u8 status; 1516 u8 status;
1497 1517
1498 VPRINTK("ata%u: protocol %d task_state %d\n", 1518 VPRINTK("ata%u: protocol %d task_state %d\n",
1499 ap->print_id, qc->tf.protocol, ap->hsm_task_state); 1519 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1500 1520
1501 /* Check whether we are expecting an interrupt in this state */ 1521 /* Check whether we are expecting an interrupt in this state */
1502 switch (ap->hsm_task_state) { 1522 switch (ap->hsm_task_state) {
1503 case HSM_ST_FIRST: 1523 case HSM_ST_FIRST:
1504 /* Some pre-ATAPI-4 devices assert INTRQ 1524 /* Some pre-ATAPI-4 devices assert INTRQ
1505 * in this state when ready to receive the CDB. 1525 * in this state when ready to receive the CDB.
1506 */ 1526 */
1507 1527
1508 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here. 1528 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
1509 * The flag was turned on only for atapi devices. No 1529 * The flag was turned on only for atapi devices. No
1510 * need to check ata_is_atapi(qc->tf.protocol) again. 1530 * need to check ata_is_atapi(qc->tf.protocol) again.
1511 */ 1531 */
1512 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) 1532 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1513 return ata_sff_idle_irq(ap); 1533 return ata_sff_idle_irq(ap);
1514 break; 1534 break;
1515 case HSM_ST: 1535 case HSM_ST:
1516 case HSM_ST_LAST: 1536 case HSM_ST_LAST:
1517 break; 1537 break;
1518 default: 1538 default:
1519 return ata_sff_idle_irq(ap); 1539 return ata_sff_idle_irq(ap);
1520 } 1540 }
1521 1541
1522 /* check main status, clearing INTRQ if needed */ 1542 /* check main status, clearing INTRQ if needed */
1523 status = ata_sff_irq_status(ap); 1543 status = ata_sff_irq_status(ap);
1524 if (status & ATA_BUSY) { 1544 if (status & ATA_BUSY) {
1525 if (hsmv_on_idle) { 1545 if (hsmv_on_idle) {
1526 /* BMDMA engine is already stopped, we're screwed */ 1546 /* BMDMA engine is already stopped, we're screwed */
1527 qc->err_mask |= AC_ERR_HSM; 1547 qc->err_mask |= AC_ERR_HSM;
1528 ap->hsm_task_state = HSM_ST_ERR; 1548 ap->hsm_task_state = HSM_ST_ERR;
1529 } else 1549 } else
1530 return ata_sff_idle_irq(ap); 1550 return ata_sff_idle_irq(ap);
1531 } 1551 }
1532 1552
1533 /* clear irq events */ 1553 /* clear irq events */
1534 if (ap->ops->sff_irq_clear) 1554 if (ap->ops->sff_irq_clear)
1535 ap->ops->sff_irq_clear(ap); 1555 ap->ops->sff_irq_clear(ap);
1536 1556
1537 ata_sff_hsm_move(ap, qc, status, 0); 1557 ata_sff_hsm_move(ap, qc, status, 0);
1538 1558
1539 return 1; /* irq handled */ 1559 return 1; /* irq handled */
1540 } 1560 }
1541 1561
1542 /** 1562 /**
1543 * ata_sff_port_intr - Handle SFF port interrupt 1563 * ata_sff_port_intr - Handle SFF port interrupt
1544 * @ap: Port on which interrupt arrived (possibly...) 1564 * @ap: Port on which interrupt arrived (possibly...)
1545 * @qc: Taskfile currently active in engine 1565 * @qc: Taskfile currently active in engine
1546 * 1566 *
1547 * Handle port interrupt for given queued command. 1567 * Handle port interrupt for given queued command.
1548 * 1568 *
1549 * LOCKING: 1569 * LOCKING:
1550 * spin_lock_irqsave(host lock) 1570 * spin_lock_irqsave(host lock)
1551 * 1571 *
1552 * RETURNS: 1572 * RETURNS:
1553 * One if interrupt was handled, zero if not (shared irq). 1573 * One if interrupt was handled, zero if not (shared irq).
1554 */ 1574 */
1555 unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc) 1575 unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1556 { 1576 {
1557 return __ata_sff_port_intr(ap, qc, false); 1577 return __ata_sff_port_intr(ap, qc, false);
1558 } 1578 }
1559 EXPORT_SYMBOL_GPL(ata_sff_port_intr); 1579 EXPORT_SYMBOL_GPL(ata_sff_port_intr);
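
Controllers with a private IRQ status register often keep their own top-level handler and reuse this per-port service routine. A hedged sketch, assuming a single-port host and a made-up mydrv_irq_pending() register check:

	static irqreturn_t mydrv_interrupt(int irq, void *dev_instance)
	{
		struct ata_host *host = dev_instance;
		struct ata_port *ap = host->ports[0];
		struct ata_queued_cmd *qc;
		unsigned int handled = 0;
		unsigned long flags;

		if (!mydrv_irq_pending(host))	/* hypothetical check */
			return IRQ_NONE;

		spin_lock_irqsave(&host->lock, flags);
		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
			handled = ata_sff_port_intr(ap, qc);
		spin_unlock_irqrestore(&host->lock, flags);

		return IRQ_RETVAL(handled);
	}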
1560 1580
1561 static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance, 1581 static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance,
1562 unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *)) 1582 unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *))
1563 { 1583 {
1564 struct ata_host *host = dev_instance; 1584 struct ata_host *host = dev_instance;
1565 bool retried = false; 1585 bool retried = false;
1566 unsigned int i; 1586 unsigned int i;
1567 unsigned int handled, idle, polling; 1587 unsigned int handled, idle, polling;
1568 unsigned long flags; 1588 unsigned long flags;
1569 1589
1570 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */ 1590 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
1571 spin_lock_irqsave(&host->lock, flags); 1591 spin_lock_irqsave(&host->lock, flags);
1572 1592
1573 retry: 1593 retry:
1574 handled = idle = polling = 0; 1594 handled = idle = polling = 0;
1575 for (i = 0; i < host->n_ports; i++) { 1595 for (i = 0; i < host->n_ports; i++) {
1576 struct ata_port *ap = host->ports[i]; 1596 struct ata_port *ap = host->ports[i];
1577 struct ata_queued_cmd *qc; 1597 struct ata_queued_cmd *qc;
1578 1598
1579 qc = ata_qc_from_tag(ap, ap->link.active_tag); 1599 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1580 if (qc) { 1600 if (qc) {
1581 if (!(qc->tf.flags & ATA_TFLAG_POLLING)) 1601 if (!(qc->tf.flags & ATA_TFLAG_POLLING))
1582 handled |= port_intr(ap, qc); 1602 handled |= port_intr(ap, qc);
1583 else 1603 else
1584 polling |= 1 << i; 1604 polling |= 1 << i;
1585 } else 1605 } else
1586 idle |= 1 << i; 1606 idle |= 1 << i;
1587 } 1607 }
1588 1608
1589 /* 1609 /*
1590 * If no port was expecting an IRQ but the controller is actually 1610 * If no port was expecting an IRQ but the controller is actually
1591 * asserting the IRQ line, an "irq: nobody cared" splat will ensue. 1611 * asserting the IRQ line, an "irq: nobody cared" splat will ensue.
1592 * Check IRQ pending status if available and clear any spurious IRQ. 1612 * Check IRQ pending status if available and clear any spurious IRQ.
1593 */ 1613 */
1594 if (!handled && !retried) { 1614 if (!handled && !retried) {
1595 bool retry = false; 1615 bool retry = false;
1596 1616
1597 for (i = 0; i < host->n_ports; i++) { 1617 for (i = 0; i < host->n_ports; i++) {
1598 struct ata_port *ap = host->ports[i]; 1618 struct ata_port *ap = host->ports[i];
1599 1619
1600 if (polling & (1 << i)) 1620 if (polling & (1 << i))
1601 continue; 1621 continue;
1602 1622
1603 if (!ap->ops->sff_irq_check || 1623 if (!ap->ops->sff_irq_check ||
1604 !ap->ops->sff_irq_check(ap)) 1624 !ap->ops->sff_irq_check(ap))
1605 continue; 1625 continue;
1606 1626
1607 if (idle & (1 << i)) { 1627 if (idle & (1 << i)) {
1608 ap->ops->sff_check_status(ap); 1628 ap->ops->sff_check_status(ap);
1609 if (ap->ops->sff_irq_clear) 1629 if (ap->ops->sff_irq_clear)
1610 ap->ops->sff_irq_clear(ap); 1630 ap->ops->sff_irq_clear(ap);
1611 } else { 1631 } else {
1612 /* clear INTRQ and check if BUSY cleared */ 1632 /* clear INTRQ and check if BUSY cleared */
1613 if (!(ap->ops->sff_check_status(ap) & ATA_BUSY)) 1633 if (!(ap->ops->sff_check_status(ap) & ATA_BUSY))
1614 retry |= true; 1634 retry |= true;
1615 /* 1635 /*
1616 * With command in flight, we can't do 1636 * With command in flight, we can't do
1617 * sff_irq_clear() w/o racing with completion. 1637 * sff_irq_clear() w/o racing with completion.
1618 */ 1638 */
1619 } 1639 }
1620 } 1640 }
1621 1641
1622 if (retry) { 1642 if (retry) {
1623 retried = true; 1643 retried = true;
1624 goto retry; 1644 goto retry;
1625 } 1645 }
1626 } 1646 }
1627 1647
1628 spin_unlock_irqrestore(&host->lock, flags); 1648 spin_unlock_irqrestore(&host->lock, flags);
1629 1649
1630 return IRQ_RETVAL(handled); 1650 return IRQ_RETVAL(handled);
1631 } 1651 }
1632 1652
1633 /** 1653 /**
1634 * ata_sff_interrupt - Default SFF ATA host interrupt handler 1654 * ata_sff_interrupt - Default SFF ATA host interrupt handler
1635 * @irq: irq line (unused) 1655 * @irq: irq line (unused)
1636 * @dev_instance: pointer to our ata_host information structure 1656 * @dev_instance: pointer to our ata_host information structure
1637 * 1657 *
1638 * Default interrupt handler for PCI IDE devices. Calls 1658 * Default interrupt handler for PCI IDE devices. Calls
1639 * ata_sff_port_intr() for each port that is not disabled. 1659 * ata_sff_port_intr() for each port that is not disabled.
1640 * 1660 *
1641 * LOCKING: 1661 * LOCKING:
1642 * Obtains host lock during operation. 1662 * Obtains host lock during operation.
1643 * 1663 *
1644 * RETURNS: 1664 * RETURNS:
1645 * IRQ_NONE or IRQ_HANDLED. 1665 * IRQ_NONE or IRQ_HANDLED.
1646 */ 1666 */
1647 irqreturn_t ata_sff_interrupt(int irq, void *dev_instance) 1667 irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
1648 { 1668 {
1649 return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr); 1669 return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr);
1650 } 1670 }
1651 EXPORT_SYMBOL_GPL(ata_sff_interrupt); 1671 EXPORT_SYMBOL_GPL(ata_sff_interrupt);
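
For non-PCI hosts the usual pattern is to hand this handler straight to ata_host_activate(); a sketch assuming a hypothetical platform driver where irq and mydrv_sht are driver-supplied:

	/* single shared IRQ; ata_sff_interrupt loops over all ports */
	rc = ata_host_activate(host, irq, ata_sff_interrupt,
			       IRQF_SHARED, &mydrv_sht);
	if (rc)
		return rc;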
1652 1672
1653 /** 1673 /**
1654 * ata_sff_lost_interrupt - Check for an apparent lost interrupt 1674 * ata_sff_lost_interrupt - Check for an apparent lost interrupt
1655 * @ap: port that appears to have timed out 1675 * @ap: port that appears to have timed out
1656 * 1676 *
1657 * Called from the libata error handlers when the core code suspects 1677 * Called from the libata error handlers when the core code suspects
1658 * an interrupt has been lost. If it has, complete anything we can and 1678 * an interrupt has been lost. If it has, complete anything we can and
1659 * then return. Interface must support altstatus for this faster 1679 * then return. Interface must support altstatus for this faster
1660 * recovery to occur. 1680 * recovery to occur.
1661 * 1681 *
1662 * Locking: 1682 * Locking:
1663 * Caller holds host lock 1683 * Caller holds host lock
1664 */ 1684 */
1665 1685
1666 void ata_sff_lost_interrupt(struct ata_port *ap) 1686 void ata_sff_lost_interrupt(struct ata_port *ap)
1667 { 1687 {
1668 u8 status; 1688 u8 status;
1669 struct ata_queued_cmd *qc; 1689 struct ata_queued_cmd *qc;
1670 1690
1671 /* Only one outstanding command per SFF channel */ 1691 /* Only one outstanding command per SFF channel */
1672 qc = ata_qc_from_tag(ap, ap->link.active_tag); 1692 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1673 /* We cannot lose an interrupt on a non-existent or polled command */ 1693 /* We cannot lose an interrupt on a non-existent or polled command */
1674 if (!qc || qc->tf.flags & ATA_TFLAG_POLLING) 1694 if (!qc || qc->tf.flags & ATA_TFLAG_POLLING)
1675 return; 1695 return;
1676 /* See if the controller thinks it is still busy - if so, this is not 1696 /* See if the controller thinks it is still busy - if so, this is not
1677 a lost IRQ; the command is still in progress */ 1697 a lost IRQ; the command is still in progress */
1678 status = ata_sff_altstatus(ap); 1698 status = ata_sff_altstatus(ap);
1679 if (status & ATA_BUSY) 1699 if (status & ATA_BUSY)
1680 return; 1700 return;
1681 1701
1682 /* There was a command running, we are no longer busy and we have 1702 /* There was a command running, we are no longer busy and we have
1683 no interrupt. */ 1703 no interrupt. */
1684 ata_port_printk(ap, KERN_WARNING, "lost interrupt (Status 0x%x)\n", 1704 ata_port_printk(ap, KERN_WARNING, "lost interrupt (Status 0x%x)\n",
1685 status); 1705 status);
1686 /* Run the host interrupt logic as if the interrupt had not been 1706 /* Run the host interrupt logic as if the interrupt had not been
1687 lost */ 1707 lost */
1688 ata_sff_port_intr(ap, qc); 1708 ata_sff_port_intr(ap, qc);
1689 } 1709 }
1690 EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt); 1710 EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);
1691 1711
1692 /** 1712 /**
1693 * ata_sff_freeze - Freeze SFF controller port 1713 * ata_sff_freeze - Freeze SFF controller port
1694 * @ap: port to freeze 1714 * @ap: port to freeze
1695 * 1715 *
1696 * Freeze SFF controller port. 1716 * Freeze SFF controller port.
1697 * 1717 *
1698 * LOCKING: 1718 * LOCKING:
1699 * Inherited from caller. 1719 * Inherited from caller.
1700 */ 1720 */
1701 void ata_sff_freeze(struct ata_port *ap) 1721 void ata_sff_freeze(struct ata_port *ap)
1702 { 1722 {
1703 ap->ctl |= ATA_NIEN; 1723 ap->ctl |= ATA_NIEN;
1704 ap->last_ctl = ap->ctl; 1724 ap->last_ctl = ap->ctl;
1705 1725
1706 if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) 1726 if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr)
1707 ata_sff_set_devctl(ap, ap->ctl); 1727 ata_sff_set_devctl(ap, ap->ctl);
1708 1728
1709 /* Under certain circumstances, some controllers raise IRQ on 1729 /* Under certain circumstances, some controllers raise IRQ on
1710 * ATA_NIEN manipulation. Also, many controllers fail to mask 1730 * ATA_NIEN manipulation. Also, many controllers fail to mask
1711 * previously pending IRQ on ATA_NIEN assertion. Clear it. 1731 * previously pending IRQ on ATA_NIEN assertion. Clear it.
1712 */ 1732 */
1713 ap->ops->sff_check_status(ap); 1733 ap->ops->sff_check_status(ap);
1714 1734
1715 if (ap->ops->sff_irq_clear) 1735 if (ap->ops->sff_irq_clear)
1716 ap->ops->sff_irq_clear(ap); 1736 ap->ops->sff_irq_clear(ap);
1717 } 1737 }
1718 EXPORT_SYMBOL_GPL(ata_sff_freeze); 1738 EXPORT_SYMBOL_GPL(ata_sff_freeze);
1719 1739
1720 /** 1740 /**
1721 * ata_sff_thaw - Thaw SFF controller port 1741 * ata_sff_thaw - Thaw SFF controller port
1722 * @ap: port to thaw 1742 * @ap: port to thaw
1723 * 1743 *
1724 * Thaw SFF controller port. 1744 * Thaw SFF controller port.
1725 * 1745 *
1726 * LOCKING: 1746 * LOCKING:
1727 * Inherited from caller. 1747 * Inherited from caller.
1728 */ 1748 */
1729 void ata_sff_thaw(struct ata_port *ap) 1749 void ata_sff_thaw(struct ata_port *ap)
1730 { 1750 {
1731 /* clear & re-enable interrupts */ 1751 /* clear & re-enable interrupts */
1732 ap->ops->sff_check_status(ap); 1752 ap->ops->sff_check_status(ap);
1733 if (ap->ops->sff_irq_clear) 1753 if (ap->ops->sff_irq_clear)
1734 ap->ops->sff_irq_clear(ap); 1754 ap->ops->sff_irq_clear(ap);
1735 ata_sff_irq_on(ap); 1755 ata_sff_irq_on(ap);
1736 } 1756 }
1737 EXPORT_SYMBOL_GPL(ata_sff_thaw); 1757 EXPORT_SYMBOL_GPL(ata_sff_thaw);
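
Controllers with an extra chip-level interrupt mask typically wrap these rather than replace them. A sketch under stated assumptions: MYDRV_IRQ_EN is a made-up register, and stashing the MMIO base in ap->private_data is a driver choice, not a libata convention:

	static void mydrv_freeze(struct ata_port *ap)
	{
		void __iomem *mmio = ap->private_data;

		iowrite32(0, mmio + MYDRV_IRQ_EN);  /* mask chip IRQ */
		ata_sff_freeze(ap);                 /* stock SFF freeze */
	}

	static void mydrv_thaw(struct ata_port *ap)
	{
		void __iomem *mmio = ap->private_data;

		ata_sff_thaw(ap);                   /* clear + re-enable */
		iowrite32(1, mmio + MYDRV_IRQ_EN);  /* unmask chip IRQ */
	}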
1738 1758
1739 /** 1759 /**
1740 * ata_sff_prereset - prepare SFF link for reset 1760 * ata_sff_prereset - prepare SFF link for reset
1741 * @link: SFF link to be reset 1761 * @link: SFF link to be reset
1742 * @deadline: deadline jiffies for the operation 1762 * @deadline: deadline jiffies for the operation
1743 * 1763 *
1744 * SFF link @link is about to be reset. Initialize it. It first 1764 * SFF link @link is about to be reset. Initialize it. It first
1745 * calls ata_std_prereset() and waits for !BSY if the port is 1765 * calls ata_std_prereset() and waits for !BSY if the port is
1746 * being softreset. 1766 * being softreset.
1747 * 1767 *
1748 * LOCKING: 1768 * LOCKING:
1749 * Kernel thread context (may sleep) 1769 * Kernel thread context (may sleep)
1750 * 1770 *
1751 * RETURNS: 1771 * RETURNS:
1752 * 0 on success, -errno otherwise. 1772 * 0 on success, -errno otherwise.
1753 */ 1773 */
1754 int ata_sff_prereset(struct ata_link *link, unsigned long deadline) 1774 int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
1755 { 1775 {
1756 struct ata_eh_context *ehc = &link->eh_context; 1776 struct ata_eh_context *ehc = &link->eh_context;
1757 int rc; 1777 int rc;
1758 1778
1759 rc = ata_std_prereset(link, deadline); 1779 rc = ata_std_prereset(link, deadline);
1760 if (rc) 1780 if (rc)
1761 return rc; 1781 return rc;
1762 1782
1763 /* if we're about to do hardreset, nothing more to do */ 1783 /* if we're about to do hardreset, nothing more to do */
1764 if (ehc->i.action & ATA_EH_HARDRESET) 1784 if (ehc->i.action & ATA_EH_HARDRESET)
1765 return 0; 1785 return 0;
1766 1786
1767 /* wait for !BSY if we don't know that no device is attached */ 1787 /* wait for !BSY if we don't know that no device is attached */
1768 if (!ata_link_offline(link)) { 1788 if (!ata_link_offline(link)) {
1769 rc = ata_sff_wait_ready(link, deadline); 1789 rc = ata_sff_wait_ready(link, deadline);
1770 if (rc && rc != -ENODEV) { 1790 if (rc && rc != -ENODEV) {
1771 ata_link_printk(link, KERN_WARNING, "device not ready " 1791 ata_link_printk(link, KERN_WARNING, "device not ready "
1772 "(errno=%d), forcing hardreset\n", rc); 1792 "(errno=%d), forcing hardreset\n", rc);
1773 ehc->i.action |= ATA_EH_HARDRESET; 1793 ehc->i.action |= ATA_EH_HARDRESET;
1774 } 1794 }
1775 } 1795 }
1776 1796
1777 return 0; 1797 return 0;
1778 } 1798 }
1779 EXPORT_SYMBOL_GPL(ata_sff_prereset); 1799 EXPORT_SYMBOL_GPL(ata_sff_prereset);
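
A common refinement is a thin driver prereset that bails out when the channel is strapped off in hardware and otherwise defers to this helper; mydrv_port_enabled() below is hypothetical, and returning -ENOENT is the libata convention for "treat the link as absent":

	static int mydrv_prereset(struct ata_link *link,
				  unsigned long deadline)
	{
		if (!mydrv_port_enabled(link->ap))
			return -ENOENT;	/* channel disabled in hardware */

		return ata_sff_prereset(link, deadline);
	}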
1780 1800
1781 /** 1801 /**
1782 * ata_devchk - PATA device presence detection 1802 * ata_devchk - PATA device presence detection
1783 * @ap: ATA channel to examine 1803 * @ap: ATA channel to examine
1784 * @device: Device to examine (starting at zero) 1804 * @device: Device to examine (starting at zero)
1785 * 1805 *
1786 * This technique was originally described in 1806 * This technique was originally described in
1787 * Hale Landis's ATADRVR (www.ata-atapi.com), and 1807 * Hale Landis's ATADRVR (www.ata-atapi.com), and
1788 * later found its way into the ATA/ATAPI spec. 1808 * later found its way into the ATA/ATAPI spec.
1789 * 1809 *
1790 * Write a pattern to the ATA shadow registers, 1810 * Write a pattern to the ATA shadow registers,
1791 * and if a device is present, it will respond by 1811 * and if a device is present, it will respond by
1792 * correctly storing and echoing back the 1812 * correctly storing and echoing back the
1793 * ATA shadow register contents. 1813 * ATA shadow register contents.
1794 * 1814 *
1795 * LOCKING: 1815 * LOCKING:
1796 * caller. 1816 * caller.
1797 */ 1817 */
1798 static unsigned int ata_devchk(struct ata_port *ap, unsigned int device) 1818 static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1799 { 1819 {
1800 struct ata_ioports *ioaddr = &ap->ioaddr; 1820 struct ata_ioports *ioaddr = &ap->ioaddr;
1801 u8 nsect, lbal; 1821 u8 nsect, lbal;
1802 1822
1803 ap->ops->sff_dev_select(ap, device); 1823 ap->ops->sff_dev_select(ap, device);
1804 1824
1805 iowrite8(0x55, ioaddr->nsect_addr); 1825 iowrite8(0x55, ioaddr->nsect_addr);
1806 iowrite8(0xaa, ioaddr->lbal_addr); 1826 iowrite8(0xaa, ioaddr->lbal_addr);
1807 1827
1808 iowrite8(0xaa, ioaddr->nsect_addr); 1828 iowrite8(0xaa, ioaddr->nsect_addr);
1809 iowrite8(0x55, ioaddr->lbal_addr); 1829 iowrite8(0x55, ioaddr->lbal_addr);
1810 1830
1811 iowrite8(0x55, ioaddr->nsect_addr); 1831 iowrite8(0x55, ioaddr->nsect_addr);
1812 iowrite8(0xaa, ioaddr->lbal_addr); 1832 iowrite8(0xaa, ioaddr->lbal_addr);
1813 1833
1814 nsect = ioread8(ioaddr->nsect_addr); 1834 nsect = ioread8(ioaddr->nsect_addr);
1815 lbal = ioread8(ioaddr->lbal_addr); 1835 lbal = ioread8(ioaddr->lbal_addr);
1816 1836
1817 if ((nsect == 0x55) && (lbal == 0xaa)) 1837 if ((nsect == 0x55) && (lbal == 0xaa))
1818 return 1; /* we found a device */ 1838 return 1; /* we found a device */
1819 1839
1820 return 0; /* nothing found */ 1840 return 0; /* nothing found */
1821 } 1841 }
1822 1842
1823 /** 1843 /**
1824 * ata_sff_dev_classify - Parse returned ATA device signature 1844 * ata_sff_dev_classify - Parse returned ATA device signature
1825 * @dev: ATA device to classify (starting at zero) 1845 * @dev: ATA device to classify (starting at zero)
1826 * @present: device seems present 1846 * @present: device seems present
1827 * @r_err: Value of error register on completion 1847 * @r_err: Value of error register on completion
1828 * 1848 *
1829 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs, 1849 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
1830 * an ATA/ATAPI-defined set of values is placed in the ATA 1850 * an ATA/ATAPI-defined set of values is placed in the ATA
1831 * shadow registers, indicating the results of device detection 1851 * shadow registers, indicating the results of device detection
1832 * and diagnostics. 1852 * and diagnostics.
1833 * 1853 *
1834 * Select the ATA device, and read the values from the ATA shadow 1854 * Select the ATA device, and read the values from the ATA shadow
1835 * registers. Then parse according to the Error register value, 1855 * registers. Then parse according to the Error register value,
1836 * and the spec-defined values examined by ata_dev_classify(). 1856 * and the spec-defined values examined by ata_dev_classify().
1837 * 1857 *
1838 * LOCKING: 1858 * LOCKING:
1839 * caller. 1859 * caller.
1840 * 1860 *
1841 * RETURNS: 1861 * RETURNS:
1842 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE. 1862 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1843 */ 1863 */
1844 unsigned int ata_sff_dev_classify(struct ata_device *dev, int present, 1864 unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
1845 u8 *r_err) 1865 u8 *r_err)
1846 { 1866 {
1847 struct ata_port *ap = dev->link->ap; 1867 struct ata_port *ap = dev->link->ap;
1848 struct ata_taskfile tf; 1868 struct ata_taskfile tf;
1849 unsigned int class; 1869 unsigned int class;
1850 u8 err; 1870 u8 err;
1851 1871
1852 ap->ops->sff_dev_select(ap, dev->devno); 1872 ap->ops->sff_dev_select(ap, dev->devno);
1853 1873
1854 memset(&tf, 0, sizeof(tf)); 1874 memset(&tf, 0, sizeof(tf));
1855 1875
1856 ap->ops->sff_tf_read(ap, &tf); 1876 ap->ops->sff_tf_read(ap, &tf);
1857 err = tf.feature; 1877 err = tf.feature;
1858 if (r_err) 1878 if (r_err)
1859 *r_err = err; 1879 *r_err = err;
1860 1880
1861 /* see if device passed diags: continue and warn later */ 1881 /* see if device passed diags: continue and warn later */
1862 if (err == 0) 1882 if (err == 0)
1863 /* diagnostic fail: do nothing _YET_ */ 1883 /* diagnostic fail: do nothing _YET_ */
1864 dev->horkage |= ATA_HORKAGE_DIAGNOSTIC; 1884 dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
1865 else if (err == 1) 1885 else if (err == 1)
1866 /* do nothing */ ; 1886 /* do nothing */ ;
1867 else if ((dev->devno == 0) && (err == 0x81)) 1887 else if ((dev->devno == 0) && (err == 0x81))
1868 /* do nothing */ ; 1888 /* do nothing */ ;
1869 else 1889 else
1870 return ATA_DEV_NONE; 1890 return ATA_DEV_NONE;
1871 1891
1872 /* determine if device is ATA or ATAPI */ 1892 /* determine if device is ATA or ATAPI */
1873 class = ata_dev_classify(&tf); 1893 class = ata_dev_classify(&tf);
1874 1894
1875 if (class == ATA_DEV_UNKNOWN) { 1895 if (class == ATA_DEV_UNKNOWN) {
1876 /* If the device failed diagnostic, it's likely to 1896 /* If the device failed diagnostic, it's likely to
1877 * have reported incorrect device signature too. 1897 * have reported incorrect device signature too.
1878 * Assume ATA device if the device seems present but 1898 * Assume ATA device if the device seems present but
1879 * device signature is invalid with diagnostic 1899 * device signature is invalid with diagnostic
1880 * failure. 1900 * failure.
1881 */ 1901 */
1882 if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC)) 1902 if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
1883 class = ATA_DEV_ATA; 1903 class = ATA_DEV_ATA;
1884 else 1904 else
1885 class = ATA_DEV_NONE; 1905 class = ATA_DEV_NONE;
1886 } else if ((class == ATA_DEV_ATA) && 1906 } else if ((class == ATA_DEV_ATA) &&
1887 (ap->ops->sff_check_status(ap) == 0)) 1907 (ap->ops->sff_check_status(ap) == 0))
1888 class = ATA_DEV_NONE; 1908 class = ATA_DEV_NONE;
1889 1909
1890 return class; 1910 return class;
1891 } 1911 }
1892 EXPORT_SYMBOL_GPL(ata_sff_dev_classify); 1912 EXPORT_SYMBOL_GPL(ata_sff_dev_classify);
1893 1913
1894 /** 1914 /**
1895 * ata_sff_wait_after_reset - wait for devices to become ready after reset 1915 * ata_sff_wait_after_reset - wait for devices to become ready after reset
1896 * @link: SFF link which is just reset 1916 * @link: SFF link which is just reset
1897 * @devmask: mask of present devices 1917 * @devmask: mask of present devices
1898 * @deadline: deadline jiffies for the operation 1918 * @deadline: deadline jiffies for the operation
1899 * 1919 *
1900 * Wait for devices attached to SFF @link to become ready after 1920 * Wait for devices attached to SFF @link to become ready after
1901 * reset. It includes a preceding 150ms wait to avoid accessing the TF 1921 * reset. It includes a preceding 150ms wait to avoid accessing the TF
1902 * status register too early. 1922 * status register too early.
1903 * 1923 *
1904 * LOCKING: 1924 * LOCKING:
1905 * Kernel thread context (may sleep). 1925 * Kernel thread context (may sleep).
1906 * 1926 *
1907 * RETURNS: 1927 * RETURNS:
1908 * 0 on success, -ENODEV if some or all of devices in @devmask 1928 * 0 on success, -ENODEV if some or all of devices in @devmask
1909 * don't seem to exist. -errno on other errors. 1929 * don't seem to exist. -errno on other errors.
1910 */ 1930 */
1911 int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask, 1931 int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
1912 unsigned long deadline) 1932 unsigned long deadline)
1913 { 1933 {
1914 struct ata_port *ap = link->ap; 1934 struct ata_port *ap = link->ap;
1915 struct ata_ioports *ioaddr = &ap->ioaddr; 1935 struct ata_ioports *ioaddr = &ap->ioaddr;
1916 unsigned int dev0 = devmask & (1 << 0); 1936 unsigned int dev0 = devmask & (1 << 0);
1917 unsigned int dev1 = devmask & (1 << 1); 1937 unsigned int dev1 = devmask & (1 << 1);
1918 int rc, ret = 0; 1938 int rc, ret = 0;
1919 1939
1920 msleep(ATA_WAIT_AFTER_RESET); 1940 msleep(ATA_WAIT_AFTER_RESET);
1921 1941
1922 /* always check readiness of the master device */ 1942 /* always check readiness of the master device */
1923 rc = ata_sff_wait_ready(link, deadline); 1943 rc = ata_sff_wait_ready(link, deadline);
1924 /* -ENODEV means the odd clown forgot the D7 pulldown resistor 1944 /* -ENODEV means the odd clown forgot the D7 pulldown resistor
1925 * and TF status is 0xff, bail out on it too. 1945 * and TF status is 0xff, bail out on it too.
1926 */ 1946 */
1927 if (rc) 1947 if (rc)
1928 return rc; 1948 return rc;
1929 1949
1930 /* if device 1 was found in ata_devchk, wait for register 1950 /* if device 1 was found in ata_devchk, wait for register
1931 * access briefly, then wait for BSY to clear. 1951 * access briefly, then wait for BSY to clear.
1932 */ 1952 */
1933 if (dev1) { 1953 if (dev1) {
1934 int i; 1954 int i;
1935 1955
1936 ap->ops->sff_dev_select(ap, 1); 1956 ap->ops->sff_dev_select(ap, 1);
1937 1957
1938 /* Wait for register access. Some ATAPI devices fail 1958 /* Wait for register access. Some ATAPI devices fail
1939 * to set nsect/lbal after reset, so don't waste too 1959 * to set nsect/lbal after reset, so don't waste too
1940 * much time on it. We're gonna wait for !BSY anyway. 1960 * much time on it. We're gonna wait for !BSY anyway.
1941 */ 1961 */
1942 for (i = 0; i < 2; i++) { 1962 for (i = 0; i < 2; i++) {
1943 u8 nsect, lbal; 1963 u8 nsect, lbal;
1944 1964
1945 nsect = ioread8(ioaddr->nsect_addr); 1965 nsect = ioread8(ioaddr->nsect_addr);
1946 lbal = ioread8(ioaddr->lbal_addr); 1966 lbal = ioread8(ioaddr->lbal_addr);
1947 if ((nsect == 1) && (lbal == 1)) 1967 if ((nsect == 1) && (lbal == 1))
1948 break; 1968 break;
1949 msleep(50); /* give drive a breather */ 1969 msleep(50); /* give drive a breather */
1950 } 1970 }
1951 1971
1952 rc = ata_sff_wait_ready(link, deadline); 1972 rc = ata_sff_wait_ready(link, deadline);
1953 if (rc) { 1973 if (rc) {
1954 if (rc != -ENODEV) 1974 if (rc != -ENODEV)
1955 return rc; 1975 return rc;
1956 ret = rc; 1976 ret = rc;
1957 } 1977 }
1958 } 1978 }
1959 1979
1960 /* is all this really necessary? */ 1980 /* is all this really necessary? */
1961 ap->ops->sff_dev_select(ap, 0); 1981 ap->ops->sff_dev_select(ap, 0);
1962 if (dev1) 1982 if (dev1)
1963 ap->ops->sff_dev_select(ap, 1); 1983 ap->ops->sff_dev_select(ap, 1);
1964 if (dev0) 1984 if (dev0)
1965 ap->ops->sff_dev_select(ap, 0); 1985 ap->ops->sff_dev_select(ap, 0);
1966 1986
1967 return ret; 1987 return ret;
1968 } 1988 }
1969 EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset); 1989 EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);
1970 1990
1971 static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask, 1991 static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
1972 unsigned long deadline) 1992 unsigned long deadline)
1973 { 1993 {
1974 struct ata_ioports *ioaddr = &ap->ioaddr; 1994 struct ata_ioports *ioaddr = &ap->ioaddr;
1975 1995
1976 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id); 1996 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
1977 1997
1978 /* software reset. causes dev0 to be selected */ 1998 /* software reset. causes dev0 to be selected */
1979 iowrite8(ap->ctl, ioaddr->ctl_addr); 1999 iowrite8(ap->ctl, ioaddr->ctl_addr);
1980 udelay(20); /* FIXME: flush */ 2000 udelay(20); /* FIXME: flush */
1981 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr); 2001 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
1982 udelay(20); /* FIXME: flush */ 2002 udelay(20); /* FIXME: flush */
1983 iowrite8(ap->ctl, ioaddr->ctl_addr); 2003 iowrite8(ap->ctl, ioaddr->ctl_addr);
1984 ap->last_ctl = ap->ctl; 2004 ap->last_ctl = ap->ctl;
1985 2005
1986 /* wait for the port to become ready */ 2006 /* wait for the port to become ready */
1987 return ata_sff_wait_after_reset(&ap->link, devmask, deadline); 2007 return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
1988 } 2008 }
1989 2009
1990 /** 2010 /**
1991 * ata_sff_softreset - reset host port via ATA SRST 2011 * ata_sff_softreset - reset host port via ATA SRST
1992 * @link: ATA link to reset 2012 * @link: ATA link to reset
1993 * @classes: resulting classes of attached devices 2013 * @classes: resulting classes of attached devices
1994 * @deadline: deadline jiffies for the operation 2014 * @deadline: deadline jiffies for the operation
1995 * 2015 *
1996 * Reset host port using ATA SRST. 2016 * Reset host port using ATA SRST.
1997 * 2017 *
1998 * LOCKING: 2018 * LOCKING:
1999 * Kernel thread context (may sleep) 2019 * Kernel thread context (may sleep)
2000 * 2020 *
2001 * RETURNS: 2021 * RETURNS:
2002 * 0 on success, -errno otherwise. 2022 * 0 on success, -errno otherwise.
2003 */ 2023 */
2004 int ata_sff_softreset(struct ata_link *link, unsigned int *classes, 2024 int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
2005 unsigned long deadline) 2025 unsigned long deadline)
2006 { 2026 {
2007 struct ata_port *ap = link->ap; 2027 struct ata_port *ap = link->ap;
2008 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; 2028 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2009 unsigned int devmask = 0; 2029 unsigned int devmask = 0;
2010 int rc; 2030 int rc;
2011 u8 err; 2031 u8 err;
2012 2032
2013 DPRINTK("ENTER\n"); 2033 DPRINTK("ENTER\n");
2014 2034
2015 /* determine if device 0/1 are present */ 2035 /* determine if device 0/1 are present */
2016 if (ata_devchk(ap, 0)) 2036 if (ata_devchk(ap, 0))
2017 devmask |= (1 << 0); 2037 devmask |= (1 << 0);
2018 if (slave_possible && ata_devchk(ap, 1)) 2038 if (slave_possible && ata_devchk(ap, 1))
2019 devmask |= (1 << 1); 2039 devmask |= (1 << 1);
2020 2040
2021 /* select device 0 again */ 2041 /* select device 0 again */
2022 ap->ops->sff_dev_select(ap, 0); 2042 ap->ops->sff_dev_select(ap, 0);
2023 2043
2024 /* issue bus reset */ 2044 /* issue bus reset */
2025 DPRINTK("about to softreset, devmask=%x\n", devmask); 2045 DPRINTK("about to softreset, devmask=%x\n", devmask);
2026 rc = ata_bus_softreset(ap, devmask, deadline); 2046 rc = ata_bus_softreset(ap, devmask, deadline);
2027 /* if link is occupied, -ENODEV too is an error */ 2047 /* if link is occupied, -ENODEV too is an error */
2028 if (rc && (rc != -ENODEV || sata_scr_valid(link))) { 2048 if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
2029 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc); 2049 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
2030 return rc; 2050 return rc;
2031 } 2051 }
2032 2052
2033 /* determine by signature whether we have ATA or ATAPI devices */ 2053 /* determine by signature whether we have ATA or ATAPI devices */
2034 classes[0] = ata_sff_dev_classify(&link->device[0], 2054 classes[0] = ata_sff_dev_classify(&link->device[0],
2035 devmask & (1 << 0), &err); 2055 devmask & (1 << 0), &err);
2036 if (slave_possible && err != 0x81) 2056 if (slave_possible && err != 0x81)
2037 classes[1] = ata_sff_dev_classify(&link->device[1], 2057 classes[1] = ata_sff_dev_classify(&link->device[1],
2038 devmask & (1 << 1), &err); 2058 devmask & (1 << 1), &err);
2039 2059
2040 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]); 2060 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2041 return 0; 2061 return 0;
2042 } 2062 }
2043 EXPORT_SYMBOL_GPL(ata_sff_softreset); 2063 EXPORT_SYMBOL_GPL(ata_sff_softreset);
2044 2064
2045 /** 2065 /**
2046 * sata_sff_hardreset - reset host port via SATA phy reset 2066 * sata_sff_hardreset - reset host port via SATA phy reset
2047 * @link: link to reset 2067 * @link: link to reset
2048 * @class: resulting class of attached device 2068 * @class: resulting class of attached device
2049 * @deadline: deadline jiffies for the operation 2069 * @deadline: deadline jiffies for the operation
2050 * 2070 *
2051 * SATA phy-reset host port using DET bits of SControl register, 2071 * SATA phy-reset host port using DET bits of SControl register,
2052 * wait for !BSY and classify the attached device. 2072 * wait for !BSY and classify the attached device.
2053 * 2073 *
2054 * LOCKING: 2074 * LOCKING:
2055 * Kernel thread context (may sleep) 2075 * Kernel thread context (may sleep)
2056 * 2076 *
2057 * RETURNS: 2077 * RETURNS:
2058 * 0 on success, -errno otherwise. 2078 * 0 on success, -errno otherwise.
2059 */ 2079 */
2060 int sata_sff_hardreset(struct ata_link *link, unsigned int *class, 2080 int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
2061 unsigned long deadline) 2081 unsigned long deadline)
2062 { 2082 {
2063 struct ata_eh_context *ehc = &link->eh_context; 2083 struct ata_eh_context *ehc = &link->eh_context;
2064 const unsigned long *timing = sata_ehc_deb_timing(ehc); 2084 const unsigned long *timing = sata_ehc_deb_timing(ehc);
2065 bool online; 2085 bool online;
2066 int rc; 2086 int rc;
2067 2087
2068 rc = sata_link_hardreset(link, timing, deadline, &online, 2088 rc = sata_link_hardreset(link, timing, deadline, &online,
2069 ata_sff_check_ready); 2089 ata_sff_check_ready);
2070 if (online) 2090 if (online)
2071 *class = ata_sff_dev_classify(link->device, 1, NULL); 2091 *class = ata_sff_dev_classify(link->device, 1, NULL);
2072 2092
2073 DPRINTK("EXIT, class=%u\n", *class); 2093 DPRINTK("EXIT, class=%u\n", *class);
2074 return rc; 2094 return rc;
2075 } 2095 }
2076 EXPORT_SYMBOL_GPL(sata_sff_hardreset); 2096 EXPORT_SYMBOL_GPL(sata_sff_hardreset);
2077 2097
2078 /** 2098 /**
2079 * ata_sff_postreset - SFF postreset callback 2099 * ata_sff_postreset - SFF postreset callback
2080 * @link: the target SFF ata_link 2100 * @link: the target SFF ata_link
2081 * @classes: classes of attached devices 2101 * @classes: classes of attached devices
2082 * 2102 *
2083 * This function is invoked after a successful reset. It first 2103 * This function is invoked after a successful reset. It first
2084 * calls ata_std_postreset() and performs SFF specific postreset 2104 * calls ata_std_postreset() and performs SFF specific postreset
2085 * processing. 2105 * processing.
2086 * 2106 *
2087 * LOCKING: 2107 * LOCKING:
2088 * Kernel thread context (may sleep) 2108 * Kernel thread context (may sleep)
2089 */ 2109 */
2090 void ata_sff_postreset(struct ata_link *link, unsigned int *classes) 2110 void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
2091 { 2111 {
2092 struct ata_port *ap = link->ap; 2112 struct ata_port *ap = link->ap;
2093 2113
2094 ata_std_postreset(link, classes); 2114 ata_std_postreset(link, classes);
2095 2115
2096 /* is double-select really necessary? */ 2116 /* is double-select really necessary? */
2097 if (classes[0] != ATA_DEV_NONE) 2117 if (classes[0] != ATA_DEV_NONE)
2098 ap->ops->sff_dev_select(ap, 1); 2118 ap->ops->sff_dev_select(ap, 1);
2099 if (classes[1] != ATA_DEV_NONE) 2119 if (classes[1] != ATA_DEV_NONE)
2100 ap->ops->sff_dev_select(ap, 0); 2120 ap->ops->sff_dev_select(ap, 0);
2101 2121
2102 /* bail out if no device is present */ 2122 /* bail out if no device is present */
2103 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) { 2123 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2104 DPRINTK("EXIT, no device\n"); 2124 DPRINTK("EXIT, no device\n");
2105 return; 2125 return;
2106 } 2126 }
2107 2127
2108 /* set up device control */ 2128 /* set up device control */
2109 if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) { 2129 if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) {
2110 ata_sff_set_devctl(ap, ap->ctl); 2130 ata_sff_set_devctl(ap, ap->ctl);
2111 ap->last_ctl = ap->ctl; 2131 ap->last_ctl = ap->ctl;
2112 } 2132 }
2113 } 2133 }
2114 EXPORT_SYMBOL_GPL(ata_sff_postreset); 2134 EXPORT_SYMBOL_GPL(ata_sff_postreset);
2115 2135
2116 /** 2136 /**
2117 * ata_sff_drain_fifo - Stock FIFO drain logic for SFF controllers 2137 * ata_sff_drain_fifo - Stock FIFO drain logic for SFF controllers
2118 * @qc: command 2138 * @qc: command
2119 * 2139 *
2120 * Drain the FIFO and device of any stuck data following a command 2140 * Drain the FIFO and device of any stuck data following a command
2121 * that failed to complete. In some cases this is necessary before a 2141 * that failed to complete. In some cases this is necessary before a
2122 * reset will recover the device. 2142 * reset will recover the device.
2123 * 2143 *
2124 */ 2144 */
2125 2145
2126 void ata_sff_drain_fifo(struct ata_queued_cmd *qc) 2146 void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
2127 { 2147 {
2128 int count; 2148 int count;
2129 struct ata_port *ap; 2149 struct ata_port *ap;
2130 2150
2131 /* We only need to flush incoming data when a command was running */ 2151 /* We only need to flush incoming data when a command was running */
2132 if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE) 2152 if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
2133 return; 2153 return;
2134 2154
2135 ap = qc->ap; 2155 ap = qc->ap;
2136 /* Drain up to 64K of data before we give up this recovery method */ 2156 /* Drain up to 64K of data before we give up this recovery method */
2137 for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ) 2157 for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
2138 && count < 65536; count += 2) 2158 && count < 65536; count += 2)
2139 ioread16(ap->ioaddr.data_addr); 2159 ioread16(ap->ioaddr.data_addr);
2140 2160
2141 /* Can become DEBUG later */ 2161 /* Can become DEBUG later */
2142 if (count) 2162 if (count)
2143 ata_port_printk(ap, KERN_DEBUG, 2163 ata_port_printk(ap, KERN_DEBUG,
2144 "drained %d bytes to clear DRQ.\n", count); 2164 "drained %d bytes to clear DRQ.\n", count);
2145 2165
2146 } 2166 }
2147 EXPORT_SYMBOL_GPL(ata_sff_drain_fifo); 2167 EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);
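
The same recovery idea ports directly to controllers with a 32-bit data register; a hedged sketch (the 32-bit FIFO is an assumed hardware property, not something the core can detect), wired up via .sff_drain_fifo = mydrv_drain_fifo:

	static void mydrv_drain_fifo(struct ata_queued_cmd *qc)
	{
		struct ata_port *ap;
		int count;

		if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
			return;

		ap = qc->ap;
		/* drain up to 64K, four bytes per read */
		for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
			     && count < 65536; count += 4)
			ioread32(ap->ioaddr.data_addr);
	}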
2148 2168
2149 /** 2169 /**
2150 * ata_sff_error_handler - Stock error handler for SFF controller 2170 * ata_sff_error_handler - Stock error handler for SFF controller
2151 * @ap: port to handle error for 2171 * @ap: port to handle error for
2152 * 2172 *
2153 * Stock error handler for SFF controller. It can handle both 2173 * Stock error handler for SFF controller. It can handle both
2154 * PATA and SATA controllers. Many controllers should be able to 2174 * PATA and SATA controllers. Many controllers should be able to
2155 * use this EH as-is or with some added handling before and 2175 * use this EH as-is or with some added handling before and
2156 * after. 2176 * after.
2157 * 2177 *
2158 * LOCKING: 2178 * LOCKING:
2159 * Kernel thread context (may sleep) 2179 * Kernel thread context (may sleep)
2160 */ 2180 */
2161 void ata_sff_error_handler(struct ata_port *ap) 2181 void ata_sff_error_handler(struct ata_port *ap)
2162 { 2182 {
2163 ata_reset_fn_t softreset = ap->ops->softreset; 2183 ata_reset_fn_t softreset = ap->ops->softreset;
2164 ata_reset_fn_t hardreset = ap->ops->hardreset; 2184 ata_reset_fn_t hardreset = ap->ops->hardreset;
2165 struct ata_queued_cmd *qc; 2185 struct ata_queued_cmd *qc;
2166 unsigned long flags; 2186 unsigned long flags;
2167 2187
2168 qc = __ata_qc_from_tag(ap, ap->link.active_tag); 2188 qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2169 if (qc && !(qc->flags & ATA_QCFLAG_FAILED)) 2189 if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
2170 qc = NULL; 2190 qc = NULL;
2171 2191
2172 spin_lock_irqsave(ap->lock, flags); 2192 spin_lock_irqsave(ap->lock, flags);
2173 2193
2174 /* 2194 /*
2175 * We *MUST* do FIFO draining before we issue a reset as 2195 * We *MUST* do FIFO draining before we issue a reset as
2176 * several devices helpfully clear their internal state and 2196 * several devices helpfully clear their internal state and
2177 * will lock solid if we touch the data port post reset. Pass 2197 * will lock solid if we touch the data port post reset. Pass
2178 * qc in case anyone wants to do different PIO/DMA recovery or 2198 * qc in case anyone wants to do different PIO/DMA recovery or
2179 * has per command fixups 2199 * has per command fixups
2180 */ 2200 */
2181 if (ap->ops->sff_drain_fifo) 2201 if (ap->ops->sff_drain_fifo)
2182 ap->ops->sff_drain_fifo(qc); 2202 ap->ops->sff_drain_fifo(qc);
2183 2203
2184 spin_unlock_irqrestore(ap->lock, flags); 2204 spin_unlock_irqrestore(ap->lock, flags);
2185 2205
2186 /* ignore ata_sff_softreset if ctl isn't accessible */ 2206 /* ignore ata_sff_softreset if ctl isn't accessible */
2187 if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr) 2207 if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr)
2188 softreset = NULL; 2208 softreset = NULL;
2189 2209
2190 /* ignore built-in hardresets if SCR access is not available */ 2210 /* ignore built-in hardresets if SCR access is not available */
2191 if ((hardreset == sata_std_hardreset || 2211 if ((hardreset == sata_std_hardreset ||
2192 hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link)) 2212 hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
2193 hardreset = NULL; 2213 hardreset = NULL;
2194 2214
2195 ata_do_eh(ap, ap->ops->prereset, softreset, hardreset, 2215 ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
2196 ap->ops->postreset); 2216 ap->ops->postreset);
2197 } 2217 }
2198 EXPORT_SYMBOL_GPL(ata_sff_error_handler); 2218 EXPORT_SYMBOL_GPL(ata_sff_error_handler);
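
Because the built-in softreset/hardreset are dropped automatically above when ctl or SCR access is missing, a driver usually only overrides the reset methods and keeps this EH as-is. A sketch for a hypothetical SATA SFF controller:

	static struct ata_port_operations mydrv_sata_ops = {
		.inherits	= &ata_sff_port_ops,	/* stock SFF EH */
		.hardreset	= sata_sff_hardreset,	/* SCR-based reset */
	};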
2199 2219
2200 /** 2220 /**
2201 * ata_sff_std_ports - initialize ioaddr with standard port offsets. 2221 * ata_sff_std_ports - initialize ioaddr with standard port offsets.
2202 * @ioaddr: IO address structure to be initialized 2222 * @ioaddr: IO address structure to be initialized
2203 * 2223 *
2204 * Utility function which initializes data_addr, error_addr, 2224 * Utility function which initializes data_addr, error_addr,
2205 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr, 2225 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
2206 * device_addr, status_addr, and command_addr to standard offsets 2226 * device_addr, status_addr, and command_addr to standard offsets
2207 * relative to cmd_addr. 2227 * relative to cmd_addr.
2208 * 2228 *
2209 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr. 2229 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
2210 */ 2230 */
2211 void ata_sff_std_ports(struct ata_ioports *ioaddr) 2231 void ata_sff_std_ports(struct ata_ioports *ioaddr)
2212 { 2232 {
2213 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA; 2233 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
2214 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR; 2234 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
2215 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE; 2235 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
2216 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT; 2236 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
2217 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL; 2237 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
2218 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM; 2238 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
2219 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH; 2239 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
2220 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE; 2240 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
2221 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS; 2241 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
2222 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD; 2242 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
2223 } 2243 }
2224 EXPORT_SYMBOL_GPL(ata_sff_std_ports); 2244 EXPORT_SYMBOL_GPL(ata_sff_std_ports);
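
Typical non-PCI usage, sketched under the assumption of an MMIO controller whose command and control blocks were already ioremap()ed into cmd_base/ctl_base by the (hypothetical) caller:

	ap->ioaddr.cmd_addr = cmd_base;		/* assumed mapped */
	ap->ioaddr.altstatus_addr = ctl_base;
	ap->ioaddr.ctl_addr = ctl_base;
	ata_sff_std_ports(&ap->ioaddr);		/* derive the rest */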
2225 2245
2226 #ifdef CONFIG_PCI 2246 #ifdef CONFIG_PCI
2227 2247
2228 static int ata_resources_present(struct pci_dev *pdev, int port) 2248 static int ata_resources_present(struct pci_dev *pdev, int port)
2229 { 2249 {
2230 int i; 2250 int i;
2231 2251
2232 /* Check the PCI resources for this channel are enabled */ 2252 /* Check the PCI resources for this channel are enabled */
2233 port = port * 2; 2253 port = port * 2;
2234 for (i = 0; i < 2; i++) { 2254 for (i = 0; i < 2; i++) {
2235 if (pci_resource_start(pdev, port + i) == 0 || 2255 if (pci_resource_start(pdev, port + i) == 0 ||
2236 pci_resource_len(pdev, port + i) == 0) 2256 pci_resource_len(pdev, port + i) == 0)
2237 return 0; 2257 return 0;
2238 } 2258 }
2239 return 1; 2259 return 1;
2240 } 2260 }
2241 2261
2242 /** 2262 /**
2243 * ata_pci_sff_init_host - acquire native PCI ATA resources and init host 2263 * ata_pci_sff_init_host - acquire native PCI ATA resources and init host
2244 * @host: target ATA host 2264 * @host: target ATA host
2245 * 2265 *
2246 * Acquire native PCI ATA resources for @host and initialize the 2266 * Acquire native PCI ATA resources for @host and initialize the
2247 * first two ports of @host accordingly. Ports marked dummy are 2267 * first two ports of @host accordingly. Ports marked dummy are
2248 * skipped and allocation failure makes the port dummy. 2268 * skipped and allocation failure makes the port dummy.
2249 * 2269 *
2250 * Note that native PCI resources are valid even for legacy hosts 2270 * Note that native PCI resources are valid even for legacy hosts
2251 * as we fix up the pdev resource array early in boot, so this 2271 * as we fix up the pdev resource array early in boot, so this
2252 * function can be used for both native and legacy SFF hosts. 2272 * function can be used for both native and legacy SFF hosts.
2253 * 2273 *
2254 * LOCKING: 2274 * LOCKING:
2255 * Inherited from calling layer (may sleep). 2275 * Inherited from calling layer (may sleep).
2256 * 2276 *
2257 * RETURNS: 2277 * RETURNS:
2258 * 0 if at least one port is initialized, -ENODEV if no port is 2278 * 0 if at least one port is initialized, -ENODEV if no port is
2259 * available. 2279 * available.
2260 */ 2280 */
2261 int ata_pci_sff_init_host(struct ata_host *host) 2281 int ata_pci_sff_init_host(struct ata_host *host)
2262 { 2282 {
2263 struct device *gdev = host->dev; 2283 struct device *gdev = host->dev;
2264 struct pci_dev *pdev = to_pci_dev(gdev); 2284 struct pci_dev *pdev = to_pci_dev(gdev);
2265 unsigned int mask = 0; 2285 unsigned int mask = 0;
2266 int i, rc; 2286 int i, rc;
2267 2287
2268 /* request, iomap BARs and init port addresses accordingly */ 2288 /* request, iomap BARs and init port addresses accordingly */
2269 for (i = 0; i < 2; i++) { 2289 for (i = 0; i < 2; i++) {
2270 struct ata_port *ap = host->ports[i]; 2290 struct ata_port *ap = host->ports[i];
2271 int base = i * 2; 2291 int base = i * 2;
2272 void __iomem * const *iomap; 2292 void __iomem * const *iomap;
2273 2293
2274 if (ata_port_is_dummy(ap)) 2294 if (ata_port_is_dummy(ap))
2275 continue; 2295 continue;
2276 2296
2277 /* Discard disabled ports. Some controllers show 2297 /* Discard disabled ports. Some controllers show
2278 * their unused channels this way. Disabled ports are 2298 * their unused channels this way. Disabled ports are
2279 * made dummy. 2299 * made dummy.
2280 */ 2300 */
2281 if (!ata_resources_present(pdev, i)) { 2301 if (!ata_resources_present(pdev, i)) {
2282 ap->ops = &ata_dummy_port_ops; 2302 ap->ops = &ata_dummy_port_ops;
2283 continue; 2303 continue;
2284 } 2304 }
2285 2305
2286 rc = pcim_iomap_regions(pdev, 0x3 << base, 2306 rc = pcim_iomap_regions(pdev, 0x3 << base,
2287 dev_driver_string(gdev)); 2307 dev_driver_string(gdev));
2288 if (rc) { 2308 if (rc) {
2289 dev_printk(KERN_WARNING, gdev, 2309 dev_printk(KERN_WARNING, gdev,
2290 "failed to request/iomap BARs for port %d " 2310 "failed to request/iomap BARs for port %d "
2291 "(errno=%d)\n", i, rc); 2311 "(errno=%d)\n", i, rc);
2292 if (rc == -EBUSY) 2312 if (rc == -EBUSY)
2293 pcim_pin_device(pdev); 2313 pcim_pin_device(pdev);
2294 ap->ops = &ata_dummy_port_ops; 2314 ap->ops = &ata_dummy_port_ops;
2295 continue; 2315 continue;
2296 } 2316 }
2297 host->iomap = iomap = pcim_iomap_table(pdev); 2317 host->iomap = iomap = pcim_iomap_table(pdev);
2298 2318
2299 ap->ioaddr.cmd_addr = iomap[base]; 2319 ap->ioaddr.cmd_addr = iomap[base];
2300 ap->ioaddr.altstatus_addr = 2320 ap->ioaddr.altstatus_addr =
2301 ap->ioaddr.ctl_addr = (void __iomem *) 2321 ap->ioaddr.ctl_addr = (void __iomem *)
2302 ((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS); 2322 ((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
2303 ata_sff_std_ports(&ap->ioaddr); 2323 ata_sff_std_ports(&ap->ioaddr);
2304 2324
2305 ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx", 2325 ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
2306 (unsigned long long)pci_resource_start(pdev, base), 2326 (unsigned long long)pci_resource_start(pdev, base),
2307 (unsigned long long)pci_resource_start(pdev, base + 1)); 2327 (unsigned long long)pci_resource_start(pdev, base + 1));
2308 2328
2309 mask |= 1 << i; 2329 mask |= 1 << i;
2310 } 2330 }
2311 2331
2312 if (!mask) { 2332 if (!mask) {
2313 dev_printk(KERN_ERR, gdev, "no available native port\n"); 2333 dev_printk(KERN_ERR, gdev, "no available native port\n");
2314 return -ENODEV; 2334 return -ENODEV;
2315 } 2335 }
2316 2336
2317 return 0; 2337 return 0;
2318 } 2338 }
2319 EXPORT_SYMBOL_GPL(ata_pci_sff_init_host); 2339 EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
2320 2340
2321 /** 2341 /**
2322 * ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host 2342 * ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host
2323 * @pdev: target PCI device 2343 * @pdev: target PCI device
2324 * @ppi: array of port_info, must be enough for two ports 2344 * @ppi: array of port_info, must be enough for two ports
2325 * @r_host: out argument for the initialized ATA host 2345 * @r_host: out argument for the initialized ATA host
2326 * 2346 *
2327 * Helper to allocate PIO-only SFF ATA host for @pdev, acquire 2347 * Helper to allocate PIO-only SFF ATA host for @pdev, acquire
2328 * all PCI resources and initialize it accordingly in one go. 2348 * all PCI resources and initialize it accordingly in one go.
2329 * 2349 *
2330 * LOCKING: 2350 * LOCKING:
2331 * Inherited from calling layer (may sleep). 2351 * Inherited from calling layer (may sleep).
2332 * 2352 *
2333 * RETURNS: 2353 * RETURNS:
2334 * 0 on success, -errno otherwise. 2354 * 0 on success, -errno otherwise.
2335 */ 2355 */
2336 int ata_pci_sff_prepare_host(struct pci_dev *pdev, 2356 int ata_pci_sff_prepare_host(struct pci_dev *pdev,
2337 const struct ata_port_info * const *ppi, 2357 const struct ata_port_info * const *ppi,
2338 struct ata_host **r_host) 2358 struct ata_host **r_host)
2339 { 2359 {
2340 struct ata_host *host; 2360 struct ata_host *host;
2341 int rc; 2361 int rc;
2342 2362
2343 if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) 2363 if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
2344 return -ENOMEM; 2364 return -ENOMEM;
2345 2365
2346 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2); 2366 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
2347 if (!host) { 2367 if (!host) {
2348 dev_printk(KERN_ERR, &pdev->dev, 2368 dev_printk(KERN_ERR, &pdev->dev,
2349 "failed to allocate ATA host\n"); 2369 "failed to allocate ATA host\n");
2350 rc = -ENOMEM; 2370 rc = -ENOMEM;
2351 goto err_out; 2371 goto err_out;
2352 } 2372 }
2353 2373
2354 rc = ata_pci_sff_init_host(host); 2374 rc = ata_pci_sff_init_host(host);
2355 if (rc) 2375 if (rc)
2356 goto err_out; 2376 goto err_out;
2357 2377
2358 devres_remove_group(&pdev->dev, NULL); 2378 devres_remove_group(&pdev->dev, NULL);
2359 *r_host = host; 2379 *r_host = host;
2360 return 0; 2380 return 0;
2361 2381
2362 err_out: 2382 err_out:
2363 devres_release_group(&pdev->dev, NULL); 2383 devres_release_group(&pdev->dev, NULL);
2364 return rc; 2384 return rc;
2365 } 2385 }
2366 EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host); 2386 EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);
2367 2387
2368 /** 2388 /**
2369 * ata_pci_sff_activate_host - start SFF host, request IRQ and register it 2389 * ata_pci_sff_activate_host - start SFF host, request IRQ and register it
2370 * @host: target SFF ATA host 2390 * @host: target SFF ATA host
2371 * @irq_handler: irq_handler used when requesting IRQ(s) 2391 * @irq_handler: irq_handler used when requesting IRQ(s)
2372 * @sht: scsi_host_template to use when registering the host 2392 * @sht: scsi_host_template to use when registering the host
2373 * 2393 *
2374 * This is the counterpart of ata_host_activate() for SFF ATA 2394 * This is the counterpart of ata_host_activate() for SFF ATA
2375 * hosts. This separate helper is necessary because SFF hosts 2395 * hosts. This separate helper is necessary because SFF hosts
2376 * use two separate interrupts in legacy mode. 2396 * use two separate interrupts in legacy mode.
2377 * 2397 *
2378 * LOCKING: 2398 * LOCKING:
2379 * Inherited from calling layer (may sleep). 2399 * Inherited from calling layer (may sleep).
2380 * 2400 *
2381 * RETURNS: 2401 * RETURNS:
2382 * 0 on success, -errno otherwise. 2402 * 0 on success, -errno otherwise.
2383 */ 2403 */
2384 int ata_pci_sff_activate_host(struct ata_host *host, 2404 int ata_pci_sff_activate_host(struct ata_host *host,
2385 irq_handler_t irq_handler, 2405 irq_handler_t irq_handler,
2386 struct scsi_host_template *sht) 2406 struct scsi_host_template *sht)
2387 { 2407 {
2388 struct device *dev = host->dev; 2408 struct device *dev = host->dev;
2389 struct pci_dev *pdev = to_pci_dev(dev); 2409 struct pci_dev *pdev = to_pci_dev(dev);
2390 const char *drv_name = dev_driver_string(host->dev); 2410 const char *drv_name = dev_driver_string(host->dev);
2391 int legacy_mode = 0, rc; 2411 int legacy_mode = 0, rc;
2392 2412
2393 rc = ata_host_start(host); 2413 rc = ata_host_start(host);
2394 if (rc) 2414 if (rc)
2395 return rc; 2415 return rc;
2396 2416
2397 if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) { 2417 if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
2398 u8 tmp8, mask; 2418 u8 tmp8, mask;
2399 2419
2400 /* TODO: What if one channel is in native mode ... */ 2420 /* TODO: What if one channel is in native mode ... */
2401 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8); 2421 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
2402 mask = (1 << 2) | (1 << 0); 2422 mask = (1 << 2) | (1 << 0);
2403 if ((tmp8 & mask) != mask) 2423 if ((tmp8 & mask) != mask)
2404 legacy_mode = 1; 2424 legacy_mode = 1;
2405 #if defined(CONFIG_NO_ATA_LEGACY) 2425 #if defined(CONFIG_NO_ATA_LEGACY)
2406 /* Some platforms with PCI limits cannot address compat 2426 /* Some platforms with PCI limits cannot address compat
2407 port space. In that case we punt if their firmware has 2427 port space. In that case we punt if their firmware has
2408 left a device in compatibility mode */ 2428 left a device in compatibility mode */
2409 if (legacy_mode) { 2429 if (legacy_mode) {
2410 printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n"); 2430 printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
2411 return -EOPNOTSUPP; 2431 return -EOPNOTSUPP;
2412 } 2432 }
2413 #endif 2433 #endif
2414 } 2434 }
2415 2435
2416 if (!devres_open_group(dev, NULL, GFP_KERNEL)) 2436 if (!devres_open_group(dev, NULL, GFP_KERNEL))
2417 return -ENOMEM; 2437 return -ENOMEM;
2418 2438
2419 if (!legacy_mode && pdev->irq) { 2439 if (!legacy_mode && pdev->irq) {
2420 rc = devm_request_irq(dev, pdev->irq, irq_handler, 2440 rc = devm_request_irq(dev, pdev->irq, irq_handler,
2421 IRQF_SHARED, drv_name, host); 2441 IRQF_SHARED, drv_name, host);
2422 if (rc) 2442 if (rc)
2423 goto out; 2443 goto out;
2424 2444
2425 ata_port_desc(host->ports[0], "irq %d", pdev->irq); 2445 ata_port_desc(host->ports[0], "irq %d", pdev->irq);
2426 ata_port_desc(host->ports[1], "irq %d", pdev->irq); 2446 ata_port_desc(host->ports[1], "irq %d", pdev->irq);
2427 } else if (legacy_mode) { 2447 } else if (legacy_mode) {
2428 if (!ata_port_is_dummy(host->ports[0])) { 2448 if (!ata_port_is_dummy(host->ports[0])) {
2429 rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev), 2449 rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
2430 irq_handler, IRQF_SHARED, 2450 irq_handler, IRQF_SHARED,
2431 drv_name, host); 2451 drv_name, host);
2432 if (rc) 2452 if (rc)
2433 goto out; 2453 goto out;
2434 2454
2435 ata_port_desc(host->ports[0], "irq %d", 2455 ata_port_desc(host->ports[0], "irq %d",
2436 ATA_PRIMARY_IRQ(pdev)); 2456 ATA_PRIMARY_IRQ(pdev));
2437 } 2457 }
2438 2458
2439 if (!ata_port_is_dummy(host->ports[1])) { 2459 if (!ata_port_is_dummy(host->ports[1])) {
2440 rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev), 2460 rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
2441 irq_handler, IRQF_SHARED, 2461 irq_handler, IRQF_SHARED,
2442 drv_name, host); 2462 drv_name, host);
2443 if (rc) 2463 if (rc)
2444 goto out; 2464 goto out;
2445 2465
2446 ata_port_desc(host->ports[1], "irq %d", 2466 ata_port_desc(host->ports[1], "irq %d",
2447 ATA_SECONDARY_IRQ(pdev)); 2467 ATA_SECONDARY_IRQ(pdev));
2448 } 2468 }
2449 } 2469 }
2450 2470
2451 rc = ata_host_register(host, sht); 2471 rc = ata_host_register(host, sht);
2452 out: 2472 out:
2453 if (rc == 0) 2473 if (rc == 0)
2454 devres_remove_group(dev, NULL); 2474 devres_remove_group(dev, NULL);
2455 else 2475 else
2456 devres_release_group(dev, NULL); 2476 devres_release_group(dev, NULL);
2457 2477
2458 return rc; 2478 return rc;
2459 } 2479 }
2460 EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host); 2480 EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
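Editor's note on the legacy-mode test above: for PCI class-code IDE controllers, bit 0 of the programming-interface byte means the primary channel is in native mode and bit 2 means the same for the secondary channel. A minimal sketch of the decode, assuming only the standard PCI config accessors (illustrative, not part of the diff):

	static bool both_channels_native(struct pci_dev *pdev)
	{
		u8 prog_if;
		const u8 mask = (1 << 2) | (1 << 0);	/* native-mode bits */

		pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog_if);
		return (prog_if & mask) == mask;	/* mirrors the test above */
	}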
2461 2481
2462 static const struct ata_port_info *ata_sff_find_valid_pi( 2482 static const struct ata_port_info *ata_sff_find_valid_pi(
2463 const struct ata_port_info * const *ppi) 2483 const struct ata_port_info * const *ppi)
2464 { 2484 {
2465 int i; 2485 int i;
2466 2486
2467 /* look up the first valid port_info */ 2487 /* look up the first valid port_info */
2468 for (i = 0; i < 2 && ppi[i]; i++) 2488 for (i = 0; i < 2 && ppi[i]; i++)
2469 if (ppi[i]->port_ops != &ata_dummy_port_ops) 2489 if (ppi[i]->port_ops != &ata_dummy_port_ops)
2470 return ppi[i]; 2490 return ppi[i];
2471 2491
2472 return NULL; 2492 return NULL;
2473 } 2493 }
2474 2494
2475 /** 2495 /**
2476 * ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller 2496 * ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller
2477 * @pdev: Controller to be initialized 2497 * @pdev: Controller to be initialized
2478 * @ppi: array of port_info, must be enough for two ports 2498 * @ppi: array of port_info, must be enough for two ports
2479 * @sht: scsi_host_template to use when registering the host 2499 * @sht: scsi_host_template to use when registering the host
2480 * @host_priv: host private_data 2500 * @host_priv: host private_data
2481 * @hflag: host flags 2501 * @hflag: host flags
2482 * 2502 *
2483 * This is a helper function which can be called from a driver's 2503 * This is a helper function which can be called from a driver's
2484 * xxx_init_one() probe function if the hardware uses traditional 2504 * xxx_init_one() probe function if the hardware uses traditional
2485 * IDE taskfile registers and is PIO only. 2505 * IDE taskfile registers and is PIO only.
2486 * 2506 *
2487 * ASSUMPTION: 2507 * ASSUMPTION:
2488 * Nobody makes a single channel controller that appears solely as 2508 * Nobody makes a single channel controller that appears solely as
2489 * the secondary legacy port on PCI. 2509 * the secondary legacy port on PCI.
2490 * 2510 *
2491 * LOCKING: 2511 * LOCKING:
2492 * Inherited from PCI layer (may sleep). 2512 * Inherited from PCI layer (may sleep).
2493 * 2513 *
2494 * RETURNS: 2514 * RETURNS:
2495 * Zero on success, negative errno-based value on error. 2515 * Zero on success, negative errno-based value on error.
2496 */ 2516 */
2497 int ata_pci_sff_init_one(struct pci_dev *pdev, 2517 int ata_pci_sff_init_one(struct pci_dev *pdev,
2498 const struct ata_port_info * const *ppi, 2518 const struct ata_port_info * const *ppi,
2499 struct scsi_host_template *sht, void *host_priv, int hflag) 2519 struct scsi_host_template *sht, void *host_priv, int hflag)
2500 { 2520 {
2501 struct device *dev = &pdev->dev; 2521 struct device *dev = &pdev->dev;
2502 const struct ata_port_info *pi; 2522 const struct ata_port_info *pi;
2503 struct ata_host *host = NULL; 2523 struct ata_host *host = NULL;
2504 int rc; 2524 int rc;
2505 2525
2506 DPRINTK("ENTER\n"); 2526 DPRINTK("ENTER\n");
2507 2527
2508 pi = ata_sff_find_valid_pi(ppi); 2528 pi = ata_sff_find_valid_pi(ppi);
2509 if (!pi) { 2529 if (!pi) {
2510 dev_printk(KERN_ERR, &pdev->dev, 2530 dev_printk(KERN_ERR, &pdev->dev,
2511 "no valid port_info specified\n"); 2531 "no valid port_info specified\n");
2512 return -EINVAL; 2532 return -EINVAL;
2513 } 2533 }
2514 2534
2515 if (!devres_open_group(dev, NULL, GFP_KERNEL)) 2535 if (!devres_open_group(dev, NULL, GFP_KERNEL))
2516 return -ENOMEM; 2536 return -ENOMEM;
2517 2537
2518 rc = pcim_enable_device(pdev); 2538 rc = pcim_enable_device(pdev);
2519 if (rc) 2539 if (rc)
2520 goto out; 2540 goto out;
2521 2541
2522 /* prepare and activate SFF host */ 2542 /* prepare and activate SFF host */
2523 rc = ata_pci_sff_prepare_host(pdev, ppi, &host); 2543 rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
2524 if (rc) 2544 if (rc)
2525 goto out; 2545 goto out;
2526 host->private_data = host_priv; 2546 host->private_data = host_priv;
2527 host->flags |= hflag; 2547 host->flags |= hflag;
2528 2548
2529 rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht); 2549 rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
2530 out: 2550 out:
2531 if (rc == 0) 2551 if (rc == 0)
2532 devres_remove_group(&pdev->dev, NULL); 2552 devres_remove_group(&pdev->dev, NULL);
2533 else 2553 else
2534 devres_release_group(&pdev->dev, NULL); 2554 devres_release_group(&pdev->dev, NULL);
2535 2555
2536 return rc; 2556 return rc;
2537 } 2557 }
2538 EXPORT_SYMBOL_GPL(ata_pci_sff_init_one); 2558 EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
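A hypothetical probe built on ata_pci_sff_init_one() might look like the sketch below; my_sht, my_port_info and my_init_one are illustrative names, not part of this commit:

	static struct scsi_host_template my_sht = {
		ATA_PIO_SHT("my_pata"),
	};

	static const struct ata_port_info my_port_info = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.port_ops	= &ata_sff_port_ops,
	};

	static int my_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		const struct ata_port_info *ppi[] = { &my_port_info, NULL };

		return ata_pci_sff_init_one(pdev, ppi, &my_sht, NULL, 0);
	}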
2539 2559
2540 #endif /* CONFIG_PCI */ 2560 #endif /* CONFIG_PCI */
2541 2561
2542 /* 2562 /*
2543 * BMDMA support 2563 * BMDMA support
2544 */ 2564 */
2545 2565
2546 #ifdef CONFIG_ATA_BMDMA 2566 #ifdef CONFIG_ATA_BMDMA
2547 2567
2548 const struct ata_port_operations ata_bmdma_port_ops = { 2568 const struct ata_port_operations ata_bmdma_port_ops = {
2549 .inherits = &ata_sff_port_ops, 2569 .inherits = &ata_sff_port_ops,
2550 2570
2551 .error_handler = ata_bmdma_error_handler, 2571 .error_handler = ata_bmdma_error_handler,
2552 .post_internal_cmd = ata_bmdma_post_internal_cmd, 2572 .post_internal_cmd = ata_bmdma_post_internal_cmd,
2553 2573
2554 .qc_prep = ata_bmdma_qc_prep, 2574 .qc_prep = ata_bmdma_qc_prep,
2555 .qc_issue = ata_bmdma_qc_issue, 2575 .qc_issue = ata_bmdma_qc_issue,
2556 2576
2557 .sff_irq_clear = ata_bmdma_irq_clear, 2577 .sff_irq_clear = ata_bmdma_irq_clear,
2558 .bmdma_setup = ata_bmdma_setup, 2578 .bmdma_setup = ata_bmdma_setup,
2559 .bmdma_start = ata_bmdma_start, 2579 .bmdma_start = ata_bmdma_start,
2560 .bmdma_stop = ata_bmdma_stop, 2580 .bmdma_stop = ata_bmdma_stop,
2561 .bmdma_status = ata_bmdma_status, 2581 .bmdma_status = ata_bmdma_status,
2562 2582
2563 .port_start = ata_bmdma_port_start, 2583 .port_start = ata_bmdma_port_start,
2564 }; 2584 };
2565 EXPORT_SYMBOL_GPL(ata_bmdma_port_ops); 2585 EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
2566 2586
2567 const struct ata_port_operations ata_bmdma32_port_ops = { 2587 const struct ata_port_operations ata_bmdma32_port_ops = {
2568 .inherits = &ata_bmdma_port_ops, 2588 .inherits = &ata_bmdma_port_ops,
2569 2589
2570 .sff_data_xfer = ata_sff_data_xfer32, 2590 .sff_data_xfer = ata_sff_data_xfer32,
2571 .port_start = ata_bmdma_port_start32, 2591 .port_start = ata_bmdma_port_start32,
2572 }; 2592 };
2573 EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops); 2593 EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
2574 2594
2575 /** 2595 /**
2576 * ata_bmdma_fill_sg - Fill PCI IDE PRD table 2596 * ata_bmdma_fill_sg - Fill PCI IDE PRD table
2577 * @qc: Metadata associated with taskfile to be transferred 2597 * @qc: Metadata associated with taskfile to be transferred
2578 * 2598 *
2579 * Fill PCI IDE PRD (scatter-gather) table with segments 2599 * Fill PCI IDE PRD (scatter-gather) table with segments
2580 * associated with the current disk command. 2600 * associated with the current disk command.
2581 * 2601 *
2582 * LOCKING: 2602 * LOCKING:
2583 * spin_lock_irqsave(host lock) 2603 * spin_lock_irqsave(host lock)
2584 * 2604 *
2585 */ 2605 */
2586 static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc) 2606 static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
2587 { 2607 {
2588 struct ata_port *ap = qc->ap; 2608 struct ata_port *ap = qc->ap;
2589 struct ata_bmdma_prd *prd = ap->bmdma_prd; 2609 struct ata_bmdma_prd *prd = ap->bmdma_prd;
2590 struct scatterlist *sg; 2610 struct scatterlist *sg;
2591 unsigned int si, pi; 2611 unsigned int si, pi;
2592 2612
2593 pi = 0; 2613 pi = 0;
2594 for_each_sg(qc->sg, sg, qc->n_elem, si) { 2614 for_each_sg(qc->sg, sg, qc->n_elem, si) {
2595 u32 addr, offset; 2615 u32 addr, offset;
2596 u32 sg_len, len; 2616 u32 sg_len, len;
2597 2617
2598 /* determine if physical DMA addr spans 64K boundary. 2618 /* determine if physical DMA addr spans 64K boundary.
2599 * Note h/w doesn't support 64-bit, so we unconditionally 2619 * Note h/w doesn't support 64-bit, so we unconditionally
2600 * truncate dma_addr_t to u32. 2620 * truncate dma_addr_t to u32.
2601 */ 2621 */
2602 addr = (u32) sg_dma_address(sg); 2622 addr = (u32) sg_dma_address(sg);
2603 sg_len = sg_dma_len(sg); 2623 sg_len = sg_dma_len(sg);
2604 2624
2605 while (sg_len) { 2625 while (sg_len) {
2606 offset = addr & 0xffff; 2626 offset = addr & 0xffff;
2607 len = sg_len; 2627 len = sg_len;
2608 if ((offset + sg_len) > 0x10000) 2628 if ((offset + sg_len) > 0x10000)
2609 len = 0x10000 - offset; 2629 len = 0x10000 - offset;
2610 2630
2611 prd[pi].addr = cpu_to_le32(addr); 2631 prd[pi].addr = cpu_to_le32(addr);
2612 prd[pi].flags_len = cpu_to_le32(len & 0xffff); 2632 prd[pi].flags_len = cpu_to_le32(len & 0xffff);
2613 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len); 2633 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
2614 2634
2615 pi++; 2635 pi++;
2616 sg_len -= len; 2636 sg_len -= len;
2617 addr += len; 2637 addr += len;
2618 } 2638 }
2619 } 2639 }
2620 2640
2621 prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); 2641 prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2622 } 2642 }
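A worked example of the splitting loop above, as a standalone (hypothetical) program: a segment at 0x1fff0 of length 0x20 crosses a 64K boundary and is emitted as two PRDs, (0x1fff0, 0x10) and (0x20000, 0x10):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t addr = 0x1fff0, sg_len = 0x20;

		while (sg_len) {
			uint32_t offset = addr & 0xffff;
			uint32_t len = sg_len;

			if (offset + sg_len > 0x10000)	/* would cross 64K */
				len = 0x10000 - offset;	/* stop at the boundary */

			printf("PRD: addr=0x%x len=0x%x\n", (unsigned)addr, (unsigned)len);
			sg_len -= len;
			addr += len;
		}
		return 0;
	}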
2623 2643
2624 /** 2644 /**
2625 * ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table 2645 * ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table
2626 * @qc: Metadata associated with taskfile to be transferred 2646 * @qc: Metadata associated with taskfile to be transferred
2627 * 2647 *
2628 * Fill PCI IDE PRD (scatter-gather) table with segments 2648 * Fill PCI IDE PRD (scatter-gather) table with segments
2629 * associated with the current disk command. Perform the fill 2649 * associated with the current disk command. Perform the fill
2630 * so that we avoid writing any 64K-length records for 2650 * so that we avoid writing any 64K-length records for
2631 * controllers that don't follow the spec. 2651 * controllers that don't follow the spec.
2632 * 2652 *
2633 * LOCKING: 2653 * LOCKING:
2634 * spin_lock_irqsave(host lock) 2654 * spin_lock_irqsave(host lock)
2635 * 2655 *
2636 */ 2656 */
2637 static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc) 2657 static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
2638 { 2658 {
2639 struct ata_port *ap = qc->ap; 2659 struct ata_port *ap = qc->ap;
2640 struct ata_bmdma_prd *prd = ap->bmdma_prd; 2660 struct ata_bmdma_prd *prd = ap->bmdma_prd;
2641 struct scatterlist *sg; 2661 struct scatterlist *sg;
2642 unsigned int si, pi; 2662 unsigned int si, pi;
2643 2663
2644 pi = 0; 2664 pi = 0;
2645 for_each_sg(qc->sg, sg, qc->n_elem, si) { 2665 for_each_sg(qc->sg, sg, qc->n_elem, si) {
2646 u32 addr, offset; 2666 u32 addr, offset;
2647 u32 sg_len, len, blen; 2667 u32 sg_len, len, blen;
2648 2668
2649 /* determine if physical DMA addr spans 64K boundary. 2669 /* determine if physical DMA addr spans 64K boundary.
2650 * Note h/w doesn't support 64-bit, so we unconditionally 2670 * Note h/w doesn't support 64-bit, so we unconditionally
2651 * truncate dma_addr_t to u32. 2671 * truncate dma_addr_t to u32.
2652 */ 2672 */
2653 addr = (u32) sg_dma_address(sg); 2673 addr = (u32) sg_dma_address(sg);
2654 sg_len = sg_dma_len(sg); 2674 sg_len = sg_dma_len(sg);
2655 2675
2656 while (sg_len) { 2676 while (sg_len) {
2657 offset = addr & 0xffff; 2677 offset = addr & 0xffff;
2658 len = sg_len; 2678 len = sg_len;
2659 if ((offset + sg_len) > 0x10000) 2679 if ((offset + sg_len) > 0x10000)
2660 len = 0x10000 - offset; 2680 len = 0x10000 - offset;
2661 2681
2662 blen = len & 0xffff; 2682 blen = len & 0xffff;
2663 prd[pi].addr = cpu_to_le32(addr); 2683 prd[pi].addr = cpu_to_le32(addr);
2664 if (blen == 0) { 2684 if (blen == 0) {
2665 /* Some PATA chipsets like the CS5530 can't 2685 /* Some PATA chipsets like the CS5530 can't
2666 cope with 0x0000 meaning 64K as the spec 2686 cope with 0x0000 meaning 64K as the spec
2667 says */ 2687 says */
2668 prd[pi].flags_len = cpu_to_le32(0x8000); 2688 prd[pi].flags_len = cpu_to_le32(0x8000);
2669 blen = 0x8000; 2689 blen = 0x8000;
2670 prd[++pi].addr = cpu_to_le32(addr + 0x8000); 2690 prd[++pi].addr = cpu_to_le32(addr + 0x8000);
2671 } 2691 }
2672 prd[pi].flags_len = cpu_to_le32(blen); 2692 prd[pi].flags_len = cpu_to_le32(blen);
2673 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len); 2693 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
2674 2694
2675 pi++; 2695 pi++;
2676 sg_len -= len; 2696 sg_len -= len;
2677 addr += len; 2697 addr += len;
2678 } 2698 }
2679 } 2699 }
2680 2700
2681 prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); 2701 prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2682 } 2702 }
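The quirk handled above can be traced the same way: a full 64K segment has len & 0xffff == 0, which the spec encodes as zero but chips such as the CS5530 misread, so the dumb fill emits two 32K entries instead. A standalone sketch (hypothetical, not part of the diff):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t addr = 0x20000, len = 0x10000;	/* one full 64K chunk */
		uint32_t blen = len & 0xffff;		/* == 0: spec's 64K encoding */

		if (blen == 0) {
			/* emit two 32K PRDs instead of one zero-length (64K) PRD */
			printf("PRD: addr=0x%x len=0x8000\n", (unsigned)addr);
			printf("PRD: addr=0x%x len=0x8000\n", (unsigned)(addr + 0x8000));
		} else {
			printf("PRD: addr=0x%x len=0x%x\n", (unsigned)addr, (unsigned)blen);
		}
		return 0;
	}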
2683 2703
2684 /** 2704 /**
2685 * ata_bmdma_qc_prep - Prepare taskfile for submission 2705 * ata_bmdma_qc_prep - Prepare taskfile for submission
2686 * @qc: Metadata associated with taskfile to be prepared 2706 * @qc: Metadata associated with taskfile to be prepared
2687 * 2707 *
2688 * Prepare ATA taskfile for submission. 2708 * Prepare ATA taskfile for submission.
2689 * 2709 *
2690 * LOCKING: 2710 * LOCKING:
2691 * spin_lock_irqsave(host lock) 2711 * spin_lock_irqsave(host lock)
2692 */ 2712 */
2693 void ata_bmdma_qc_prep(struct ata_queued_cmd *qc) 2713 void ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
2694 { 2714 {
2695 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 2715 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2696 return; 2716 return;
2697 2717
2698 ata_bmdma_fill_sg(qc); 2718 ata_bmdma_fill_sg(qc);
2699 } 2719 }
2700 EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep); 2720 EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
2701 2721
2702 /** 2722 /**
2703 * ata_bmdma_dumb_qc_prep - Prepare taskfile for submission 2723 * ata_bmdma_dumb_qc_prep - Prepare taskfile for submission
2704 * @qc: Metadata associated with taskfile to be prepared 2724 * @qc: Metadata associated with taskfile to be prepared
2705 * 2725 *
2706 * Prepare ATA taskfile for submission. 2726 * Prepare ATA taskfile for submission.
2707 * 2727 *
2708 * LOCKING: 2728 * LOCKING:
2709 * spin_lock_irqsave(host lock) 2729 * spin_lock_irqsave(host lock)
2710 */ 2730 */
2711 void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc) 2731 void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
2712 { 2732 {
2713 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 2733 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2714 return; 2734 return;
2715 2735
2716 ata_bmdma_fill_sg_dumb(qc); 2736 ata_bmdma_fill_sg_dumb(qc);
2717 } 2737 }
2718 EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep); 2738 EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
2719 2739
2720 /** 2740 /**
2721 * ata_bmdma_qc_issue - issue taskfile to a BMDMA controller 2741 * ata_bmdma_qc_issue - issue taskfile to a BMDMA controller
2722 * @qc: command to issue to device 2742 * @qc: command to issue to device
2723 * 2743 *
2724 * This function issues a PIO, NODATA or DMA command to a 2744 * This function issues a PIO, NODATA or DMA command to a
2725 * SFF/BMDMA controller. PIO and NODATA are handled by 2745 * SFF/BMDMA controller. PIO and NODATA are handled by
2726 * ata_sff_qc_issue(). 2746 * ata_sff_qc_issue().
2727 * 2747 *
2728 * LOCKING: 2748 * LOCKING:
2729 * spin_lock_irqsave(host lock) 2749 * spin_lock_irqsave(host lock)
2730 * 2750 *
2731 * RETURNS: 2751 * RETURNS:
2732 * Zero on success, AC_ERR_* mask on failure 2752 * Zero on success, AC_ERR_* mask on failure
2733 */ 2753 */
2734 unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) 2754 unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
2735 { 2755 {
2736 struct ata_port *ap = qc->ap; 2756 struct ata_port *ap = qc->ap;
2757 struct ata_link *link = qc->dev->link;
2737 2758
2738 /* defer PIO handling to sff_qc_issue */ 2759 /* defer PIO handling to sff_qc_issue */
2739 if (!ata_is_dma(qc->tf.protocol)) 2760 if (!ata_is_dma(qc->tf.protocol))
2740 return ata_sff_qc_issue(qc); 2761 return ata_sff_qc_issue(qc);
2741 2762
2742 /* select the device */ 2763 /* select the device */
2743 ata_dev_select(ap, qc->dev->devno, 1, 0); 2764 ata_dev_select(ap, qc->dev->devno, 1, 0);
2744 2765
2745 /* start the command */ 2766 /* start the command */
2746 switch (qc->tf.protocol) { 2767 switch (qc->tf.protocol) {
2747 case ATA_PROT_DMA: 2768 case ATA_PROT_DMA:
2748 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); 2769 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
2749 2770
2750 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ 2771 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
2751 ap->ops->bmdma_setup(qc); /* set up bmdma */ 2772 ap->ops->bmdma_setup(qc); /* set up bmdma */
2752 ap->ops->bmdma_start(qc); /* initiate bmdma */ 2773 ap->ops->bmdma_start(qc); /* initiate bmdma */
2753 ap->hsm_task_state = HSM_ST_LAST; 2774 ap->hsm_task_state = HSM_ST_LAST;
2754 break; 2775 break;
2755 2776
2756 case ATAPI_PROT_DMA: 2777 case ATAPI_PROT_DMA:
2757 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); 2778 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
2758 2779
2759 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ 2780 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
2760 ap->ops->bmdma_setup(qc); /* set up bmdma */ 2781 ap->ops->bmdma_setup(qc); /* set up bmdma */
2761 ap->hsm_task_state = HSM_ST_FIRST; 2782 ap->hsm_task_state = HSM_ST_FIRST;
2762 2783
2763 /* send cdb by polling if no cdb interrupt */ 2784 /* send cdb by polling if no cdb interrupt */
2764 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) 2785 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
2765 ata_sff_queue_pio_task(ap, 0); 2786 ata_sff_queue_pio_task(link, 0);
2766 break; 2787 break;
2767 2788
2768 default: 2789 default:
2769 WARN_ON(1); 2790 WARN_ON(1);
2770 return AC_ERR_SYSTEM; 2791 return AC_ERR_SYSTEM;
2771 } 2792 }
2772 2793
2773 return 0; 2794 return 0;
2774 } 2795 }
2775 EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue); 2796 EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue);
2776 2797
2777 /** 2798 /**
2778 * ata_bmdma_port_intr - Handle BMDMA port interrupt 2799 * ata_bmdma_port_intr - Handle BMDMA port interrupt
2779 * @ap: Port on which interrupt arrived (possibly...) 2800 * @ap: Port on which interrupt arrived (possibly...)
2780 * @qc: Taskfile currently active in engine 2801 * @qc: Taskfile currently active in engine
2781 * 2802 *
2782 * Handle port interrupt for given queued command. 2803 * Handle port interrupt for given queued command.
2783 * 2804 *
2784 * LOCKING: 2805 * LOCKING:
2785 * spin_lock_irqsave(host lock) 2806 * spin_lock_irqsave(host lock)
2786 * 2807 *
2787 * RETURNS: 2808 * RETURNS:
2788 * One if interrupt was handled, zero if not (shared irq). 2809 * One if interrupt was handled, zero if not (shared irq).
2789 */ 2810 */
2790 unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc) 2811 unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
2791 { 2812 {
2792 struct ata_eh_info *ehi = &ap->link.eh_info; 2813 struct ata_eh_info *ehi = &ap->link.eh_info;
2793 u8 host_stat = 0; 2814 u8 host_stat = 0;
2794 bool bmdma_stopped = false; 2815 bool bmdma_stopped = false;
2795 unsigned int handled; 2816 unsigned int handled;
2796 2817
2797 if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) { 2818 if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
2798 /* check status of DMA engine */ 2819 /* check status of DMA engine */
2799 host_stat = ap->ops->bmdma_status(ap); 2820 host_stat = ap->ops->bmdma_status(ap);
2800 VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat); 2821 VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat);
2801 2822
2802 /* if it's not our irq... */ 2823 /* if it's not our irq... */
2803 if (!(host_stat & ATA_DMA_INTR)) 2824 if (!(host_stat & ATA_DMA_INTR))
2804 return ata_sff_idle_irq(ap); 2825 return ata_sff_idle_irq(ap);
2805 2826
2806 /* before we do anything else, clear DMA-Start bit */ 2827 /* before we do anything else, clear DMA-Start bit */
2807 ap->ops->bmdma_stop(qc); 2828 ap->ops->bmdma_stop(qc);
2808 bmdma_stopped = true; 2829 bmdma_stopped = true;
2809 2830
2810 if (unlikely(host_stat & ATA_DMA_ERR)) { 2831 if (unlikely(host_stat & ATA_DMA_ERR)) {
2811 /* error when transferring data to/from memory */ 2832 /* error when transferring data to/from memory */
2812 qc->err_mask |= AC_ERR_HOST_BUS; 2833 qc->err_mask |= AC_ERR_HOST_BUS;
2813 ap->hsm_task_state = HSM_ST_ERR; 2834 ap->hsm_task_state = HSM_ST_ERR;
2814 } 2835 }
2815 } 2836 }
2816 2837
2817 handled = __ata_sff_port_intr(ap, qc, bmdma_stopped); 2838 handled = __ata_sff_port_intr(ap, qc, bmdma_stopped);
2818 2839
2819 if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol)) 2840 if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
2820 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat); 2841 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2821 2842
2822 return handled; 2843 return handled;
2823 } 2844 }
2824 EXPORT_SYMBOL_GPL(ata_bmdma_port_intr); 2845 EXPORT_SYMBOL_GPL(ata_bmdma_port_intr);
2825 2846
2826 /** 2847 /**
2827 * ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler 2848 * ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler
2828 * @irq: irq line (unused) 2849 * @irq: irq line (unused)
2829 * @dev_instance: pointer to our ata_host information structure 2850 * @dev_instance: pointer to our ata_host information structure
2830 * 2851 *
2831 * Default interrupt handler for PCI IDE devices. Calls 2852 * Default interrupt handler for PCI IDE devices. Calls
2832 * ata_bmdma_port_intr() for each port that is not disabled. 2853 * ata_bmdma_port_intr() for each port that is not disabled.
2833 * 2854 *
2834 * LOCKING: 2855 * LOCKING:
2835 * Obtains host lock during operation. 2856 * Obtains host lock during operation.
2836 * 2857 *
2837 * RETURNS: 2858 * RETURNS:
2838 * IRQ_NONE or IRQ_HANDLED. 2859 * IRQ_NONE or IRQ_HANDLED.
2839 */ 2860 */
2840 irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance) 2861 irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance)
2841 { 2862 {
2842 return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr); 2863 return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr);
2843 } 2864 }
2844 EXPORT_SYMBOL_GPL(ata_bmdma_interrupt); 2865 EXPORT_SYMBOL_GPL(ata_bmdma_interrupt);
2845 2866
2846 /** 2867 /**
2847 * ata_bmdma_error_handler - Stock error handler for BMDMA controller 2868 * ata_bmdma_error_handler - Stock error handler for BMDMA controller
2848 * @ap: port to handle error for 2869 * @ap: port to handle error for
2849 * 2870 *
2850 * Stock error handler for BMDMA controller. It can handle both 2871 * Stock error handler for BMDMA controller. It can handle both
2851 * PATA and SATA controllers. Most BMDMA controllers should be 2872 * PATA and SATA controllers. Most BMDMA controllers should be
2852 * able to use this EH as-is or with some added handling before 2873 * able to use this EH as-is or with some added handling before
2853 * and after. 2874 * and after.
2854 * 2875 *
2855 * LOCKING: 2876 * LOCKING:
2856 * Kernel thread context (may sleep) 2877 * Kernel thread context (may sleep)
2857 */ 2878 */
2858 void ata_bmdma_error_handler(struct ata_port *ap) 2879 void ata_bmdma_error_handler(struct ata_port *ap)
2859 { 2880 {
2860 struct ata_queued_cmd *qc; 2881 struct ata_queued_cmd *qc;
2861 unsigned long flags; 2882 unsigned long flags;
2862 bool thaw = false; 2883 bool thaw = false;
2863 2884
2864 qc = __ata_qc_from_tag(ap, ap->link.active_tag); 2885 qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2865 if (qc && !(qc->flags & ATA_QCFLAG_FAILED)) 2886 if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
2866 qc = NULL; 2887 qc = NULL;
2867 2888
2868 /* reset PIO HSM and stop DMA engine */ 2889 /* reset PIO HSM and stop DMA engine */
2869 spin_lock_irqsave(ap->lock, flags); 2890 spin_lock_irqsave(ap->lock, flags);
2870 2891
2871 if (qc && ata_is_dma(qc->tf.protocol)) { 2892 if (qc && ata_is_dma(qc->tf.protocol)) {
2872 u8 host_stat; 2893 u8 host_stat;
2873 2894
2874 host_stat = ap->ops->bmdma_status(ap); 2895 host_stat = ap->ops->bmdma_status(ap);
2875 2896
2876 /* BMDMA controllers indicate host bus error by 2897 /* BMDMA controllers indicate host bus error by
2877 * setting DMA_ERR bit and timing out. As it wasn't 2898 * setting DMA_ERR bit and timing out. As it wasn't
2878 * really a timeout event, adjust error mask and 2899 * really a timeout event, adjust error mask and
2879 * cancel frozen state. 2900 * cancel frozen state.
2880 */ 2901 */
2881 if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) { 2902 if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
2882 qc->err_mask = AC_ERR_HOST_BUS; 2903 qc->err_mask = AC_ERR_HOST_BUS;
2883 thaw = true; 2904 thaw = true;
2884 } 2905 }
2885 2906
2886 ap->ops->bmdma_stop(qc); 2907 ap->ops->bmdma_stop(qc);
2887 2908
2888 /* if we're gonna thaw, make sure IRQ is clear */ 2909 /* if we're gonna thaw, make sure IRQ is clear */
2889 if (thaw) { 2910 if (thaw) {
2890 ap->ops->sff_check_status(ap); 2911 ap->ops->sff_check_status(ap);
2891 if (ap->ops->sff_irq_clear) 2912 if (ap->ops->sff_irq_clear)
2892 ap->ops->sff_irq_clear(ap); 2913 ap->ops->sff_irq_clear(ap);
2893 } 2914 }
2894 } 2915 }
2895 2916
2896 spin_unlock_irqrestore(ap->lock, flags); 2917 spin_unlock_irqrestore(ap->lock, flags);
2897 2918
2898 if (thaw) 2919 if (thaw)
2899 ata_eh_thaw_port(ap); 2920 ata_eh_thaw_port(ap);
2900 2921
2901 ata_sff_error_handler(ap); 2922 ata_sff_error_handler(ap);
2902 } 2923 }
2903 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler); 2924 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
2904 2925
2905 /** 2926 /**
2906 * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA 2927 * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA
2907 * @qc: internal command to clean up 2928 * @qc: internal command to clean up
2908 * 2929 *
2909 * LOCKING: 2930 * LOCKING:
2910 * Kernel thread context (may sleep) 2931 * Kernel thread context (may sleep)
2911 */ 2932 */
2912 void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc) 2933 void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
2913 { 2934 {
2914 struct ata_port *ap = qc->ap; 2935 struct ata_port *ap = qc->ap;
2915 unsigned long flags; 2936 unsigned long flags;
2916 2937
2917 if (ata_is_dma(qc->tf.protocol)) { 2938 if (ata_is_dma(qc->tf.protocol)) {
2918 spin_lock_irqsave(ap->lock, flags); 2939 spin_lock_irqsave(ap->lock, flags);
2919 ap->ops->bmdma_stop(qc); 2940 ap->ops->bmdma_stop(qc);
2920 spin_unlock_irqrestore(ap->lock, flags); 2941 spin_unlock_irqrestore(ap->lock, flags);
2921 } 2942 }
2922 } 2943 }
2923 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd); 2944 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
2924 2945
2925 /** 2946 /**
2926 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt. 2947 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
2927 * @ap: Port associated with this ATA transaction. 2948 * @ap: Port associated with this ATA transaction.
2928 * 2949 *
2929 * Clear interrupt and error flags in DMA status register. 2950 * Clear interrupt and error flags in DMA status register.
2930 * 2951 *
2931 * May be used as the irq_clear() entry in ata_port_operations. 2952 * May be used as the irq_clear() entry in ata_port_operations.
2932 * 2953 *
2933 * LOCKING: 2954 * LOCKING:
2934 * spin_lock_irqsave(host lock) 2955 * spin_lock_irqsave(host lock)
2935 */ 2956 */
2936 void ata_bmdma_irq_clear(struct ata_port *ap) 2957 void ata_bmdma_irq_clear(struct ata_port *ap)
2937 { 2958 {
2938 void __iomem *mmio = ap->ioaddr.bmdma_addr; 2959 void __iomem *mmio = ap->ioaddr.bmdma_addr;
2939 2960
2940 if (!mmio) 2961 if (!mmio)
2941 return; 2962 return;
2942 2963
2943 iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS); 2964 iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
2944 } 2965 }
2945 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear); 2966 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
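The iowrite8(ioread8(...)) idiom above relies on the SFF-8038i status register's write-1-to-clear semantics: the interrupt and error bits are cleared by writing them back as ones, while the read/write drive-DMA-capable bits are simply rewritten with their current values. An equivalent, more explicit sketch (hypothetical helper name):

	static void bmdma_ack_irq(void __iomem *mmio)
	{
		u8 stat = ioread8(mmio + ATA_DMA_STATUS);

		/* ATA_DMA_INTR and ATA_DMA_ERR are cleared by writing 1s back;
		 * the remaining writable bits keep their current values */
		iowrite8(stat, mmio + ATA_DMA_STATUS);
	}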
2946 2967
2947 /** 2968 /**
2948 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction 2969 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
2949 * @qc: Info associated with this ATA transaction. 2970 * @qc: Info associated with this ATA transaction.
2950 * 2971 *
2951 * LOCKING: 2972 * LOCKING:
2952 * spin_lock_irqsave(host lock) 2973 * spin_lock_irqsave(host lock)
2953 */ 2974 */
2954 void ata_bmdma_setup(struct ata_queued_cmd *qc) 2975 void ata_bmdma_setup(struct ata_queued_cmd *qc)
2955 { 2976 {
2956 struct ata_port *ap = qc->ap; 2977 struct ata_port *ap = qc->ap;
2957 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); 2978 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
2958 u8 dmactl; 2979 u8 dmactl;
2959 2980
2960 /* load PRD table addr. */ 2981 /* load PRD table addr. */
2961 mb(); /* make sure PRD table writes are visible to controller */ 2982 mb(); /* make sure PRD table writes are visible to controller */
2962 iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); 2983 iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2963 2984
2964 /* specify data direction, triple-check start bit is clear */ 2985 /* specify data direction, triple-check start bit is clear */
2965 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); 2986 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2966 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); 2987 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
2967 if (!rw) 2988 if (!rw)
2968 dmactl |= ATA_DMA_WR; 2989 dmactl |= ATA_DMA_WR;
2969 iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); 2990 iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2970 2991
2971 /* issue r/w command */ 2992 /* issue r/w command */
2972 ap->ops->sff_exec_command(ap, &qc->tf); 2993 ap->ops->sff_exec_command(ap, &qc->tf);
2973 } 2994 }
2974 EXPORT_SYMBOL_GPL(ata_bmdma_setup); 2995 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
2975 2996
2976 /** 2997 /**
2977 * ata_bmdma_start - Start a PCI IDE BMDMA transaction 2998 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
2978 * @qc: Info associated with this ATA transaction. 2999 * @qc: Info associated with this ATA transaction.
2979 * 3000 *
2980 * LOCKING: 3001 * LOCKING:
2981 * spin_lock_irqsave(host lock) 3002 * spin_lock_irqsave(host lock)
2982 */ 3003 */
2983 void ata_bmdma_start(struct ata_queued_cmd *qc) 3004 void ata_bmdma_start(struct ata_queued_cmd *qc)
2984 { 3005 {
2985 struct ata_port *ap = qc->ap; 3006 struct ata_port *ap = qc->ap;
2986 u8 dmactl; 3007 u8 dmactl;
2987 3008
2988 /* start host DMA transaction */ 3009 /* start host DMA transaction */
2989 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); 3010 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2990 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); 3011 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2991 3012
2992 /* Strictly, one may wish to issue an ioread8() here, to 3013 /* Strictly, one may wish to issue an ioread8() here, to
2993 * flush the mmio write. However, control also passes 3014 * flush the mmio write. However, control also passes
2994 * to the hardware at this point, and it will interrupt 3015 * to the hardware at this point, and it will interrupt
2995 * us when we are to resume control. So, in effect, 3016 * us when we are to resume control. So, in effect,
2996 * we don't care when the mmio write flushes. 3017 * we don't care when the mmio write flushes.
2997 * Further, a read of the DMA status register _immediately_ 3018 * Further, a read of the DMA status register _immediately_
2998 * following the write may not be what certain flaky hardware 3019 * following the write may not be what certain flaky hardware
2999 * expects, so I think it is best not to add a readb() 3020 * expects, so I think it is best not to add a readb()
3000 * without first testing all the MMIO ATA cards/mobos. 3021 * without first testing all the MMIO ATA cards/mobos.
3001 * Or maybe I'm just being paranoid. 3022 * Or maybe I'm just being paranoid.
3002 * 3023 *
3003 * FIXME: The posting of this write means I/O starts are 3024 * FIXME: The posting of this write means I/O starts are
3004 * unnecessarily delayed for MMIO 3025 * unnecessarily delayed for MMIO
3005 */ 3026 */
3006 } 3027 }
3007 EXPORT_SYMBOL_GPL(ata_bmdma_start); 3028 EXPORT_SYMBOL_GPL(ata_bmdma_start);
3008 3029
3009 /** 3030 /**
3010 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer 3031 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
3011 * @qc: Command we are ending DMA for 3032 * @qc: Command we are ending DMA for
3012 * 3033 *
3013 * Clears the ATA_DMA_START flag in the dma control register 3034 * Clears the ATA_DMA_START flag in the dma control register
3014 * 3035 *
3015 * May be used as the bmdma_stop() entry in ata_port_operations. 3036 * May be used as the bmdma_stop() entry in ata_port_operations.
3016 * 3037 *
3017 * LOCKING: 3038 * LOCKING:
3018 * spin_lock_irqsave(host lock) 3039 * spin_lock_irqsave(host lock)
3019 */ 3040 */
3020 void ata_bmdma_stop(struct ata_queued_cmd *qc) 3041 void ata_bmdma_stop(struct ata_queued_cmd *qc)
3021 { 3042 {
3022 struct ata_port *ap = qc->ap; 3043 struct ata_port *ap = qc->ap;
3023 void __iomem *mmio = ap->ioaddr.bmdma_addr; 3044 void __iomem *mmio = ap->ioaddr.bmdma_addr;
3024 3045
3025 /* clear start/stop bit */ 3046 /* clear start/stop bit */
3026 iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START, 3047 iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
3027 mmio + ATA_DMA_CMD); 3048 mmio + ATA_DMA_CMD);
3028 3049
3029 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ 3050 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
3030 ata_sff_dma_pause(ap); 3051 ata_sff_dma_pause(ap);
3031 } 3052 }
3032 EXPORT_SYMBOL_GPL(ata_bmdma_stop); 3053 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
3033 3054
3034 /** 3055 /**
3035 * ata_bmdma_status - Read PCI IDE BMDMA status 3056 * ata_bmdma_status - Read PCI IDE BMDMA status
3036 * @ap: Port associated with this ATA transaction. 3057 * @ap: Port associated with this ATA transaction.
3037 * 3058 *
3038 * Read and return BMDMA status register. 3059 * Read and return BMDMA status register.
3039 * 3060 *
3040 * May be used as the bmdma_status() entry in ata_port_operations. 3061 * May be used as the bmdma_status() entry in ata_port_operations.
3041 * 3062 *
3042 * LOCKING: 3063 * LOCKING:
3043 * spin_lock_irqsave(host lock) 3064 * spin_lock_irqsave(host lock)
3044 */ 3065 */
3045 u8 ata_bmdma_status(struct ata_port *ap) 3066 u8 ata_bmdma_status(struct ata_port *ap)
3046 { 3067 {
3047 return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); 3068 return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
3048 } 3069 }
3049 EXPORT_SYMBOL_GPL(ata_bmdma_status); 3070 EXPORT_SYMBOL_GPL(ata_bmdma_status);
3050 3071
3051 3072
3052 /** 3073 /**
3053 * ata_bmdma_port_start - Set port up for bmdma. 3074 * ata_bmdma_port_start - Set port up for bmdma.
3054 * @ap: Port to initialize 3075 * @ap: Port to initialize
3055 * 3076 *
3056 * Called just after data structures for each port are 3077 * Called just after data structures for each port are
3057 * initialized. Allocates space for PRD table. 3078 * initialized. Allocates space for PRD table.
3058 * 3079 *
3059 * May be used as the port_start() entry in ata_port_operations. 3080 * May be used as the port_start() entry in ata_port_operations.
3060 * 3081 *
3061 * LOCKING: 3082 * LOCKING:
3062 * Inherited from caller. 3083 * Inherited from caller.
3063 */ 3084 */
3064 int ata_bmdma_port_start(struct ata_port *ap) 3085 int ata_bmdma_port_start(struct ata_port *ap)
3065 { 3086 {
3066 if (ap->mwdma_mask || ap->udma_mask) { 3087 if (ap->mwdma_mask || ap->udma_mask) {
3067 ap->bmdma_prd = 3088 ap->bmdma_prd =
3068 dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ, 3089 dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ,
3069 &ap->bmdma_prd_dma, GFP_KERNEL); 3090 &ap->bmdma_prd_dma, GFP_KERNEL);
3070 if (!ap->bmdma_prd) 3091 if (!ap->bmdma_prd)
3071 return -ENOMEM; 3092 return -ENOMEM;
3072 } 3093 }
3073 3094
3074 return 0; 3095 return 0;
3075 } 3096 }
3076 EXPORT_SYMBOL_GPL(ata_bmdma_port_start); 3097 EXPORT_SYMBOL_GPL(ata_bmdma_port_start);
3077 3098
3078 /** 3099 /**
3079 * ata_bmdma_port_start32 - Set port up for dma. 3100 * ata_bmdma_port_start32 - Set port up for dma.
3080 * @ap: Port to initialize 3101 * @ap: Port to initialize
3081 * 3102 *
3082 * Called just after data structures for each port are 3103 * Called just after data structures for each port are
3083 * initialized. Enables 32bit PIO and allocates space for PRD 3104 * initialized. Enables 32bit PIO and allocates space for PRD
3084 * table. 3105 * table.
3085 * 3106 *
3086 * May be used as the port_start() entry in ata_port_operations for 3107 * May be used as the port_start() entry in ata_port_operations for
3087 * devices that are capable of 32bit PIO. 3108 * devices that are capable of 32bit PIO.
3088 * 3109 *
3089 * LOCKING: 3110 * LOCKING:
3090 * Inherited from caller. 3111 * Inherited from caller.
3091 */ 3112 */
3092 int ata_bmdma_port_start32(struct ata_port *ap) 3113 int ata_bmdma_port_start32(struct ata_port *ap)
3093 { 3114 {
3094 ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE; 3115 ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
3095 return ata_bmdma_port_start(ap); 3116 return ata_bmdma_port_start(ap);
3096 } 3117 }
3097 EXPORT_SYMBOL_GPL(ata_bmdma_port_start32); 3118 EXPORT_SYMBOL_GPL(ata_bmdma_port_start32);
3098 3119
3099 #ifdef CONFIG_PCI 3120 #ifdef CONFIG_PCI
3100 3121
3101 /** 3122 /**
3102 * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex 3123 * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex
3103 * @pdev: PCI device 3124 * @pdev: PCI device
3104 * 3125 *
3105 * Some PCI ATA devices report simplex mode but in fact can be told to 3126 * Some PCI ATA devices report simplex mode but in fact can be told to
3106 * enter non-simplex mode. This implements the necessary logic to 3127 * enter non-simplex mode. This implements the necessary logic to
3107 * perform the task on such devices. Calling it on other devices will 3128 * perform the task on such devices. Calling it on other devices will
3108 * have -undefined- behaviour. 3129 * have -undefined- behaviour.
3109 */ 3130 */
3110 int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev) 3131 int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
3111 { 3132 {
3112 unsigned long bmdma = pci_resource_start(pdev, 4); 3133 unsigned long bmdma = pci_resource_start(pdev, 4);
3113 u8 simplex; 3134 u8 simplex;
3114 3135
3115 if (bmdma == 0) 3136 if (bmdma == 0)
3116 return -ENOENT; 3137 return -ENOENT;
3117 3138
3118 simplex = inb(bmdma + 0x02); 3139 simplex = inb(bmdma + 0x02);
3119 outb(simplex & 0x60, bmdma + 0x02); 3140 outb(simplex & 0x60, bmdma + 0x02);
3120 simplex = inb(bmdma + 0x02); 3141 simplex = inb(bmdma + 0x02);
3121 if (simplex & 0x80) 3142 if (simplex & 0x80)
3122 return -EOPNOTSUPP; 3143 return -EOPNOTSUPP;
3123 return 0; 3144 return 0;
3124 } 3145 }
3125 EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex); 3146 EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
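For context on the routine above: offset 0x02 in the BMDMA I/O block is the status register, bit 7 is the simplex flag, and writing simplex & 0x60 preserves the drive-DMA-capable bits (5 and 6) while writing zero to the simplex bit; the final read checks whether the flag actually cleared. A hypothetical call site in a driver that knows its chip's simplex flag is only advisory:

	if (ata_pci_bmdma_clear_simplex(pdev))
		dev_printk(KERN_WARNING, &pdev->dev,
			   "simplex flag would not clear, DMA is shared\n");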
3126 3147
3127 static void ata_bmdma_nodma(struct ata_host *host, const char *reason) 3148 static void ata_bmdma_nodma(struct ata_host *host, const char *reason)
3128 { 3149 {
3129 int i; 3150 int i;
3130 3151
3131 dev_printk(KERN_ERR, host->dev, "BMDMA: %s, falling back to PIO\n", 3152 dev_printk(KERN_ERR, host->dev, "BMDMA: %s, falling back to PIO\n",
3132 reason); 3153 reason);
3133 3154
3134 for (i = 0; i < 2; i++) { 3155 for (i = 0; i < 2; i++) {
3135 host->ports[i]->mwdma_mask = 0; 3156 host->ports[i]->mwdma_mask = 0;
3136 host->ports[i]->udma_mask = 0; 3157 host->ports[i]->udma_mask = 0;
3137 } 3158 }
3138 } 3159 }
3139 3160
3140 /** 3161 /**
3141 * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host 3162 * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
3142 * @host: target ATA host 3163 * @host: target ATA host
3143 * 3164 *
3144 * Acquire PCI BMDMA resources and initialize @host accordingly. 3165 * Acquire PCI BMDMA resources and initialize @host accordingly.
3145 * 3166 *
3146 * LOCKING: 3167 * LOCKING:
3147 * Inherited from calling layer (may sleep). 3168 * Inherited from calling layer (may sleep).
3148 */ 3169 */
3149 void ata_pci_bmdma_init(struct ata_host *host) 3170 void ata_pci_bmdma_init(struct ata_host *host)
3150 { 3171 {
3151 struct device *gdev = host->dev; 3172 struct device *gdev = host->dev;
3152 struct pci_dev *pdev = to_pci_dev(gdev); 3173 struct pci_dev *pdev = to_pci_dev(gdev);
3153 int i, rc; 3174 int i, rc;
3154 3175
3155 /* No BAR4 allocation: No DMA */ 3176 /* No BAR4 allocation: No DMA */
3156 if (pci_resource_start(pdev, 4) == 0) { 3177 if (pci_resource_start(pdev, 4) == 0) {
3157 ata_bmdma_nodma(host, "BAR4 is zero"); 3178 ata_bmdma_nodma(host, "BAR4 is zero");
3158 return; 3179 return;
3159 } 3180 }
3160 3181
3161 /* 3182 /*
3162 * Some controllers require the BMDMA region to be initialized 3183 * Some controllers require the BMDMA region to be initialized
3163 * even if DMA is not in use, to clear IRQ status via the 3184 * even if DMA is not in use, to clear IRQ status via the
3164 * ->sff_irq_clear method. Try to initialize bmdma_addr 3185 * ->sff_irq_clear method. Try to initialize bmdma_addr
3165 * regardless of dma masks. 3186 * regardless of dma masks.
3166 */ 3187 */
3167 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 3188 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
3168 if (rc) 3189 if (rc)
3169 ata_bmdma_nodma(host, "failed to set dma mask"); 3190 ata_bmdma_nodma(host, "failed to set dma mask");
3170 if (!rc) { 3191 if (!rc) {
3171 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); 3192 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
3172 if (rc) 3193 if (rc)
3173 ata_bmdma_nodma(host, 3194 ata_bmdma_nodma(host,
3174 "failed to set consistent dma mask"); 3195 "failed to set consistent dma mask");
3175 } 3196 }
3176 3197
3177 /* request and iomap DMA region */ 3198 /* request and iomap DMA region */
3178 rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev)); 3199 rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
3179 if (rc) { 3200 if (rc) {
3180 ata_bmdma_nodma(host, "failed to request/iomap BAR4"); 3201 ata_bmdma_nodma(host, "failed to request/iomap BAR4");
3181 return; 3202 return;
3182 } 3203 }
3183 host->iomap = pcim_iomap_table(pdev); 3204 host->iomap = pcim_iomap_table(pdev);
3184 3205
3185 for (i = 0; i < 2; i++) { 3206 for (i = 0; i < 2; i++) {
3186 struct ata_port *ap = host->ports[i]; 3207 struct ata_port *ap = host->ports[i];
3187 void __iomem *bmdma = host->iomap[4] + 8 * i; 3208 void __iomem *bmdma = host->iomap[4] + 8 * i;
3188 3209
3189 if (ata_port_is_dummy(ap)) 3210 if (ata_port_is_dummy(ap))
3190 continue; 3211 continue;
3191 3212
3192 ap->ioaddr.bmdma_addr = bmdma; 3213 ap->ioaddr.bmdma_addr = bmdma;
3193 if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) && 3214 if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
3194 (ioread8(bmdma + 2) & 0x80)) 3215 (ioread8(bmdma + 2) & 0x80))
3195 host->flags |= ATA_HOST_SIMPLEX; 3216 host->flags |= ATA_HOST_SIMPLEX;
3196 3217
3197 ata_port_desc(ap, "bmdma 0x%llx", 3218 ata_port_desc(ap, "bmdma 0x%llx",
3198 (unsigned long long)pci_resource_start(pdev, 4) + 8 * i); 3219 (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
3199 } 3220 }
3200 } 3221 }
3201 EXPORT_SYMBOL_GPL(ata_pci_bmdma_init); 3222 EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
3202 3223
3203 /** 3224 /**
3204 * ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host 3225 * ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host
3205 * @pdev: target PCI device 3226 * @pdev: target PCI device
3206 * @ppi: array of port_info, must be enough for two ports 3227 * @ppi: array of port_info, must be enough for two ports
3207 * @r_host: out argument for the initialized ATA host 3228 * @r_host: out argument for the initialized ATA host
3208 * 3229 *
3209 * Helper to allocate BMDMA ATA host for @pdev, acquire all PCI 3230 * Helper to allocate BMDMA ATA host for @pdev, acquire all PCI
3210 * resources and initialize it accordingly in one go. 3231 * resources and initialize it accordingly in one go.
3211 * 3232 *
3212 * LOCKING: 3233 * LOCKING:
3213 * Inherited from calling layer (may sleep). 3234 * Inherited from calling layer (may sleep).
3214 * 3235 *
3215 * RETURNS: 3236 * RETURNS:
3216 * 0 on success, -errno otherwise. 3237 * 0 on success, -errno otherwise.
3217 */ 3238 */
3218 int ata_pci_bmdma_prepare_host(struct pci_dev *pdev, 3239 int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
3219 const struct ata_port_info * const * ppi, 3240 const struct ata_port_info * const * ppi,
3220 struct ata_host **r_host) 3241 struct ata_host **r_host)
3221 { 3242 {
3222 int rc; 3243 int rc;
3223 3244
3224 rc = ata_pci_sff_prepare_host(pdev, ppi, r_host); 3245 rc = ata_pci_sff_prepare_host(pdev, ppi, r_host);
3225 if (rc) 3246 if (rc)
3226 return rc; 3247 return rc;
3227 3248
3228 ata_pci_bmdma_init(*r_host); 3249 ata_pci_bmdma_init(*r_host);
3229 return 0; 3250 return 0;
3230 } 3251 }
3231 EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host); 3252 EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host);
3232 3253
3233 /** 3254 /**
3234 * ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller 3255 * ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller
3235 * @pdev: Controller to be initialized 3256 * @pdev: Controller to be initialized
3236 * @ppi: array of port_info, must be enough for two ports 3257 * @ppi: array of port_info, must be enough for two ports
3237 * @sht: scsi_host_template to use when registering the host 3258 * @sht: scsi_host_template to use when registering the host
3238 * @host_priv: host private_data 3259 * @host_priv: host private_data
3239 * @hflags: host flags 3260 * @hflags: host flags
3240 * 3261 *
3241 * This function is similar to ata_pci_sff_init_one() but also 3262 * This function is similar to ata_pci_sff_init_one() but also
3242 * takes care of BMDMA initialization. 3263 * takes care of BMDMA initialization.
3243 * 3264 *
3244 * LOCKING: 3265 * LOCKING:
3245 * Inherited from PCI layer (may sleep). 3266 * Inherited from PCI layer (may sleep).
3246 * 3267 *
3247 * RETURNS: 3268 * RETURNS:
3248 * Zero on success, negative errno-based value on error. 3269 * Zero on success, negative errno-based value on error.
3249 */ 3270 */
3250 int ata_pci_bmdma_init_one(struct pci_dev *pdev, 3271 int ata_pci_bmdma_init_one(struct pci_dev *pdev,
3251 const struct ata_port_info * const * ppi, 3272 const struct ata_port_info * const * ppi,
3252 struct scsi_host_template *sht, void *host_priv, 3273 struct scsi_host_template *sht, void *host_priv,
3253 int hflags) 3274 int hflags)
3254 { 3275 {
3255 struct device *dev = &pdev->dev; 3276 struct device *dev = &pdev->dev;
3256 const struct ata_port_info *pi; 3277 const struct ata_port_info *pi;
3257 struct ata_host *host = NULL; 3278 struct ata_host *host = NULL;
3258 int rc; 3279 int rc;
3259 3280
3260 DPRINTK("ENTER\n"); 3281 DPRINTK("ENTER\n");
3261 3282
3262 pi = ata_sff_find_valid_pi(ppi); 3283 pi = ata_sff_find_valid_pi(ppi);
3263 if (!pi) { 3284 if (!pi) {
3264 dev_printk(KERN_ERR, &pdev->dev, 3285 dev_printk(KERN_ERR, &pdev->dev,
3265 "no valid port_info specified\n"); 3286 "no valid port_info specified\n");
3266 return -EINVAL; 3287 return -EINVAL;
3267 } 3288 }
3268 3289
3269 if (!devres_open_group(dev, NULL, GFP_KERNEL)) 3290 if (!devres_open_group(dev, NULL, GFP_KERNEL))
3270 return -ENOMEM; 3291 return -ENOMEM;
3271 3292
3272 rc = pcim_enable_device(pdev); 3293 rc = pcim_enable_device(pdev);
3273 if (rc) 3294 if (rc)
3274 goto out; 3295 goto out;
3275 3296
3276 /* prepare and activate BMDMA host */ 3297 /* prepare and activate BMDMA host */
3277 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host); 3298 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
3278 if (rc) 3299 if (rc)
3279 goto out; 3300 goto out;
3280 host->private_data = host_priv; 3301 host->private_data = host_priv;
3281 host->flags |= hflags; 3302 host->flags |= hflags;
3282 3303
3283 pci_set_master(pdev); 3304 pci_set_master(pdev);
3284 rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht); 3305 rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
3285 out: 3306 out:
3286 if (rc == 0) 3307 if (rc == 0)
3287 devres_remove_group(&pdev->dev, NULL); 3308 devres_remove_group(&pdev->dev, NULL);
3288 else 3309 else
3289 devres_release_group(&pdev->dev, NULL); 3310 devres_release_group(&pdev->dev, NULL);
3290 3311
3291 return rc; 3312 return rc;
3292 } 3313 }
3293 EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one); 3314 EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one);
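As with the PIO-only helper earlier, a hypothetical BMDMA driver probe reduces to a port_info table and one call; all names below are illustrative, not part of this commit:

	static struct scsi_host_template my_bmdma_sht = {
		ATA_BMDMA_SHT("my_pata"),
	};

	static const struct ata_port_info my_bmdma_port_info = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &ata_bmdma_port_ops,
	};

	static int my_bmdma_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *id)
	{
		const struct ata_port_info *ppi[] = { &my_bmdma_port_info, NULL };

		return ata_pci_bmdma_init_one(pdev, ppi, &my_bmdma_sht, NULL, 0);
	}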
3294 3315
3295 #endif /* CONFIG_PCI */ 3316 #endif /* CONFIG_PCI */
3296 #endif /* CONFIG_ATA_BMDMA */ 3317 #endif /* CONFIG_ATA_BMDMA */
3297 3318
3298 /** 3319 /**
3299 * ata_sff_port_init - Initialize SFF/BMDMA ATA port 3320 * ata_sff_port_init - Initialize SFF/BMDMA ATA port
3300 * @ap: Port to initialize 3321 * @ap: Port to initialize
3301 * 3322 *
3302 * Called on port allocation to initialize SFF/BMDMA specific 3323 * Called on port allocation to initialize SFF/BMDMA specific
3303 * fields. 3324 * fields.
3304 * 3325 *
3305 * LOCKING: 3326 * LOCKING:
3306 * None. 3327 * None.
3307 */ 3328 */
3308 void ata_sff_port_init(struct ata_port *ap) 3329 void ata_sff_port_init(struct ata_port *ap)
3309 { 3330 {
3310 INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task); 3331 INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task);
3311 ap->ctl = ATA_DEVCTL_OBS; 3332 ap->ctl = ATA_DEVCTL_OBS;
3312 ap->last_ctl = 0xFF; 3333 ap->last_ctl = 0xFF;
3313 } 3334 }
3314 3335
3315 int __init ata_sff_init(void) 3336 int __init ata_sff_init(void)
3316 { 3337 {
3317 ata_sff_wq = alloc_workqueue("ata_sff", WQ_RESCUER, WQ_MAX_ACTIVE); 3338 ata_sff_wq = alloc_workqueue("ata_sff", WQ_RESCUER, WQ_MAX_ACTIVE);
3318 if (!ata_sff_wq) 3339 if (!ata_sff_wq)
3319 return -ENOMEM; 3340 return -ENOMEM;
3320 3341
3321 return 0; 3342 return 0;
3322 } 3343 }
3323 3344
3324 void __exit ata_sff_exit(void) 3345 void __exit ata_sff_exit(void)
3325 { 3346 {
3326 destroy_workqueue(ata_sff_wq); 3347 destroy_workqueue(ata_sff_wq);
3327 } 3348 }
3328 3349
drivers/ata/pata_artop.c
1 /* 1 /*
2 * pata_artop.c - ARTOP ATA controller driver 2 * pata_artop.c - ARTOP ATA controller driver
3 * 3 *
4 * (C) 2006 Red Hat 4 * (C) 2006 Red Hat
5 * (C) 2007 Bartlomiej Zolnierkiewicz 5 * (C) 2007 Bartlomiej Zolnierkiewicz
6 * 6 *
7 * Based in part on drivers/ide/pci/aec62xx.c 7 * Based in part on drivers/ide/pci/aec62xx.c
8 * Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org> 8 * Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org>
9 * 865/865R fixes for Macintosh card version from a patch to the old 9 * 865/865R fixes for Macintosh card version from a patch to the old
10 * driver by Thibaut VARENE <varenet@parisc-linux.org> 10 * driver by Thibaut VARENE <varenet@parisc-linux.org>
11 * When setting the PCI latency we must set 0x80 or higher for burst 11 * When setting the PCI latency we must set 0x80 or higher for burst
12 * performance - Alessandro Zummo <alessandro.zummo@towertech.it> 12 * performance - Alessandro Zummo <alessandro.zummo@towertech.it>
13 * 13 *
14 * TODO 14 * TODO
15 * Investigate no_dsc on 850R 15 * Investigate no_dsc on 850R
16 * Clock detect 16 * Clock detect
17 */ 17 */
18 18
19 #include <linux/kernel.h> 19 #include <linux/kernel.h>
20 #include <linux/module.h> 20 #include <linux/module.h>
21 #include <linux/pci.h> 21 #include <linux/pci.h>
22 #include <linux/init.h> 22 #include <linux/init.h>
23 #include <linux/blkdev.h> 23 #include <linux/blkdev.h>
24 #include <linux/delay.h> 24 #include <linux/delay.h>
25 #include <linux/device.h> 25 #include <linux/device.h>
26 #include <scsi/scsi_host.h> 26 #include <scsi/scsi_host.h>
27 #include <linux/libata.h> 27 #include <linux/libata.h>
28 #include <linux/ata.h> 28 #include <linux/ata.h>
29 29
30 #define DRV_NAME "pata_artop" 30 #define DRV_NAME "pata_artop"
31 #define DRV_VERSION "0.4.5" 31 #define DRV_VERSION "0.4.5"
32 32
33 /* 33 /*
34 * The ARTOP has 33 MHz and "over clocked" timing tables. Until we 34 * The ARTOP has 33 MHz and "over clocked" timing tables. Until we
35 * get PCI bus speed functionality we leave this as 0. It's a variable 35 * get PCI bus speed functionality we leave this as 0. It's a variable
36 * for when we get the functionality and also for folks wanting to 36 * for when we get the functionality and also for folks wanting to
37 * test stuff. 37 * test stuff.
38 */ 38 */
39 39
40 static int clock = 0; 40 static int clock = 0;
41 41
42 static int artop6210_pre_reset(struct ata_link *link, unsigned long deadline) 42 static int artop6210_pre_reset(struct ata_link *link, unsigned long deadline)
43 { 43 {
44 struct ata_port *ap = link->ap; 44 struct ata_port *ap = link->ap;
45 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 45 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
46 const struct pci_bits artop_enable_bits[] = { 46 const struct pci_bits artop_enable_bits[] = {
47 { 0x4AU, 1U, 0x02UL, 0x02UL }, /* port 0 */ 47 { 0x4AU, 1U, 0x02UL, 0x02UL }, /* port 0 */
48 { 0x4AU, 1U, 0x04UL, 0x04UL }, /* port 1 */ 48 { 0x4AU, 1U, 0x04UL, 0x04UL }, /* port 1 */
49 }; 49 };
50 50
51 if (!pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) 51 if (!pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
52 return -ENOENT; 52 return -ENOENT;
53 53
54 return ata_sff_prereset(link, deadline); 54 return ata_sff_prereset(link, deadline);
55 } 55 }
56 56
57 /** 57 /**
58 * artop6260_pre_reset - check for 40/80 pin 58 * artop6260_pre_reset - check for 40/80 pin
59 * @link: link 59 * @link: link
60 * @deadline: deadline jiffies for the operation 60 * @deadline: deadline jiffies for the operation
61 * 61 *
62 * The ARTOP hardware reports the cable detect bits in register 0x49. 62 * The ARTOP hardware reports the cable detect bits in register 0x49.
63 * Nothing complicated needed here. 63 * Nothing complicated needed here.
64 */ 64 */
65 65
66 static int artop6260_pre_reset(struct ata_link *link, unsigned long deadline) 66 static int artop6260_pre_reset(struct ata_link *link, unsigned long deadline)
67 { 67 {
68 static const struct pci_bits artop_enable_bits[] = { 68 static const struct pci_bits artop_enable_bits[] = {
69 { 0x4AU, 1U, 0x02UL, 0x02UL }, /* port 0 */ 69 { 0x4AU, 1U, 0x02UL, 0x02UL }, /* port 0 */
70 { 0x4AU, 1U, 0x04UL, 0x04UL }, /* port 1 */ 70 { 0x4AU, 1U, 0x04UL, 0x04UL }, /* port 1 */
71 }; 71 };
72 72
73 struct ata_port *ap = link->ap; 73 struct ata_port *ap = link->ap;
74 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 74 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
75 75
76 /* Odd-numbered device IDs are the units with enable bits (the -R cards) */ 77 /* Odd-numbered device IDs are the units with enable bits (the -R cards) */
77 if (pdev->device % 1 && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) 77 if ((pdev->device & 1) &&
78 !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
78 return -ENOENT; 79 return -ENOENT;
79 80
80 return ata_sff_prereset(link, deadline); 81 return ata_sff_prereset(link, deadline);
81 } 82 }
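Editor's note on the one-character fix above (the "pata_artop: Fix device ID parity check" change in this merge): pdev->device % 1 is the remainder modulo one and is always zero, so the enable-bit test could never run; pdev->device & 1 tests the low bit, which is set exactly for the odd device IDs, i.e. the -R cards. A hypothetical restatement:

	static bool artop_is_r_card(struct pci_dev *pdev)
	{
		/* the old test, pdev->device % 1, always evaluated to 0 */
		return pdev->device & 1;	/* odd ID: -R card with enable bits */
	}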
82 83
83 /** 84 /**
84 * artop6260_cable_detect - identify cable type 85 * artop6260_cable_detect - identify cable type
85 * @ap: Port 86 * @ap: Port
86 * 87 *
87 * Identify the cable type for the ARTOP interface in question 88 * Identify the cable type for the ARTOP interface in question
88 */ 89 */
89 90
90 static int artop6260_cable_detect(struct ata_port *ap) 91 static int artop6260_cable_detect(struct ata_port *ap)
91 { 92 {
92 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 93 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
93 u8 tmp; 94 u8 tmp;
94 pci_read_config_byte(pdev, 0x49, &tmp); 95 pci_read_config_byte(pdev, 0x49, &tmp);
95 if (tmp & (1 << ap->port_no)) 96 if (tmp & (1 << ap->port_no))
96 return ATA_CBL_PATA40; 97 return ATA_CBL_PATA40;
97 return ATA_CBL_PATA80; 98 return ATA_CBL_PATA80;
98 } 99 }
99 100
100 /** 101 /**
101 * artop6210_load_piomode - Load a set of PATA PIO timings 102 * artop6210_load_piomode - Load a set of PATA PIO timings
102 * @ap: Port whose timings we are configuring 103 * @ap: Port whose timings we are configuring
103 * @adev: Device 104 * @adev: Device
104 * @pio: PIO mode 105 * @pio: PIO mode
105 * 106 *
106 * Set PIO mode for device, in host controller PCI config space. This 107 * Set PIO mode for device, in host controller PCI config space. This
107 * is used both to set PIO timings in PIO mode and also to set the 108 * is used both to set PIO timings in PIO mode and also to set the
108 * matching PIO clocking for UDMA, as well as the MWDMA timings. 109 * matching PIO clocking for UDMA, as well as the MWDMA timings.
109 * 110 *
110 * LOCKING: 111 * LOCKING:
111 * None (inherited from caller). 112 * None (inherited from caller).
112 */ 113 */
113 114
114 static void artop6210_load_piomode(struct ata_port *ap, struct ata_device *adev, unsigned int pio) 115 static void artop6210_load_piomode(struct ata_port *ap, struct ata_device *adev, unsigned int pio)
115 { 116 {
116 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 117 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
117 int dn = adev->devno + 2 * ap->port_no; 118 int dn = adev->devno + 2 * ap->port_no;
118 const u16 timing[2][5] = { 119 const u16 timing[2][5] = {
119 { 0x0000, 0x000A, 0x0008, 0x0303, 0x0301 }, 120 { 0x0000, 0x000A, 0x0008, 0x0303, 0x0301 },
120 { 0x0700, 0x070A, 0x0708, 0x0403, 0x0401 } 121 { 0x0700, 0x070A, 0x0708, 0x0403, 0x0401 }
121 122
122 }; 123 };
123 /* Load the PIO timing active/recovery bits */ 124 /* Load the PIO timing active/recovery bits */
124 pci_write_config_word(pdev, 0x40 + 2 * dn, timing[clock][pio]); 125 pci_write_config_word(pdev, 0x40 + 2 * dn, timing[clock][pio]);
125 } 126 }
126 127
127 /** 128 /**
128 * artop6210_set_piomode - Initialize host controller PATA PIO timings 129 * artop6210_set_piomode - Initialize host controller PATA PIO timings
129 * @ap: Port whose timings we are configuring 130 * @ap: Port whose timings we are configuring
130 * @adev: Device we are configuring 131 * @adev: Device we are configuring
131 * 132 *
132 * Set PIO mode for device, in host controller PCI config space. For 133 * Set PIO mode for device, in host controller PCI config space. For
133 * ARTOP we must also clear the UDMA bits if we are not doing UDMA. In 134 * ARTOP we must also clear the UDMA bits if we are not doing UDMA. In
134 * the event UDMA is used the later call to set_dmamode will set the 135 * the event UDMA is used the later call to set_dmamode will set the
135 * bits as required. 136 * bits as required.
136 * 137 *
137 * LOCKING: 138 * LOCKING:
138 * None (inherited from caller). 139 * None (inherited from caller).
139 */ 140 */
140 141
141 static void artop6210_set_piomode(struct ata_port *ap, struct ata_device *adev) 142 static void artop6210_set_piomode(struct ata_port *ap, struct ata_device *adev)
142 { 143 {
143 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 144 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
144 int dn = adev->devno + 2 * ap->port_no; 145 int dn = adev->devno + 2 * ap->port_no;
145 u8 ultra; 146 u8 ultra;
146 147
147 artop6210_load_piomode(ap, adev, adev->pio_mode - XFER_PIO_0); 148 artop6210_load_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
148 149
149 /* Clear the UDMA mode bits (set_dmamode will redo this if needed) */ 150 /* Clear the UDMA mode bits (set_dmamode will redo this if needed) */
150 pci_read_config_byte(pdev, 0x54, &ultra); 151 pci_read_config_byte(pdev, 0x54, &ultra);
151 ultra &= ~(3 << (2 * dn)); 152 ultra &= ~(3 << (2 * dn));
152 pci_write_config_byte(pdev, 0x54, ultra); 153 pci_write_config_byte(pdev, 0x54, ultra);
153 } 154 }
154 155
155 /** 156 /**
156 * artop6260_load_piomode - Initialize host controller PATA PIO timings 157 * artop6260_load_piomode - Initialize host controller PATA PIO timings
157 * @ap: Port whose timings we are configuring 158 * @ap: Port whose timings we are configuring
158 * @adev: Device we are configuring 159 * @adev: Device we are configuring
159 * @pio: PIO mode 160 * @pio: PIO mode
160 * 161 *
161 * Set PIO mode for device, in host controller PCI config space. The 162 * Set PIO mode for device, in host controller PCI config space. The
162 * ARTOP6260 and relatives store the timing data differently. 163 * ARTOP6260 and relatives store the timing data differently.
163 * 164 *
164 * LOCKING: 165 * LOCKING:
165 * None (inherited from caller). 166 * None (inherited from caller).
166 */ 167 */
167 168
168 static void artop6260_load_piomode (struct ata_port *ap, struct ata_device *adev, unsigned int pio) 169 static void artop6260_load_piomode (struct ata_port *ap, struct ata_device *adev, unsigned int pio)
169 { 170 {
170 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 171 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
171 int dn = adev->devno + 2 * ap->port_no; 172 int dn = adev->devno + 2 * ap->port_no;
172 const u8 timing[2][5] = { 173 const u8 timing[2][5] = {
173 { 0x00, 0x0A, 0x08, 0x33, 0x31 }, 174 { 0x00, 0x0A, 0x08, 0x33, 0x31 },
174 { 0x70, 0x7A, 0x78, 0x43, 0x41 } 175 { 0x70, 0x7A, 0x78, 0x43, 0x41 }
175 176
176 }; 177 };
177 /* Load the PIO timing active/recovery bits */ 178 /* Load the PIO timing active/recovery bits */
178 pci_write_config_byte(pdev, 0x40 + dn, timing[clock][pio]); 179 pci_write_config_byte(pdev, 0x40 + dn, timing[clock][pio]);
179 } 180 }
180 181
181 /** 182 /**
182 * artop6260_set_piomode - Initialize host controller PATA PIO timings 183 * artop6260_set_piomode - Initialize host controller PATA PIO timings
183 * @ap: Port whose timings we are configuring 184 * @ap: Port whose timings we are configuring
184 * @adev: Device we are configuring 185 * @adev: Device we are configuring
185 * 186 *
186 * Set PIO mode for device, in host controller PCI config space. For 187 * Set PIO mode for device, in host controller PCI config space. For
187 * ARTOP we must also clear the UDMA bits if we are not doing UDMA. In 188 * ARTOP we must also clear the UDMA bits if we are not doing UDMA. In
188 * the event UDMA is used the later call to set_dmamode will set the 189 * the event UDMA is used the later call to set_dmamode will set the
189 * bits as required. 190 * bits as required.
190 * 191 *
191 * LOCKING: 192 * LOCKING:
192 * None (inherited from caller). 193 * None (inherited from caller).
193 */ 194 */
194 195
195 static void artop6260_set_piomode(struct ata_port *ap, struct ata_device *adev) 196 static void artop6260_set_piomode(struct ata_port *ap, struct ata_device *adev)
196 { 197 {
197 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 198 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
198 u8 ultra; 199 u8 ultra;
199 200
200 artop6260_load_piomode(ap, adev, adev->pio_mode - XFER_PIO_0); 201 artop6260_load_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
201 202
202 /* Clear the UDMA mode bits (set_dmamode will redo this if needed) */ 203 /* Clear the UDMA mode bits (set_dmamode will redo this if needed) */
203 pci_read_config_byte(pdev, 0x44 + ap->port_no, &ultra); 204 pci_read_config_byte(pdev, 0x44 + ap->port_no, &ultra);
204 ultra &= ~(7 << (4 * adev->devno)); /* One nibble per drive */ 205 ultra &= ~(7 << (4 * adev->devno)); /* One nibble per drive */
205 pci_write_config_byte(pdev, 0x44 + ap->port_no, ultra); 206 pci_write_config_byte(pdev, 0x44 + ap->port_no, ultra);
206 } 207 }
207 208
208 /** 209 /**
209 * artop6210_set_dmamode - Initialize host controller PATA DMA timings 210 * artop6210_set_dmamode - Initialize host controller PATA DMA timings
210 * @ap: Port whose timings we are configuring 211 * @ap: Port whose timings we are configuring
211 * @adev: Device whose timings we are configuring 212 * @adev: Device whose timings we are configuring
212 * 213 *
213 * Set DMA mode for device, in host controller PCI config space. 214 * Set DMA mode for device, in host controller PCI config space.
214 * 215 *
215 * LOCKING: 216 * LOCKING:
216 * None (inherited from caller). 217 * None (inherited from caller).
217 */ 218 */
218 219
219 static void artop6210_set_dmamode (struct ata_port *ap, struct ata_device *adev) 220 static void artop6210_set_dmamode (struct ata_port *ap, struct ata_device *adev)
220 { 221 {
221 unsigned int pio; 222 unsigned int pio;
222 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 223 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
223 int dn = adev->devno + 2 * ap->port_no; 224 int dn = adev->devno + 2 * ap->port_no;
224 u8 ultra; 225 u8 ultra;
225 226
226 if (adev->dma_mode == XFER_MW_DMA_0) 227 if (adev->dma_mode == XFER_MW_DMA_0)
227 pio = 1; 228 pio = 1;
228 else 229 else
229 pio = 4; 230 pio = 4;
230 231
231 /* Load the PIO timing active/recovery bits */ 232 /* Load the PIO timing active/recovery bits */
232 artop6210_load_piomode(ap, adev, pio); 233 artop6210_load_piomode(ap, adev, pio);
233 234
234 pci_read_config_byte(pdev, 0x54, &ultra); 235 pci_read_config_byte(pdev, 0x54, &ultra);
235 ultra &= ~(3 << (2 * dn)); 236 ultra &= ~(3 << (2 * dn));
236 237
237 /* Add ultra DMA bits if in UDMA mode */ 238 /* Add ultra DMA bits if in UDMA mode */
238 if (adev->dma_mode >= XFER_UDMA_0) { 239 if (adev->dma_mode >= XFER_UDMA_0) {
239 u8 mode = (adev->dma_mode - XFER_UDMA_0) + 1 - clock; 240 u8 mode = (adev->dma_mode - XFER_UDMA_0) + 1 - clock;
240 if (mode == 0) 241 if (mode == 0)
241 mode = 1; 242 mode = 1;
242 ultra |= (mode << (2 * dn)); 243 ultra |= (mode << (2 * dn));
243 } 244 }
244 pci_write_config_byte(pdev, 0x54, ultra); 245 pci_write_config_byte(pdev, 0x54, ultra);
245 } 246 }
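
For reference, with clock = 0 the 2-bit field that artop6210_set_dmamode() programs into register 0x54 comes out as 1..3 for UDMA0..UDMA2, and shifts down by one when clock = 1 (clamped so 0 is never written). A minimal user-space sketch of that arithmetic, with the values read straight off the code above:

#include <stdio.h>

int main(void)
{
	/* Worked example of the 2-bit UDMA field computed in
	 * artop6210_set_dmamode(); it lands at bits (2 * dn) of
	 * config register 0x54, where dn = devno + 2 * port_no. */
	for (int clock = 0; clock <= 1; clock++) {
		for (int udma = 0; udma <= 2; udma++) {	/* the 6210 tops out at UDMA2 */
			int mode = udma + 1 - clock;
			if (mode == 0)
				mode = 1;	/* never program the "off" value */
			printf("clock=%d UDMA%d -> field %d\n", clock, udma, mode);
		}
	}
	return 0;
}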
246 247
247 /** 248 /**
249 * artop6260_set_dmamode - Initialize host controller PATA DMA timings 250 * artop6260_set_dmamode - Initialize host controller PATA DMA timings
249 * @ap: Port whose timings we are configuring 250 * @ap: Port whose timings we are configuring
250 * @adev: Device we are configuring 251 * @adev: Device we are configuring
251 * 252 *
252 * Set DMA mode for device, in host controller PCI config space. The 253 * Set DMA mode for device, in host controller PCI config space. The
253 * ARTOP6260 and relatives store the timing data differently. 254 * ARTOP6260 and relatives store the timing data differently.
254 * 255 *
255 * LOCKING: 256 * LOCKING:
256 * None (inherited from caller). 257 * None (inherited from caller).
257 */ 258 */
258 259
259 static void artop6260_set_dmamode (struct ata_port *ap, struct ata_device *adev) 260 static void artop6260_set_dmamode (struct ata_port *ap, struct ata_device *adev)
260 { 261 {
261 unsigned int pio; 262 unsigned int pio;
262 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 263 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
263 u8 ultra; 264 u8 ultra;
264 265
265 if (adev->dma_mode == XFER_MW_DMA_0) 266 if (adev->dma_mode == XFER_MW_DMA_0)
266 pio = 1; 267 pio = 1;
267 else 268 else
268 pio = 4; 269 pio = 4;
269 270
270 /* Load the PIO timing active/recovery bits */ 271 /* Load the PIO timing active/recovery bits */
271 artop6260_load_piomode(ap, adev, pio); 272 artop6260_load_piomode(ap, adev, pio);
272 273
273 /* Add ultra DMA bits if in UDMA mode */ 274 /* Add ultra DMA bits if in UDMA mode */
274 pci_read_config_byte(pdev, 0x44 + ap->port_no, &ultra); 275 pci_read_config_byte(pdev, 0x44 + ap->port_no, &ultra);
275 ultra &= ~(7 << (4 * adev->devno)); /* One nibble per drive */ 276 ultra &= ~(7 << (4 * adev->devno)); /* One nibble per drive */
276 if (adev->dma_mode >= XFER_UDMA_0) { 277 if (adev->dma_mode >= XFER_UDMA_0) {
277 u8 mode = adev->dma_mode - XFER_UDMA_0 + 1 - clock; 278 u8 mode = adev->dma_mode - XFER_UDMA_0 + 1 - clock;
278 if (mode == 0) 279 if (mode == 0)
279 mode = 1; 280 mode = 1;
280 ultra |= (mode << (4 * adev->devno)); 281 ultra |= (mode << (4 * adev->devno));
281 } 282 }
282 pci_write_config_byte(pdev, 0x44 + ap->port_no, ultra); 283 pci_write_config_byte(pdev, 0x44 + ap->port_no, ultra);
283 } 284 }
284 285
285 /** 286 /**
286 * artop6210_qc_defer - implement serialization 287 * artop6210_qc_defer - implement serialization
287 * @qc: command 288 * @qc: command
288 * 289 *
289 * Only one command may be in flight per host on this chip. 290 * Only one command may be in flight per host on this chip.
290 */ 291 */
291 292
292 static int artop6210_qc_defer(struct ata_queued_cmd *qc) 293 static int artop6210_qc_defer(struct ata_queued_cmd *qc)
293 { 294 {
294 struct ata_host *host = qc->ap->host; 295 struct ata_host *host = qc->ap->host;
295 struct ata_port *alt = host->ports[1 ^ qc->ap->port_no]; 296 struct ata_port *alt = host->ports[1 ^ qc->ap->port_no];
296 int rc; 297 int rc;
297 298
298 /* First apply the usual rules */ 299 /* First apply the usual rules */
299 rc = ata_std_qc_defer(qc); 300 rc = ata_std_qc_defer(qc);
300 if (rc != 0) 301 if (rc != 0)
301 return rc; 302 return rc;
302 303
303 /* Now apply serialization rules. Only allow a command if the 304 /* Now apply serialization rules. Only allow a command if the
304 other channel state machine is idle */ 305 other channel state machine is idle */
305 if (alt && alt->qc_active) 306 if (alt && alt->qc_active)
306 return ATA_DEFER_PORT; 307 return ATA_DEFER_PORT;
307 return 0; 308 return 0;
308 } 309 }
309 310
310 static struct scsi_host_template artop_sht = { 311 static struct scsi_host_template artop_sht = {
311 ATA_BMDMA_SHT(DRV_NAME), 312 ATA_BMDMA_SHT(DRV_NAME),
312 }; 313 };
313 314
314 static struct ata_port_operations artop6210_ops = { 315 static struct ata_port_operations artop6210_ops = {
315 .inherits = &ata_bmdma_port_ops, 316 .inherits = &ata_bmdma_port_ops,
316 .cable_detect = ata_cable_40wire, 317 .cable_detect = ata_cable_40wire,
317 .set_piomode = artop6210_set_piomode, 318 .set_piomode = artop6210_set_piomode,
318 .set_dmamode = artop6210_set_dmamode, 319 .set_dmamode = artop6210_set_dmamode,
319 .prereset = artop6210_pre_reset, 320 .prereset = artop6210_pre_reset,
320 .qc_defer = artop6210_qc_defer, 321 .qc_defer = artop6210_qc_defer,
321 }; 322 };
322 323
323 static struct ata_port_operations artop6260_ops = { 324 static struct ata_port_operations artop6260_ops = {
324 .inherits = &ata_bmdma_port_ops, 325 .inherits = &ata_bmdma_port_ops,
325 .cable_detect = artop6260_cable_detect, 326 .cable_detect = artop6260_cable_detect,
326 .set_piomode = artop6260_set_piomode, 327 .set_piomode = artop6260_set_piomode,
327 .set_dmamode = artop6260_set_dmamode, 328 .set_dmamode = artop6260_set_dmamode,
328 .prereset = artop6260_pre_reset, 329 .prereset = artop6260_pre_reset,
329 }; 330 };
330 331
331 332
332 /** 333 /**
333 * artop_init_one - Register ARTOP ATA PCI device with kernel services 334 * artop_init_one - Register ARTOP ATA PCI device with kernel services
334 * @pdev: PCI device to register 335 * @pdev: PCI device to register
335 * @id: Entry in artop_pci_tbl matching @pdev 336 * @id: Entry in artop_pci_tbl matching @pdev
336 * 337 *
337 * Called from kernel PCI layer. 338 * Called from kernel PCI layer.
338 * 339 *
339 * LOCKING: 340 * LOCKING:
340 * Inherited from PCI layer (may sleep). 341 * Inherited from PCI layer (may sleep).
341 * 342 *
342 * RETURNS: 343 * RETURNS:
343 * Zero on success, or -ERRNO value. 344 * Zero on success, or -ERRNO value.
344 */ 345 */
345 346
346 static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id) 347 static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
347 { 348 {
348 static int printed_version; 349 static int printed_version;
349 static const struct ata_port_info info_6210 = { 350 static const struct ata_port_info info_6210 = {
350 .flags = ATA_FLAG_SLAVE_POSS, 351 .flags = ATA_FLAG_SLAVE_POSS,
351 .pio_mask = ATA_PIO4, 352 .pio_mask = ATA_PIO4,
352 .mwdma_mask = ATA_MWDMA2, 353 .mwdma_mask = ATA_MWDMA2,
353 .udma_mask = ATA_UDMA2, 354 .udma_mask = ATA_UDMA2,
354 .port_ops = &artop6210_ops, 355 .port_ops = &artop6210_ops,
355 }; 356 };
356 static const struct ata_port_info info_626x = { 357 static const struct ata_port_info info_626x = {
357 .flags = ATA_FLAG_SLAVE_POSS, 358 .flags = ATA_FLAG_SLAVE_POSS,
358 .pio_mask = ATA_PIO4, 359 .pio_mask = ATA_PIO4,
359 .mwdma_mask = ATA_MWDMA2, 360 .mwdma_mask = ATA_MWDMA2,
360 .udma_mask = ATA_UDMA4, 361 .udma_mask = ATA_UDMA4,
361 .port_ops = &artop6260_ops, 362 .port_ops = &artop6260_ops,
362 }; 363 };
363 static const struct ata_port_info info_628x = { 364 static const struct ata_port_info info_628x = {
364 .flags = ATA_FLAG_SLAVE_POSS, 365 .flags = ATA_FLAG_SLAVE_POSS,
365 .pio_mask = ATA_PIO4, 366 .pio_mask = ATA_PIO4,
366 .mwdma_mask = ATA_MWDMA2, 367 .mwdma_mask = ATA_MWDMA2,
367 .udma_mask = ATA_UDMA5, 368 .udma_mask = ATA_UDMA5,
368 .port_ops = &artop6260_ops, 369 .port_ops = &artop6260_ops,
369 }; 370 };
370 static const struct ata_port_info info_628x_fast = { 371 static const struct ata_port_info info_628x_fast = {
371 .flags = ATA_FLAG_SLAVE_POSS, 372 .flags = ATA_FLAG_SLAVE_POSS,
372 .pio_mask = ATA_PIO4, 373 .pio_mask = ATA_PIO4,
373 .mwdma_mask = ATA_MWDMA2, 374 .mwdma_mask = ATA_MWDMA2,
374 .udma_mask = ATA_UDMA6, 375 .udma_mask = ATA_UDMA6,
375 .port_ops = &artop6260_ops, 376 .port_ops = &artop6260_ops,
376 }; 377 };
377 const struct ata_port_info *ppi[] = { NULL, NULL }; 378 const struct ata_port_info *ppi[] = { NULL, NULL };
378 int rc; 379 int rc;
379 380
380 if (!printed_version++) 381 if (!printed_version++)
381 dev_printk(KERN_DEBUG, &pdev->dev, 382 dev_printk(KERN_DEBUG, &pdev->dev,
382 "version " DRV_VERSION "\n"); 383 "version " DRV_VERSION "\n");
383 384
384 rc = pcim_enable_device(pdev); 385 rc = pcim_enable_device(pdev);
385 if (rc) 386 if (rc)
386 return rc; 387 return rc;
387 388
388 if (id->driver_data == 0) { /* 6210 variant */ 389 if (id->driver_data == 0) { /* 6210 variant */
389 ppi[0] = &info_6210; 390 ppi[0] = &info_6210;
390 /* BIOS may have left us in UDMA, clear it before libata probe */ 391 /* BIOS may have left us in UDMA, clear it before libata probe */
391 pci_write_config_byte(pdev, 0x54, 0); 392 pci_write_config_byte(pdev, 0x54, 0);
392 } 393 }
393 else if (id->driver_data == 1) /* 6260 */ 394 else if (id->driver_data == 1) /* 6260 */
394 ppi[0] = &info_626x; 395 ppi[0] = &info_626x;
395 else if (id->driver_data == 2) { /* 6280 or 6280 + fast */ 396 else if (id->driver_data == 2) { /* 6280 or 6280 + fast */
396 unsigned long io = pci_resource_start(pdev, 4); 397 unsigned long io = pci_resource_start(pdev, 4);
397 u8 reg; 398 u8 reg;
398 399
399 ppi[0] = &info_628x; 400 ppi[0] = &info_628x;
400 if (inb(io) & 0x10) 401 if (inb(io) & 0x10)
401 ppi[0] = &info_628x_fast; 402 ppi[0] = &info_628x_fast;
402 /* Mac systems come up with some registers not set the 403 /* Mac systems come up with some registers not set the
403 way we will need them, so fix them up here */ 404 way we will need them, so fix them up here */
404 405
405 /* Clear reset & test bits */ 406 /* Clear reset & test bits */
406 pci_read_config_byte(pdev, 0x49, &reg); 407 pci_read_config_byte(pdev, 0x49, &reg);
407 pci_write_config_byte(pdev, 0x49, reg & ~ 0x30); 408 pci_write_config_byte(pdev, 0x49, reg & ~ 0x30);
408 409
409 /* PCI latency must be > 0x80 for burst mode, tweak it 410 /* PCI latency must be > 0x80 for burst mode, tweak it
410 * if required. 411 * if required.
411 */ 412 */
412 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &reg); 413 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &reg);
413 if (reg <= 0x80) 414 if (reg <= 0x80)
414 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x90); 415 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x90);
415 416
416 /* Enable IRQ output and burst mode */ 417 /* Enable IRQ output and burst mode */
417 pci_read_config_byte(pdev, 0x4a, &reg); 418 pci_read_config_byte(pdev, 0x4a, &reg);
418 pci_write_config_byte(pdev, 0x4a, (reg & ~0x01) | 0x80); 419 pci_write_config_byte(pdev, 0x4a, (reg & ~0x01) | 0x80);
419 420
420 } 421 }
421 422
422 BUG_ON(ppi[0] == NULL); 423 BUG_ON(ppi[0] == NULL);
423 424
424 return ata_pci_bmdma_init_one(pdev, ppi, &artop_sht, NULL, 0); 425 return ata_pci_bmdma_init_one(pdev, ppi, &artop_sht, NULL, 0);
425 } 426 }
426 427
427 static const struct pci_device_id artop_pci_tbl[] = { 428 static const struct pci_device_id artop_pci_tbl[] = {
428 { PCI_VDEVICE(ARTOP, 0x0005), 0 }, 429 { PCI_VDEVICE(ARTOP, 0x0005), 0 },
429 { PCI_VDEVICE(ARTOP, 0x0006), 1 }, 430 { PCI_VDEVICE(ARTOP, 0x0006), 1 },
430 { PCI_VDEVICE(ARTOP, 0x0007), 1 }, 431 { PCI_VDEVICE(ARTOP, 0x0007), 1 },
431 { PCI_VDEVICE(ARTOP, 0x0008), 2 }, 432 { PCI_VDEVICE(ARTOP, 0x0008), 2 },
432 { PCI_VDEVICE(ARTOP, 0x0009), 2 }, 433 { PCI_VDEVICE(ARTOP, 0x0009), 2 },
433 434
434 { } /* terminate list */ 435 { } /* terminate list */
435 }; 436 };
436 437
437 static struct pci_driver artop_pci_driver = { 438 static struct pci_driver artop_pci_driver = {
438 .name = DRV_NAME, 439 .name = DRV_NAME,
439 .id_table = artop_pci_tbl, 440 .id_table = artop_pci_tbl,
440 .probe = artop_init_one, 441 .probe = artop_init_one,
441 .remove = ata_pci_remove_one, 442 .remove = ata_pci_remove_one,
442 }; 443 };
443 444
444 static int __init artop_init(void) 445 static int __init artop_init(void)
445 { 446 {
446 return pci_register_driver(&artop_pci_driver); 447 return pci_register_driver(&artop_pci_driver);
447 } 448 }
448 449
449 static void __exit artop_exit(void) 450 static void __exit artop_exit(void)
450 { 451 {
451 pci_unregister_driver(&artop_pci_driver); 452 pci_unregister_driver(&artop_pci_driver);
452 } 453 }
453 454
454 module_init(artop_init); 455 module_init(artop_init);
455 module_exit(artop_exit); 456 module_exit(artop_exit);
456 457
457 MODULE_AUTHOR("Alan Cox"); 458 MODULE_AUTHOR("Alan Cox");
458 MODULE_DESCRIPTION("SCSI low-level driver for ARTOP PATA"); 459 MODULE_DESCRIPTION("SCSI low-level driver for ARTOP PATA");
459 MODULE_LICENSE("GPL"); 460 MODULE_LICENSE("GPL");
460 MODULE_DEVICE_TABLE(pci, artop_pci_tbl); 461 MODULE_DEVICE_TABLE(pci, artop_pci_tbl);
461 MODULE_VERSION(DRV_VERSION); 462 MODULE_VERSION(DRV_VERSION);
462 463
463 464
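A note on the one functional change in this file: the old test `pdev->device % 1` is always zero for any integer, so the -R enable-bit check in artop6260_pre_reset() never ran; the fix uses `pdev->device & 1`, which actually isolates the low bit. A stand-alone sketch over the IDs from artop_pci_tbl above (user space, purely illustrative):

#include <stdio.h>

int main(void)
{
	/* Device IDs from artop_pci_tbl; per the driver comment, the
	 * odd-numbered ones are the -R cards with port enable bits. */
	static const unsigned short ids[] = { 0x0005, 0x0006, 0x0007, 0x0008, 0x0009 };

	for (int i = 0; i < 5; i++) {
		int buggy = ids[i] % 1;	/* always 0: remainder mod 1 */
		int fixed = ids[i] & 1;	/* low bit: 1 for odd IDs */
		printf("0x%04x: %% 1 -> %d, & 1 -> %d\n",
		       (unsigned)ids[i], buggy, fixed);
	}
	return 0;
}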
drivers/ata/pata_via.c
1 /* 1 /*
2 * pata_via.c - VIA PATA for new ATA layer 2 * pata_via.c - VIA PATA for new ATA layer
3 * (C) 2005-2006 Red Hat Inc 3 * (C) 2005-2006 Red Hat Inc
4 * 4 *
5 * Documentation 5 * Documentation
6 * Most chipset documentation available under NDA only 6 * Most chipset documentation available under NDA only
7 * 7 *
8 * VIA version guide 8 * VIA version guide
9 * VIA VT82C561 - early design, uses ata_generic currently 9 * VIA VT82C561 - early design, uses ata_generic currently
10 * VIA VT82C576 - MWDMA, 33 MHz 10 * VIA VT82C576 - MWDMA, 33 MHz
11 * VIA VT82C586 - MWDMA, 33 MHz 11 * VIA VT82C586 - MWDMA, 33 MHz
12 * VIA VT82C586a - Added UDMA to 33 MHz 12 * VIA VT82C586a - Added UDMA to 33 MHz
13 * VIA VT82C586b - UDMA33 13 * VIA VT82C586b - UDMA33
14 * VIA VT82C596a - Nonfunctional UDMA66 14 * VIA VT82C596a - Nonfunctional UDMA66
15 * VIA VT82C596b - Working UDMA66 15 * VIA VT82C596b - Working UDMA66
16 * VIA VT82C686 - Nonfunctional UDMA66 16 * VIA VT82C686 - Nonfunctional UDMA66
17 * VIA VT82C686a - Working UDMA66 17 * VIA VT82C686a - Working UDMA66
18 * VIA VT82C686b - Updated to UDMA100 18 * VIA VT82C686b - Updated to UDMA100
19 * VIA VT8231 - UDMA100 19 * VIA VT8231 - UDMA100
20 * VIA VT8233 - UDMA100 20 * VIA VT8233 - UDMA100
21 * VIA VT8233a - UDMA133 21 * VIA VT8233a - UDMA133
22 * VIA VT8233c - UDMA100 22 * VIA VT8233c - UDMA100
23 * VIA VT8235 - UDMA133 23 * VIA VT8235 - UDMA133
24 * VIA VT8237 - UDMA133 24 * VIA VT8237 - UDMA133
25 * VIA VT8237A - UDMA133 25 * VIA VT8237A - UDMA133
26 * VIA VT8237S - UDMA133 26 * VIA VT8237S - UDMA133
27 * VIA VT8251 - UDMA133 27 * VIA VT8251 - UDMA133
28 * 28 *
29 * Most registers remain compatible across chips. Others start reserved 29 * Most registers remain compatible across chips. Others start reserved
30 * and acquire sensible semantics if set to 1 (e.g. cable detect). A few 30 * and acquire sensible semantics if set to 1 (e.g. cable detect). A few
31 * exceptions exist, notably around the FIFO settings. 31 * exceptions exist, notably around the FIFO settings.
32 * 32 *
33 * One additional quirk of the VIA design is that, like ALi, they use few 33 * One additional quirk of the VIA design is that, like ALi, they use few
34 * PCI IDs for a lot of chips. 34 * PCI IDs for a lot of chips.
35 * 35 *
36 * Based heavily on: 36 * Based heavily on:
37 * 37 *
38 * Version 3.38 38 * Version 3.38
39 * 39 *
40 * VIA IDE driver for Linux. Supported southbridges: 40 * VIA IDE driver for Linux. Supported southbridges:
41 * 41 *
42 * vt82c576, vt82c586, vt82c586a, vt82c586b, vt82c596a, vt82c596b, 42 * vt82c576, vt82c586, vt82c586a, vt82c586b, vt82c596a, vt82c596b,
43 * vt82c686, vt82c686a, vt82c686b, vt8231, vt8233, vt8233c, vt8233a, 43 * vt82c686, vt82c686a, vt82c686b, vt8231, vt8233, vt8233c, vt8233a,
44 * vt8235, vt8237 44 * vt8235, vt8237
45 * 45 *
46 * Copyright (c) 2000-2002 Vojtech Pavlik 46 * Copyright (c) 2000-2002 Vojtech Pavlik
47 * 47 *
48 * Based on the work of: 48 * Based on the work of:
49 * Michel Aubry 49 * Michel Aubry
50 * Jeff Garzik 50 * Jeff Garzik
51 * Andre Hedrick 51 * Andre Hedrick
52 52
53 */ 53 */
54 54
55 #include <linux/kernel.h> 55 #include <linux/kernel.h>
56 #include <linux/module.h> 56 #include <linux/module.h>
57 #include <linux/pci.h> 57 #include <linux/pci.h>
58 #include <linux/init.h> 58 #include <linux/init.h>
59 #include <linux/blkdev.h> 59 #include <linux/blkdev.h>
60 #include <linux/delay.h> 60 #include <linux/delay.h>
61 #include <linux/gfp.h> 61 #include <linux/gfp.h>
62 #include <scsi/scsi_host.h> 62 #include <scsi/scsi_host.h>
63 #include <linux/libata.h> 63 #include <linux/libata.h>
64 #include <linux/dmi.h> 64 #include <linux/dmi.h>
65 65
66 #define DRV_NAME "pata_via" 66 #define DRV_NAME "pata_via"
67 #define DRV_VERSION "0.3.4" 67 #define DRV_VERSION "0.3.4"
68 68
69 enum { 69 enum {
70 VIA_BAD_PREQ = 0x01, /* Crashes if PREQ# till DDACK# set */ 70 VIA_BAD_PREQ = 0x01, /* Crashes if PREQ# till DDACK# set */
71 VIA_BAD_CLK66 = 0x02, /* 66 MHz clock doesn't work correctly */ 71 VIA_BAD_CLK66 = 0x02, /* 66 MHz clock doesn't work correctly */
72 VIA_SET_FIFO = 0x04, /* Needs to have FIFO split set */ 72 VIA_SET_FIFO = 0x04, /* Needs to have FIFO split set */
73 VIA_NO_UNMASK = 0x08, /* Doesn't work with IRQ unmasking on */ 73 VIA_NO_UNMASK = 0x08, /* Doesn't work with IRQ unmasking on */
74 VIA_BAD_ID = 0x10, /* Has wrong vendor ID (0x1107) */ 74 VIA_BAD_ID = 0x10, /* Has wrong vendor ID (0x1107) */
75 VIA_BAD_AST = 0x20, /* Don't touch Address Setup Timing */ 75 VIA_BAD_AST = 0x20, /* Don't touch Address Setup Timing */
76 VIA_NO_ENABLES = 0x40, /* Has no enablebits */ 76 VIA_NO_ENABLES = 0x40, /* Has no enablebits */
77 VIA_SATA_PATA = 0x80, /* SATA/PATA combined configuration */ 77 VIA_SATA_PATA = 0x80, /* SATA/PATA combined configuration */
78 }; 78 };
79 79
80 enum { 80 enum {
81 VIA_IDFLAG_SINGLE = (1 << 0), /* single channel controller */ 81 VIA_IDFLAG_SINGLE = (1 << 0), /* single channel controller */
82 }; 82 };
83 83
84 /* 84 /*
85 * VIA SouthBridge chips. 85 * VIA SouthBridge chips.
86 */ 86 */
87 87
88 static const struct via_isa_bridge { 88 static const struct via_isa_bridge {
89 const char *name; 89 const char *name;
90 u16 id; 90 u16 id;
91 u8 rev_min; 91 u8 rev_min;
92 u8 rev_max; 92 u8 rev_max;
93 u8 udma_mask; 93 u8 udma_mask;
94 u8 flags; 94 u8 flags;
95 } via_isa_bridges[] = { 95 } via_isa_bridges[] = {
96 { "vx855", PCI_DEVICE_ID_VIA_VX855, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA }, 96 { "vx855", PCI_DEVICE_ID_VIA_VX855, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA },
97 { "vx800", PCI_DEVICE_ID_VIA_VX800, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA }, 97 { "vx800", PCI_DEVICE_ID_VIA_VX800, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA },
98 { "vt8261", PCI_DEVICE_ID_VIA_8261, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, 98 { "vt8261", PCI_DEVICE_ID_VIA_8261, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
99 { "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, 99 { "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
100 { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, 100 { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
101 { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA }, 101 { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA },
102 { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_NO_ENABLES }, 102 { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_NO_ENABLES },
103 { "vt6415", PCI_DEVICE_ID_VIA_6415, 0x00, 0xff, ATA_UDMA6, VIA_BAD_AST | VIA_NO_ENABLES }, 103 { "vt6415", PCI_DEVICE_ID_VIA_6415, 0x00, 0xff, ATA_UDMA6, VIA_BAD_AST | VIA_NO_ENABLES },
104 { "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, 104 { "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
105 { "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, 105 { "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
106 { "vt8235", PCI_DEVICE_ID_VIA_8235, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, 106 { "vt8235", PCI_DEVICE_ID_VIA_8235, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
107 { "vt8233a", PCI_DEVICE_ID_VIA_8233A, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, 107 { "vt8233a", PCI_DEVICE_ID_VIA_8233A, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
108 { "vt8233c", PCI_DEVICE_ID_VIA_8233C_0, 0x00, 0x2f, ATA_UDMA5, }, 108 { "vt8233c", PCI_DEVICE_ID_VIA_8233C_0, 0x00, 0x2f, ATA_UDMA5, },
109 { "vt8233", PCI_DEVICE_ID_VIA_8233_0, 0x00, 0x2f, ATA_UDMA5, }, 109 { "vt8233", PCI_DEVICE_ID_VIA_8233_0, 0x00, 0x2f, ATA_UDMA5, },
110 { "vt8231", PCI_DEVICE_ID_VIA_8231, 0x00, 0x2f, ATA_UDMA5, }, 110 { "vt8231", PCI_DEVICE_ID_VIA_8231, 0x00, 0x2f, ATA_UDMA5, },
111 { "vt82c686b", PCI_DEVICE_ID_VIA_82C686, 0x40, 0x4f, ATA_UDMA5, }, 111 { "vt82c686b", PCI_DEVICE_ID_VIA_82C686, 0x40, 0x4f, ATA_UDMA5, },
112 { "vt82c686a", PCI_DEVICE_ID_VIA_82C686, 0x10, 0x2f, ATA_UDMA4, }, 112 { "vt82c686a", PCI_DEVICE_ID_VIA_82C686, 0x10, 0x2f, ATA_UDMA4, },
113 { "vt82c686", PCI_DEVICE_ID_VIA_82C686, 0x00, 0x0f, ATA_UDMA2, VIA_BAD_CLK66 }, 113 { "vt82c686", PCI_DEVICE_ID_VIA_82C686, 0x00, 0x0f, ATA_UDMA2, VIA_BAD_CLK66 },
114 { "vt82c596b", PCI_DEVICE_ID_VIA_82C596, 0x10, 0x2f, ATA_UDMA4, }, 114 { "vt82c596b", PCI_DEVICE_ID_VIA_82C596, 0x10, 0x2f, ATA_UDMA4, },
115 { "vt82c596a", PCI_DEVICE_ID_VIA_82C596, 0x00, 0x0f, ATA_UDMA2, VIA_BAD_CLK66 }, 115 { "vt82c596a", PCI_DEVICE_ID_VIA_82C596, 0x00, 0x0f, ATA_UDMA2, VIA_BAD_CLK66 },
116 { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x47, 0x4f, ATA_UDMA2, VIA_SET_FIFO }, 116 { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x47, 0x4f, ATA_UDMA2, VIA_SET_FIFO },
117 { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x40, 0x46, ATA_UDMA2, VIA_SET_FIFO | VIA_BAD_PREQ }, 117 { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x40, 0x46, ATA_UDMA2, VIA_SET_FIFO | VIA_BAD_PREQ },
118 { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x30, 0x3f, ATA_UDMA2, VIA_SET_FIFO }, 118 { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x30, 0x3f, ATA_UDMA2, VIA_SET_FIFO },
119 { "vt82c586a", PCI_DEVICE_ID_VIA_82C586_0, 0x20, 0x2f, ATA_UDMA2, VIA_SET_FIFO }, 119 { "vt82c586a", PCI_DEVICE_ID_VIA_82C586_0, 0x20, 0x2f, ATA_UDMA2, VIA_SET_FIFO },
120 { "vt82c586", PCI_DEVICE_ID_VIA_82C586_0, 0x00, 0x0f, 0x00, VIA_SET_FIFO }, 120 { "vt82c586", PCI_DEVICE_ID_VIA_82C586_0, 0x00, 0x0f, 0x00, VIA_SET_FIFO },
121 { "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, 0x00, VIA_SET_FIFO | VIA_NO_UNMASK }, 121 { "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, 0x00, VIA_SET_FIFO | VIA_NO_UNMASK },
122 { "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, 0x00, VIA_SET_FIFO | VIA_NO_UNMASK | VIA_BAD_ID }, 122 { "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, 0x00, VIA_SET_FIFO | VIA_NO_UNMASK | VIA_BAD_ID },
123 { "vtxxxx", PCI_DEVICE_ID_VIA_ANON, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, 123 { "vtxxxx", PCI_DEVICE_ID_VIA_ANON, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
124 { NULL } 124 { NULL }
125 }; 125 };
126 126
127 struct via_port { 127 struct via_port {
128 u8 cached_device; 128 u8 cached_device;
129 }; 129 };
130 130
131 /* 131 /*
132 * Cable special cases 132 * Cable special cases
133 */ 133 */
134 134
135 static const struct dmi_system_id cable_dmi_table[] = { 135 static const struct dmi_system_id cable_dmi_table[] = {
136 { 136 {
137 .ident = "Acer Ferrari 3400", 137 .ident = "Acer Ferrari 3400",
138 .matches = { 138 .matches = {
139 DMI_MATCH(DMI_BOARD_VENDOR, "Acer,Inc."), 139 DMI_MATCH(DMI_BOARD_VENDOR, "Acer,Inc."),
140 DMI_MATCH(DMI_BOARD_NAME, "Ferrari 3400"), 140 DMI_MATCH(DMI_BOARD_NAME, "Ferrari 3400"),
141 }, 141 },
142 }, 142 },
143 { } 143 { }
144 }; 144 };
145 145
146 static int via_cable_override(struct pci_dev *pdev) 146 static int via_cable_override(struct pci_dev *pdev)
147 { 147 {
148 /* Systems by DMI */ 148 /* Systems by DMI */
149 if (dmi_check_system(cable_dmi_table)) 149 if (dmi_check_system(cable_dmi_table))
150 return 1; 150 return 1;
151 /* Arima W730-K8/Targa Visionary 811/... */ 151 /* Arima W730-K8/Targa Visionary 811/... */
152 if (pdev->subsystem_vendor == 0x161F && pdev->subsystem_device == 0x2032) 152 if (pdev->subsystem_vendor == 0x161F && pdev->subsystem_device == 0x2032)
153 return 1; 153 return 1;
154 return 0; 154 return 0;
155 } 155 }
156 156
157 157
158 /** 158 /**
159 * via_cable_detect - cable detection 159 * via_cable_detect - cable detection
160 * @ap: ATA port 160 * @ap: ATA port
161 * 161 *
162 * Perform cable detection. Actually for the VIA case the BIOS 162 * Perform cable detection. Actually for the VIA case the BIOS
163 * already did this for us. We read the values provided by the 163 * already did this for us. We read the values provided by the
164 * BIOS. If you are using an 8235 in a non-PC configuration you 164 * BIOS. If you are using an 8235 in a non-PC configuration you
165 * may need to update this code. 165 * may need to update this code.
166 * 166 *
167 * Hotplug also impacts this. 167 * Hotplug also impacts this.
168 */ 168 */
169 169
170 static int via_cable_detect(struct ata_port *ap) { 170 static int via_cable_detect(struct ata_port *ap) {
171 const struct via_isa_bridge *config = ap->host->private_data; 171 const struct via_isa_bridge *config = ap->host->private_data;
172 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 172 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
173 u32 ata66; 173 u32 ata66;
174 174
175 if (via_cable_override(pdev)) 175 if (via_cable_override(pdev))
176 return ATA_CBL_PATA40_SHORT; 176 return ATA_CBL_PATA40_SHORT;
177 177
178 if ((config->flags & VIA_SATA_PATA) && ap->port_no == 0) 178 if ((config->flags & VIA_SATA_PATA) && ap->port_no == 0)
179 return ATA_CBL_SATA; 179 return ATA_CBL_SATA;
180 180
181 /* Early chips are 40 wire */ 181 /* Early chips are 40 wire */
182 if (config->udma_mask < ATA_UDMA4) 182 if (config->udma_mask < ATA_UDMA4)
183 return ATA_CBL_PATA40; 183 return ATA_CBL_PATA40;
184 /* UDMA 66 chips have only drive side logic */ 184 /* UDMA 66 chips have only drive side logic */
185 else if (config->udma_mask < ATA_UDMA5) 185 else if (config->udma_mask < ATA_UDMA5)
186 return ATA_CBL_PATA_UNK; 186 return ATA_CBL_PATA_UNK;
187 /* UDMA 100 or later */ 187 /* UDMA 100 or later */
188 pci_read_config_dword(pdev, 0x50, &ata66); 188 pci_read_config_dword(pdev, 0x50, &ata66);
189 /* Check both of the drive cable reporting bits; we might not have 189 /* Check both of the drive cable reporting bits; we might not have
190 two drives */ 190 two drives */
191 if (ata66 & (0x10100000 >> (16 * ap->port_no))) 191 if (ata66 & (0x10100000 >> (16 * ap->port_no)))
192 return ATA_CBL_PATA80; 192 return ATA_CBL_PATA80;
193 /* Check with ACPI so we can spot BIOS reported SATA bridges */ 193 /* Check with ACPI so we can spot BIOS reported SATA bridges */
194 if (ata_acpi_init_gtm(ap) && 194 if (ata_acpi_init_gtm(ap) &&
195 ata_acpi_cbl_80wire(ap, ata_acpi_init_gtm(ap))) 195 ata_acpi_cbl_80wire(ap, ata_acpi_init_gtm(ap)))
196 return ATA_CBL_PATA80; 196 return ATA_CBL_PATA80;
197 return ATA_CBL_PATA40; 197 return ATA_CBL_PATA40;
198 } 198 }
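
The mask here is dense: config dword 0x50 carries one 80-wire detect bit per drive, and 0x10100000 >> (16 * ap->port_no) picks bits 28 and 20 for port 0 or bits 12 and 4 for port 1, so either drive on the channel can report the cable. A small user-space sketch of just the mask arithmetic (the register layout is inferred from the driver, not from a datasheet):

#include <stdio.h>

int main(void)
{
	/* Bit selection used by via_cable_detect() on register 0x50 */
	for (int port_no = 0; port_no < 2; port_no++) {
		unsigned int mask = 0x10100000u >> (16 * port_no);
		printf("port %d: mask 0x%08x (bits %d and %d)\n",
		       port_no, mask,
		       28 - 16 * port_no, 20 - 16 * port_no);
	}
	return 0;
}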
199 199
200 static int via_pre_reset(struct ata_link *link, unsigned long deadline) 200 static int via_pre_reset(struct ata_link *link, unsigned long deadline)
201 { 201 {
202 struct ata_port *ap = link->ap; 202 struct ata_port *ap = link->ap;
203 const struct via_isa_bridge *config = ap->host->private_data; 203 const struct via_isa_bridge *config = ap->host->private_data;
204 204
205 if (!(config->flags & VIA_NO_ENABLES)) { 205 if (!(config->flags & VIA_NO_ENABLES)) {
206 static const struct pci_bits via_enable_bits[] = { 206 static const struct pci_bits via_enable_bits[] = {
207 { 0x40, 1, 0x02, 0x02 }, 207 { 0x40, 1, 0x02, 0x02 },
208 { 0x40, 1, 0x01, 0x01 } 208 { 0x40, 1, 0x01, 0x01 }
209 }; 209 };
210 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 210 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
211 if (!pci_test_config_bits(pdev, &via_enable_bits[ap->port_no])) 211 if (!pci_test_config_bits(pdev, &via_enable_bits[ap->port_no]))
212 return -ENOENT; 212 return -ENOENT;
213 } 213 }
214 214
215 return ata_sff_prereset(link, deadline); 215 return ata_sff_prereset(link, deadline);
216 } 216 }
217 217
218 218
219 /** 219 /**
220 * via_do_set_mode - set transfer mode data 220 * via_do_set_mode - set transfer mode data
221 * @ap: ATA interface 221 * @ap: ATA interface
222 * @adev: ATA device 222 * @adev: ATA device
223 * @mode: ATA mode being programmed 223 * @mode: ATA mode being programmed
224 * @set_ast: Set to program address setup 224 * @set_ast: Set to program address setup
225 * @udma_type: UDMA mode/format of registers 225 * @udma_type: UDMA mode/format of registers
226 * 226 *
227 * Program the VIA registers for DMA and PIO modes. Uses the ata timing 227 * Program the VIA registers for DMA and PIO modes. Uses the ata timing
228 * support in order to compute modes. 228 * support in order to compute modes.
229 * 229 *
230 * FIXME: Hotplug will require we serialize multiple mode changes 230 * FIXME: Hotplug will require we serialize multiple mode changes
231 * on the two channels. 231 * on the two channels.
232 */ 232 */
233 233
234 static void via_do_set_mode(struct ata_port *ap, struct ata_device *adev, 234 static void via_do_set_mode(struct ata_port *ap, struct ata_device *adev,
235 int mode, int set_ast, int udma_type) 235 int mode, int set_ast, int udma_type)
236 { 236 {
237 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 237 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
238 struct ata_device *peer = ata_dev_pair(adev); 238 struct ata_device *peer = ata_dev_pair(adev);
239 struct ata_timing t, p; 239 struct ata_timing t, p;
240 static int via_clock = 33333; /* Bus clock in kHz */ 240 static int via_clock = 33333; /* Bus clock in kHz */
241 unsigned long T = 1000000000 / via_clock; 241 unsigned long T = 1000000000 / via_clock;
242 unsigned long UT = T; 242 unsigned long UT = T;
243 int ut; 243 int ut;
244 int offset = 3 - (2*ap->port_no) - adev->devno; 244 int offset = 3 - (2*ap->port_no) - adev->devno;
245 245
246 switch (udma_type) { 246 switch (udma_type) {
247 case ATA_UDMA4: 247 case ATA_UDMA4:
248 UT = T / 2; break; 248 UT = T / 2; break;
249 case ATA_UDMA5: 249 case ATA_UDMA5:
250 UT = T / 3; break; 250 UT = T / 3; break;
251 case ATA_UDMA6: 251 case ATA_UDMA6:
252 UT = T / 4; break; 252 UT = T / 4; break;
253 } 253 }
254 254
255 /* Calculate the timing values we require */ 255 /* Calculate the timing values we require */
256 ata_timing_compute(adev, mode, &t, T, UT); 256 ata_timing_compute(adev, mode, &t, T, UT);
257 257
258 /* We share 8bit timing so we must merge the constraints */ 258 /* We share 8bit timing so we must merge the constraints */
259 if (peer) { 259 if (peer) {
260 if (peer->pio_mode) { 260 if (peer->pio_mode) {
261 ata_timing_compute(peer, peer->pio_mode, &p, T, UT); 261 ata_timing_compute(peer, peer->pio_mode, &p, T, UT);
262 ata_timing_merge(&p, &t, &t, ATA_TIMING_8BIT); 262 ata_timing_merge(&p, &t, &t, ATA_TIMING_8BIT);
263 } 263 }
264 } 264 }
265 265
266 /* Address setup is programmable but breaks on UDMA133 setups */ 266 /* Address setup is programmable but breaks on UDMA133 setups */
267 if (set_ast) { 267 if (set_ast) {
268 u8 setup; /* 2 bits per drive */ 268 u8 setup; /* 2 bits per drive */
269 int shift = 2 * offset; 269 int shift = 2 * offset;
270 270
271 pci_read_config_byte(pdev, 0x4C, &setup); 271 pci_read_config_byte(pdev, 0x4C, &setup);
272 setup &= ~(3 << shift); 272 setup &= ~(3 << shift);
273 setup |= (clamp_val(t.setup, 1, 4) - 1) << shift; 273 setup |= (clamp_val(t.setup, 1, 4) - 1) << shift;
274 pci_write_config_byte(pdev, 0x4C, setup); 274 pci_write_config_byte(pdev, 0x4C, setup);
275 } 275 }
276 276
277 /* Load the PIO mode bits */ 277 /* Load the PIO mode bits */
278 pci_write_config_byte(pdev, 0x4F - ap->port_no, 278 pci_write_config_byte(pdev, 0x4F - ap->port_no,
279 ((clamp_val(t.act8b, 1, 16) - 1) << 4) | (clamp_val(t.rec8b, 1, 16) - 1)); 279 ((clamp_val(t.act8b, 1, 16) - 1) << 4) | (clamp_val(t.rec8b, 1, 16) - 1));
280 pci_write_config_byte(pdev, 0x48 + offset, 280 pci_write_config_byte(pdev, 0x48 + offset,
281 ((clamp_val(t.active, 1, 16) - 1) << 4) | (clamp_val(t.recover, 1, 16) - 1)); 281 ((clamp_val(t.active, 1, 16) - 1) << 4) | (clamp_val(t.recover, 1, 16) - 1));
282 282
283 /* Load the UDMA bits according to type */ 283 /* Load the UDMA bits according to type */
284 switch (udma_type) { 284 switch (udma_type) {
285 case ATA_UDMA2: 285 case ATA_UDMA2:
286 default: 286 default:
287 ut = t.udma ? (0xe0 | (clamp_val(t.udma, 2, 5) - 2)) : 0x03; 287 ut = t.udma ? (0xe0 | (clamp_val(t.udma, 2, 5) - 2)) : 0x03;
288 break; 288 break;
289 case ATA_UDMA4: 289 case ATA_UDMA4:
290 ut = t.udma ? (0xe8 | (clamp_val(t.udma, 2, 9) - 2)) : 0x0f; 290 ut = t.udma ? (0xe8 | (clamp_val(t.udma, 2, 9) - 2)) : 0x0f;
291 break; 291 break;
292 case ATA_UDMA5: 292 case ATA_UDMA5:
293 ut = t.udma ? (0xe0 | (clamp_val(t.udma, 2, 9) - 2)) : 0x07; 293 ut = t.udma ? (0xe0 | (clamp_val(t.udma, 2, 9) - 2)) : 0x07;
294 break; 294 break;
295 case ATA_UDMA6: 295 case ATA_UDMA6:
296 ut = t.udma ? (0xe0 | (clamp_val(t.udma, 2, 9) - 2)) : 0x07; 296 ut = t.udma ? (0xe0 | (clamp_val(t.udma, 2, 9) - 2)) : 0x07;
297 break; 297 break;
298 } 298 }
299 299
300 /* Program the UDMA register if the controller supports UDMA at all */ 300 /* Program the UDMA register if the controller supports UDMA at all */
301 if (udma_type) { 301 if (udma_type) {
302 u8 udma_etc; 302 u8 udma_etc;
303 303
304 pci_read_config_byte(pdev, 0x50 + offset, &udma_etc); 304 pci_read_config_byte(pdev, 0x50 + offset, &udma_etc);
305 305
306 /* clear transfer mode bit */ 306 /* clear transfer mode bit */
307 udma_etc &= ~0x20; 307 udma_etc &= ~0x20;
308 308
309 if (t.udma) { 309 if (t.udma) {
310 /* preserve 80-wire cable detection bit */ 310 /* preserve 80-wire cable detection bit */
311 udma_etc &= 0x10; 311 udma_etc &= 0x10;
312 udma_etc |= ut; 312 udma_etc |= ut;
313 } 313 }
314 314
315 pci_write_config_byte(pdev, 0x50 + offset, udma_etc); 315 pci_write_config_byte(pdev, 0x50 + offset, udma_etc);
316 } 316 }
317 } 317 }
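
The clock arithmetic at the top of via_do_set_mode() is easy to misread: via_clock is in kHz, so T and UT come out in picoseconds, and the UT divisors correspond to the faster UDMA strobe clocks. A worked-numbers sketch (the kHz-in/ps-out convention is the usual libata one; the strobe-frequency labels are my reading of the divisors, not a claim from the driver):

#include <stdio.h>

int main(void)
{
	/* Periods as computed in via_do_set_mode(): kHz in, ps out */
	long via_clock = 33333;			/* 33.333 MHz bus clock, in kHz */
	long T = 1000000000L / via_clock;	/* 30000 ps = 30 ns */

	printf("T  = %ld ps (cycle time)\n", T);
	printf("UT = %ld ps for UDMA4 (T/2, ~66 MHz strobe)\n", T / 2);
	printf("UT = %ld ps for UDMA5 (T/3, ~100 MHz strobe)\n", T / 3);
	printf("UT = %ld ps for UDMA6 (T/4, ~133 MHz strobe)\n", T / 4);
	return 0;
}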
318 318
319 static void via_set_piomode(struct ata_port *ap, struct ata_device *adev) 319 static void via_set_piomode(struct ata_port *ap, struct ata_device *adev)
320 { 320 {
321 const struct via_isa_bridge *config = ap->host->private_data; 321 const struct via_isa_bridge *config = ap->host->private_data;
322 int set_ast = (config->flags & VIA_BAD_AST) ? 0 : 1; 322 int set_ast = (config->flags & VIA_BAD_AST) ? 0 : 1;
323 323
324 via_do_set_mode(ap, adev, adev->pio_mode, set_ast, config->udma_mask); 324 via_do_set_mode(ap, adev, adev->pio_mode, set_ast, config->udma_mask);
325 } 325 }
326 326
327 static void via_set_dmamode(struct ata_port *ap, struct ata_device *adev) 327 static void via_set_dmamode(struct ata_port *ap, struct ata_device *adev)
328 { 328 {
329 const struct via_isa_bridge *config = ap->host->private_data; 329 const struct via_isa_bridge *config = ap->host->private_data;
330 int set_ast = (config->flags & VIA_BAD_AST) ? 0 : 1; 330 int set_ast = (config->flags & VIA_BAD_AST) ? 0 : 1;
331 331
332 via_do_set_mode(ap, adev, adev->dma_mode, set_ast, config->udma_mask); 332 via_do_set_mode(ap, adev, adev->dma_mode, set_ast, config->udma_mask);
333 } 333 }
334 334
335 /** 335 /**
336 * via_mode_filter - filter buggy device/mode pairs 336 * via_mode_filter - filter buggy device/mode pairs
337 * @dev: ATA device 337 * @dev: ATA device
338 * @mask: Mode bitmask 338 * @mask: Mode bitmask
339 * 339 *
340 * We need to apply some minimal filtering for old controllers and at least 340 * We need to apply some minimal filtering for old controllers and at least
341 * one breed of Transcend SSD. Return the updated mask. 341 * one breed of Transcend SSD. Return the updated mask.
342 */ 342 */
343 343
344 static unsigned long via_mode_filter(struct ata_device *dev, unsigned long mask) 344 static unsigned long via_mode_filter(struct ata_device *dev, unsigned long mask)
345 { 345 {
346 struct ata_host *host = dev->link->ap->host; 346 struct ata_host *host = dev->link->ap->host;
347 const struct via_isa_bridge *config = host->private_data; 347 const struct via_isa_bridge *config = host->private_data;
348 unsigned char model_num[ATA_ID_PROD_LEN + 1]; 348 unsigned char model_num[ATA_ID_PROD_LEN + 1];
349 349
350 if (config->id == PCI_DEVICE_ID_VIA_82C586_0) { 350 if (config->id == PCI_DEVICE_ID_VIA_82C586_0) {
351 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); 351 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
352 if (strcmp(model_num, "TS64GSSD25-M") == 0) { 352 if (strcmp(model_num, "TS64GSSD25-M") == 0) {
353 ata_dev_printk(dev, KERN_WARNING, 353 ata_dev_printk(dev, KERN_WARNING,
354 "disabling UDMA mode due to reported lockups with this device.\n"); 354 "disabling UDMA mode due to reported lockups with this device.\n");
355 mask &= ~ ATA_MASK_UDMA; 355 mask &= ~ ATA_MASK_UDMA;
356 } 356 }
357 } 357 }
358 return mask; 358 return mask;
359 } 359 }
360 360
361 /** 361 /**
362 * via_tf_load - send taskfile registers to host controller 362 * via_tf_load - send taskfile registers to host controller
363 * @ap: Port to which output is sent 363 * @ap: Port to which output is sent
364 * @tf: ATA taskfile register set 364 * @tf: ATA taskfile register set
365 * 365 *
366 * Outputs ATA taskfile to standard ATA host controller. 366 * Outputs ATA taskfile to standard ATA host controller.
367 * 367 *
368 * Note: This works around an internal bug of VIA chipsets, 368 * Note: This works around an internal bug of VIA chipsets,
369 * which reset the device register after the IEN bit in the 369 * which reset the device register after the IEN bit in the
370 * ctl register is changed. 370 * ctl register is changed.
371 */ 371 */
372 static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) 372 static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
373 { 373 {
374 struct ata_ioports *ioaddr = &ap->ioaddr; 374 struct ata_ioports *ioaddr = &ap->ioaddr;
375 struct via_port *vp = ap->private_data; 375 struct via_port *vp = ap->private_data;
376 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; 376 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
377 int newctl = 0; 377 int newctl = 0;
378 378
379 if (tf->ctl != ap->last_ctl) { 379 if (tf->ctl != ap->last_ctl) {
380 iowrite8(tf->ctl, ioaddr->ctl_addr); 380 iowrite8(tf->ctl, ioaddr->ctl_addr);
381 ap->last_ctl = tf->ctl; 381 ap->last_ctl = tf->ctl;
382 ata_wait_idle(ap); 382 ata_wait_idle(ap);
383 newctl = 1; 383 newctl = 1;
384 } 384 }
385 385
386 if (tf->flags & ATA_TFLAG_DEVICE) { 386 if (tf->flags & ATA_TFLAG_DEVICE) {
387 iowrite8(tf->device, ioaddr->device_addr); 387 iowrite8(tf->device, ioaddr->device_addr);
388 vp->cached_device = tf->device; 388 vp->cached_device = tf->device;
389 } else if (newctl) 389 } else if (newctl)
390 iowrite8(vp->cached_device, ioaddr->device_addr); 390 iowrite8(vp->cached_device, ioaddr->device_addr);
391 391
392 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { 392 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
393 WARN_ON_ONCE(!ioaddr->ctl_addr); 393 WARN_ON_ONCE(!ioaddr->ctl_addr);
394 iowrite8(tf->hob_feature, ioaddr->feature_addr); 394 iowrite8(tf->hob_feature, ioaddr->feature_addr);
395 iowrite8(tf->hob_nsect, ioaddr->nsect_addr); 395 iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
396 iowrite8(tf->hob_lbal, ioaddr->lbal_addr); 396 iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
397 iowrite8(tf->hob_lbam, ioaddr->lbam_addr); 397 iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
398 iowrite8(tf->hob_lbah, ioaddr->lbah_addr); 398 iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
399 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", 399 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
400 tf->hob_feature, 400 tf->hob_feature,
401 tf->hob_nsect, 401 tf->hob_nsect,
402 tf->hob_lbal, 402 tf->hob_lbal,
403 tf->hob_lbam, 403 tf->hob_lbam,
404 tf->hob_lbah); 404 tf->hob_lbah);
405 } 405 }
406 406
407 if (is_addr) { 407 if (is_addr) {
408 iowrite8(tf->feature, ioaddr->feature_addr); 408 iowrite8(tf->feature, ioaddr->feature_addr);
409 iowrite8(tf->nsect, ioaddr->nsect_addr); 409 iowrite8(tf->nsect, ioaddr->nsect_addr);
410 iowrite8(tf->lbal, ioaddr->lbal_addr); 410 iowrite8(tf->lbal, ioaddr->lbal_addr);
411 iowrite8(tf->lbam, ioaddr->lbam_addr); 411 iowrite8(tf->lbam, ioaddr->lbam_addr);
412 iowrite8(tf->lbah, ioaddr->lbah_addr); 412 iowrite8(tf->lbah, ioaddr->lbah_addr);
413 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", 413 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
414 tf->feature, 414 tf->feature,
415 tf->nsect, 415 tf->nsect,
416 tf->lbal, 416 tf->lbal,
417 tf->lbam, 417 tf->lbam,
418 tf->lbah); 418 tf->lbah);
419 } 419 }
420
421 ata_wait_idle(ap);
420 } 422 }
421 423
422 static int via_port_start(struct ata_port *ap) 424 static int via_port_start(struct ata_port *ap)
423 { 425 {
424 struct via_port *vp; 426 struct via_port *vp;
425 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 427 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
426 428
427 int ret = ata_bmdma_port_start(ap); 429 int ret = ata_bmdma_port_start(ap);
428 if (ret < 0) 430 if (ret < 0)
429 return ret; 431 return ret;
430 432
431 vp = devm_kzalloc(&pdev->dev, sizeof(struct via_port), GFP_KERNEL); 433 vp = devm_kzalloc(&pdev->dev, sizeof(struct via_port), GFP_KERNEL);
432 if (vp == NULL) 434 if (vp == NULL)
433 return -ENOMEM; 435 return -ENOMEM;
434 ap->private_data = vp; 436 ap->private_data = vp;
435 return 0; 437 return 0;
436 } 438 }
437 439
438 static struct scsi_host_template via_sht = { 440 static struct scsi_host_template via_sht = {
439 ATA_BMDMA_SHT(DRV_NAME), 441 ATA_BMDMA_SHT(DRV_NAME),
440 }; 442 };
441 443
442 static struct ata_port_operations via_port_ops = { 444 static struct ata_port_operations via_port_ops = {
443 .inherits = &ata_bmdma_port_ops, 445 .inherits = &ata_bmdma_port_ops,
444 .cable_detect = via_cable_detect, 446 .cable_detect = via_cable_detect,
445 .set_piomode = via_set_piomode, 447 .set_piomode = via_set_piomode,
446 .set_dmamode = via_set_dmamode, 448 .set_dmamode = via_set_dmamode,
447 .prereset = via_pre_reset, 449 .prereset = via_pre_reset,
448 .sff_tf_load = via_tf_load, 450 .sff_tf_load = via_tf_load,
449 .port_start = via_port_start, 451 .port_start = via_port_start,
450 .mode_filter = via_mode_filter, 452 .mode_filter = via_mode_filter,
451 }; 453 };
452 454
453 static struct ata_port_operations via_port_ops_noirq = { 455 static struct ata_port_operations via_port_ops_noirq = {
454 .inherits = &via_port_ops, 456 .inherits = &via_port_ops,
455 .sff_data_xfer = ata_sff_data_xfer_noirq, 457 .sff_data_xfer = ata_sff_data_xfer_noirq,
456 }; 458 };
457 459
458 /** 460 /**
459 * via_config_fifo - set up the FIFO 461 * via_config_fifo - set up the FIFO
460 * @pdev: PCI device 462 * @pdev: PCI device
461 * @flags: configuration flags 463 * @flags: configuration flags
462 * 464 *
463 * Set the FIFO properties for this device if necessary. Used both on 465 * Set the FIFO properties for this device if necessary. Used both on
464 * set up and on the resume path. 466 * set up and on the resume path.
465 */ 467 */
466 468
467 static void via_config_fifo(struct pci_dev *pdev, unsigned int flags) 469 static void via_config_fifo(struct pci_dev *pdev, unsigned int flags)
468 { 470 {
469 u8 enable; 471 u8 enable;
470 472
471 /* 0x40 low bits indicate enabled channels */ 473 /* 0x40 low bits indicate enabled channels */
472 pci_read_config_byte(pdev, 0x40 , &enable); 474 pci_read_config_byte(pdev, 0x40 , &enable);
473 enable &= 3; 475 enable &= 3;
474 476
475 if (flags & VIA_SET_FIFO) { 477 if (flags & VIA_SET_FIFO) {
476 static const u8 fifo_setting[4] = {0x00, 0x60, 0x00, 0x20}; 478 static const u8 fifo_setting[4] = {0x00, 0x60, 0x00, 0x20};
477 u8 fifo; 479 u8 fifo;
478 480
479 pci_read_config_byte(pdev, 0x43, &fifo); 481 pci_read_config_byte(pdev, 0x43, &fifo);
480 482
481 /* Clear PREQ# until DDACK# for errata */ 483 /* Clear PREQ# until DDACK# for errata */
482 if (flags & VIA_BAD_PREQ) 484 if (flags & VIA_BAD_PREQ)
483 fifo &= 0x7F; 485 fifo &= 0x7F;
484 else 486 else
485 fifo &= 0x9f; 487 fifo &= 0x9f;
486 /* Turn on FIFO for enabled channels */ 488 /* Turn on FIFO for enabled channels */
487 fifo |= fifo_setting[enable]; 489 fifo |= fifo_setting[enable];
488 pci_write_config_byte(pdev, 0x43, fifo); 490 pci_write_config_byte(pdev, 0x43, fifo);
489 } 491 }
490 } 492 }
491 493
492 /** 494 /**
493 * via_init_one - discovery callback 495 * via_init_one - discovery callback
494 * @pdev: PCI device 496 * @pdev: PCI device
495 * @id: PCI table info 497 * @id: PCI table info
496 * 498 *
497 * A VIA IDE interface has been discovered. Figure out which revision 499 * A VIA IDE interface has been discovered. Figure out which revision
498 * it is and perform configuration work before handing it to the ATA layer. 500 * it is and perform configuration work before handing it to the ATA layer.
499 */ 501 */
500 502
501 static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 503 static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
502 { 504 {
503 /* Early VIA without UDMA support */ 505 /* Early VIA without UDMA support */
504 static const struct ata_port_info via_mwdma_info = { 506 static const struct ata_port_info via_mwdma_info = {
505 .flags = ATA_FLAG_SLAVE_POSS, 507 .flags = ATA_FLAG_SLAVE_POSS,
506 .pio_mask = ATA_PIO4, 508 .pio_mask = ATA_PIO4,
507 .mwdma_mask = ATA_MWDMA2, 509 .mwdma_mask = ATA_MWDMA2,
508 .port_ops = &via_port_ops 510 .port_ops = &via_port_ops
509 }; 511 };
510 /* Ditto with IRQ masking required */ 512 /* Ditto with IRQ masking required */
511 static const struct ata_port_info via_mwdma_info_borked = { 513 static const struct ata_port_info via_mwdma_info_borked = {
512 .flags = ATA_FLAG_SLAVE_POSS, 514 .flags = ATA_FLAG_SLAVE_POSS,
513 .pio_mask = ATA_PIO4, 515 .pio_mask = ATA_PIO4,
514 .mwdma_mask = ATA_MWDMA2, 516 .mwdma_mask = ATA_MWDMA2,
515 .port_ops = &via_port_ops_noirq, 517 .port_ops = &via_port_ops_noirq,
516 }; 518 };
517 /* VIA UDMA 33 devices (and borked 66) */ 519 /* VIA UDMA 33 devices (and borked 66) */
518 static const struct ata_port_info via_udma33_info = { 520 static const struct ata_port_info via_udma33_info = {
519 .flags = ATA_FLAG_SLAVE_POSS, 521 .flags = ATA_FLAG_SLAVE_POSS,
520 .pio_mask = ATA_PIO4, 522 .pio_mask = ATA_PIO4,
521 .mwdma_mask = ATA_MWDMA2, 523 .mwdma_mask = ATA_MWDMA2,
522 .udma_mask = ATA_UDMA2, 524 .udma_mask = ATA_UDMA2,
523 .port_ops = &via_port_ops 525 .port_ops = &via_port_ops
524 }; 526 };
525 /* VIA UDMA 66 devices */ 527 /* VIA UDMA 66 devices */
526 static const struct ata_port_info via_udma66_info = { 528 static const struct ata_port_info via_udma66_info = {
527 .flags = ATA_FLAG_SLAVE_POSS, 529 .flags = ATA_FLAG_SLAVE_POSS,
528 .pio_mask = ATA_PIO4, 530 .pio_mask = ATA_PIO4,
529 .mwdma_mask = ATA_MWDMA2, 531 .mwdma_mask = ATA_MWDMA2,
530 .udma_mask = ATA_UDMA4, 532 .udma_mask = ATA_UDMA4,
531 .port_ops = &via_port_ops 533 .port_ops = &via_port_ops
532 }; 534 };
533 /* VIA UDMA 100 devices */ 535 /* VIA UDMA 100 devices */
534 static const struct ata_port_info via_udma100_info = { 536 static const struct ata_port_info via_udma100_info = {
535 .flags = ATA_FLAG_SLAVE_POSS, 537 .flags = ATA_FLAG_SLAVE_POSS,
536 .pio_mask = ATA_PIO4, 538 .pio_mask = ATA_PIO4,
537 .mwdma_mask = ATA_MWDMA2, 539 .mwdma_mask = ATA_MWDMA2,
538 .udma_mask = ATA_UDMA5, 540 .udma_mask = ATA_UDMA5,
539 .port_ops = &via_port_ops 541 .port_ops = &via_port_ops
540 }; 542 };
541 /* UDMA133 with bad AST (All current 133) */ 543 /* UDMA133 with bad AST (All current 133) */
542 static const struct ata_port_info via_udma133_info = { 544 static const struct ata_port_info via_udma133_info = {
543 .flags = ATA_FLAG_SLAVE_POSS, 545 .flags = ATA_FLAG_SLAVE_POSS,
544 .pio_mask = ATA_PIO4, 546 .pio_mask = ATA_PIO4,
545 .mwdma_mask = ATA_MWDMA2, 547 .mwdma_mask = ATA_MWDMA2,
546 .udma_mask = ATA_UDMA6, /* FIXME: should check north bridge */ 548 .udma_mask = ATA_UDMA6, /* FIXME: should check north bridge */
547 .port_ops = &via_port_ops 549 .port_ops = &via_port_ops
548 }; 550 };
549 const struct ata_port_info *ppi[] = { NULL, NULL }; 551 const struct ata_port_info *ppi[] = { NULL, NULL };
550 struct pci_dev *isa; 552 struct pci_dev *isa;
551 const struct via_isa_bridge *config; 553 const struct via_isa_bridge *config;
552 static int printed_version; 554 static int printed_version;
553 u8 enable; 555 u8 enable;
554 u32 timing; 556 u32 timing;
555 unsigned long flags = id->driver_data; 557 unsigned long flags = id->driver_data;
556 int rc; 558 int rc;
557 559
558 if (!printed_version++) 560 if (!printed_version++)
559 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 561 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
560 562
561 rc = pcim_enable_device(pdev); 563 rc = pcim_enable_device(pdev);
562 if (rc) 564 if (rc)
563 return rc; 565 return rc;
564 566
565 if (flags & VIA_IDFLAG_SINGLE) 567 if (flags & VIA_IDFLAG_SINGLE)
566 ppi[1] = &ata_dummy_port_info; 568 ppi[1] = &ata_dummy_port_info;
567 569
568 /* To find out how the IDE will behave and what features we 570 /* To find out how the IDE will behave and what features we
569 actually have, look at the bridge, not the IDE controller */ 571 actually have, look at the bridge, not the IDE controller */
570 for (config = via_isa_bridges; config->id != PCI_DEVICE_ID_VIA_ANON; 572 for (config = via_isa_bridges; config->id != PCI_DEVICE_ID_VIA_ANON;
571 config++) 573 config++)
572 if ((isa = pci_get_device(PCI_VENDOR_ID_VIA + 574 if ((isa = pci_get_device(PCI_VENDOR_ID_VIA +
573 !!(config->flags & VIA_BAD_ID), 575 !!(config->flags & VIA_BAD_ID),
574 config->id, NULL))) { 576 config->id, NULL))) {
575 u8 rev = isa->revision; 577 u8 rev = isa->revision;
576 pci_dev_put(isa); 578 pci_dev_put(isa);
577 579
578 if ((id->device == 0x0415 || id->device == 0x3164) && 580 if ((id->device == 0x0415 || id->device == 0x3164) &&
579 (config->id != id->device)) 581 (config->id != id->device))
580 continue; 582 continue;
581 583
582 if (rev >= config->rev_min && rev <= config->rev_max) 584 if (rev >= config->rev_min && rev <= config->rev_max)
583 break; 585 break;
584 } 586 }
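	/* [Editor's note] The scan above walks the via_isa_bridges table
	 * until the PCI_DEVICE_ID_VIA_ANON sentinel, so "config" always
	 * ends up pointing at a valid entry. The subtle part is the
	 * vendor ID arithmetic: PCI_VENDOR_ID_VIA is 0x1106, and bridges
	 * flagged VIA_BAD_ID report the incorrect vendor ID 0x1107,
	 * hence PCI_VENDOR_ID_VIA + !!(config->flags & VIA_BAD_ID). */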
585 587
586 if (!(config->flags & VIA_NO_ENABLES)) { 588 if (!(config->flags & VIA_NO_ENABLES)) {
587 /* 0x40 low bits indicate enabled channels */ 589 /* 0x40 low bits indicate enabled channels */
588 pci_read_config_byte(pdev, 0x40, &enable); 590 pci_read_config_byte(pdev, 0x40, &enable);
589 enable &= 3; 591 enable &= 3;
590 if (enable == 0) 592 if (enable == 0)
591 return -ENODEV; 593 return -ENODEV;
592 } 594 }
593 595
594 /* Initialise the FIFO for the enabled channels. */ 596 /* Initialise the FIFO for the enabled channels. */
595 via_config_fifo(pdev, config->flags); 597 via_config_fifo(pdev, config->flags);
596 598
597 /* Clock set up */ 599 /* Clock set up */
598 switch (config->udma_mask) { 600 switch (config->udma_mask) {
599 case 0x00: 601 case 0x00:
600 if (config->flags & VIA_NO_UNMASK) 602 if (config->flags & VIA_NO_UNMASK)
601 ppi[0] = &via_mwdma_info_borked; 603 ppi[0] = &via_mwdma_info_borked;
602 else 604 else
603 ppi[0] = &via_mwdma_info; 605 ppi[0] = &via_mwdma_info;
604 break; 606 break;
605 case ATA_UDMA2: 607 case ATA_UDMA2:
606 ppi[0] = &via_udma33_info; 608 ppi[0] = &via_udma33_info;
607 break; 609 break;
608 case ATA_UDMA4: 610 case ATA_UDMA4:
609 ppi[0] = &via_udma66_info; 611 ppi[0] = &via_udma66_info;
610 break; 612 break;
611 case ATA_UDMA5: 613 case ATA_UDMA5:
612 ppi[0] = &via_udma100_info; 614 ppi[0] = &via_udma100_info;
613 break; 615 break;
614 case ATA_UDMA6: 616 case ATA_UDMA6:
615 ppi[0] = &via_udma133_info; 617 ppi[0] = &via_udma133_info;
616 break; 618 break;
617 default: 619 default:
618 WARN_ON(1); 620 WARN_ON(1);
619 return -ENODEV; 621 return -ENODEV;
620 } 622 }
621 623
622 if (config->flags & VIA_BAD_CLK66) { 624 if (config->flags & VIA_BAD_CLK66) {
623 /* Disable the 66MHz clock on problem devices */ 625 /* Disable the 66MHz clock on problem devices */
624 pci_read_config_dword(pdev, 0x50, &timing); 626 pci_read_config_dword(pdev, 0x50, &timing);
625 timing &= ~0x80008; 627 timing &= ~0x80008;
626 pci_write_config_dword(pdev, 0x50, timing); 628 pci_write_config_dword(pdev, 0x50, timing);
627 } 629 }
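	/* [Editor's note] 0x80008 is (1 << 19) | (1 << 3), presumably the
	 * 66 MHz clock enable bits for the two channels in config
	 * register 0x50. They are cleared here for VIA_BAD_CLK66 parts
	 * and set again for UDMA66 parts in via_reinit_one() below. */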
628 630
629 /* We have established the device type, now fire it up */ 631 /* We have established the device type, now fire it up */
630 return ata_pci_bmdma_init_one(pdev, ppi, &via_sht, (void *)config, 0); 632 return ata_pci_bmdma_init_one(pdev, ppi, &via_sht, (void *)config, 0);
631 } 633 }
632 634
633 #ifdef CONFIG_PM 635 #ifdef CONFIG_PM
634 /** 636 /**
635 * via_reinit_one - reinit after resume 637 * via_reinit_one - reinit after resume
636 * @pdev: PCI device 638 * @pdev: PCI device
637 * 639 *
638 * Called when the VIA PATA device is resumed. We must then 640 * Called when the VIA PATA device is resumed. We must then
639 * reconfigure the fifo and other setup we may have altered. In 641 * reconfigure the fifo and other setup we may have altered. In
640 * addition, the kernel needs resume method support in the PCI 642 * addition, the kernel needs resume method support in the PCI
641 * quirk handling. 643 * quirk handling.
642 */ 644 */
643 645
644 static int via_reinit_one(struct pci_dev *pdev) 646 static int via_reinit_one(struct pci_dev *pdev)
645 { 647 {
646 u32 timing; 648 u32 timing;
647 struct ata_host *host = dev_get_drvdata(&pdev->dev); 649 struct ata_host *host = dev_get_drvdata(&pdev->dev);
648 const struct via_isa_bridge *config = host->private_data; 650 const struct via_isa_bridge *config = host->private_data;
649 int rc; 651 int rc;
650 652
651 rc = ata_pci_device_do_resume(pdev); 653 rc = ata_pci_device_do_resume(pdev);
652 if (rc) 654 if (rc)
653 return rc; 655 return rc;
654 656
655 via_config_fifo(pdev, config->flags); 657 via_config_fifo(pdev, config->flags);
656 658
657 if (config->udma_mask == ATA_UDMA4) { 659 if (config->udma_mask == ATA_UDMA4) {
658 /* The 66 MHz devices require we enable the clock */ 660 /* The 66 MHz devices require we enable the clock */
659 pci_read_config_dword(pdev, 0x50, &timing); 661 pci_read_config_dword(pdev, 0x50, &timing);
660 timing |= 0x80008; 662 timing |= 0x80008;
661 pci_write_config_dword(pdev, 0x50, timing); 663 pci_write_config_dword(pdev, 0x50, timing);
662 } 664 }
663 if (config->flags & VIA_BAD_CLK66) { 665 if (config->flags & VIA_BAD_CLK66) {
664 /* Disable the 66MHz clock on problem devices */ 666 /* Disable the 66MHz clock on problem devices */
665 pci_read_config_dword(pdev, 0x50, &timing); 667 pci_read_config_dword(pdev, 0x50, &timing);
666 timing &= ~0x80008; 668 timing &= ~0x80008;
667 pci_write_config_dword(pdev, 0x50, timing); 669 pci_write_config_dword(pdev, 0x50, timing);
668 } 670 }
669 671
670 ata_host_resume(host); 672 ata_host_resume(host);
671 return 0; 673 return 0;
672 } 674 }
673 #endif 675 #endif
674 676
675 static const struct pci_device_id via[] = { 677 static const struct pci_device_id via[] = {
676 { PCI_VDEVICE(VIA, 0x0415), }, 678 { PCI_VDEVICE(VIA, 0x0415), },
677 { PCI_VDEVICE(VIA, 0x0571), }, 679 { PCI_VDEVICE(VIA, 0x0571), },
678 { PCI_VDEVICE(VIA, 0x0581), }, 680 { PCI_VDEVICE(VIA, 0x0581), },
679 { PCI_VDEVICE(VIA, 0x1571), }, 681 { PCI_VDEVICE(VIA, 0x1571), },
680 { PCI_VDEVICE(VIA, 0x3164), }, 682 { PCI_VDEVICE(VIA, 0x3164), },
681 { PCI_VDEVICE(VIA, 0x5324), }, 683 { PCI_VDEVICE(VIA, 0x5324), },
682 { PCI_VDEVICE(VIA, 0xC409), VIA_IDFLAG_SINGLE }, 684 { PCI_VDEVICE(VIA, 0xC409), VIA_IDFLAG_SINGLE },
683 { PCI_VDEVICE(VIA, 0x9001), VIA_IDFLAG_SINGLE }, 685 { PCI_VDEVICE(VIA, 0x9001), VIA_IDFLAG_SINGLE },
684 686
685 { }, 687 { },
686 }; 688 };
687 689
688 static struct pci_driver via_pci_driver = { 690 static struct pci_driver via_pci_driver = {
689 .name = DRV_NAME, 691 .name = DRV_NAME,
690 .id_table = via, 692 .id_table = via,
691 .probe = via_init_one, 693 .probe = via_init_one,
692 .remove = ata_pci_remove_one, 694 .remove = ata_pci_remove_one,
693 #ifdef CONFIG_PM 695 #ifdef CONFIG_PM
694 .suspend = ata_pci_device_suspend, 696 .suspend = ata_pci_device_suspend,
695 .resume = via_reinit_one, 697 .resume = via_reinit_one,
696 #endif 698 #endif
697 }; 699 };
698 700
699 static int __init via_init(void) 701 static int __init via_init(void)
700 { 702 {
701 return pci_register_driver(&via_pci_driver); 703 return pci_register_driver(&via_pci_driver);
702 } 704 }
703 705
704 static void __exit via_exit(void) 706 static void __exit via_exit(void)
705 { 707 {
706 pci_unregister_driver(&via_pci_driver); 708 pci_unregister_driver(&via_pci_driver);
707 } 709 }
708 710
709 MODULE_AUTHOR("Alan Cox"); 711 MODULE_AUTHOR("Alan Cox");
710 MODULE_DESCRIPTION("low-level driver for VIA PATA"); 712 MODULE_DESCRIPTION("low-level driver for VIA PATA");
711 MODULE_LICENSE("GPL"); 713 MODULE_LICENSE("GPL");
712 MODULE_DEVICE_TABLE(pci, via); 714 MODULE_DEVICE_TABLE(pci, via);
713 MODULE_VERSION(DRV_VERSION); 715 MODULE_VERSION(DRV_VERSION);
714 716
715 module_init(via_init); 717 module_init(via_init);
716 module_exit(via_exit); 718 module_exit(via_exit);
717 719
drivers/ata/sata_mv.c
1 /* 1 /*
2 * sata_mv.c - Marvell SATA support 2 * sata_mv.c - Marvell SATA support
3 * 3 *
4 * Copyright 2008-2009: Marvell Corporation, all rights reserved. 4 * Copyright 2008-2009: Marvell Corporation, all rights reserved.
5 * Copyright 2005: EMC Corporation, all rights reserved. 5 * Copyright 2005: EMC Corporation, all rights reserved.
6 * Copyright 2005 Red Hat, Inc. All rights reserved. 6 * Copyright 2005 Red Hat, Inc. All rights reserved.
7 * 7 *
8 * Originally written by Brett Russ. 8 * Originally written by Brett Russ.
9 * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>. 9 * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
10 * 10 *
11 * Please ALWAYS copy linux-ide@vger.kernel.org on emails. 11 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
12 * 12 *
13 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by 14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; version 2 of the License. 15 * the Free Software Foundation; version 2 of the License.
16 * 16 *
17 * This program is distributed in the hope that it will be useful, 17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details. 20 * GNU General Public License for more details.
21 * 21 *
22 * You should have received a copy of the GNU General Public License 22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software 23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 * 25 *
26 */ 26 */
27 27
28 /* 28 /*
29 * sata_mv TODO list: 29 * sata_mv TODO list:
30 * 30 *
31 * --> Develop a low-power-consumption strategy, and implement it. 31 * --> Develop a low-power-consumption strategy, and implement it.
32 * 32 *
33 * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds. 33 * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
34 * 34 *
35 * --> [Experiment, Marvell value added] Is it possible to use target 35 * --> [Experiment, Marvell value added] Is it possible to use target
36 * mode to cross-connect two Linux boxes with Marvell cards? If so, 36 * mode to cross-connect two Linux boxes with Marvell cards? If so,
37 * creating LibATA target mode support would be very interesting. 37 * creating LibATA target mode support would be very interesting.
38 * 38 *
39 * Target mode, for those without docs, is the ability to directly 39 * Target mode, for those without docs, is the ability to directly
40 * connect two SATA ports. 40 * connect two SATA ports.
41 */ 41 */
42 42
43 /* 43 /*
44 * 80x1-B2 errata PCI#11: 44 * 80x1-B2 errata PCI#11:
45 * 45 *
46 * Users of the 6041/6081 Rev.B2 chips (current is C0) 46 * Users of the 6041/6081 Rev.B2 chips (current is C0)
47 * should be careful to insert those cards only onto PCI-X bus #0, 47 * should be careful to insert those cards only onto PCI-X bus #0,
48 * and only in device slots 0..7, not higher. The chips may not 48 * and only in device slots 0..7, not higher. The chips may not
49 * work correctly otherwise (note: this is a pretty rare condition). 49 * work correctly otherwise (note: this is a pretty rare condition).
50 */ 50 */
51 51
52 #include <linux/kernel.h> 52 #include <linux/kernel.h>
53 #include <linux/module.h> 53 #include <linux/module.h>
54 #include <linux/pci.h> 54 #include <linux/pci.h>
55 #include <linux/init.h> 55 #include <linux/init.h>
56 #include <linux/blkdev.h> 56 #include <linux/blkdev.h>
57 #include <linux/delay.h> 57 #include <linux/delay.h>
58 #include <linux/interrupt.h> 58 #include <linux/interrupt.h>
59 #include <linux/dmapool.h> 59 #include <linux/dmapool.h>
60 #include <linux/dma-mapping.h> 60 #include <linux/dma-mapping.h>
61 #include <linux/device.h> 61 #include <linux/device.h>
62 #include <linux/clk.h> 62 #include <linux/clk.h>
63 #include <linux/platform_device.h> 63 #include <linux/platform_device.h>
64 #include <linux/ata_platform.h> 64 #include <linux/ata_platform.h>
65 #include <linux/mbus.h> 65 #include <linux/mbus.h>
66 #include <linux/bitops.h> 66 #include <linux/bitops.h>
67 #include <linux/gfp.h> 67 #include <linux/gfp.h>
68 #include <scsi/scsi_host.h> 68 #include <scsi/scsi_host.h>
69 #include <scsi/scsi_cmnd.h> 69 #include <scsi/scsi_cmnd.h>
70 #include <scsi/scsi_device.h> 70 #include <scsi/scsi_device.h>
71 #include <linux/libata.h> 71 #include <linux/libata.h>
72 72
73 #define DRV_NAME "sata_mv" 73 #define DRV_NAME "sata_mv"
74 #define DRV_VERSION "1.28" 74 #define DRV_VERSION "1.28"
75 75
76 /* 76 /*
77 * module options 77 * module options
78 */ 78 */
79 79
80 static int msi; 80 static int msi;
81 #ifdef CONFIG_PCI 81 #ifdef CONFIG_PCI
82 module_param(msi, int, S_IRUGO); 82 module_param(msi, int, S_IRUGO);
83 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)"); 83 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
84 #endif 84 #endif
85 85
86 static int irq_coalescing_io_count; 86 static int irq_coalescing_io_count;
87 module_param(irq_coalescing_io_count, int, S_IRUGO); 87 module_param(irq_coalescing_io_count, int, S_IRUGO);
88 MODULE_PARM_DESC(irq_coalescing_io_count, 88 MODULE_PARM_DESC(irq_coalescing_io_count,
89 "IRQ coalescing I/O count threshold (0..255)"); 89 "IRQ coalescing I/O count threshold (0..255)");
90 90
91 static int irq_coalescing_usecs; 91 static int irq_coalescing_usecs;
92 module_param(irq_coalescing_usecs, int, S_IRUGO); 92 module_param(irq_coalescing_usecs, int, S_IRUGO);
93 MODULE_PARM_DESC(irq_coalescing_usecs, 93 MODULE_PARM_DESC(irq_coalescing_usecs,
94 "IRQ coalescing time threshold in usecs"); 94 "IRQ coalescing time threshold in usecs");
95 95
96 enum { 96 enum {
97 /* BARs are enumerated in pci_resource_start() terms */ 97 /* BARs are enumerated in pci_resource_start() terms */
98 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */ 98 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
99 MV_IO_BAR = 2, /* offset 0x18: IO space */ 99 MV_IO_BAR = 2, /* offset 0x18: IO space */
100 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */ 100 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
101 101
102 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */ 102 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
103 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */ 103 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
104 104
105 /* For use with both IRQ coalescing methods ("all ports" or "per-HC") */ 105 /* For use with both IRQ coalescing methods ("all ports" or "per-HC") */
106 COAL_CLOCKS_PER_USEC = 150, /* for calculating COAL_TIMEs */ 106 COAL_CLOCKS_PER_USEC = 150, /* for calculating COAL_TIMEs */
107 MAX_COAL_TIME_THRESHOLD = ((1 << 24) - 1), /* internal clocks count */ 107 MAX_COAL_TIME_THRESHOLD = ((1 << 24) - 1), /* internal clocks count */
108 MAX_COAL_IO_COUNT = 255, /* completed I/O count */ 108 MAX_COAL_IO_COUNT = 255, /* completed I/O count */
109 109
110 MV_PCI_REG_BASE = 0, 110 MV_PCI_REG_BASE = 0,
111 111
112 /* 112 /*
113 * Per-chip ("all ports") interrupt coalescing feature. 113 * Per-chip ("all ports") interrupt coalescing feature.
114 * This is only for GEN_II / GEN_IIE hardware. 114 * This is only for GEN_II / GEN_IIE hardware.
115 * 115 *
116 * Coalescing defers the interrupt until either the IO_THRESHOLD 116 * Coalescing defers the interrupt until either the IO_THRESHOLD
117 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met. 117 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
118 */ 118 */
119 COAL_REG_BASE = 0x18000, 119 COAL_REG_BASE = 0x18000,
120 IRQ_COAL_CAUSE = (COAL_REG_BASE + 0x08), 120 IRQ_COAL_CAUSE = (COAL_REG_BASE + 0x08),
121 ALL_PORTS_COAL_IRQ = (1 << 4), /* all ports irq event */ 121 ALL_PORTS_COAL_IRQ = (1 << 4), /* all ports irq event */
122 122
123 IRQ_COAL_IO_THRESHOLD = (COAL_REG_BASE + 0xcc), 123 IRQ_COAL_IO_THRESHOLD = (COAL_REG_BASE + 0xcc),
124 IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0), 124 IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0),
125 125
126 /* 126 /*
127 * Registers for the (unused here) transaction coalescing feature: 127 * Registers for the (unused here) transaction coalescing feature:
128 */ 128 */
129 TRAN_COAL_CAUSE_LO = (COAL_REG_BASE + 0x88), 129 TRAN_COAL_CAUSE_LO = (COAL_REG_BASE + 0x88),
130 TRAN_COAL_CAUSE_HI = (COAL_REG_BASE + 0x8c), 130 TRAN_COAL_CAUSE_HI = (COAL_REG_BASE + 0x8c),
131 131
132 SATAHC0_REG_BASE = 0x20000, 132 SATAHC0_REG_BASE = 0x20000,
133 FLASH_CTL = 0x1046c, 133 FLASH_CTL = 0x1046c,
134 GPIO_PORT_CTL = 0x104f0, 134 GPIO_PORT_CTL = 0x104f0,
135 RESET_CFG = 0x180d8, 135 RESET_CFG = 0x180d8,
136 136
137 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ, 137 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
138 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ, 138 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
139 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */ 139 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
140 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ, 140 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
141 141
142 MV_MAX_Q_DEPTH = 32, 142 MV_MAX_Q_DEPTH = 32,
143 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1, 143 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
144 144
145 /* CRQB needs alignment on a 1KB boundary. Size == 1KB 145 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
146 * CRPB needs alignment on a 256B boundary. Size == 256B 146 * CRPB needs alignment on a 256B boundary. Size == 256B
147 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B 147 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
148 */ 148 */
149 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH), 149 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
150 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH), 150 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
151 MV_MAX_SG_CT = 256, 151 MV_MAX_SG_CT = 256,
152 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT), 152 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
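	/* [Editor's note] The queue sizes follow from the alignment
	 * comment above: CRQB 32 B x 32 deep = 1 KB (matching its 1 KB
	 * alignment), CRPB 8 B x 32 = 256 B, and one SG table is
	 * 16 B x 256 ePRDs = 4 KB. */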
153 153
154 /* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */ 154 /* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
155 MV_PORT_HC_SHIFT = 2, 155 MV_PORT_HC_SHIFT = 2,
156 MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT), /* 4 */ 156 MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT), /* 4 */
157 /* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */ 157 /* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
158 MV_PORT_MASK = (MV_PORTS_PER_HC - 1), /* 3 */ 158 MV_PORT_MASK = (MV_PORTS_PER_HC - 1), /* 3 */
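	/* [Editor's note] Worked example of the two formulas above, for
	 * port 6: hc = 6 >> MV_PORT_HC_SHIFT = 1 (second host
	 * controller), hardport = 6 & MV_PORT_MASK = 2 (third port
	 * within that HC). */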
159 159
160 /* Host Flags */ 160 /* Host Flags */
161 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ 161 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
162 162
163 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 163 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
164 ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING, 164 ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
165 165
166 MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI, 166 MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,
167 167
168 MV_GEN_II_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NCQ | 168 MV_GEN_II_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NCQ |
169 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA, 169 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,
170 170
171 MV_GEN_IIE_FLAGS = MV_GEN_II_FLAGS | ATA_FLAG_AN, 171 MV_GEN_IIE_FLAGS = MV_GEN_II_FLAGS | ATA_FLAG_AN,
172 172
173 CRQB_FLAG_READ = (1 << 0), 173 CRQB_FLAG_READ = (1 << 0),
174 CRQB_TAG_SHIFT = 1, 174 CRQB_TAG_SHIFT = 1,
175 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */ 175 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
176 CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */ 176 CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
177 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */ 177 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
178 CRQB_CMD_ADDR_SHIFT = 8, 178 CRQB_CMD_ADDR_SHIFT = 8,
179 CRQB_CMD_CS = (0x2 << 11), 179 CRQB_CMD_CS = (0x2 << 11),
180 CRQB_CMD_LAST = (1 << 15), 180 CRQB_CMD_LAST = (1 << 15),
181 181
182 CRPB_FLAG_STATUS_SHIFT = 8, 182 CRPB_FLAG_STATUS_SHIFT = 8,
183 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */ 183 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
184 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */ 184 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
185 185
186 EPRD_FLAG_END_OF_TBL = (1 << 31), 186 EPRD_FLAG_END_OF_TBL = (1 << 31),
187 187
188 /* PCI interface registers */ 188 /* PCI interface registers */
189 189
190 MV_PCI_COMMAND = 0xc00, 190 MV_PCI_COMMAND = 0xc00,
191 MV_PCI_COMMAND_MWRCOM = (1 << 4), /* PCI Master Write Combining */ 191 MV_PCI_COMMAND_MWRCOM = (1 << 4), /* PCI Master Write Combining */
192 MV_PCI_COMMAND_MRDTRIG = (1 << 7), /* PCI Master Read Trigger */ 192 MV_PCI_COMMAND_MRDTRIG = (1 << 7), /* PCI Master Read Trigger */
193 193
194 PCI_MAIN_CMD_STS = 0xd30, 194 PCI_MAIN_CMD_STS = 0xd30,
195 STOP_PCI_MASTER = (1 << 2), 195 STOP_PCI_MASTER = (1 << 2),
196 PCI_MASTER_EMPTY = (1 << 3), 196 PCI_MASTER_EMPTY = (1 << 3),
197 GLOB_SFT_RST = (1 << 4), 197 GLOB_SFT_RST = (1 << 4),
198 198
199 MV_PCI_MODE = 0xd00, 199 MV_PCI_MODE = 0xd00,
200 MV_PCI_MODE_MASK = 0x30, 200 MV_PCI_MODE_MASK = 0x30,
201 201
202 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c, 202 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
203 MV_PCI_DISC_TIMER = 0xd04, 203 MV_PCI_DISC_TIMER = 0xd04,
204 MV_PCI_MSI_TRIGGER = 0xc38, 204 MV_PCI_MSI_TRIGGER = 0xc38,
205 MV_PCI_SERR_MASK = 0xc28, 205 MV_PCI_SERR_MASK = 0xc28,
206 MV_PCI_XBAR_TMOUT = 0x1d04, 206 MV_PCI_XBAR_TMOUT = 0x1d04,
207 MV_PCI_ERR_LOW_ADDRESS = 0x1d40, 207 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
208 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44, 208 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
209 MV_PCI_ERR_ATTRIBUTE = 0x1d48, 209 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
210 MV_PCI_ERR_COMMAND = 0x1d50, 210 MV_PCI_ERR_COMMAND = 0x1d50,
211 211
212 PCI_IRQ_CAUSE = 0x1d58, 212 PCI_IRQ_CAUSE = 0x1d58,
213 PCI_IRQ_MASK = 0x1d5c, 213 PCI_IRQ_MASK = 0x1d5c,
214 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */ 214 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
215 215
216 PCIE_IRQ_CAUSE = 0x1900, 216 PCIE_IRQ_CAUSE = 0x1900,
217 PCIE_IRQ_MASK = 0x1910, 217 PCIE_IRQ_MASK = 0x1910,
218 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */ 218 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
219 219
220 /* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */ 220 /* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
221 PCI_HC_MAIN_IRQ_CAUSE = 0x1d60, 221 PCI_HC_MAIN_IRQ_CAUSE = 0x1d60,
222 PCI_HC_MAIN_IRQ_MASK = 0x1d64, 222 PCI_HC_MAIN_IRQ_MASK = 0x1d64,
223 SOC_HC_MAIN_IRQ_CAUSE = 0x20020, 223 SOC_HC_MAIN_IRQ_CAUSE = 0x20020,
224 SOC_HC_MAIN_IRQ_MASK = 0x20024, 224 SOC_HC_MAIN_IRQ_MASK = 0x20024,
225 ERR_IRQ = (1 << 0), /* shift by (2 * port #) */ 225 ERR_IRQ = (1 << 0), /* shift by (2 * port #) */
226 DONE_IRQ = (1 << 1), /* shift by (2 * port #) */ 226 DONE_IRQ = (1 << 1), /* shift by (2 * port #) */
227 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */ 227 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
228 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */ 228 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
229 DONE_IRQ_0_3 = 0x000000aa, /* DONE_IRQ ports 0,1,2,3 */ 229 DONE_IRQ_0_3 = 0x000000aa, /* DONE_IRQ ports 0,1,2,3 */
230 DONE_IRQ_4_7 = (DONE_IRQ_0_3 << HC_SHIFT), /* 4,5,6,7 */ 230 DONE_IRQ_4_7 = (DONE_IRQ_0_3 << HC_SHIFT), /* 4,5,6,7 */
231 PCI_ERR = (1 << 18), 231 PCI_ERR = (1 << 18),
232 TRAN_COAL_LO_DONE = (1 << 19), /* transaction coalescing */ 232 TRAN_COAL_LO_DONE = (1 << 19), /* transaction coalescing */
233 TRAN_COAL_HI_DONE = (1 << 20), /* transaction coalescing */ 233 TRAN_COAL_HI_DONE = (1 << 20), /* transaction coalescing */
234 PORTS_0_3_COAL_DONE = (1 << 8), /* HC0 IRQ coalescing */ 234 PORTS_0_3_COAL_DONE = (1 << 8), /* HC0 IRQ coalescing */
235 PORTS_4_7_COAL_DONE = (1 << 17), /* HC1 IRQ coalescing */ 235 PORTS_4_7_COAL_DONE = (1 << 17), /* HC1 IRQ coalescing */
236 ALL_PORTS_COAL_DONE = (1 << 21), /* GEN_II(E) IRQ coalescing */ 236 ALL_PORTS_COAL_DONE = (1 << 21), /* GEN_II(E) IRQ coalescing */
237 GPIO_INT = (1 << 22), 237 GPIO_INT = (1 << 22),
238 SELF_INT = (1 << 23), 238 SELF_INT = (1 << 23),
239 TWSI_INT = (1 << 24), 239 TWSI_INT = (1 << 24),
240 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */ 240 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
241 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */ 241 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
242 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */ 242 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
243 243
244 /* SATAHC registers */ 244 /* SATAHC registers */
245 HC_CFG = 0x00, 245 HC_CFG = 0x00,
246 246
247 HC_IRQ_CAUSE = 0x14, 247 HC_IRQ_CAUSE = 0x14,
248 DMA_IRQ = (1 << 0), /* shift by port # */ 248 DMA_IRQ = (1 << 0), /* shift by port # */
249 HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */ 249 HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */
250 DEV_IRQ = (1 << 8), /* shift by port # */ 250 DEV_IRQ = (1 << 8), /* shift by port # */
251 251
252 /* 252 /*
253 * Per-HC (Host-Controller) interrupt coalescing feature. 253 * Per-HC (Host-Controller) interrupt coalescing feature.
254 * This is present on all chip generations. 254 * This is present on all chip generations.
255 * 255 *
256 * Coalescing defers the interrupt until either the IO_THRESHOLD 256 * Coalescing defers the interrupt until either the IO_THRESHOLD
257 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met. 257 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
258 */ 258 */
259 HC_IRQ_COAL_IO_THRESHOLD = 0x000c, 259 HC_IRQ_COAL_IO_THRESHOLD = 0x000c,
260 HC_IRQ_COAL_TIME_THRESHOLD = 0x0010, 260 HC_IRQ_COAL_TIME_THRESHOLD = 0x0010,
261 261
262 SOC_LED_CTRL = 0x2c, 262 SOC_LED_CTRL = 0x2c,
263 SOC_LED_CTRL_BLINK = (1 << 0), /* Active LED blink */ 263 SOC_LED_CTRL_BLINK = (1 << 0), /* Active LED blink */
264 SOC_LED_CTRL_ACT_PRESENCE = (1 << 2), /* Multiplex dev presence */ 264 SOC_LED_CTRL_ACT_PRESENCE = (1 << 2), /* Multiplex dev presence */
265 /* with dev activity LED */ 265 /* with dev activity LED */
266 266
267 /* Shadow block registers */ 267 /* Shadow block registers */
268 SHD_BLK = 0x100, 268 SHD_BLK = 0x100,
269 SHD_CTL_AST = 0x20, /* ofs from SHD_BLK */ 269 SHD_CTL_AST = 0x20, /* ofs from SHD_BLK */
270 270
271 /* SATA registers */ 271 /* SATA registers */
272 SATA_STATUS = 0x300, /* ctrl, err regs follow status */ 272 SATA_STATUS = 0x300, /* ctrl, err regs follow status */
273 SATA_ACTIVE = 0x350, 273 SATA_ACTIVE = 0x350,
274 FIS_IRQ_CAUSE = 0x364, 274 FIS_IRQ_CAUSE = 0x364,
275 FIS_IRQ_CAUSE_AN = (1 << 9), /* async notification */ 275 FIS_IRQ_CAUSE_AN = (1 << 9), /* async notification */
276 276
277 LTMODE = 0x30c, /* requires read-after-write */ 277 LTMODE = 0x30c, /* requires read-after-write */
278 LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */ 278 LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
279 279
280 PHY_MODE2 = 0x330, 280 PHY_MODE2 = 0x330,
281 PHY_MODE3 = 0x310, 281 PHY_MODE3 = 0x310,
282 282
283 PHY_MODE4 = 0x314, /* requires read-after-write */ 283 PHY_MODE4 = 0x314, /* requires read-after-write */
284 PHY_MODE4_CFG_MASK = 0x00000003, /* phy internal config field */ 284 PHY_MODE4_CFG_MASK = 0x00000003, /* phy internal config field */
285 PHY_MODE4_CFG_VALUE = 0x00000001, /* phy internal config field */ 285 PHY_MODE4_CFG_VALUE = 0x00000001, /* phy internal config field */
286 PHY_MODE4_RSVD_ZEROS = 0x5de3fffa, /* Gen2e always write zeros */ 286 PHY_MODE4_RSVD_ZEROS = 0x5de3fffa, /* Gen2e always write zeros */
287 PHY_MODE4_RSVD_ONES = 0x00000005, /* Gen2e always write ones */ 287 PHY_MODE4_RSVD_ONES = 0x00000005, /* Gen2e always write ones */
288 288
289 SATA_IFCTL = 0x344, 289 SATA_IFCTL = 0x344,
290 SATA_TESTCTL = 0x348, 290 SATA_TESTCTL = 0x348,
291 SATA_IFSTAT = 0x34c, 291 SATA_IFSTAT = 0x34c,
292 VENDOR_UNIQUE_FIS = 0x35c, 292 VENDOR_UNIQUE_FIS = 0x35c,
293 293
294 FISCFG = 0x360, 294 FISCFG = 0x360,
295 FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */ 295 FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */
296 FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */ 296 FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
297 297
298 PHY_MODE9_GEN2 = 0x398, 298 PHY_MODE9_GEN2 = 0x398,
299 PHY_MODE9_GEN1 = 0x39c, 299 PHY_MODE9_GEN1 = 0x39c,
300 PHYCFG_OFS = 0x3a0, /* only in 65n devices */ 300 PHYCFG_OFS = 0x3a0, /* only in 65n devices */
301 301
302 MV5_PHY_MODE = 0x74, 302 MV5_PHY_MODE = 0x74,
303 MV5_LTMODE = 0x30, 303 MV5_LTMODE = 0x30,
304 MV5_PHY_CTL = 0x0C, 304 MV5_PHY_CTL = 0x0C,
305 SATA_IFCFG = 0x050, 305 SATA_IFCFG = 0x050,
306 306
307 MV_M2_PREAMP_MASK = 0x7e0, 307 MV_M2_PREAMP_MASK = 0x7e0,
308 308
309 /* Port registers */ 309 /* Port registers */
310 EDMA_CFG = 0, 310 EDMA_CFG = 0,
311 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */ 311 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
312 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */ 312 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
313 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */ 313 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
314 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */ 314 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
315 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */ 315 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
316 EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */ 316 EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
317 EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */ 317 EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */
318 318
319 EDMA_ERR_IRQ_CAUSE = 0x8, 319 EDMA_ERR_IRQ_CAUSE = 0x8,
320 EDMA_ERR_IRQ_MASK = 0xc, 320 EDMA_ERR_IRQ_MASK = 0xc,
321 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */ 321 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
322 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */ 322 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
323 EDMA_ERR_DEV = (1 << 2), /* device error */ 323 EDMA_ERR_DEV = (1 << 2), /* device error */
324 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */ 324 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
325 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */ 325 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
326 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */ 326 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
327 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */ 327 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
328 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */ 328 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
329 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */ 329 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
330 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transport layer irq */ 330 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transport layer irq */
331 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */ 331 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
332 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */ 332 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
333 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */ 333 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
334 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */ 334 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
335 335
336 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */ 336 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
337 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */ 337 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
338 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */ 338 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
339 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */ 339 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
340 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */ 340 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
341 341
342 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */ 342 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
343 343
344 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */ 344 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
345 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */ 345 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
346 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */ 346 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
347 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */ 347 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
348 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */ 348 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
349 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */ 349 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
350 350
351 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */ 351 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
352 352
353 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */ 353 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
354 EDMA_ERR_OVERRUN_5 = (1 << 5), 354 EDMA_ERR_OVERRUN_5 = (1 << 5),
355 EDMA_ERR_UNDERRUN_5 = (1 << 6), 355 EDMA_ERR_UNDERRUN_5 = (1 << 6),
356 356
357 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 | 357 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
358 EDMA_ERR_LNK_CTRL_RX_1 | 358 EDMA_ERR_LNK_CTRL_RX_1 |
359 EDMA_ERR_LNK_CTRL_RX_3 | 359 EDMA_ERR_LNK_CTRL_RX_3 |
360 EDMA_ERR_LNK_CTRL_TX, 360 EDMA_ERR_LNK_CTRL_TX,
361 361
362 EDMA_EH_FREEZE = EDMA_ERR_D_PAR | 362 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
363 EDMA_ERR_PRD_PAR | 363 EDMA_ERR_PRD_PAR |
364 EDMA_ERR_DEV_DCON | 364 EDMA_ERR_DEV_DCON |
365 EDMA_ERR_DEV_CON | 365 EDMA_ERR_DEV_CON |
366 EDMA_ERR_SERR | 366 EDMA_ERR_SERR |
367 EDMA_ERR_SELF_DIS | 367 EDMA_ERR_SELF_DIS |
368 EDMA_ERR_CRQB_PAR | 368 EDMA_ERR_CRQB_PAR |
369 EDMA_ERR_CRPB_PAR | 369 EDMA_ERR_CRPB_PAR |
370 EDMA_ERR_INTRL_PAR | 370 EDMA_ERR_INTRL_PAR |
371 EDMA_ERR_IORDY | 371 EDMA_ERR_IORDY |
372 EDMA_ERR_LNK_CTRL_RX_2 | 372 EDMA_ERR_LNK_CTRL_RX_2 |
373 EDMA_ERR_LNK_DATA_RX | 373 EDMA_ERR_LNK_DATA_RX |
374 EDMA_ERR_LNK_DATA_TX | 374 EDMA_ERR_LNK_DATA_TX |
375 EDMA_ERR_TRANS_PROTO, 375 EDMA_ERR_TRANS_PROTO,
376 376
377 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR | 377 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
378 EDMA_ERR_PRD_PAR | 378 EDMA_ERR_PRD_PAR |
379 EDMA_ERR_DEV_DCON | 379 EDMA_ERR_DEV_DCON |
380 EDMA_ERR_DEV_CON | 380 EDMA_ERR_DEV_CON |
381 EDMA_ERR_OVERRUN_5 | 381 EDMA_ERR_OVERRUN_5 |
382 EDMA_ERR_UNDERRUN_5 | 382 EDMA_ERR_UNDERRUN_5 |
383 EDMA_ERR_SELF_DIS_5 | 383 EDMA_ERR_SELF_DIS_5 |
384 EDMA_ERR_CRQB_PAR | 384 EDMA_ERR_CRQB_PAR |
385 EDMA_ERR_CRPB_PAR | 385 EDMA_ERR_CRPB_PAR |
386 EDMA_ERR_INTRL_PAR | 386 EDMA_ERR_INTRL_PAR |
387 EDMA_ERR_IORDY, 387 EDMA_ERR_IORDY,
388 388
389 EDMA_REQ_Q_BASE_HI = 0x10, 389 EDMA_REQ_Q_BASE_HI = 0x10,
390 EDMA_REQ_Q_IN_PTR = 0x14, /* also contains BASE_LO */ 390 EDMA_REQ_Q_IN_PTR = 0x14, /* also contains BASE_LO */
391 391
392 EDMA_REQ_Q_OUT_PTR = 0x18, 392 EDMA_REQ_Q_OUT_PTR = 0x18,
393 EDMA_REQ_Q_PTR_SHIFT = 5, 393 EDMA_REQ_Q_PTR_SHIFT = 5,
394 394
395 EDMA_RSP_Q_BASE_HI = 0x1c, 395 EDMA_RSP_Q_BASE_HI = 0x1c,
396 EDMA_RSP_Q_IN_PTR = 0x20, 396 EDMA_RSP_Q_IN_PTR = 0x20,
397 EDMA_RSP_Q_OUT_PTR = 0x24, /* also contains BASE_LO */ 397 EDMA_RSP_Q_OUT_PTR = 0x24, /* also contains BASE_LO */
398 EDMA_RSP_Q_PTR_SHIFT = 3, 398 EDMA_RSP_Q_PTR_SHIFT = 3,
399 399
400 EDMA_CMD = 0x28, /* EDMA command register */ 400 EDMA_CMD = 0x28, /* EDMA command register */
401 EDMA_EN = (1 << 0), /* enable EDMA */ 401 EDMA_EN = (1 << 0), /* enable EDMA */
402 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */ 402 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
403 EDMA_RESET = (1 << 2), /* reset eng/trans/link/phy */ 403 EDMA_RESET = (1 << 2), /* reset eng/trans/link/phy */
404 404
405 EDMA_STATUS = 0x30, /* EDMA engine status */ 405 EDMA_STATUS = 0x30, /* EDMA engine status */
406 EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */ 406 EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */
407 EDMA_STATUS_IDLE = (1 << 7), /* GenIIe EDMA enabled/idle */ 407 EDMA_STATUS_IDLE = (1 << 7), /* GenIIe EDMA enabled/idle */
408 408
409 EDMA_IORDY_TMOUT = 0x34, 409 EDMA_IORDY_TMOUT = 0x34,
410 EDMA_ARB_CFG = 0x38, 410 EDMA_ARB_CFG = 0x38,
411 411
412 EDMA_HALTCOND = 0x60, /* GenIIe halt conditions */ 412 EDMA_HALTCOND = 0x60, /* GenIIe halt conditions */
413 EDMA_UNKNOWN_RSVD = 0x6C, /* GenIIe unknown/reserved */ 413 EDMA_UNKNOWN_RSVD = 0x6C, /* GenIIe unknown/reserved */
414 414
415 BMDMA_CMD = 0x224, /* bmdma command register */ 415 BMDMA_CMD = 0x224, /* bmdma command register */
416 BMDMA_STATUS = 0x228, /* bmdma status register */ 416 BMDMA_STATUS = 0x228, /* bmdma status register */
417 BMDMA_PRD_LOW = 0x22c, /* bmdma PRD addr 31:0 */ 417 BMDMA_PRD_LOW = 0x22c, /* bmdma PRD addr 31:0 */
418 BMDMA_PRD_HIGH = 0x230, /* bmdma PRD addr 63:32 */ 418 BMDMA_PRD_HIGH = 0x230, /* bmdma PRD addr 63:32 */
419 419
420 /* Host private flags (hp_flags) */ 420 /* Host private flags (hp_flags) */
421 MV_HP_FLAG_MSI = (1 << 0), 421 MV_HP_FLAG_MSI = (1 << 0),
422 MV_HP_ERRATA_50XXB0 = (1 << 1), 422 MV_HP_ERRATA_50XXB0 = (1 << 1),
423 MV_HP_ERRATA_50XXB2 = (1 << 2), 423 MV_HP_ERRATA_50XXB2 = (1 << 2),
424 MV_HP_ERRATA_60X1B2 = (1 << 3), 424 MV_HP_ERRATA_60X1B2 = (1 << 3),
425 MV_HP_ERRATA_60X1C0 = (1 << 4), 425 MV_HP_ERRATA_60X1C0 = (1 << 4),
426 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */ 426 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
427 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */ 427 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
428 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */ 428 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
429 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */ 429 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
430 MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */ 430 MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */
431 MV_HP_FLAG_SOC = (1 << 11), /* SystemOnChip, no PCI */ 431 MV_HP_FLAG_SOC = (1 << 11), /* SystemOnChip, no PCI */
432 MV_HP_QUIRK_LED_BLINK_EN = (1 << 12), /* is led blinking enabled? */ 432 MV_HP_QUIRK_LED_BLINK_EN = (1 << 12), /* is led blinking enabled? */
433 433
434 /* Port private flags (pp_flags) */ 434 /* Port private flags (pp_flags) */
435 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ 435 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
436 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */ 436 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
437 MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? */ 437 MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? */
438 MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */ 438 MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */
439 MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4), /* ignore initial ATA_DRDY */ 439 MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4), /* ignore initial ATA_DRDY */
440 }; 440 };
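/* [Editor's note] A minimal sketch of how the "all ports" coalescing
 * thresholds defined above might be programmed, assuming "mmio" is the
 * chip's register base. The clamping and usec-to-clock conversion
 * follow from COAL_CLOCKS_PER_USEC and the MAX_COAL_* limits; the
 * function itself is illustrative and not part of this driver. */
static void example_set_all_ports_coal(void __iomem *mmio,
				       unsigned int usecs,
				       unsigned int count)
{
	u32 clks = usecs * COAL_CLOCKS_PER_USEC;

	if (clks > MAX_COAL_TIME_THRESHOLD)
		clks = MAX_COAL_TIME_THRESHOLD;
	if (count > MAX_COAL_IO_COUNT)
		count = MAX_COAL_IO_COUNT;

	writel(clks, mmio + IRQ_COAL_TIME_THRESHOLD);
	writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
}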
441 441
442 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) 442 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
443 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II) 443 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
444 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE) 444 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
445 #define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE) 445 #define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
446 #define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC) 446 #define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)
447 447
448 #define WINDOW_CTRL(i) (0x20030 + ((i) << 4)) 448 #define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
449 #define WINDOW_BASE(i) (0x20034 + ((i) << 4)) 449 #define WINDOW_BASE(i) (0x20034 + ((i) << 4))
450 450
451 enum { 451 enum {
452 /* DMA boundary 0xffff is required by the s/g splitting 452 /* DMA boundary 0xffff is required by the s/g splitting
453 * we need on /length/ in mv_fill_sg(). 453 * we need on /length/ in mv_fill_sg().
454 */ 454 */
455 MV_DMA_BOUNDARY = 0xffffU, 455 MV_DMA_BOUNDARY = 0xffffU,
456 456
457 /* mask of register bits containing lower 32 bits 457 /* mask of register bits containing lower 32 bits
458 * of EDMA request queue DMA address 458 * of EDMA request queue DMA address
459 */ 459 */
460 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U, 460 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
461 461
462 /* ditto, for response queue */ 462 /* ditto, for response queue */
463 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U, 463 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
464 }; 464 };
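/* [Editor's note] The BASE_LO masks mirror the alignments noted
 * earlier: the CRQB queue is 1 KB aligned, so its low 10 address bits
 * are always zero (mask 0xfffffc00); the CRPB queue is 256 B aligned,
 * so its low 8 bits are zero (mask 0xffffff00). */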
465 465
466 enum chip_type { 466 enum chip_type {
467 chip_504x, 467 chip_504x,
468 chip_508x, 468 chip_508x,
469 chip_5080, 469 chip_5080,
470 chip_604x, 470 chip_604x,
471 chip_608x, 471 chip_608x,
472 chip_6042, 472 chip_6042,
473 chip_7042, 473 chip_7042,
474 chip_soc, 474 chip_soc,
475 }; 475 };
476 476
477 /* Command ReQuest Block: 32B */ 477 /* Command ReQuest Block: 32B */
478 struct mv_crqb { 478 struct mv_crqb {
479 __le32 sg_addr; 479 __le32 sg_addr;
480 __le32 sg_addr_hi; 480 __le32 sg_addr_hi;
481 __le16 ctrl_flags; 481 __le16 ctrl_flags;
482 __le16 ata_cmd[11]; 482 __le16 ata_cmd[11];
483 }; 483 };
484 484
485 struct mv_crqb_iie { 485 struct mv_crqb_iie {
486 __le32 addr; 486 __le32 addr;
487 __le32 addr_hi; 487 __le32 addr_hi;
488 __le32 flags; 488 __le32 flags;
489 __le32 len; 489 __le32 len;
490 __le32 ata_cmd[4]; 490 __le32 ata_cmd[4];
491 }; 491 };
492 492
493 /* Command ResPonse Block: 8B */ 493 /* Command ResPonse Block: 8B */
494 struct mv_crpb { 494 struct mv_crpb {
495 __le16 id; 495 __le16 id;
496 __le16 flags; 496 __le16 flags;
497 __le32 tmstmp; 497 __le32 tmstmp;
498 }; 498 };
499 499
500 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */ 500 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
501 struct mv_sg { 501 struct mv_sg {
502 __le32 addr; 502 __le32 addr;
503 __le32 flags_size; 503 __le32 flags_size;
504 __le32 addr_hi; 504 __le32 addr_hi;
505 __le32 reserved; 505 __le32 reserved;
506 }; 506 };
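/* [Editor's note] A hedged sketch of filling one ePRD entry; the field
 * usage is inferred from the structure above and the 0xffff DMA
 * boundary noted below (lengths are stored in 16 bits, so a full 64 KB
 * chunk is stored as 0). The helper name is illustrative, not from
 * this driver. */
static void example_fill_eprd(struct mv_sg *sg, dma_addr_t addr,
			      u32 len, int is_last)
{
	sg->addr = cpu_to_le32(addr & 0xffffffff);
	sg->addr_hi = cpu_to_le32((u64)addr >> 32);
	sg->flags_size = cpu_to_le32(len & 0xffff);
	if (is_last)
		sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}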
507 507
508 /* 508 /*
509 * We keep a local cache of a few frequently accessed port 509 * We keep a local cache of a few frequently accessed port
510 * registers here, to avoid having to read them (very slow) 510 * registers here, to avoid having to read them (very slow)
511 * when switching between EDMA and non-EDMA modes. 511 * when switching between EDMA and non-EDMA modes.
512 */ 512 */
513 struct mv_cached_regs { 513 struct mv_cached_regs {
514 u32 fiscfg; 514 u32 fiscfg;
515 u32 ltmode; 515 u32 ltmode;
516 u32 haltcond; 516 u32 haltcond;
517 u32 unknown_rsvd; 517 u32 unknown_rsvd;
518 }; 518 };
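/* [Editor's note] A minimal write-through sketch for the cache above,
 * assuming a helper of this shape: the (slow) register is only touched
 * when the cached value actually changes. LTMODE and PHY_MODE4 are
 * documented earlier as requiring a read-after-write, which a real
 * helper would also have to honour. */
static void example_write_cached_reg(void __iomem *addr, u32 *old,
				     u32 new_val)
{
	if (new_val != *old) {
		*old = new_val;
		writel(new_val, addr);
	}
}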
519 519
520 struct mv_port_priv { 520 struct mv_port_priv {
521 struct mv_crqb *crqb; 521 struct mv_crqb *crqb;
522 dma_addr_t crqb_dma; 522 dma_addr_t crqb_dma;
523 struct mv_crpb *crpb; 523 struct mv_crpb *crpb;
524 dma_addr_t crpb_dma; 524 dma_addr_t crpb_dma;
525 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH]; 525 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
526 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH]; 526 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
527 527
528 unsigned int req_idx; 528 unsigned int req_idx;
529 unsigned int resp_idx; 529 unsigned int resp_idx;
530 530
531 u32 pp_flags; 531 u32 pp_flags;
532 struct mv_cached_regs cached; 532 struct mv_cached_regs cached;
533 unsigned int delayed_eh_pmp_map; 533 unsigned int delayed_eh_pmp_map;
534 }; 534 };
535 535
536 struct mv_port_signal { 536 struct mv_port_signal {
537 u32 amps; 537 u32 amps;
538 u32 pre; 538 u32 pre;
539 }; 539 };
540 540
541 struct mv_host_priv { 541 struct mv_host_priv {
542 u32 hp_flags; 542 u32 hp_flags;
543 unsigned int board_idx; 543 unsigned int board_idx;
544 u32 main_irq_mask; 544 u32 main_irq_mask;
545 struct mv_port_signal signal[8]; 545 struct mv_port_signal signal[8];
546 const struct mv_hw_ops *ops; 546 const struct mv_hw_ops *ops;
547 int n_ports; 547 int n_ports;
548 void __iomem *base; 548 void __iomem *base;
549 void __iomem *main_irq_cause_addr; 549 void __iomem *main_irq_cause_addr;
550 void __iomem *main_irq_mask_addr; 550 void __iomem *main_irq_mask_addr;
551 u32 irq_cause_offset; 551 u32 irq_cause_offset;
552 u32 irq_mask_offset; 552 u32 irq_mask_offset;
553 u32 unmask_all_irqs; 553 u32 unmask_all_irqs;
554 554
555 #if defined(CONFIG_HAVE_CLK) 555 #if defined(CONFIG_HAVE_CLK)
556 struct clk *clk; 556 struct clk *clk;
557 #endif 557 #endif
558 /* 558 /*
559 * These consistent DMA memory pools give us guaranteed 559 * These consistent DMA memory pools give us guaranteed
560 * alignment for hardware-accessed data structures, 560 * alignment for hardware-accessed data structures,
561 * and less memory waste in accomplishing the alignment. 561 * and less memory waste in accomplishing the alignment.
562 */ 562 */
563 struct dma_pool *crqb_pool; 563 struct dma_pool *crqb_pool;
564 struct dma_pool *crpb_pool; 564 struct dma_pool *crpb_pool;
565 struct dma_pool *sg_tbl_pool; 565 struct dma_pool *sg_tbl_pool;
566 }; 566 };
567 567
568 struct mv_hw_ops { 568 struct mv_hw_ops {
569 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio, 569 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
570 unsigned int port); 570 unsigned int port);
571 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio); 571 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
572 void (*read_preamp)(struct mv_host_priv *hpriv, int idx, 572 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
573 void __iomem *mmio); 573 void __iomem *mmio);
574 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio, 574 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
575 unsigned int n_hc); 575 unsigned int n_hc);
576 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio); 576 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
577 void (*reset_bus)(struct ata_host *host, void __iomem *mmio); 577 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
578 }; 578 };
579 579
580 static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val); 580 static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
581 static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val); 581 static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
582 static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val); 582 static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
583 static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val); 583 static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
584 static int mv_port_start(struct ata_port *ap); 584 static int mv_port_start(struct ata_port *ap);
585 static void mv_port_stop(struct ata_port *ap); 585 static void mv_port_stop(struct ata_port *ap);
586 static int mv_qc_defer(struct ata_queued_cmd *qc); 586 static int mv_qc_defer(struct ata_queued_cmd *qc);
587 static void mv_qc_prep(struct ata_queued_cmd *qc); 587 static void mv_qc_prep(struct ata_queued_cmd *qc);
588 static void mv_qc_prep_iie(struct ata_queued_cmd *qc); 588 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
589 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); 589 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
590 static int mv_hardreset(struct ata_link *link, unsigned int *class, 590 static int mv_hardreset(struct ata_link *link, unsigned int *class,
591 unsigned long deadline); 591 unsigned long deadline);
592 static void mv_eh_freeze(struct ata_port *ap); 592 static void mv_eh_freeze(struct ata_port *ap);
593 static void mv_eh_thaw(struct ata_port *ap); 593 static void mv_eh_thaw(struct ata_port *ap);
594 static void mv6_dev_config(struct ata_device *dev); 594 static void mv6_dev_config(struct ata_device *dev);
595 595
596 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 596 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
597 unsigned int port); 597 unsigned int port);
598 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio); 598 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
599 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, 599 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
600 void __iomem *mmio); 600 void __iomem *mmio);
601 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 601 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
602 unsigned int n_hc); 602 unsigned int n_hc);
603 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); 603 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
604 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio); 604 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
605 605
606 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 606 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
607 unsigned int port); 607 unsigned int port);
608 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio); 608 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
609 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, 609 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
610 void __iomem *mmio); 610 void __iomem *mmio);
611 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 611 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
612 unsigned int n_hc); 612 unsigned int n_hc);
613 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); 613 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
614 static void mv_soc_enable_leds(struct mv_host_priv *hpriv, 614 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
615 void __iomem *mmio); 615 void __iomem *mmio);
616 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx, 616 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
617 void __iomem *mmio); 617 void __iomem *mmio);
618 static int mv_soc_reset_hc(struct mv_host_priv *hpriv, 618 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
619 void __iomem *mmio, unsigned int n_hc); 619 void __iomem *mmio, unsigned int n_hc);
620 static void mv_soc_reset_flash(struct mv_host_priv *hpriv, 620 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
621 void __iomem *mmio); 621 void __iomem *mmio);
622 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio); 622 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
623 static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv, 623 static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
624 void __iomem *mmio, unsigned int port); 624 void __iomem *mmio, unsigned int port);
625 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio); 625 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
626 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, 626 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
627 unsigned int port_no); 627 unsigned int port_no);
628 static int mv_stop_edma(struct ata_port *ap); 628 static int mv_stop_edma(struct ata_port *ap);
629 static int mv_stop_edma_engine(void __iomem *port_mmio); 629 static int mv_stop_edma_engine(void __iomem *port_mmio);
630 static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma); 630 static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);
631 631
632 static void mv_pmp_select(struct ata_port *ap, int pmp); 632 static void mv_pmp_select(struct ata_port *ap, int pmp);
633 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class, 633 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
634 unsigned long deadline); 634 unsigned long deadline);
635 static int mv_softreset(struct ata_link *link, unsigned int *class, 635 static int mv_softreset(struct ata_link *link, unsigned int *class,
636 unsigned long deadline); 636 unsigned long deadline);
637 static void mv_pmp_error_handler(struct ata_port *ap); 637 static void mv_pmp_error_handler(struct ata_port *ap);
638 static void mv_process_crpb_entries(struct ata_port *ap, 638 static void mv_process_crpb_entries(struct ata_port *ap,
639 struct mv_port_priv *pp); 639 struct mv_port_priv *pp);
640 640
641 static void mv_sff_irq_clear(struct ata_port *ap); 641 static void mv_sff_irq_clear(struct ata_port *ap);
642 static int mv_check_atapi_dma(struct ata_queued_cmd *qc); 642 static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
643 static void mv_bmdma_setup(struct ata_queued_cmd *qc); 643 static void mv_bmdma_setup(struct ata_queued_cmd *qc);
644 static void mv_bmdma_start(struct ata_queued_cmd *qc); 644 static void mv_bmdma_start(struct ata_queued_cmd *qc);
645 static void mv_bmdma_stop(struct ata_queued_cmd *qc); 645 static void mv_bmdma_stop(struct ata_queued_cmd *qc);
646 static u8 mv_bmdma_status(struct ata_port *ap); 646 static u8 mv_bmdma_status(struct ata_port *ap);
647 static u8 mv_sff_check_status(struct ata_port *ap); 647 static u8 mv_sff_check_status(struct ata_port *ap);
648 648
649 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below 649 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
650 * because we have to allow room for worst case splitting of 650 * because we have to allow room for worst case splitting of
651 * PRDs for 64K boundaries in mv_fill_sg(). 651 * PRDs for 64K boundaries in mv_fill_sg().
652 */ 652 */
653 static struct scsi_host_template mv5_sht = { 653 static struct scsi_host_template mv5_sht = {
654 ATA_BASE_SHT(DRV_NAME), 654 ATA_BASE_SHT(DRV_NAME),
655 .sg_tablesize = MV_MAX_SG_CT / 2, 655 .sg_tablesize = MV_MAX_SG_CT / 2,
656 .dma_boundary = MV_DMA_BOUNDARY, 656 .dma_boundary = MV_DMA_BOUNDARY,
657 }; 657 };
658 658
659 static struct scsi_host_template mv6_sht = { 659 static struct scsi_host_template mv6_sht = {
660 ATA_NCQ_SHT(DRV_NAME), 660 ATA_NCQ_SHT(DRV_NAME),
661 .can_queue = MV_MAX_Q_DEPTH - 1, 661 .can_queue = MV_MAX_Q_DEPTH - 1,
662 .sg_tablesize = MV_MAX_SG_CT / 2, 662 .sg_tablesize = MV_MAX_SG_CT / 2,
663 .dma_boundary = MV_DMA_BOUNDARY, 663 .dma_boundary = MV_DMA_BOUNDARY,
664 }; 664 };
665 665
666 static struct ata_port_operations mv5_ops = { 666 static struct ata_port_operations mv5_ops = {
667 .inherits = &ata_sff_port_ops, 667 .inherits = &ata_sff_port_ops,
668 668
669 .lost_interrupt = ATA_OP_NULL, 669 .lost_interrupt = ATA_OP_NULL,
670 670
671 .qc_defer = mv_qc_defer, 671 .qc_defer = mv_qc_defer,
672 .qc_prep = mv_qc_prep, 672 .qc_prep = mv_qc_prep,
673 .qc_issue = mv_qc_issue, 673 .qc_issue = mv_qc_issue,
674 674
675 .freeze = mv_eh_freeze, 675 .freeze = mv_eh_freeze,
676 .thaw = mv_eh_thaw, 676 .thaw = mv_eh_thaw,
677 .hardreset = mv_hardreset, 677 .hardreset = mv_hardreset,
678 678
679 .scr_read = mv5_scr_read, 679 .scr_read = mv5_scr_read,
680 .scr_write = mv5_scr_write, 680 .scr_write = mv5_scr_write,
681 681
682 .port_start = mv_port_start, 682 .port_start = mv_port_start,
683 .port_stop = mv_port_stop, 683 .port_stop = mv_port_stop,
684 }; 684 };
685 685
686 static struct ata_port_operations mv6_ops = { 686 static struct ata_port_operations mv6_ops = {
687 .inherits = &ata_bmdma_port_ops, 687 .inherits = &ata_bmdma_port_ops,
688 688
689 .lost_interrupt = ATA_OP_NULL, 689 .lost_interrupt = ATA_OP_NULL,
690 690
691 .qc_defer = mv_qc_defer, 691 .qc_defer = mv_qc_defer,
692 .qc_prep = mv_qc_prep, 692 .qc_prep = mv_qc_prep,
693 .qc_issue = mv_qc_issue, 693 .qc_issue = mv_qc_issue,
694 694
695 .dev_config = mv6_dev_config, 695 .dev_config = mv6_dev_config,
696 696
697 .freeze = mv_eh_freeze, 697 .freeze = mv_eh_freeze,
698 .thaw = mv_eh_thaw, 698 .thaw = mv_eh_thaw,
699 .hardreset = mv_hardreset, 699 .hardreset = mv_hardreset,
700 .softreset = mv_softreset, 700 .softreset = mv_softreset,
701 .pmp_hardreset = mv_pmp_hardreset, 701 .pmp_hardreset = mv_pmp_hardreset,
702 .pmp_softreset = mv_softreset, 702 .pmp_softreset = mv_softreset,
703 .error_handler = mv_pmp_error_handler, 703 .error_handler = mv_pmp_error_handler,
704 704
705 .scr_read = mv_scr_read, 705 .scr_read = mv_scr_read,
706 .scr_write = mv_scr_write, 706 .scr_write = mv_scr_write,
707 707
708 .sff_check_status = mv_sff_check_status, 708 .sff_check_status = mv_sff_check_status,
709 .sff_irq_clear = mv_sff_irq_clear, 709 .sff_irq_clear = mv_sff_irq_clear,
710 .check_atapi_dma = mv_check_atapi_dma, 710 .check_atapi_dma = mv_check_atapi_dma,
711 .bmdma_setup = mv_bmdma_setup, 711 .bmdma_setup = mv_bmdma_setup,
712 .bmdma_start = mv_bmdma_start, 712 .bmdma_start = mv_bmdma_start,
713 .bmdma_stop = mv_bmdma_stop, 713 .bmdma_stop = mv_bmdma_stop,
714 .bmdma_status = mv_bmdma_status, 714 .bmdma_status = mv_bmdma_status,
715 715
716 .port_start = mv_port_start, 716 .port_start = mv_port_start,
717 .port_stop = mv_port_stop, 717 .port_stop = mv_port_stop,
718 }; 718 };
719 719
720 static struct ata_port_operations mv_iie_ops = { 720 static struct ata_port_operations mv_iie_ops = {
721 .inherits = &mv6_ops, 721 .inherits = &mv6_ops,
722 .dev_config = ATA_OP_NULL, 722 .dev_config = ATA_OP_NULL,
723 .qc_prep = mv_qc_prep_iie, 723 .qc_prep = mv_qc_prep_iie,
724 }; 724 };
725 725
726 static const struct ata_port_info mv_port_info[] = { 726 static const struct ata_port_info mv_port_info[] = {
727 { /* chip_504x */ 727 { /* chip_504x */
728 .flags = MV_GEN_I_FLAGS, 728 .flags = MV_GEN_I_FLAGS,
729 .pio_mask = ATA_PIO4, 729 .pio_mask = ATA_PIO4,
730 .udma_mask = ATA_UDMA6, 730 .udma_mask = ATA_UDMA6,
731 .port_ops = &mv5_ops, 731 .port_ops = &mv5_ops,
732 }, 732 },
733 { /* chip_508x */ 733 { /* chip_508x */
734 .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC, 734 .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
735 .pio_mask = ATA_PIO4, 735 .pio_mask = ATA_PIO4,
736 .udma_mask = ATA_UDMA6, 736 .udma_mask = ATA_UDMA6,
737 .port_ops = &mv5_ops, 737 .port_ops = &mv5_ops,
738 }, 738 },
739 { /* chip_5080 */ 739 { /* chip_5080 */
740 .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC, 740 .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
741 .pio_mask = ATA_PIO4, 741 .pio_mask = ATA_PIO4,
742 .udma_mask = ATA_UDMA6, 742 .udma_mask = ATA_UDMA6,
743 .port_ops = &mv5_ops, 743 .port_ops = &mv5_ops,
744 }, 744 },
745 { /* chip_604x */ 745 { /* chip_604x */
746 .flags = MV_GEN_II_FLAGS, 746 .flags = MV_GEN_II_FLAGS,
747 .pio_mask = ATA_PIO4, 747 .pio_mask = ATA_PIO4,
748 .udma_mask = ATA_UDMA6, 748 .udma_mask = ATA_UDMA6,
749 .port_ops = &mv6_ops, 749 .port_ops = &mv6_ops,
750 }, 750 },
751 { /* chip_608x */ 751 { /* chip_608x */
752 .flags = MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC, 752 .flags = MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
753 .pio_mask = ATA_PIO4, 753 .pio_mask = ATA_PIO4,
754 .udma_mask = ATA_UDMA6, 754 .udma_mask = ATA_UDMA6,
755 .port_ops = &mv6_ops, 755 .port_ops = &mv6_ops,
756 }, 756 },
757 { /* chip_6042 */ 757 { /* chip_6042 */
758 .flags = MV_GEN_IIE_FLAGS, 758 .flags = MV_GEN_IIE_FLAGS,
759 .pio_mask = ATA_PIO4, 759 .pio_mask = ATA_PIO4,
760 .udma_mask = ATA_UDMA6, 760 .udma_mask = ATA_UDMA6,
761 .port_ops = &mv_iie_ops, 761 .port_ops = &mv_iie_ops,
762 }, 762 },
763 { /* chip_7042 */ 763 { /* chip_7042 */
764 .flags = MV_GEN_IIE_FLAGS, 764 .flags = MV_GEN_IIE_FLAGS,
765 .pio_mask = ATA_PIO4, 765 .pio_mask = ATA_PIO4,
766 .udma_mask = ATA_UDMA6, 766 .udma_mask = ATA_UDMA6,
767 .port_ops = &mv_iie_ops, 767 .port_ops = &mv_iie_ops,
768 }, 768 },
769 { /* chip_soc */ 769 { /* chip_soc */
770 .flags = MV_GEN_IIE_FLAGS, 770 .flags = MV_GEN_IIE_FLAGS,
771 .pio_mask = ATA_PIO4, 771 .pio_mask = ATA_PIO4,
772 .udma_mask = ATA_UDMA6, 772 .udma_mask = ATA_UDMA6,
773 .port_ops = &mv_iie_ops, 773 .port_ops = &mv_iie_ops,
774 }, 774 },
775 }; 775 };
776 776
777 static const struct pci_device_id mv_pci_tbl[] = { 777 static const struct pci_device_id mv_pci_tbl[] = {
778 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x }, 778 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
779 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x }, 779 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
780 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 }, 780 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
781 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x }, 781 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
782 /* RocketRAID 1720/174x have different identifiers */ 782 /* RocketRAID 1720/174x have different identifiers */
783 { PCI_VDEVICE(TTI, 0x1720), chip_6042 }, 783 { PCI_VDEVICE(TTI, 0x1720), chip_6042 },
784 { PCI_VDEVICE(TTI, 0x1740), chip_6042 }, 784 { PCI_VDEVICE(TTI, 0x1740), chip_6042 },
785 { PCI_VDEVICE(TTI, 0x1742), chip_6042 }, 785 { PCI_VDEVICE(TTI, 0x1742), chip_6042 },
786 786
787 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x }, 787 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
788 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x }, 788 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
789 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 }, 789 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
790 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x }, 790 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
791 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x }, 791 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
792 792
793 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x }, 793 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
794 794
795 /* Adaptec 1430SA */ 795 /* Adaptec 1430SA */
796 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 }, 796 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
797 797
798 /* Marvell 7042 support */ 798 /* Marvell 7042 support */
799 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 }, 799 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
800 800
801 /* Highpoint RocketRAID PCIe series */ 801 /* Highpoint RocketRAID PCIe series */
802 { PCI_VDEVICE(TTI, 0x2300), chip_7042 }, 802 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
803 { PCI_VDEVICE(TTI, 0x2310), chip_7042 }, 803 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
804 804
805 { } /* terminate list */ 805 { } /* terminate list */
806 }; 806 };
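
Note: the chip_xxx token in each entry above is carried in the PCI table's driver_data field and is used by the probe path as an index into the mv_port_info[] array, so each device ID selects one of the flag/port_ops bundles defined earlier.
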
807 807
808 static const struct mv_hw_ops mv5xxx_ops = { 808 static const struct mv_hw_ops mv5xxx_ops = {
809 .phy_errata = mv5_phy_errata, 809 .phy_errata = mv5_phy_errata,
810 .enable_leds = mv5_enable_leds, 810 .enable_leds = mv5_enable_leds,
811 .read_preamp = mv5_read_preamp, 811 .read_preamp = mv5_read_preamp,
812 .reset_hc = mv5_reset_hc, 812 .reset_hc = mv5_reset_hc,
813 .reset_flash = mv5_reset_flash, 813 .reset_flash = mv5_reset_flash,
814 .reset_bus = mv5_reset_bus, 814 .reset_bus = mv5_reset_bus,
815 }; 815 };
816 816
817 static const struct mv_hw_ops mv6xxx_ops = { 817 static const struct mv_hw_ops mv6xxx_ops = {
818 .phy_errata = mv6_phy_errata, 818 .phy_errata = mv6_phy_errata,
819 .enable_leds = mv6_enable_leds, 819 .enable_leds = mv6_enable_leds,
820 .read_preamp = mv6_read_preamp, 820 .read_preamp = mv6_read_preamp,
821 .reset_hc = mv6_reset_hc, 821 .reset_hc = mv6_reset_hc,
822 .reset_flash = mv6_reset_flash, 822 .reset_flash = mv6_reset_flash,
823 .reset_bus = mv_reset_pci_bus, 823 .reset_bus = mv_reset_pci_bus,
824 }; 824 };
825 825
826 static const struct mv_hw_ops mv_soc_ops = { 826 static const struct mv_hw_ops mv_soc_ops = {
827 .phy_errata = mv6_phy_errata, 827 .phy_errata = mv6_phy_errata,
828 .enable_leds = mv_soc_enable_leds, 828 .enable_leds = mv_soc_enable_leds,
829 .read_preamp = mv_soc_read_preamp, 829 .read_preamp = mv_soc_read_preamp,
830 .reset_hc = mv_soc_reset_hc, 830 .reset_hc = mv_soc_reset_hc,
831 .reset_flash = mv_soc_reset_flash, 831 .reset_flash = mv_soc_reset_flash,
832 .reset_bus = mv_soc_reset_bus, 832 .reset_bus = mv_soc_reset_bus,
833 }; 833 };
834 834
835 static const struct mv_hw_ops mv_soc_65n_ops = { 835 static const struct mv_hw_ops mv_soc_65n_ops = {
836 .phy_errata = mv_soc_65n_phy_errata, 836 .phy_errata = mv_soc_65n_phy_errata,
837 .enable_leds = mv_soc_enable_leds, 837 .enable_leds = mv_soc_enable_leds,
838 .reset_hc = mv_soc_reset_hc, 838 .reset_hc = mv_soc_reset_hc,
839 .reset_flash = mv_soc_reset_flash, 839 .reset_flash = mv_soc_reset_flash,
840 .reset_bus = mv_soc_reset_bus, 840 .reset_bus = mv_soc_reset_bus,
841 }; 841 };
842 842
843 /* 843 /*
844 * Functions 844 * Functions
845 */ 845 */
846 846
847 static inline void writelfl(unsigned long data, void __iomem *addr) 847 static inline void writelfl(unsigned long data, void __iomem *addr)
848 { 848 {
849 writel(data, addr); 849 writel(data, addr);
850 (void) readl(addr); /* flush to avoid PCI posted write */ 850 (void) readl(addr); /* flush to avoid PCI posted write */
851 } 851 }
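
The dummy readl() is the standard flush for a posted MMIO write: PCI bridges may buffer ("post") writes, but reads are never posted and may not pass earlier writes to the same device, so reading the address back guarantees the write has reached the hardware before writelfl() returns.
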
852 852
853 static inline unsigned int mv_hc_from_port(unsigned int port) 853 static inline unsigned int mv_hc_from_port(unsigned int port)
854 { 854 {
855 return port >> MV_PORT_HC_SHIFT; 855 return port >> MV_PORT_HC_SHIFT;
856 } 856 }
857 857
858 static inline unsigned int mv_hardport_from_port(unsigned int port) 858 static inline unsigned int mv_hardport_from_port(unsigned int port)
859 { 859 {
860 return port & MV_PORT_MASK; 860 return port & MV_PORT_MASK;
861 } 861 }
862 862
863 /* 863 /*
864 * Consolidate some rather tricky bit shift calculations. 864 * Consolidate some rather tricky bit shift calculations.
865 * This is hot-path stuff, so not a function. 865 * This is hot-path stuff, so not a function.
866 * Simple code, with two return values, so macro rather than inline. 866 * Simple code, with two return values, so macro rather than inline.
867 * 867 *
868 * port is the sole input, in range 0..7. 868 * port is the sole input, in range 0..7.
869 * shift is one output, for use with main_irq_cause / main_irq_mask registers. 869 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
870 * hardport is the other output, in range 0..3. 870 * hardport is the other output, in range 0..3.
871 * 871 *
872 * Note that port and hardport may be the same variable in some cases. 872 * Note that port and hardport may be the same variable in some cases.
873 */ 873 */
874 #define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport) \ 874 #define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport) \
875 { \ 875 { \
876 shift = mv_hc_from_port(port) * HC_SHIFT; \ 876 shift = mv_hc_from_port(port) * HC_SHIFT; \
877 hardport = mv_hardport_from_port(port); \ 877 hardport = mv_hardport_from_port(port); \
878 shift += hardport * 2; \ 878 shift += hardport * 2; \
879 } 879 }
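
To make the shift arithmetic concrete, here is a minimal stand-alone sketch of the same computation. MV_PORT_HC_SHIFT = 2 and MV_PORT_MASK = 3 follow from the documented 0..7 and 0..3 ranges; HC_SHIFT = 9 (a 9-bit slice of the main_irq registers per host controller) is an assumption here, taken from this driver's register layout.

#include <stdio.h>

/* Stand-alone model of MV_PORT_TO_SHIFT_AND_HARDPORT (illustrative).
 * HC_SHIFT = 9 is assumed from the main_irq bit layout. */
enum { MV_PORT_HC_SHIFT = 2, MV_PORT_MASK = 3, HC_SHIFT = 9 };

int main(void)
{
	unsigned int port, shift, hardport;

	for (port = 0; port < 8; port++) {
		shift = (port >> MV_PORT_HC_SHIFT) * HC_SHIFT; /* HC slice */
		hardport = port & MV_PORT_MASK;    /* port within its HC */
		shift += hardport * 2;  /* two IRQ bits (DONE, ERR) per port */
		printf("port %u -> hardport %u, irq shift %u\n",
		       port, hardport, shift);
	}
	return 0;
}

For example, port 5 lands on hardport 1 of HC1 with an IRQ shift of 11 under these assumptions.
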
880 880
881 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc) 881 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
882 { 882 {
883 return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ)); 883 return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
884 } 884 }
885 885
886 static inline void __iomem *mv_hc_base_from_port(void __iomem *base, 886 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
887 unsigned int port) 887 unsigned int port)
888 { 888 {
889 return mv_hc_base(base, mv_hc_from_port(port)); 889 return mv_hc_base(base, mv_hc_from_port(port));
890 } 890 }
891 891
892 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port) 892 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
893 { 893 {
894 return mv_hc_base_from_port(base, port) + 894 return mv_hc_base_from_port(base, port) +
895 MV_SATAHC_ARBTR_REG_SZ + 895 MV_SATAHC_ARBTR_REG_SZ +
896 (mv_hardport_from_port(port) * MV_PORT_REG_SZ); 896 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
897 } 897 }
898 898
899 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port) 899 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
900 { 900 {
901 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port); 901 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
902 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL; 902 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
903 903
904 return hc_mmio + ofs; 904 return hc_mmio + ofs;
905 } 905 }
906 906
907 static inline void __iomem *mv_host_base(struct ata_host *host) 907 static inline void __iomem *mv_host_base(struct ata_host *host)
908 { 908 {
909 struct mv_host_priv *hpriv = host->private_data; 909 struct mv_host_priv *hpriv = host->private_data;
910 return hpriv->base; 910 return hpriv->base;
911 } 911 }
912 912
913 static inline void __iomem *mv_ap_base(struct ata_port *ap) 913 static inline void __iomem *mv_ap_base(struct ata_port *ap)
914 { 914 {
915 return mv_port_base(mv_host_base(ap->host), ap->port_no); 915 return mv_port_base(mv_host_base(ap->host), ap->port_no);
916 } 916 }
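
Taken together, the helpers above encode the chip's MMIO layout: per-HC register blocks of MV_SATAHC_REG_SZ begin at SATAHC0_REG_BASE; within each HC block the arbiter registers (MV_SATAHC_ARBTR_REG_SZ) come first, followed by one MV_PORT_REG_SZ block per hard port. mv_ap_base() simply composes those three steps for a given ata_port.
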
917 917
918 static inline int mv_get_hc_count(unsigned long port_flags) 918 static inline int mv_get_hc_count(unsigned long port_flags)
919 { 919 {
920 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1); 920 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
921 } 921 }
922 922
923 /** 923 /**
924 * mv_save_cached_regs - (re-)initialize cached port registers 924 * mv_save_cached_regs - (re-)initialize cached port registers
925 * @ap: the port whose registers we are caching 925 * @ap: the port whose registers we are caching
926 * 926 *
927 * Initialize the local cache of port registers, 927 * Initialize the local cache of port registers,
928 * so that reading them over and over again can 928 * so that reading them over and over again can
929 * be avoided on the hotter paths of this driver. 929 * be avoided on the hotter paths of this driver.
930 * This saves a few microseconds each time we switch 930 * This saves a few microseconds each time we switch
931 * to/from EDMA mode to perform (e.g.) a drive cache flush. 931 * to/from EDMA mode to perform (e.g.) a drive cache flush.
932 */ 932 */
933 static void mv_save_cached_regs(struct ata_port *ap) 933 static void mv_save_cached_regs(struct ata_port *ap)
934 { 934 {
935 void __iomem *port_mmio = mv_ap_base(ap); 935 void __iomem *port_mmio = mv_ap_base(ap);
936 struct mv_port_priv *pp = ap->private_data; 936 struct mv_port_priv *pp = ap->private_data;
937 937
938 pp->cached.fiscfg = readl(port_mmio + FISCFG); 938 pp->cached.fiscfg = readl(port_mmio + FISCFG);
939 pp->cached.ltmode = readl(port_mmio + LTMODE); 939 pp->cached.ltmode = readl(port_mmio + LTMODE);
940 pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND); 940 pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
941 pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD); 941 pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
942 } 942 }
943 943
944 /** 944 /**
945 * mv_write_cached_reg - write to a cached port register 945 * mv_write_cached_reg - write to a cached port register
946 * @addr: hardware address of the register 946 * @addr: hardware address of the register
947 * @old: pointer to cached value of the register 947 * @old: pointer to cached value of the register
948 * @new: new value for the register 948 * @new: new value for the register
949 * 949 *
950 * Write a new value to a cached register, 950 * Write a new value to a cached register,
951 * but only if the value is different from before. 951 * but only if the value is different from before.
952 */ 952 */
953 static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new) 953 static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
954 { 954 {
955 if (new != *old) { 955 if (new != *old) {
956 unsigned long laddr; 956 unsigned long laddr;
957 *old = new; 957 *old = new;
958 /* 958 /*
959 * Workaround for 88SX60x1-B2 FEr SATA#13: 959 * Workaround for 88SX60x1-B2 FEr SATA#13:
960 * Read-after-write is needed to prevent generating 64-bit 960 * Read-after-write is needed to prevent generating 64-bit
961 * write cycles on the PCI bus for SATA interface registers 961 * write cycles on the PCI bus for SATA interface registers
962 * at offsets ending in 0x4 or 0xc. 962 * at offsets ending in 0x4 or 0xc.
963 * 963 *
964 * Looks like a lot of fuss, but it avoids an unnecessary 964 * Looks like a lot of fuss, but it avoids an unnecessary
965 * +1 usec read-after-write delay for unaffected registers. 965 * +1 usec read-after-write delay for unaffected registers.
966 */ 966 */
967 laddr = (long)addr & 0xffff; 967 laddr = (long)addr & 0xffff;
968 if (laddr >= 0x300 && laddr <= 0x33c) { 968 if (laddr >= 0x300 && laddr <= 0x33c) {
969 laddr &= 0x000f; 969 laddr &= 0x000f;
970 if (laddr == 0x4 || laddr == 0xc) { 970 if (laddr == 0x4 || laddr == 0xc) {
971 writelfl(new, addr); /* read after write */ 971 writelfl(new, addr); /* read after write */
972 return; 972 return;
973 } 973 }
974 } 974 }
975 writel(new, addr); /* unaffected by the errata */ 975 writel(new, addr); /* unaffected by the errata */
976 } 976 }
977 } 977 }
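
As a sanity check on the workaround above, this stand-alone sketch enumerates which SATA-interface register offsets take the flushed path; it mirrors the test in mv_write_cached_reg() exactly:

#include <stdio.h>

/* Which offsets fall under the 88SX60x1-B2 FEr SATA#13 workaround? */
int main(void)
{
	unsigned long laddr;

	for (laddr = 0x300; laddr <= 0x33c; laddr += 4) {
		unsigned long low = laddr & 0x000f;

		printf("0x%03lx: %s\n", laddr,
		       (low == 0x4 || low == 0xc) ?
		       "writelfl() (read-after-write)" : "plain writel()");
	}
	return 0;
}
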
978 978
979 static void mv_set_edma_ptrs(void __iomem *port_mmio, 979 static void mv_set_edma_ptrs(void __iomem *port_mmio,
980 struct mv_host_priv *hpriv, 980 struct mv_host_priv *hpriv,
981 struct mv_port_priv *pp) 981 struct mv_port_priv *pp)
982 { 982 {
983 u32 index; 983 u32 index;
984 984
985 /* 985 /*
986 * initialize request queue 986 * initialize request queue
987 */ 987 */
988 pp->req_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */ 988 pp->req_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
989 index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT; 989 index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
990 990
991 WARN_ON(pp->crqb_dma & 0x3ff); 991 WARN_ON(pp->crqb_dma & 0x3ff);
992 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI); 992 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
993 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index, 993 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
994 port_mmio + EDMA_REQ_Q_IN_PTR); 994 port_mmio + EDMA_REQ_Q_IN_PTR);
995 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR); 995 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);
996 996
997 /* 997 /*
998 * initialize response queue 998 * initialize response queue
999 */ 999 */
1000 pp->resp_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */ 1000 pp->resp_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
1001 index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT; 1001 index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
1002 1002
1003 WARN_ON(pp->crpb_dma & 0xff); 1003 WARN_ON(pp->crpb_dma & 0xff);
1004 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI); 1004 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
1005 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR); 1005 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
1006 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index, 1006 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
1007 port_mmio + EDMA_RSP_Q_OUT_PTR); 1007 port_mmio + EDMA_RSP_Q_OUT_PTR);
1008 } 1008 }
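
The double shift (... >> 16) >> 16 is the kernel's portable idiom for the upper 32 bits of a dma_addr_t: when dma_addr_t is a 32-bit type, shifting by the full width (>> 32) is undefined behavior in C, while two 16-bit shifts are well defined and simply yield zero. A minimal illustration, assuming a hypothetical 32-bit dma_addr_t:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t dma_addr_t;	/* hypothetical 32-bit platform */

int main(void)
{
	dma_addr_t crqb_dma = 0x12345400u;	/* example bus address */

	/* (crqb_dma >> 32) would be undefined on this 32-bit type. */
	uint32_t hi = (crqb_dma >> 16) >> 16;	/* well defined: 0 */
	uint32_t lo = (uint32_t)crqb_dma;	/* low half, unchanged */

	printf("hi=%08x lo=%08x\n", (unsigned)hi, (unsigned)lo);
	return 0;
}
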
1009 1009
1010 static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv) 1010 static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
1011 { 1011 {
1012 /* 1012 /*
1013 * When writing to the main_irq_mask in hardware, 1013 * When writing to the main_irq_mask in hardware,
1014 * we must ensure exclusivity between the interrupt coalescing bits 1014 * we must ensure exclusivity between the interrupt coalescing bits
1015 * and the corresponding individual port DONE_IRQ bits. 1015 * and the corresponding individual port DONE_IRQ bits.
1016 * 1016 *
1017 * Note that this register is really an "IRQ enable" register, 1017 * Note that this register is really an "IRQ enable" register,
1018 * not an "IRQ mask" register as Marvell's naming might suggest. 1018 * not an "IRQ mask" register as Marvell's naming might suggest.
1019 */ 1019 */
1020 if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE)) 1020 if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
1021 mask &= ~DONE_IRQ_0_3; 1021 mask &= ~DONE_IRQ_0_3;
1022 if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE)) 1022 if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
1023 mask &= ~DONE_IRQ_4_7; 1023 mask &= ~DONE_IRQ_4_7;
1024 writelfl(mask, hpriv->main_irq_mask_addr); 1024 writelfl(mask, hpriv->main_irq_mask_addr);
1025 } 1025 }
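
In other words, when a coalescing DONE bit is enabled for a group of ports, the per-port DONE bits for that same group are forced off, so each command completion raises exactly one of the two interrupt sources rather than both.
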
1026 1026
1027 static void mv_set_main_irq_mask(struct ata_host *host, 1027 static void mv_set_main_irq_mask(struct ata_host *host,
1028 u32 disable_bits, u32 enable_bits) 1028 u32 disable_bits, u32 enable_bits)
1029 { 1029 {
1030 struct mv_host_priv *hpriv = host->private_data; 1030 struct mv_host_priv *hpriv = host->private_data;
1031 u32 old_mask, new_mask; 1031 u32 old_mask, new_mask;
1032 1032
1033 old_mask = hpriv->main_irq_mask; 1033 old_mask = hpriv->main_irq_mask;
1034 new_mask = (old_mask & ~disable_bits) | enable_bits; 1034 new_mask = (old_mask & ~disable_bits) | enable_bits;
1035 if (new_mask != old_mask) { 1035 if (new_mask != old_mask) {
1036 hpriv->main_irq_mask = new_mask; 1036 hpriv->main_irq_mask = new_mask;
1037 mv_write_main_irq_mask(new_mask, hpriv); 1037 mv_write_main_irq_mask(new_mask, hpriv);
1038 } 1038 }
1039 } 1039 }
1040 1040
1041 static void mv_enable_port_irqs(struct ata_port *ap, 1041 static void mv_enable_port_irqs(struct ata_port *ap,
1042 unsigned int port_bits) 1042 unsigned int port_bits)
1043 { 1043 {
1044 unsigned int shift, hardport, port = ap->port_no; 1044 unsigned int shift, hardport, port = ap->port_no;
1045 u32 disable_bits, enable_bits; 1045 u32 disable_bits, enable_bits;
1046 1046
1047 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); 1047 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
1048 1048
1049 disable_bits = (DONE_IRQ | ERR_IRQ) << shift; 1049 disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
1050 enable_bits = port_bits << shift; 1050 enable_bits = port_bits << shift;
1051 mv_set_main_irq_mask(ap->host, disable_bits, enable_bits); 1051 mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
1052 } 1052 }
1053 1053
1054 static void mv_clear_and_enable_port_irqs(struct ata_port *ap, 1054 static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
1055 void __iomem *port_mmio, 1055 void __iomem *port_mmio,
1056 unsigned int port_irqs) 1056 unsigned int port_irqs)
1057 { 1057 {
1058 struct mv_host_priv *hpriv = ap->host->private_data; 1058 struct mv_host_priv *hpriv = ap->host->private_data;
1059 int hardport = mv_hardport_from_port(ap->port_no); 1059 int hardport = mv_hardport_from_port(ap->port_no);
1060 void __iomem *hc_mmio = mv_hc_base_from_port( 1060 void __iomem *hc_mmio = mv_hc_base_from_port(
1061 mv_host_base(ap->host), ap->port_no); 1061 mv_host_base(ap->host), ap->port_no);
1062 u32 hc_irq_cause; 1062 u32 hc_irq_cause;
1063 1063
1064 /* clear EDMA event indicators, if any */ 1064 /* clear EDMA event indicators, if any */
1065 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE); 1065 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
1066 1066
1067 /* clear pending irq events */ 1067 /* clear pending irq events */
1068 hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport); 1068 hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
1069 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE); 1069 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
1070 1070
1071 /* clear FIS IRQ Cause */ 1071 /* clear FIS IRQ Cause */
1072 if (IS_GEN_IIE(hpriv)) 1072 if (IS_GEN_IIE(hpriv))
1073 writelfl(0, port_mmio + FIS_IRQ_CAUSE); 1073 writelfl(0, port_mmio + FIS_IRQ_CAUSE);
1074 1074
1075 mv_enable_port_irqs(ap, port_irqs); 1075 mv_enable_port_irqs(ap, port_irqs);
1076 } 1076 }
1077 1077
1078 static void mv_set_irq_coalescing(struct ata_host *host, 1078 static void mv_set_irq_coalescing(struct ata_host *host,
1079 unsigned int count, unsigned int usecs) 1079 unsigned int count, unsigned int usecs)
1080 { 1080 {
1081 struct mv_host_priv *hpriv = host->private_data; 1081 struct mv_host_priv *hpriv = host->private_data;
1082 void __iomem *mmio = hpriv->base, *hc_mmio; 1082 void __iomem *mmio = hpriv->base, *hc_mmio;
1083 u32 coal_enable = 0; 1083 u32 coal_enable = 0;
1084 unsigned long flags; 1084 unsigned long flags;
1085 unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC; 1085 unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
1086 const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE | 1086 const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
1087 ALL_PORTS_COAL_DONE; 1087 ALL_PORTS_COAL_DONE;
1088 1088
1089 /* Disable IRQ coalescing if either threshold is zero */ 1089 /* Disable IRQ coalescing if either threshold is zero */
1090 if (!usecs || !count) { 1090 if (!usecs || !count) {
1091 clks = count = 0; 1091 clks = count = 0;
1092 } else { 1092 } else {
1093 /* Respect maximum limits of the hardware */ 1093 /* Respect maximum limits of the hardware */
1094 clks = usecs * COAL_CLOCKS_PER_USEC; 1094 clks = usecs * COAL_CLOCKS_PER_USEC;
1095 if (clks > MAX_COAL_TIME_THRESHOLD) 1095 if (clks > MAX_COAL_TIME_THRESHOLD)
1096 clks = MAX_COAL_TIME_THRESHOLD; 1096 clks = MAX_COAL_TIME_THRESHOLD;
1097 if (count > MAX_COAL_IO_COUNT) 1097 if (count > MAX_COAL_IO_COUNT)
1098 count = MAX_COAL_IO_COUNT; 1098 count = MAX_COAL_IO_COUNT;
1099 } 1099 }
1100 1100
1101 spin_lock_irqsave(&host->lock, flags); 1101 spin_lock_irqsave(&host->lock, flags);
1102 mv_set_main_irq_mask(host, coal_disable, 0); 1102 mv_set_main_irq_mask(host, coal_disable, 0);
1103 1103
1104 if (is_dual_hc && !IS_GEN_I(hpriv)) { 1104 if (is_dual_hc && !IS_GEN_I(hpriv)) {
1105 /* 1105 /*
1106 * GEN_II/GEN_IIE with dual host controllers: 1106 * GEN_II/GEN_IIE with dual host controllers:
1107 * one set of global thresholds for the entire chip. 1107 * one set of global thresholds for the entire chip.
1108 */ 1108 */
1109 writel(clks, mmio + IRQ_COAL_TIME_THRESHOLD); 1109 writel(clks, mmio + IRQ_COAL_TIME_THRESHOLD);
1110 writel(count, mmio + IRQ_COAL_IO_THRESHOLD); 1110 writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
1111 /* clear leftover coal IRQ bit */ 1111 /* clear leftover coal IRQ bit */
1112 writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE); 1112 writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
1113 if (count) 1113 if (count)
1114 coal_enable = ALL_PORTS_COAL_DONE; 1114 coal_enable = ALL_PORTS_COAL_DONE;
1115 clks = count = 0; /* force clearing of regular regs below */ 1115 clks = count = 0; /* force clearing of regular regs below */
1116 } 1116 }
1117 1117
1118 /* 1118 /*
1119 * All chips: independent thresholds for each HC on the chip. 1119 * All chips: independent thresholds for each HC on the chip.
1120 */ 1120 */
1121 hc_mmio = mv_hc_base_from_port(mmio, 0); 1121 hc_mmio = mv_hc_base_from_port(mmio, 0);
1122 writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD); 1122 writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
1123 writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD); 1123 writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
1124 writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE); 1124 writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
1125 if (count) 1125 if (count)
1126 coal_enable |= PORTS_0_3_COAL_DONE; 1126 coal_enable |= PORTS_0_3_COAL_DONE;
1127 if (is_dual_hc) { 1127 if (is_dual_hc) {
1128 hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC); 1128 hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
1129 writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD); 1129 writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
1130 writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD); 1130 writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
1131 writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE); 1131 writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
1132 if (count) 1132 if (count)
1133 coal_enable |= PORTS_4_7_COAL_DONE; 1133 coal_enable |= PORTS_4_7_COAL_DONE;
1134 } 1134 }
1135 1135
1136 mv_set_main_irq_mask(host, 0, coal_enable); 1136 mv_set_main_irq_mask(host, 0, coal_enable);
1137 spin_unlock_irqrestore(&host->lock, flags); 1137 spin_unlock_irqrestore(&host->lock, flags);
1138 } 1138 }
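
A usage sketch, assuming a hypothetical tuning hook (mv_tune_irq_coalescing below is illustrative and not part of this driver; mv_set_irq_coalescing() is the function above):

/* Hypothetical caller: coalesce interrupts to 4 IOs or 100 usecs,
 * whichever comes first; zero in either argument disables it. */
static void mv_tune_irq_coalescing(struct ata_host *host, int enable)
{
	if (enable)
		mv_set_irq_coalescing(host, 4, 100);
	else
		mv_set_irq_coalescing(host, 0, 0);
}
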
1139 1139
1140 /** 1140 /**
1141 * mv_start_edma - Enable eDMA engine 1141 * mv_start_edma - Enable eDMA engine
1142 * @port_mmio: port base address 1142 * @port_mmio: port base address
1143 * @pp: port private data 1143 * @pp: port private data
1144 * 1144 *
1145 * If eDMA is already running, but in the wrong NCQ mode, stop 1145 * If eDMA is already running, but in the wrong NCQ mode, stop
1146 * it first; then configure and (re)enable the engine as needed. 1146 * it first; then configure and (re)enable the engine as needed.
1147 * 1147 *
1148 * LOCKING: 1148 * LOCKING:
1149 * Inherited from caller. 1149 * Inherited from caller.
1150 */ 1150 */
1151 static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio, 1151 static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
1152 struct mv_port_priv *pp, u8 protocol) 1152 struct mv_port_priv *pp, u8 protocol)
1153 { 1153 {
1154 int want_ncq = (protocol == ATA_PROT_NCQ); 1154 int want_ncq = (protocol == ATA_PROT_NCQ);
1155 1155
1156 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { 1156 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1157 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0); 1157 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
1158 if (want_ncq != using_ncq) 1158 if (want_ncq != using_ncq)
1159 mv_stop_edma(ap); 1159 mv_stop_edma(ap);
1160 } 1160 }
1161 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) { 1161 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
1162 struct mv_host_priv *hpriv = ap->host->private_data; 1162 struct mv_host_priv *hpriv = ap->host->private_data;
1163 1163
1164 mv_edma_cfg(ap, want_ncq, 1); 1164 mv_edma_cfg(ap, want_ncq, 1);
1165 1165
1166 mv_set_edma_ptrs(port_mmio, hpriv, pp); 1166 mv_set_edma_ptrs(port_mmio, hpriv, pp);
1167 mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ); 1167 mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);
1168 1168
1169 writelfl(EDMA_EN, port_mmio + EDMA_CMD); 1169 writelfl(EDMA_EN, port_mmio + EDMA_CMD);
1170 pp->pp_flags |= MV_PP_FLAG_EDMA_EN; 1170 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
1171 } 1171 }
1172 } 1172 }
1173 1173
1174 static void mv_wait_for_edma_empty_idle(struct ata_port *ap) 1174 static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
1175 { 1175 {
1176 void __iomem *port_mmio = mv_ap_base(ap); 1176 void __iomem *port_mmio = mv_ap_base(ap);
1177 const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE); 1177 const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
1178 const int per_loop = 5, timeout = (15 * 1000 / per_loop); 1178 const int per_loop = 5, timeout = (15 * 1000 / per_loop);
1179 int i; 1179 int i;
1180 1180
1181 /* 1181 /*
1182 * Wait for the EDMA engine to finish transactions in progress. 1182 * Wait for the EDMA engine to finish transactions in progress.
1183 * No idea what a good "timeout" value might be, but measurements 1183 * No idea what a good "timeout" value might be, but measurements
1184 * indicate that it often requires hundreds of microseconds 1184 * indicate that it often requires hundreds of microseconds
1185 * with two drives in use. So we use the 15 msec value above 1185 * with two drives in use. So we use the 15 msec value above
1186 * as a rough guess at what even more drives might require. 1186 * as a rough guess at what even more drives might require.
1187 */ 1187 */
1188 for (i = 0; i < timeout; ++i) { 1188 for (i = 0; i < timeout; ++i) {
1189 u32 edma_stat = readl(port_mmio + EDMA_STATUS); 1189 u32 edma_stat = readl(port_mmio + EDMA_STATUS);
1190 if ((edma_stat & empty_idle) == empty_idle) 1190 if ((edma_stat & empty_idle) == empty_idle)
1191 break; 1191 break;
1192 udelay(per_loop); 1192 udelay(per_loop);
1193 } 1193 }
1194 /* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */ 1194 /* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
1195 } 1195 }
1196 1196
1197 /** 1197 /**
1198 * mv_stop_edma_engine - Disable eDMA engine 1198 * mv_stop_edma_engine - Disable eDMA engine
1199 * @port_mmio: io base address 1199 * @port_mmio: io base address
1200 * 1200 *
1201 * LOCKING: 1201 * LOCKING:
1202 * Inherited from caller. 1202 * Inherited from caller.
1203 */ 1203 */
1204 static int mv_stop_edma_engine(void __iomem *port_mmio) 1204 static int mv_stop_edma_engine(void __iomem *port_mmio)
1205 { 1205 {
1206 int i; 1206 int i;
1207 1207
1208 /* Disable eDMA. The disable bit auto clears. */ 1208 /* Disable eDMA. The disable bit auto clears. */
1209 writelfl(EDMA_DS, port_mmio + EDMA_CMD); 1209 writelfl(EDMA_DS, port_mmio + EDMA_CMD);
1210 1210
1211 /* Wait for the chip to confirm eDMA is off. */ 1211 /* Wait for the chip to confirm eDMA is off. */
1212 for (i = 10000; i > 0; i--) { 1212 for (i = 10000; i > 0; i--) {
1213 u32 reg = readl(port_mmio + EDMA_CMD); 1213 u32 reg = readl(port_mmio + EDMA_CMD);
1214 if (!(reg & EDMA_EN)) 1214 if (!(reg & EDMA_EN))
1215 return 0; 1215 return 0;
1216 udelay(10); 1216 udelay(10);
1217 } 1217 }
1218 return -EIO; 1218 return -EIO;
1219 } 1219 }
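
Worst case, the loop above polls for 10000 iterations of 10 usecs each, i.e. roughly 100 msecs, before giving up and returning -EIO.
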
1220 1220
1221 static int mv_stop_edma(struct ata_port *ap) 1221 static int mv_stop_edma(struct ata_port *ap)
1222 { 1222 {
1223 void __iomem *port_mmio = mv_ap_base(ap); 1223 void __iomem *port_mmio = mv_ap_base(ap);
1224 struct mv_port_priv *pp = ap->private_data; 1224 struct mv_port_priv *pp = ap->private_data;
1225 int err = 0; 1225 int err = 0;
1226 1226
1227 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) 1227 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
1228 return 0; 1228 return 0;
1229 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 1229 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1230 mv_wait_for_edma_empty_idle(ap); 1230 mv_wait_for_edma_empty_idle(ap);
1231 if (mv_stop_edma_engine(port_mmio)) { 1231 if (mv_stop_edma_engine(port_mmio)) {
1232 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n"); 1232 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
1233 err = -EIO; 1233 err = -EIO;
1234 } 1234 }
1235 mv_edma_cfg(ap, 0, 0); 1235 mv_edma_cfg(ap, 0, 0);
1236 return err; 1236 return err;
1237 } 1237 }
1238 1238
1239 #ifdef ATA_DEBUG 1239 #ifdef ATA_DEBUG
1240 static void mv_dump_mem(void __iomem *start, unsigned bytes) 1240 static void mv_dump_mem(void __iomem *start, unsigned bytes)
1241 { 1241 {
1242 int b, w; 1242 int b, w;
1243 for (b = 0; b < bytes; ) { 1243 for (b = 0; b < bytes; ) {
1244 DPRINTK("%p: ", start + b); 1244 DPRINTK("%p: ", start + b);
1245 for (w = 0; b < bytes && w < 4; w++) { 1245 for (w = 0; b < bytes && w < 4; w++) {
1246 printk("%08x ", readl(start + b)); 1246 printk("%08x ", readl(start + b));
1247 b += sizeof(u32); 1247 b += sizeof(u32);
1248 } 1248 }
1249 printk("\n"); 1249 printk("\n");
1250 } 1250 }
1251 } 1251 }
1252 #endif 1252 #endif
1253 1253
1254 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes) 1254 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
1255 { 1255 {
1256 #ifdef ATA_DEBUG 1256 #ifdef ATA_DEBUG
1257 int b, w; 1257 int b, w;
1258 u32 dw; 1258 u32 dw;
1259 for (b = 0; b < bytes; ) { 1259 for (b = 0; b < bytes; ) {
1260 DPRINTK("%02x: ", b); 1260 DPRINTK("%02x: ", b);
1261 for (w = 0; b < bytes && w < 4; w++) { 1261 for (w = 0; b < bytes && w < 4; w++) {
1262 (void) pci_read_config_dword(pdev, b, &dw); 1262 (void) pci_read_config_dword(pdev, b, &dw);
1263 printk("%08x ", dw); 1263 printk("%08x ", dw);
1264 b += sizeof(u32); 1264 b += sizeof(u32);
1265 } 1265 }
1266 printk("\n"); 1266 printk("\n");
1267 } 1267 }
1268 #endif 1268 #endif
1269 } 1269 }
1270 static void mv_dump_all_regs(void __iomem *mmio_base, int port, 1270 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
1271 struct pci_dev *pdev) 1271 struct pci_dev *pdev)
1272 { 1272 {
1273 #ifdef ATA_DEBUG 1273 #ifdef ATA_DEBUG
1274 void __iomem *hc_base = mv_hc_base(mmio_base, 1274 void __iomem *hc_base = mv_hc_base(mmio_base,
1275 port >> MV_PORT_HC_SHIFT); 1275 port >> MV_PORT_HC_SHIFT);
1276 void __iomem *port_base; 1276 void __iomem *port_base;
1277 int start_port, num_ports, p, start_hc, num_hcs, hc; 1277 int start_port, num_ports, p, start_hc, num_hcs, hc;
1278 1278
1279 if (port < 0) { 1279 if (port < 0) {
1280 start_hc = start_port = 0; 1280 start_hc = start_port = 0;
1281 num_ports = 8; /* should be benign for 4 port devs */ 1281 num_ports = 8; /* should be benign for 4 port devs */
1282 num_hcs = 2; 1282 num_hcs = 2;
1283 } else { 1283 } else {
1284 start_hc = port >> MV_PORT_HC_SHIFT; 1284 start_hc = port >> MV_PORT_HC_SHIFT;
1285 start_port = port; 1285 start_port = port;
1286 num_ports = num_hcs = 1; 1286 num_ports = num_hcs = 1;
1287 } 1287 }
1288 DPRINTK("All registers for port(s) %u-%u:\n", start_port, 1288 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
1289 num_ports > 1 ? num_ports - 1 : start_port); 1289 num_ports > 1 ? num_ports - 1 : start_port);
1290 1290
1291 if (pdev != NULL) { 1291 if (pdev != NULL) {
1292 DPRINTK("PCI config space regs:\n"); 1292 DPRINTK("PCI config space regs:\n");
1293 mv_dump_pci_cfg(pdev, 0x68); 1293 mv_dump_pci_cfg(pdev, 0x68);
1294 } 1294 }
1295 DPRINTK("PCI regs:\n"); 1295 DPRINTK("PCI regs:\n");
1296 mv_dump_mem(mmio_base+0xc00, 0x3c); 1296 mv_dump_mem(mmio_base+0xc00, 0x3c);
1297 mv_dump_mem(mmio_base+0xd00, 0x34); 1297 mv_dump_mem(mmio_base+0xd00, 0x34);
1298 mv_dump_mem(mmio_base+0xf00, 0x4); 1298 mv_dump_mem(mmio_base+0xf00, 0x4);
1299 mv_dump_mem(mmio_base+0x1d00, 0x6c); 1299 mv_dump_mem(mmio_base+0x1d00, 0x6c);
1300 for (hc = start_hc; hc < start_hc + num_hcs; hc++) { 1300 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
1301 hc_base = mv_hc_base(mmio_base, hc); 1301 hc_base = mv_hc_base(mmio_base, hc);
1302 DPRINTK("HC regs (HC %i):\n", hc); 1302 DPRINTK("HC regs (HC %i):\n", hc);
1303 mv_dump_mem(hc_base, 0x1c); 1303 mv_dump_mem(hc_base, 0x1c);
1304 } 1304 }
1305 for (p = start_port; p < start_port + num_ports; p++) { 1305 for (p = start_port; p < start_port + num_ports; p++) {
1306 port_base = mv_port_base(mmio_base, p); 1306 port_base = mv_port_base(mmio_base, p);
1307 DPRINTK("EDMA regs (port %i):\n", p); 1307 DPRINTK("EDMA regs (port %i):\n", p);
1308 mv_dump_mem(port_base, 0x54); 1308 mv_dump_mem(port_base, 0x54);
1309 DPRINTK("SATA regs (port %i):\n", p); 1309 DPRINTK("SATA regs (port %i):\n", p);
1310 mv_dump_mem(port_base+0x300, 0x60); 1310 mv_dump_mem(port_base+0x300, 0x60);
1311 } 1311 }
1312 #endif 1312 #endif
1313 } 1313 }
1314 1314
1315 static unsigned int mv_scr_offset(unsigned int sc_reg_in) 1315 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1316 { 1316 {
1317 unsigned int ofs; 1317 unsigned int ofs;
1318 1318
1319 switch (sc_reg_in) { 1319 switch (sc_reg_in) {
1320 case SCR_STATUS: 1320 case SCR_STATUS:
1321 case SCR_CONTROL: 1321 case SCR_CONTROL:
1322 case SCR_ERROR: 1322 case SCR_ERROR:
1323 ofs = SATA_STATUS + (sc_reg_in * sizeof(u32)); 1323 ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
1324 break; 1324 break;
1325 case SCR_ACTIVE: 1325 case SCR_ACTIVE:
1326 ofs = SATA_ACTIVE; /* active is not with the others */ 1326 ofs = SATA_ACTIVE; /* active is not with the others */
1327 break; 1327 break;
1328 default: 1328 default:
1329 ofs = 0xffffffffU; 1329 ofs = 0xffffffffU;
1330 break; 1330 break;
1331 } 1331 }
1332 return ofs; 1332 return ofs;
1333 } 1333 }
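
The sc_reg_in * sizeof(u32) arithmetic works because libata numbers these registers in the order the chip lays them out: SCR_STATUS (0), SCR_ERROR (1) and SCR_CONTROL (2) map to SATA_STATUS + 0x0, +0x4 and +0x8 respectively, while SCR_ACTIVE lives at an unrelated offset and is special-cased.
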
1334 1334
1335 static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val) 1335 static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
1336 { 1336 {
1337 unsigned int ofs = mv_scr_offset(sc_reg_in); 1337 unsigned int ofs = mv_scr_offset(sc_reg_in);
1338 1338
1339 if (ofs != 0xffffffffU) { 1339 if (ofs != 0xffffffffU) {
1340 *val = readl(mv_ap_base(link->ap) + ofs); 1340 *val = readl(mv_ap_base(link->ap) + ofs);
1341 return 0; 1341 return 0;
1342 } else 1342 } else
1343 return -EINVAL; 1343 return -EINVAL;
1344 } 1344 }
1345 1345
1346 static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val) 1346 static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
1347 { 1347 {
1348 unsigned int ofs = mv_scr_offset(sc_reg_in); 1348 unsigned int ofs = mv_scr_offset(sc_reg_in);
1349 1349
1350 if (ofs != 0xffffffffU) { 1350 if (ofs != 0xffffffffU) {
1351 void __iomem *addr = mv_ap_base(link->ap) + ofs; 1351 void __iomem *addr = mv_ap_base(link->ap) + ofs;
1352 if (sc_reg_in == SCR_CONTROL) { 1352 if (sc_reg_in == SCR_CONTROL) {
1353 /* 1353 /*
1354 * Workaround for 88SX60x1 FEr SATA#26: 1354 * Workaround for 88SX60x1 FEr SATA#26:
1355 * 1355 *
1356 * COMRESETs have to take care not to accidentally 1356 * COMRESETs have to take care not to accidentally
1357 * put the drive to sleep when writing SCR_CONTROL. 1357 * put the drive to sleep when writing SCR_CONTROL.
1358 * Setting bits 12..15 prevents this problem. 1358 * Setting bits 12..15 prevents this problem.
1359 * 1359 *
1360 * So if we see an outbound COMRESET, set those bits. 1360 * So if we see an outbound COMRESET, set those bits.
1361 * Ditto for the followup write that clears the reset. 1361 * Ditto for the followup write that clears the reset.
1362 * 1362 *
1363 * The proprietary driver does this for 1363 * The proprietary driver does this for
1364 * all chip versions, and so do we. 1364 * all chip versions, and so do we.
1365 */ 1365 */
1366 if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1) 1366 if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
1367 val |= 0xf000; 1367 val |= 0xf000;
1368 } 1368 }
1369 writelfl(val, addr); 1369 writelfl(val, addr);
1370 return 0; 1370 return 0;
1371 } else 1371 } else
1372 return -EINVAL; 1372 return -EINVAL;
1373 } 1373 }
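
A worked example of the SATA#26 workaround: initiating a COMRESET writes SControl with DET = 1, so (val & 0xf) == 1 and the value goes out as val | 0xf000; the follow-up write that clears DET back to 0 fails that first test, but readl(addr) still reports DET = 1 while the reset is asserted, so bits 12..15 are set on that write too, which is the "ditto" case in the comment.
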
1374 1374
1375 static void mv6_dev_config(struct ata_device *adev) 1375 static void mv6_dev_config(struct ata_device *adev)
1376 { 1376 {
1377 /* 1377 /*
1378 * Deal with Gen-II ("mv6") hardware quirks/restrictions: 1378 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
1379 * 1379 *
1380 * Gen-II does not support NCQ over a port multiplier 1380 * Gen-II does not support NCQ over a port multiplier
1381 * (no FIS-based switching). 1381 * (no FIS-based switching).
1382 */ 1382 */
1383 if (adev->flags & ATA_DFLAG_NCQ) { 1383 if (adev->flags & ATA_DFLAG_NCQ) {
1384 if (sata_pmp_attached(adev->link->ap)) { 1384 if (sata_pmp_attached(adev->link->ap)) {
1385 adev->flags &= ~ATA_DFLAG_NCQ; 1385 adev->flags &= ~ATA_DFLAG_NCQ;
1386 ata_dev_printk(adev, KERN_INFO, 1386 ata_dev_printk(adev, KERN_INFO,
1387 "NCQ disabled for command-based switching\n"); 1387 "NCQ disabled for command-based switching\n");
1388 } 1388 }
1389 } 1389 }
1390 } 1390 }
1391 1391
1392 static int mv_qc_defer(struct ata_queued_cmd *qc) 1392 static int mv_qc_defer(struct ata_queued_cmd *qc)
1393 { 1393 {
1394 struct ata_link *link = qc->dev->link; 1394 struct ata_link *link = qc->dev->link;
1395 struct ata_port *ap = link->ap; 1395 struct ata_port *ap = link->ap;
1396 struct mv_port_priv *pp = ap->private_data; 1396 struct mv_port_priv *pp = ap->private_data;
1397 1397
1398 /* 1398 /*
1399 * Don't allow new commands if we're in a delayed EH state 1399 * Don't allow new commands if we're in a delayed EH state
1400 * for NCQ and/or FIS-based switching. 1400 * for NCQ and/or FIS-based switching.
1401 */ 1401 */
1402 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) 1402 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
1403 return ATA_DEFER_PORT; 1403 return ATA_DEFER_PORT;
1404 1404
1405 /* PIO commands need exclusive link: no other commands [DMA or PIO] 1405 /* PIO commands need exclusive link: no other commands [DMA or PIO]
1406 * can run concurrently. 1406 * can run concurrently.
1407 * Set excl_link when we want to send a PIO command in DMA mode 1407 * Set excl_link when we want to send a PIO command in DMA mode
1408 * or a non-NCQ command in NCQ mode. 1408 * or a non-NCQ command in NCQ mode.
1409 * When we receive a command from that link, and there are no 1409 * When we receive a command from that link, and there are no
1410 * outstanding commands, mark a flag to clear excl_link and let 1410 * outstanding commands, mark a flag to clear excl_link and let
1411 * the command go through. 1411 * the command go through.
1412 */ 1412 */
1413 if (unlikely(ap->excl_link)) { 1413 if (unlikely(ap->excl_link)) {
1414 if (link == ap->excl_link) { 1414 if (link == ap->excl_link) {
1415 if (ap->nr_active_links) 1415 if (ap->nr_active_links)
1416 return ATA_DEFER_PORT; 1416 return ATA_DEFER_PORT;
1417 qc->flags |= ATA_QCFLAG_CLEAR_EXCL; 1417 qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
1418 return 0; 1418 return 0;
1419 } else 1419 } else
1420 return ATA_DEFER_PORT; 1420 return ATA_DEFER_PORT;
1421 } 1421 }
1422 1422
1423 /* 1423 /*
1424 * If the port is completely idle, then allow the new qc. 1424 * If the port is completely idle, then allow the new qc.
1425 */ 1425 */
1426 if (ap->nr_active_links == 0) 1426 if (ap->nr_active_links == 0)
1427 return 0; 1427 return 0;
1428 1428
1429 /* 1429 /*
1430 * The port is operating in host queuing mode (EDMA) with NCQ 1430 * The port is operating in host queuing mode (EDMA) with NCQ
1431 * enabled, allow multiple NCQ commands. EDMA also allows 1431 * enabled, allow multiple NCQ commands. EDMA also allows
1432 * queueing multiple DMA commands but libata core currently 1432 * queueing multiple DMA commands but libata core currently
1433 * doesn't allow it. 1433 * doesn't allow it.
1434 */ 1434 */
1435 if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) && 1435 if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
1436 (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) { 1436 (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1437 if (ata_is_ncq(qc->tf.protocol)) 1437 if (ata_is_ncq(qc->tf.protocol))
1438 return 0; 1438 return 0;
1439 else { 1439 else {
1440 ap->excl_link = link; 1440 ap->excl_link = link;
1441 return ATA_DEFER_PORT; 1441 return ATA_DEFER_PORT;
1442 } 1442 }
1443 } 1443 }
1444 1444
1445 return ATA_DEFER_PORT; 1445 return ATA_DEFER_PORT;
1446 } 1446 }
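
A concrete walk-through of the excl_link machinery: with EDMA/NCQ active and queued commands in flight, a non-NCQ command (a cache flush, say) reaches the final else branch above, which records its link in ap->excl_link and defers it. Every subsequent command then takes the unlikely() branch and is deferred until nr_active_links reaches zero, at which point the held-off command is admitted with ATA_QCFLAG_CLEAR_EXCL set so the exclusive claim is released when it completes.
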
1447 1447
1448 static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs) 1448 static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
1449 { 1449 {
1450 struct mv_port_priv *pp = ap->private_data; 1450 struct mv_port_priv *pp = ap->private_data;
1451 void __iomem *port_mmio; 1451 void __iomem *port_mmio;
1452 1452
1453 u32 fiscfg, *old_fiscfg = &pp->cached.fiscfg; 1453 u32 fiscfg, *old_fiscfg = &pp->cached.fiscfg;
1454 u32 ltmode, *old_ltmode = &pp->cached.ltmode; 1454 u32 ltmode, *old_ltmode = &pp->cached.ltmode;
1455 u32 haltcond, *old_haltcond = &pp->cached.haltcond; 1455 u32 haltcond, *old_haltcond = &pp->cached.haltcond;
1456 1456
1457 ltmode = *old_ltmode & ~LTMODE_BIT8; 1457 ltmode = *old_ltmode & ~LTMODE_BIT8;
1458 haltcond = *old_haltcond | EDMA_ERR_DEV; 1458 haltcond = *old_haltcond | EDMA_ERR_DEV;
1459 1459
1460 if (want_fbs) { 1460 if (want_fbs) {
1461 fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC; 1461 fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
1462 ltmode = *old_ltmode | LTMODE_BIT8; 1462 ltmode = *old_ltmode | LTMODE_BIT8;
1463 if (want_ncq) 1463 if (want_ncq)
1464 haltcond &= ~EDMA_ERR_DEV; 1464 haltcond &= ~EDMA_ERR_DEV;
1465 else 1465 else
1466 fiscfg |= FISCFG_WAIT_DEV_ERR; 1466 fiscfg |= FISCFG_WAIT_DEV_ERR;
1467 } else { 1467 } else {
1468 fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR); 1468 fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
1469 } 1469 }
1470 1470
1471 port_mmio = mv_ap_base(ap); 1471 port_mmio = mv_ap_base(ap);
1472 mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg); 1472 mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
1473 mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode); 1473 mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
1474 mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond); 1474 mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
1475 } 1475 }
1476 1476
1477 static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq) 1477 static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
1478 { 1478 {
1479 struct mv_host_priv *hpriv = ap->host->private_data; 1479 struct mv_host_priv *hpriv = ap->host->private_data;
1480 u32 old, new; 1480 u32 old, new;
1481 1481
1482 /* workaround for 88SX60x1 FEr SATA#25 (part 1) */ 1482 /* workaround for 88SX60x1 FEr SATA#25 (part 1) */
1483 old = readl(hpriv->base + GPIO_PORT_CTL); 1483 old = readl(hpriv->base + GPIO_PORT_CTL);
1484 if (want_ncq) 1484 if (want_ncq)
1485 new = old | (1 << 22); 1485 new = old | (1 << 22);
1486 else 1486 else
1487 new = old & ~(1 << 22); 1487 new = old & ~(1 << 22);
1488 if (new != old) 1488 if (new != old)
1489 writel(new, hpriv->base + GPIO_PORT_CTL); 1489 writel(new, hpriv->base + GPIO_PORT_CTL);
1490 } 1490 }
1491 1491
1492 /** 1492 /**
1493 * mv_bmdma_enable - set a magic bit on GEN_IIE to allow bmdma 1493 * mv_bmdma_enable - set a magic bit on GEN_IIE to allow bmdma
1494 * @ap: Port being initialized 1494 * @ap: Port being initialized
1495 * 1495 *
1496 * There are two DMA modes on these chips: basic DMA, and EDMA. 1496 * There are two DMA modes on these chips: basic DMA, and EDMA.
1497 * 1497 *
1498 * Bit-0 of the "EDMA RESERVED" register enables/disables use 1498 * Bit-0 of the "EDMA RESERVED" register enables/disables use
1499 * of basic DMA on the GEN_IIE versions of the chips. 1499 * of basic DMA on the GEN_IIE versions of the chips.
1500 * 1500 *
1501 * This bit survives EDMA resets, and must be set for basic DMA 1501 * This bit survives EDMA resets, and must be set for basic DMA
1502 * to function, and should be cleared when EDMA is active. 1502 * to function, and should be cleared when EDMA is active.
1503 */ 1503 */
1504 static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma) 1504 static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
1505 { 1505 {
1506 struct mv_port_priv *pp = ap->private_data; 1506 struct mv_port_priv *pp = ap->private_data;
1507 u32 new, *old = &pp->cached.unknown_rsvd; 1507 u32 new, *old = &pp->cached.unknown_rsvd;
1508 1508
1509 if (enable_bmdma) 1509 if (enable_bmdma)
1510 new = *old | 1; 1510 new = *old | 1;
1511 else 1511 else
1512 new = *old & ~1; 1512 new = *old & ~1;
1513 mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new); 1513 mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
1514 } 1514 }
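
mv_edma_cfg() below calls this with !want_edma, which maintains the rule stated in the kerneldoc above: the basic-DMA enable bit is set exactly when EDMA is not in use, and cleared whenever EDMA is active.
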
1515 1515
1516 /* 1516 /*
1517 * SOC chips have an issue whereby the HDD LEDs don't always blink 1517 * SOC chips have an issue whereby the HDD LEDs don't always blink
1518 * during I/O when NCQ is enabled. Enabling a special "LED blink" mode 1518 * during I/O when NCQ is enabled. Enabling a special "LED blink" mode
1519 * of the SOC takes care of it, generating a steady blink rate when 1519 * of the SOC takes care of it, generating a steady blink rate when
1520 * any drive on the chip is active. 1520 * any drive on the chip is active.
1521 * 1521 *
1522 * Unfortunately, the blink mode is a global hardware setting for the SOC, 1522 * Unfortunately, the blink mode is a global hardware setting for the SOC,
1523 * so we must use it whenever at least one port on the SOC has NCQ enabled. 1523 * so we must use it whenever at least one port on the SOC has NCQ enabled.
1524 * 1524 *
1525 * We turn "LED blink" off when NCQ is not in use anywhere, because the normal 1525 * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
1526 * LED operation works then, and provides better (more accurate) feedback. 1526 * LED operation works then, and provides better (more accurate) feedback.
1527 * 1527 *
1528 * Note that this code assumes that an SOC never has more than one HC onboard. 1528 * Note that this code assumes that an SOC never has more than one HC onboard.
1529 */ 1529 */
1530 static void mv_soc_led_blink_enable(struct ata_port *ap) 1530 static void mv_soc_led_blink_enable(struct ata_port *ap)
1531 { 1531 {
1532 struct ata_host *host = ap->host; 1532 struct ata_host *host = ap->host;
1533 struct mv_host_priv *hpriv = host->private_data; 1533 struct mv_host_priv *hpriv = host->private_data;
1534 void __iomem *hc_mmio; 1534 void __iomem *hc_mmio;
1535 u32 led_ctrl; 1535 u32 led_ctrl;
1536 1536
1537 if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN) 1537 if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
1538 return; 1538 return;
1539 hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN; 1539 hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
1540 hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no); 1540 hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
1541 led_ctrl = readl(hc_mmio + SOC_LED_CTRL); 1541 led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1542 writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL); 1542 writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
1543 } 1543 }
1544 1544
1545 static void mv_soc_led_blink_disable(struct ata_port *ap) 1545 static void mv_soc_led_blink_disable(struct ata_port *ap)
1546 { 1546 {
1547 struct ata_host *host = ap->host; 1547 struct ata_host *host = ap->host;
1548 struct mv_host_priv *hpriv = host->private_data; 1548 struct mv_host_priv *hpriv = host->private_data;
1549 void __iomem *hc_mmio; 1549 void __iomem *hc_mmio;
1550 u32 led_ctrl; 1550 u32 led_ctrl;
1551 unsigned int port; 1551 unsigned int port;
1552 1552
1553 if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)) 1553 if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
1554 return; 1554 return;
1555 1555
1556 /* disable led-blink only if no ports are using NCQ */ 1556 /* disable led-blink only if no ports are using NCQ */
1557 for (port = 0; port < hpriv->n_ports; port++) { 1557 for (port = 0; port < hpriv->n_ports; port++) {
1558 struct ata_port *this_ap = host->ports[port]; 1558 struct ata_port *this_ap = host->ports[port];
1559 struct mv_port_priv *pp = this_ap->private_data; 1559 struct mv_port_priv *pp = this_ap->private_data;
1560 1560
1561 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) 1561 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
1562 return; 1562 return;
1563 } 1563 }
1564 1564
1565 hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN; 1565 hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
1566 hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no); 1566 hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
1567 led_ctrl = readl(hc_mmio + SOC_LED_CTRL); 1567 led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1568 writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL); 1568 writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
1569 } 1569 }
1570 1570
1571 static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma) 1571 static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
1572 { 1572 {
1573 u32 cfg; 1573 u32 cfg;
1574 struct mv_port_priv *pp = ap->private_data; 1574 struct mv_port_priv *pp = ap->private_data;
1575 struct mv_host_priv *hpriv = ap->host->private_data; 1575 struct mv_host_priv *hpriv = ap->host->private_data;
1576 void __iomem *port_mmio = mv_ap_base(ap); 1576 void __iomem *port_mmio = mv_ap_base(ap);
1577 1577
1578 /* set up non-NCQ EDMA configuration */ 1578 /* set up non-NCQ EDMA configuration */
1579 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */ 1579 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
1580 pp->pp_flags &= 1580 pp->pp_flags &=
1581 ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY); 1581 ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
1582 1582
1583 if (IS_GEN_I(hpriv)) 1583 if (IS_GEN_I(hpriv))
1584 cfg |= (1 << 8); /* enab config burst size mask */ 1584 cfg |= (1 << 8); /* enab config burst size mask */
1585 1585
1586 else if (IS_GEN_II(hpriv)) { 1586 else if (IS_GEN_II(hpriv)) {
1587 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN; 1587 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1588 mv_60x1_errata_sata25(ap, want_ncq); 1588 mv_60x1_errata_sata25(ap, want_ncq);
1589 1589
1590 } else if (IS_GEN_IIE(hpriv)) { 1590 } else if (IS_GEN_IIE(hpriv)) {
1591 int want_fbs = sata_pmp_attached(ap); 1591 int want_fbs = sata_pmp_attached(ap);
1592 /* 1592 /*
1593 * Possible future enhancement: 1593 * Possible future enhancement:
1594 * 1594 *
1595 * The chip can use FBS with non-NCQ, if we allow it, 1595 * The chip can use FBS with non-NCQ, if we allow it,
1596 * But first we need to have the error handling in place 1596 * But first we need to have the error handling in place
1597 * for this mode (datasheet section 7.3.15.4.2.3). 1597 * for this mode (datasheet section 7.3.15.4.2.3).
1598 * So disallow non-NCQ FBS for now. 1598 * So disallow non-NCQ FBS for now.
1599 */ 1599 */
1600 want_fbs &= want_ncq; 1600 want_fbs &= want_ncq;
1601 1601
1602 mv_config_fbs(ap, want_ncq, want_fbs); 1602 mv_config_fbs(ap, want_ncq, want_fbs);
1603 1603
1604 if (want_fbs) { 1604 if (want_fbs) {
1605 pp->pp_flags |= MV_PP_FLAG_FBS_EN; 1605 pp->pp_flags |= MV_PP_FLAG_FBS_EN;
1606 cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */ 1606 cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
1607 } 1607 }
1608 1608
1609 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */ 1609 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1610 if (want_edma) { 1610 if (want_edma) {
1611 cfg |= (1 << 22); /* enab 4-entry host queue cache */ 1611 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1612 if (!IS_SOC(hpriv)) 1612 if (!IS_SOC(hpriv))
1613 cfg |= (1 << 18); /* enab early completion */ 1613 cfg |= (1 << 18); /* enab early completion */
1614 } 1614 }
1615 if (hpriv->hp_flags & MV_HP_CUT_THROUGH) 1615 if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
1616 cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */ 1616 cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
1617 mv_bmdma_enable_iie(ap, !want_edma); 1617 mv_bmdma_enable_iie(ap, !want_edma);
1618 1618
1619 if (IS_SOC(hpriv)) { 1619 if (IS_SOC(hpriv)) {
1620 if (want_ncq) 1620 if (want_ncq)
1621 mv_soc_led_blink_enable(ap); 1621 mv_soc_led_blink_enable(ap);
1622 else 1622 else
1623 mv_soc_led_blink_disable(ap); 1623 mv_soc_led_blink_disable(ap);
1624 } 1624 }
1625 } 1625 }
1626 1626
1627 if (want_ncq) { 1627 if (want_ncq) {
1628 cfg |= EDMA_CFG_NCQ; 1628 cfg |= EDMA_CFG_NCQ;
1629 pp->pp_flags |= MV_PP_FLAG_NCQ_EN; 1629 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1630 } 1630 }
1631 1631
1632 writelfl(cfg, port_mmio + EDMA_CFG); 1632 writelfl(cfg, port_mmio + EDMA_CFG);
1633 } 1633 }
1634 1634
1635 static void mv_port_free_dma_mem(struct ata_port *ap) 1635 static void mv_port_free_dma_mem(struct ata_port *ap)
1636 { 1636 {
1637 struct mv_host_priv *hpriv = ap->host->private_data; 1637 struct mv_host_priv *hpriv = ap->host->private_data;
1638 struct mv_port_priv *pp = ap->private_data; 1638 struct mv_port_priv *pp = ap->private_data;
1639 int tag; 1639 int tag;
1640 1640
1641 if (pp->crqb) { 1641 if (pp->crqb) {
1642 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma); 1642 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1643 pp->crqb = NULL; 1643 pp->crqb = NULL;
1644 } 1644 }
1645 if (pp->crpb) { 1645 if (pp->crpb) {
1646 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma); 1646 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1647 pp->crpb = NULL; 1647 pp->crpb = NULL;
1648 } 1648 }
1649 /* 1649 /*
1650 * For GEN_I, there's no NCQ, so we have only a single sg_tbl. 1650 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1651 * For later hardware, we have one unique sg_tbl per NCQ tag. 1651 * For later hardware, we have one unique sg_tbl per NCQ tag.
1652 */ 1652 */
1653 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) { 1653 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1654 if (pp->sg_tbl[tag]) { 1654 if (pp->sg_tbl[tag]) {
1655 if (tag == 0 || !IS_GEN_I(hpriv)) 1655 if (tag == 0 || !IS_GEN_I(hpriv))
1656 dma_pool_free(hpriv->sg_tbl_pool, 1656 dma_pool_free(hpriv->sg_tbl_pool,
1657 pp->sg_tbl[tag], 1657 pp->sg_tbl[tag],
1658 pp->sg_tbl_dma[tag]); 1658 pp->sg_tbl_dma[tag]);
1659 pp->sg_tbl[tag] = NULL; 1659 pp->sg_tbl[tag] = NULL;
1660 } 1660 }
1661 } 1661 }
1662 } 1662 }
1663 1663
1664 /** 1664 /**
1665 * mv_port_start - Port specific init/start routine. 1665 * mv_port_start - Port specific init/start routine.
1666 * @ap: ATA channel to manipulate 1666 * @ap: ATA channel to manipulate
1667 * 1667 *
1668 * Allocate and point to DMA memory, init port private memory, 1668 * Allocate and point to DMA memory, init port private memory,
1669 * zero indices. 1669 * zero indices.
1670 * 1670 *
1671 * LOCKING: 1671 * LOCKING:
1672 * Inherited from caller. 1672 * Inherited from caller.
1673 */ 1673 */
1674 static int mv_port_start(struct ata_port *ap) 1674 static int mv_port_start(struct ata_port *ap)
1675 { 1675 {
1676 struct device *dev = ap->host->dev; 1676 struct device *dev = ap->host->dev;
1677 struct mv_host_priv *hpriv = ap->host->private_data; 1677 struct mv_host_priv *hpriv = ap->host->private_data;
1678 struct mv_port_priv *pp; 1678 struct mv_port_priv *pp;
1679 unsigned long flags; 1679 unsigned long flags;
1680 int tag; 1680 int tag;
1681 1681
1682 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); 1682 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1683 if (!pp) 1683 if (!pp)
1684 return -ENOMEM; 1684 return -ENOMEM;
1685 ap->private_data = pp; 1685 ap->private_data = pp;
1686 1686
1687 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma); 1687 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1688 if (!pp->crqb) 1688 if (!pp->crqb)
1689 return -ENOMEM; 1689 return -ENOMEM;
1690 memset(pp->crqb, 0, MV_CRQB_Q_SZ); 1690 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1691 1691
1692 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma); 1692 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1693 if (!pp->crpb) 1693 if (!pp->crpb)
1694 goto out_port_free_dma_mem; 1694 goto out_port_free_dma_mem;
1695 memset(pp->crpb, 0, MV_CRPB_Q_SZ); 1695 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1696 1696
1697 /* 6041/6081 Rev. "C0" (and newer) are okay with async notify */ 1697 /* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
1698 if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0) 1698 if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
1699 ap->flags |= ATA_FLAG_AN; 1699 ap->flags |= ATA_FLAG_AN;
1700 /* 1700 /*
1701 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl. 1701 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1702 * For later hardware, we need one unique sg_tbl per NCQ tag. 1702 * For later hardware, we need one unique sg_tbl per NCQ tag.
1703 */ 1703 */
1704 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) { 1704 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1705 if (tag == 0 || !IS_GEN_I(hpriv)) { 1705 if (tag == 0 || !IS_GEN_I(hpriv)) {
1706 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool, 1706 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1707 GFP_KERNEL, &pp->sg_tbl_dma[tag]); 1707 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1708 if (!pp->sg_tbl[tag]) 1708 if (!pp->sg_tbl[tag])
1709 goto out_port_free_dma_mem; 1709 goto out_port_free_dma_mem;
1710 } else { 1710 } else {
1711 pp->sg_tbl[tag] = pp->sg_tbl[0]; 1711 pp->sg_tbl[tag] = pp->sg_tbl[0];
1712 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0]; 1712 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1713 } 1713 }
1714 } 1714 }
1715 1715
1716 spin_lock_irqsave(ap->lock, flags); 1716 spin_lock_irqsave(ap->lock, flags);
1717 mv_save_cached_regs(ap); 1717 mv_save_cached_regs(ap);
1718 mv_edma_cfg(ap, 0, 0); 1718 mv_edma_cfg(ap, 0, 0);
1719 spin_unlock_irqrestore(ap->lock, flags); 1719 spin_unlock_irqrestore(ap->lock, flags);
1720 1720
1721 return 0; 1721 return 0;
1722 1722
1723 out_port_free_dma_mem: 1723 out_port_free_dma_mem:
1724 mv_port_free_dma_mem(ap); 1724 mv_port_free_dma_mem(ap);
1725 return -ENOMEM; 1725 return -ENOMEM;
1726 } 1726 }
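
A note on the tag loop above: GEN_I hardware has no NCQ, so only tag 0 owns a real scatter/gather table and every other tag aliases it; later generations allocate one table per tag. A minimal sketch of that aliasing, with plain malloc standing in for dma_pool_alloc() (the demo_* names are hypothetical):

#include <stdlib.h>

#define DEMO_MAX_TAGS 32	/* stands in for MV_MAX_Q_DEPTH */

/* Tag 0 always gets a real allocation; on gen1 chips the remaining
 * tags alias it, since only one command is ever in flight there. */
static int demo_alloc_sg_tables(void *tbl[DEMO_MAX_TAGS], int gen1)
{
	int tag;

	for (tag = 0; tag < DEMO_MAX_TAGS; tag++) {
		if (tag == 0 || !gen1) {
			tbl[tag] = malloc(4096);	/* stand-in for dma_pool_alloc() */
			if (!tbl[tag])
				return -1;		/* caller frees what was allocated */
		} else {
			tbl[tag] = tbl[0];		/* alias the single gen1 table */
		}
	}
	return 0;
}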
1727 1727
1728 /** 1728 /**
1729 * mv_port_stop - Port specific cleanup/stop routine. 1729 * mv_port_stop - Port specific cleanup/stop routine.
1730 * @ap: ATA channel to manipulate 1730 * @ap: ATA channel to manipulate
1731 * 1731 *
1732 * Stop DMA, cleanup port memory. 1732 * Stop DMA, cleanup port memory.
1733 * 1733 *
1734 * LOCKING: 1734 * LOCKING:
1735 * This routine uses the host lock to protect the DMA stop. 1735 * This routine uses the host lock to protect the DMA stop.
1736 */ 1736 */
1737 static void mv_port_stop(struct ata_port *ap) 1737 static void mv_port_stop(struct ata_port *ap)
1738 { 1738 {
1739 unsigned long flags; 1739 unsigned long flags;
1740 1740
1741 spin_lock_irqsave(ap->lock, flags); 1741 spin_lock_irqsave(ap->lock, flags);
1742 mv_stop_edma(ap); 1742 mv_stop_edma(ap);
1743 mv_enable_port_irqs(ap, 0); 1743 mv_enable_port_irqs(ap, 0);
1744 spin_unlock_irqrestore(ap->lock, flags); 1744 spin_unlock_irqrestore(ap->lock, flags);
1745 mv_port_free_dma_mem(ap); 1745 mv_port_free_dma_mem(ap);
1746 } 1746 }
1747 1747
1748 /** 1748 /**
1749 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries 1749 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1750 * @qc: queued command whose SG list to source from 1750 * @qc: queued command whose SG list to source from
1751 * 1751 *
1752 * Populate the SG list and mark the last entry. 1752 * Populate the SG list and mark the last entry.
1753 * 1753 *
1754 * LOCKING: 1754 * LOCKING:
1755 * Inherited from caller. 1755 * Inherited from caller.
1756 */ 1756 */
1757 static void mv_fill_sg(struct ata_queued_cmd *qc) 1757 static void mv_fill_sg(struct ata_queued_cmd *qc)
1758 { 1758 {
1759 struct mv_port_priv *pp = qc->ap->private_data; 1759 struct mv_port_priv *pp = qc->ap->private_data;
1760 struct scatterlist *sg; 1760 struct scatterlist *sg;
1761 struct mv_sg *mv_sg, *last_sg = NULL; 1761 struct mv_sg *mv_sg, *last_sg = NULL;
1762 unsigned int si; 1762 unsigned int si;
1763 1763
1764 mv_sg = pp->sg_tbl[qc->tag]; 1764 mv_sg = pp->sg_tbl[qc->tag];
1765 for_each_sg(qc->sg, sg, qc->n_elem, si) { 1765 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1766 dma_addr_t addr = sg_dma_address(sg); 1766 dma_addr_t addr = sg_dma_address(sg);
1767 u32 sg_len = sg_dma_len(sg); 1767 u32 sg_len = sg_dma_len(sg);
1768 1768
1769 while (sg_len) { 1769 while (sg_len) {
1770 u32 offset = addr & 0xffff; 1770 u32 offset = addr & 0xffff;
1771 u32 len = sg_len; 1771 u32 len = sg_len;
1772 1772
1773 if (offset + len > 0x10000) 1773 if (offset + len > 0x10000)
1774 len = 0x10000 - offset; 1774 len = 0x10000 - offset;
1775 1775
1776 mv_sg->addr = cpu_to_le32(addr & 0xffffffff); 1776 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1777 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16); 1777 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1778 mv_sg->flags_size = cpu_to_le32(len & 0xffff); 1778 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1779 mv_sg->reserved = 0; 1779 mv_sg->reserved = 0;
1780 1780
1781 sg_len -= len; 1781 sg_len -= len;
1782 addr += len; 1782 addr += len;
1783 1783
1784 last_sg = mv_sg; 1784 last_sg = mv_sg;
1785 mv_sg++; 1785 mv_sg++;
1786 } 1786 }
1787 } 1787 }
1788 1788
1789 if (likely(last_sg)) 1789 if (likely(last_sg))
1790 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL); 1790 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1791 mb(); /* ensure data structure is visible to the chipset */ 1791 mb(); /* ensure data structure is visible to the chipset */
1792 } 1792 }
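
Each ePRD entry can describe at most 64 KiB and must not cross a 64 KiB boundary, which is what the inner while-loop above enforces. A standalone sketch of that splitting arithmetic (a hypothetical harness, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Split one DMA segment into chunks that never cross a 64 KiB
 * boundary, mirroring the while-loop in mv_fill_sg() above. */
static void split_segment(uint64_t addr, uint32_t len)
{
	while (len) {
		uint32_t offset = addr & 0xffff;	/* offset within 64 KiB page */
		uint32_t chunk = len;

		if (offset + chunk > 0x10000)		/* would cross a boundary? */
			chunk = 0x10000 - offset;	/* clip to the boundary */

		printf("ePRD: addr=0x%llx len=%u\n",
		       (unsigned long long)addr, (unsigned)chunk);
		len -= chunk;
		addr += chunk;
	}
}

int main(void)
{
	/* A 100000-byte segment starting 16 bytes below a 64 KiB boundary
	 * yields three ePRD entries: 16 + 65536 + 34448 bytes. */
	split_segment(0xfff0, 100000);
	return 0;
}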
1793 1793
1794 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last) 1794 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1795 { 1795 {
1796 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | 1796 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1797 (last ? CRQB_CMD_LAST : 0); 1797 (last ? CRQB_CMD_LAST : 0);
1798 *cmdw = cpu_to_le16(tmp); 1798 *cmdw = cpu_to_le16(tmp);
1799 } 1799 }
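
For reference, each CRQB ATA-command word packs an 8-bit data byte, a register address, chip-select bits, and an optional end-of-list flag. A sketch with illustrative field positions (the DEMO_* values are stand-ins; the driver's real CRQB_CMD_* constants are defined elsewhere in this file):

#include <stdint.h>

#define DEMO_CMD_ADDR_SHIFT	8		/* register address field */
#define DEMO_CMD_CS		(0x2 << 11)	/* chip/device select bits */
#define DEMO_CMD_LAST		(1 << 15)	/* marks final word of the CRQB */

static uint16_t demo_pack_cmd(uint8_t data, uint8_t addr, int last)
{
	return data | (addr << DEMO_CMD_ADDR_SHIFT) | DEMO_CMD_CS |
	       (last ? DEMO_CMD_LAST : 0);
}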
1800 1800
1801 /** 1801 /**
1802 * mv_sff_irq_clear - Clear hardware interrupt after DMA. 1802 * mv_sff_irq_clear - Clear hardware interrupt after DMA.
1803 * @ap: Port associated with this ATA transaction. 1803 * @ap: Port associated with this ATA transaction.
1804 * 1804 *
1805 * We need this only for ATAPI bmdma transactions, 1805 * We need this only for ATAPI bmdma transactions,
1806 * as otherwise we experience spurious interrupts 1806 * as otherwise we experience spurious interrupts
1807 * after libata-sff handles the bmdma interrupts. 1807 * after libata-sff handles the bmdma interrupts.
1808 */ 1808 */
1809 static void mv_sff_irq_clear(struct ata_port *ap) 1809 static void mv_sff_irq_clear(struct ata_port *ap)
1810 { 1810 {
1811 mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ); 1811 mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
1812 } 1812 }
1813 1813
1814 /** 1814 /**
1815 * mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA. 1815 * mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
1816 * @qc: queued command to check for chipset/DMA compatibility. 1816 * @qc: queued command to check for chipset/DMA compatibility.
1817 * 1817 *
1818 * The bmdma engines cannot handle speculative data sizes 1818 * The bmdma engines cannot handle speculative data sizes
1819 * (bytecount under/over flow). So only allow DMA for 1819 * (bytecount under/over flow). So only allow DMA for
1820 * data transfer commands with known data sizes. 1820 * data transfer commands with known data sizes.
1821 * 1821 *
1822 * LOCKING: 1822 * LOCKING:
1823 * Inherited from caller. 1823 * Inherited from caller.
1824 */ 1824 */
1825 static int mv_check_atapi_dma(struct ata_queued_cmd *qc) 1825 static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
1826 { 1826 {
1827 struct scsi_cmnd *scmd = qc->scsicmd; 1827 struct scsi_cmnd *scmd = qc->scsicmd;
1828 1828
1829 if (scmd) { 1829 if (scmd) {
1830 switch (scmd->cmnd[0]) { 1830 switch (scmd->cmnd[0]) {
1831 case READ_6: 1831 case READ_6:
1832 case READ_10: 1832 case READ_10:
1833 case READ_12: 1833 case READ_12:
1834 case WRITE_6: 1834 case WRITE_6:
1835 case WRITE_10: 1835 case WRITE_10:
1836 case WRITE_12: 1836 case WRITE_12:
1837 case GPCMD_READ_CD: 1837 case GPCMD_READ_CD:
1838 case GPCMD_SEND_DVD_STRUCTURE: 1838 case GPCMD_SEND_DVD_STRUCTURE:
1839 case GPCMD_SEND_CUE_SHEET: 1839 case GPCMD_SEND_CUE_SHEET:
1840 return 0; /* DMA is safe */ 1840 return 0; /* DMA is safe */
1841 } 1841 }
1842 } 1842 }
1843 return -EOPNOTSUPP; /* use PIO instead */ 1843 return -EOPNOTSUPP; /* use PIO instead */
1844 } 1844 }
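
The filter above is a whitelist: only packet commands with a known, fixed transfer length may use BMDMA, and libata retries everything else in PIO mode when it sees -EOPNOTSUPP. A reduced sketch covering just the six READ/WRITE opcodes (the GPCMD_* cases are elided):

#include <errno.h>

/* Whitelist sketch: DMA only for fixed-size transfer commands;
 * anything else is refused so libata falls back to PIO. */
static int demo_atapi_dma_ok(unsigned char opcode)
{
	switch (opcode) {
	case 0x08:	/* READ_6 */
	case 0x28:	/* READ_10 */
	case 0xa8:	/* READ_12 */
	case 0x0a:	/* WRITE_6 */
	case 0x2a:	/* WRITE_10 */
	case 0xaa:	/* WRITE_12 */
		return 0;		/* DMA is safe */
	}
	return -EOPNOTSUPP;		/* use PIO instead */
}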
1845 1845
1846 /** 1846 /**
1847 * mv_bmdma_setup - Set up BMDMA transaction 1847 * mv_bmdma_setup - Set up BMDMA transaction
1848 * @qc: queued command to prepare DMA for. 1848 * @qc: queued command to prepare DMA for.
1849 * 1849 *
1850 * LOCKING: 1850 * LOCKING:
1851 * Inherited from caller. 1851 * Inherited from caller.
1852 */ 1852 */
1853 static void mv_bmdma_setup(struct ata_queued_cmd *qc) 1853 static void mv_bmdma_setup(struct ata_queued_cmd *qc)
1854 { 1854 {
1855 struct ata_port *ap = qc->ap; 1855 struct ata_port *ap = qc->ap;
1856 void __iomem *port_mmio = mv_ap_base(ap); 1856 void __iomem *port_mmio = mv_ap_base(ap);
1857 struct mv_port_priv *pp = ap->private_data; 1857 struct mv_port_priv *pp = ap->private_data;
1858 1858
1859 mv_fill_sg(qc); 1859 mv_fill_sg(qc);
1860 1860
1861 /* clear all DMA cmd bits */ 1861 /* clear all DMA cmd bits */
1862 writel(0, port_mmio + BMDMA_CMD); 1862 writel(0, port_mmio + BMDMA_CMD);
1863 1863
1864 /* load PRD table addr. */ 1864 /* load PRD table addr. */
1865 writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16, 1865 writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16,
1866 port_mmio + BMDMA_PRD_HIGH); 1866 port_mmio + BMDMA_PRD_HIGH);
1867 writelfl(pp->sg_tbl_dma[qc->tag], 1867 writelfl(pp->sg_tbl_dma[qc->tag],
1868 port_mmio + BMDMA_PRD_LOW); 1868 port_mmio + BMDMA_PRD_LOW);
1869 1869
1870 /* issue r/w command */ 1870 /* issue r/w command */
1871 ap->ops->sff_exec_command(ap, &qc->tf); 1871 ap->ops->sff_exec_command(ap, &qc->tf);
1872 } 1872 }
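
The (x >> 16) >> 16 idiom above (also used by mv_fill_sg() and the qc_prep routines) extracts the upper 32 bits of a dma_addr_t without ever shifting by 32, which would be undefined behaviour when dma_addr_t is a 32-bit type. A sketch of why the double shift is safe (demo_* names are hypothetical; current kernels provide upper_32_bits() for the same job):

#include <stdint.h>

/* Pretend we are on a configuration where DMA addresses are 32-bit. */
typedef uint32_t demo_dma_addr_t;

/* "addr >> 32" would be undefined behaviour on a 32-bit type;
 * two 16-bit shifts are always defined and simply yield 0 here. */
static uint32_t demo_upper_32(demo_dma_addr_t addr)
{
	return (addr >> 16) >> 16;
}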
1873 1873
1874 /** 1874 /**
1875 * mv_bmdma_start - Start a BMDMA transaction 1875 * mv_bmdma_start - Start a BMDMA transaction
1876 * @qc: queued command to start DMA on. 1876 * @qc: queued command to start DMA on.
1877 * 1877 *
1878 * LOCKING: 1878 * LOCKING:
1879 * Inherited from caller. 1879 * Inherited from caller.
1880 */ 1880 */
1881 static void mv_bmdma_start(struct ata_queued_cmd *qc) 1881 static void mv_bmdma_start(struct ata_queued_cmd *qc)
1882 { 1882 {
1883 struct ata_port *ap = qc->ap; 1883 struct ata_port *ap = qc->ap;
1884 void __iomem *port_mmio = mv_ap_base(ap); 1884 void __iomem *port_mmio = mv_ap_base(ap);
1885 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); 1885 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
1886 u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START; 1886 u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;
1887 1887
1888 /* start host DMA transaction */ 1888 /* start host DMA transaction */
1889 writelfl(cmd, port_mmio + BMDMA_CMD); 1889 writelfl(cmd, port_mmio + BMDMA_CMD);
1890 } 1890 }
1891 1891
1892 /** 1892 /**
1893 * mv_bmdma_stop_ap - Stop BMDMA transfer 1893 * mv_bmdma_stop_ap - Stop BMDMA transfer
1894 * @ap: port on which to stop DMA. 1894 * @ap: port on which to stop DMA.
1895 * 1895 *
1896 * Clears the ATA_DMA_START flag in the bmdma control register 1896 * Clears the ATA_DMA_START flag in the bmdma control register
1897 * 1897 *
1898 * LOCKING: 1898 * LOCKING:
1899 * Inherited from caller. 1899 * Inherited from caller.
1900 */ 1900 */
1901 static void mv_bmdma_stop_ap(struct ata_port *ap) 1901 static void mv_bmdma_stop_ap(struct ata_port *ap)
1902 { 1902 {
1903 void __iomem *port_mmio = mv_ap_base(ap); 1903 void __iomem *port_mmio = mv_ap_base(ap);
1904 u32 cmd; 1904 u32 cmd;
1905 1905
1906 /* clear start/stop bit */ 1906 /* clear start/stop bit */
1907 cmd = readl(port_mmio + BMDMA_CMD); 1907 cmd = readl(port_mmio + BMDMA_CMD);
1908 if (cmd & ATA_DMA_START) { 1908 if (cmd & ATA_DMA_START) {
1909 cmd &= ~ATA_DMA_START; 1909 cmd &= ~ATA_DMA_START;
1910 writelfl(cmd, port_mmio + BMDMA_CMD); 1910 writelfl(cmd, port_mmio + BMDMA_CMD);
1911 1911
1912 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ 1912 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
1913 ata_sff_dma_pause(ap); 1913 ata_sff_dma_pause(ap);
1914 } 1914 }
1915 } 1915 }
1916 1916
1917 static void mv_bmdma_stop(struct ata_queued_cmd *qc) 1917 static void mv_bmdma_stop(struct ata_queued_cmd *qc)
1918 { 1918 {
1919 mv_bmdma_stop_ap(qc->ap); 1919 mv_bmdma_stop_ap(qc->ap);
1920 } 1920 }
1921 1921
1922 /** 1922 /**
1923 * mv_bmdma_status - Read BMDMA status 1923 * mv_bmdma_status - Read BMDMA status
1924 * @ap: port for which to retrieve DMA status. 1924 * @ap: port for which to retrieve DMA status.
1925 * 1925 *
1926 * Read and return equivalent of the sff BMDMA status register. 1926 * Read and return equivalent of the sff BMDMA status register.
1927 * 1927 *
1928 * LOCKING: 1928 * LOCKING:
1929 * Inherited from caller. 1929 * Inherited from caller.
1930 */ 1930 */
1931 static u8 mv_bmdma_status(struct ata_port *ap) 1931 static u8 mv_bmdma_status(struct ata_port *ap)
1932 { 1932 {
1933 void __iomem *port_mmio = mv_ap_base(ap); 1933 void __iomem *port_mmio = mv_ap_base(ap);
1934 u32 reg, status; 1934 u32 reg, status;
1935 1935
1936 /* 1936 /*
1937 * Other bits are valid only if ATA_DMA_ACTIVE==0, 1937 * Other bits are valid only if ATA_DMA_ACTIVE==0,
1938 * and the ATA_DMA_INTR bit doesn't exist. 1938 * and the ATA_DMA_INTR bit doesn't exist.
1939 */ 1939 */
1940 reg = readl(port_mmio + BMDMA_STATUS); 1940 reg = readl(port_mmio + BMDMA_STATUS);
1941 if (reg & ATA_DMA_ACTIVE) 1941 if (reg & ATA_DMA_ACTIVE)
1942 status = ATA_DMA_ACTIVE; 1942 status = ATA_DMA_ACTIVE;
1943 else if (reg & ATA_DMA_ERR) 1943 else if (reg & ATA_DMA_ERR)
1944 status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR; 1944 status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
1945 else { 1945 else {
1946 /* 1946 /*
1947 * Just because DMA_ACTIVE is 0 (DMA completed), 1947 * Just because DMA_ACTIVE is 0 (DMA completed),
1948 * this does _not_ mean the device is "done". 1948 * this does _not_ mean the device is "done".
1949 * So we should not yet be signalling ATA_DMA_INTR 1949 * So we should not yet be signalling ATA_DMA_INTR
1950 * in some cases. E.g. DSM/TRIM, and perhaps others. 1950 * in some cases. E.g. DSM/TRIM, and perhaps others.
1951 */ 1951 */
1952 mv_bmdma_stop_ap(ap); 1952 mv_bmdma_stop_ap(ap);
1953 if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY) 1953 if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
1954 status = 0; 1954 status = 0;
1955 else 1955 else
1956 status = ATA_DMA_INTR; 1956 status = ATA_DMA_INTR;
1957 } 1957 }
1958 return status; 1958 return status;
1959 } 1959 }
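
The function above synthesizes a classic SFF BMDMA status byte from Marvell's register, which has no real interrupt bit. A pure-function sketch of the mapping (DEMO_* bit values are illustrative; busy reflects ATA_BUSY read from the alternate status register):

#include <stdint.h>

#define DEMO_DMA_ACTIVE	(1 << 0)
#define DEMO_DMA_ERR	(1 << 1)
#define DEMO_DMA_INTR	(1 << 2)

static uint8_t demo_bmdma_status(uint32_t reg, int busy)
{
	if (reg & DEMO_DMA_ACTIVE)
		return DEMO_DMA_ACTIVE;			/* transfer still in flight */
	if (reg & DEMO_DMA_ERR)
		return DEMO_DMA_ERR | DEMO_DMA_INTR;	/* failed: error + "interrupt" */
	return busy ? 0 : DEMO_DMA_INTR;		/* done only once device is not BUSY */
}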
1960 1960
1961 static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc) 1961 static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
1962 { 1962 {
1963 struct ata_taskfile *tf = &qc->tf; 1963 struct ata_taskfile *tf = &qc->tf;
1964 /* 1964 /*
1965 * Workaround for 88SX60x1 FEr SATA#24. 1965 * Workaround for 88SX60x1 FEr SATA#24.
1966 * 1966 *
1967 * Chip may corrupt WRITEs if multi_count >= 4kB. 1967 * Chip may corrupt WRITEs if multi_count >= 4kB.
1968 * Note that READs are unaffected. 1968 * Note that READs are unaffected.
1969 * 1969 *
1970 * It's not clear if this errata really means "4K bytes", 1970 * It's not clear if this errata really means "4K bytes",
1971 * or if it always happens for multi_count > 7 1971 * or if it always happens for multi_count > 7
1972 * regardless of device sector_size. 1972 * regardless of device sector_size.
1973 * 1973 *
1974 * So, for safety, any write with multi_count > 7 1974 * So, for safety, any write with multi_count > 7
1975 * gets converted here into a regular PIO write instead: 1975 * gets converted here into a regular PIO write instead:
1976 */ 1976 */
1977 if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) { 1977 if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) {
1978 if (qc->dev->multi_count > 7) { 1978 if (qc->dev->multi_count > 7) {
1979 switch (tf->command) { 1979 switch (tf->command) {
1980 case ATA_CMD_WRITE_MULTI: 1980 case ATA_CMD_WRITE_MULTI:
1981 tf->command = ATA_CMD_PIO_WRITE; 1981 tf->command = ATA_CMD_PIO_WRITE;
1982 break; 1982 break;
1983 case ATA_CMD_WRITE_MULTI_FUA_EXT: 1983 case ATA_CMD_WRITE_MULTI_FUA_EXT:
1984 tf->flags &= ~ATA_TFLAG_FUA; /* ugh */ 1984 tf->flags &= ~ATA_TFLAG_FUA; /* ugh */
1985 /* fall through */ 1985 /* fall through */
1986 case ATA_CMD_WRITE_MULTI_EXT: 1986 case ATA_CMD_WRITE_MULTI_EXT:
1987 tf->command = ATA_CMD_PIO_WRITE_EXT; 1987 tf->command = ATA_CMD_PIO_WRITE_EXT;
1988 break; 1988 break;
1989 } 1989 }
1990 } 1990 }
1991 } 1991 }
1992 } 1992 }
1993 1993
1994 /** 1994 /**
1995 * mv_qc_prep - Host specific command preparation. 1995 * mv_qc_prep - Host specific command preparation.
1996 * @qc: queued command to prepare 1996 * @qc: queued command to prepare
1997 * 1997 *
1998 * This routine simply redirects to the general purpose routine 1998 * This routine simply redirects to the general purpose routine
1999 * if command is not DMA. Else, it handles prep of the CRQB 1999 * if command is not DMA. Else, it handles prep of the CRQB
2000 * (command request block), does some sanity checking, and calls 2000 * (command request block), does some sanity checking, and calls
2001 * the SG load routine. 2001 * the SG load routine.
2002 * 2002 *
2003 * LOCKING: 2003 * LOCKING:
2004 * Inherited from caller. 2004 * Inherited from caller.
2005 */ 2005 */
2006 static void mv_qc_prep(struct ata_queued_cmd *qc) 2006 static void mv_qc_prep(struct ata_queued_cmd *qc)
2007 { 2007 {
2008 struct ata_port *ap = qc->ap; 2008 struct ata_port *ap = qc->ap;
2009 struct mv_port_priv *pp = ap->private_data; 2009 struct mv_port_priv *pp = ap->private_data;
2010 __le16 *cw; 2010 __le16 *cw;
2011 struct ata_taskfile *tf = &qc->tf; 2011 struct ata_taskfile *tf = &qc->tf;
2012 u16 flags = 0; 2012 u16 flags = 0;
2013 unsigned in_index; 2013 unsigned in_index;
2014 2014
2015 switch (tf->protocol) { 2015 switch (tf->protocol) {
2016 case ATA_PROT_DMA: 2016 case ATA_PROT_DMA:
2017 if (tf->command == ATA_CMD_DSM) 2017 if (tf->command == ATA_CMD_DSM)
2018 return; 2018 return;
2019 /* fall-thru */ 2019 /* fall-thru */
2020 case ATA_PROT_NCQ: 2020 case ATA_PROT_NCQ:
2021 break; /* continue below */ 2021 break; /* continue below */
2022 case ATA_PROT_PIO: 2022 case ATA_PROT_PIO:
2023 mv_rw_multi_errata_sata24(qc); 2023 mv_rw_multi_errata_sata24(qc);
2024 return; 2024 return;
2025 default: 2025 default:
2026 return; 2026 return;
2027 } 2027 }
2028 2028
2029 /* Fill in command request block 2029 /* Fill in command request block
2030 */ 2030 */
2031 if (!(tf->flags & ATA_TFLAG_WRITE)) 2031 if (!(tf->flags & ATA_TFLAG_WRITE))
2032 flags |= CRQB_FLAG_READ; 2032 flags |= CRQB_FLAG_READ;
2033 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); 2033 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
2034 flags |= qc->tag << CRQB_TAG_SHIFT; 2034 flags |= qc->tag << CRQB_TAG_SHIFT;
2035 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT; 2035 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
2036 2036
2037 /* get current queue index from software */ 2037 /* get current queue index from software */
2038 in_index = pp->req_idx; 2038 in_index = pp->req_idx;
2039 2039
2040 pp->crqb[in_index].sg_addr = 2040 pp->crqb[in_index].sg_addr =
2041 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff); 2041 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
2042 pp->crqb[in_index].sg_addr_hi = 2042 pp->crqb[in_index].sg_addr_hi =
2043 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16); 2043 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
2044 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags); 2044 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
2045 2045
2046 cw = &pp->crqb[in_index].ata_cmd[0]; 2046 cw = &pp->crqb[in_index].ata_cmd[0];
2047 2047
2048 /* Sadly, the CRQB cannot accommodate all registers--there are 2048 /* Sadly, the CRQB cannot accommodate all registers--there are
2049 * only 11 bytes...so we must pick and choose required 2049 * only 11 bytes...so we must pick and choose required
2050 * registers based on the command. So, we drop feature and 2050 * registers based on the command. So, we drop feature and
2051 * hob_feature for [RW] DMA commands, but they are needed for 2051 * hob_feature for [RW] DMA commands, but they are needed for
2052 * NCQ. NCQ will drop hob_nsect, which is not needed there 2052 * NCQ. NCQ will drop hob_nsect, which is not needed there
2053 * (nsect is used only for the tag; feat/hob_feat hold true nsect). 2053 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
2054 */ 2054 */
2055 switch (tf->command) { 2055 switch (tf->command) {
2056 case ATA_CMD_READ: 2056 case ATA_CMD_READ:
2057 case ATA_CMD_READ_EXT: 2057 case ATA_CMD_READ_EXT:
2058 case ATA_CMD_WRITE: 2058 case ATA_CMD_WRITE:
2059 case ATA_CMD_WRITE_EXT: 2059 case ATA_CMD_WRITE_EXT:
2060 case ATA_CMD_WRITE_FUA_EXT: 2060 case ATA_CMD_WRITE_FUA_EXT:
2061 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0); 2061 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
2062 break; 2062 break;
2063 case ATA_CMD_FPDMA_READ: 2063 case ATA_CMD_FPDMA_READ:
2064 case ATA_CMD_FPDMA_WRITE: 2064 case ATA_CMD_FPDMA_WRITE:
2065 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0); 2065 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
2066 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0); 2066 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
2067 break; 2067 break;
2068 default: 2068 default:
2069 /* The only other commands EDMA supports in non-queued and 2069 /* The only other commands EDMA supports in non-queued and
2070 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none 2070 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
2071 * of which are defined/used by Linux. If we get here, this 2071 * of which are defined/used by Linux. If we get here, this
2072 * driver needs work. 2072 * driver needs work.
2073 * 2073 *
2074 * FIXME: modify libata to give qc_prep a return value and 2074 * FIXME: modify libata to give qc_prep a return value and
2075 * return error here. 2075 * return error here.
2076 */ 2076 */
2077 BUG_ON(tf->command); 2077 BUG_ON(tf->command);
2078 break; 2078 break;
2079 } 2079 }
2080 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0); 2080 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
2081 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0); 2081 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
2082 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0); 2082 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
2083 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0); 2083 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
2084 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0); 2084 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
2085 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0); 2085 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
2086 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0); 2086 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
2087 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0); 2087 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
2088 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */ 2088 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
2089 2089
2090 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 2090 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2091 return; 2091 return;
2092 mv_fill_sg(qc); 2092 mv_fill_sg(qc);
2093 } 2093 }
2094 2094
2095 /** 2095 /**
2096 * mv_qc_prep_iie - Host specific command preparation. 2096 * mv_qc_prep_iie - Host specific command preparation.
2097 * @qc: queued command to prepare 2097 * @qc: queued command to prepare
2098 * 2098 *
2099 * This routine simply redirects to the general purpose routine 2099 * This routine simply redirects to the general purpose routine
2100 * if command is not DMA. Else, it handles prep of the CRQB 2100 * if command is not DMA. Else, it handles prep of the CRQB
2101 * (command request block), does some sanity checking, and calls 2101 * (command request block), does some sanity checking, and calls
2102 * the SG load routine. 2102 * the SG load routine.
2103 * 2103 *
2104 * LOCKING: 2104 * LOCKING:
2105 * Inherited from caller. 2105 * Inherited from caller.
2106 */ 2106 */
2107 static void mv_qc_prep_iie(struct ata_queued_cmd *qc) 2107 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
2108 { 2108 {
2109 struct ata_port *ap = qc->ap; 2109 struct ata_port *ap = qc->ap;
2110 struct mv_port_priv *pp = ap->private_data; 2110 struct mv_port_priv *pp = ap->private_data;
2111 struct mv_crqb_iie *crqb; 2111 struct mv_crqb_iie *crqb;
2112 struct ata_taskfile *tf = &qc->tf; 2112 struct ata_taskfile *tf = &qc->tf;
2113 unsigned in_index; 2113 unsigned in_index;
2114 u32 flags = 0; 2114 u32 flags = 0;
2115 2115
2116 if ((tf->protocol != ATA_PROT_DMA) && 2116 if ((tf->protocol != ATA_PROT_DMA) &&
2117 (tf->protocol != ATA_PROT_NCQ)) 2117 (tf->protocol != ATA_PROT_NCQ))
2118 return; 2118 return;
2119 if (tf->command == ATA_CMD_DSM) 2119 if (tf->command == ATA_CMD_DSM)
2120 return; /* use bmdma for this */ 2120 return; /* use bmdma for this */
2121 2121
2122 /* Fill in Gen IIE command request block */ 2122 /* Fill in Gen IIE command request block */
2123 if (!(tf->flags & ATA_TFLAG_WRITE)) 2123 if (!(tf->flags & ATA_TFLAG_WRITE))
2124 flags |= CRQB_FLAG_READ; 2124 flags |= CRQB_FLAG_READ;
2125 2125
2126 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); 2126 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
2127 flags |= qc->tag << CRQB_TAG_SHIFT; 2127 flags |= qc->tag << CRQB_TAG_SHIFT;
2128 flags |= qc->tag << CRQB_HOSTQ_SHIFT; 2128 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
2129 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT; 2129 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
2130 2130
2131 /* get current queue index from software */ 2131 /* get current queue index from software */
2132 in_index = pp->req_idx; 2132 in_index = pp->req_idx;
2133 2133
2134 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index]; 2134 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
2135 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff); 2135 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
2136 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16); 2136 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
2137 crqb->flags = cpu_to_le32(flags); 2137 crqb->flags = cpu_to_le32(flags);
2138 2138
2139 crqb->ata_cmd[0] = cpu_to_le32( 2139 crqb->ata_cmd[0] = cpu_to_le32(
2140 (tf->command << 16) | 2140 (tf->command << 16) |
2141 (tf->feature << 24) 2141 (tf->feature << 24)
2142 ); 2142 );
2143 crqb->ata_cmd[1] = cpu_to_le32( 2143 crqb->ata_cmd[1] = cpu_to_le32(
2144 (tf->lbal << 0) | 2144 (tf->lbal << 0) |
2145 (tf->lbam << 8) | 2145 (tf->lbam << 8) |
2146 (tf->lbah << 16) | 2146 (tf->lbah << 16) |
2147 (tf->device << 24) 2147 (tf->device << 24)
2148 ); 2148 );
2149 crqb->ata_cmd[2] = cpu_to_le32( 2149 crqb->ata_cmd[2] = cpu_to_le32(
2150 (tf->hob_lbal << 0) | 2150 (tf->hob_lbal << 0) |
2151 (tf->hob_lbam << 8) | 2151 (tf->hob_lbam << 8) |
2152 (tf->hob_lbah << 16) | 2152 (tf->hob_lbah << 16) |
2153 (tf->hob_feature << 24) 2153 (tf->hob_feature << 24)
2154 ); 2154 );
2155 crqb->ata_cmd[3] = cpu_to_le32( 2155 crqb->ata_cmd[3] = cpu_to_le32(
2156 (tf->nsect << 0) | 2156 (tf->nsect << 0) |
2157 (tf->hob_nsect << 8) 2157 (tf->hob_nsect << 8)
2158 ); 2158 );
2159 2159
2160 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 2160 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2161 return; 2161 return;
2162 mv_fill_sg(qc); 2162 mv_fill_sg(qc);
2163 } 2163 }
2164 2164
2165 /** 2165 /**
2166 * mv_sff_check_status - fetch device status, if valid 2166 * mv_sff_check_status - fetch device status, if valid
2167 * @ap: ATA port to fetch status from 2167 * @ap: ATA port to fetch status from
2168 * 2168 *
2169 * When using command issue via mv_qc_issue_fis(), 2169 * When using command issue via mv_qc_issue_fis(),
2170 * the initial ATA_BUSY state does not show up in the 2170 * the initial ATA_BUSY state does not show up in the
2171 * ATA status (shadow) register. This can confuse libata! 2171 * ATA status (shadow) register. This can confuse libata!
2172 * 2172 *
2173 * So we have a hook here to fake ATA_BUSY for that situation, 2173 * So we have a hook here to fake ATA_BUSY for that situation,
2174 * until the first time a BUSY, DRQ, or ERR bit is seen. 2174 * until the first time a BUSY, DRQ, or ERR bit is seen.
2175 * 2175 *
2176 * The rest of the time, it simply returns the ATA status register. 2176 * The rest of the time, it simply returns the ATA status register.
2177 */ 2177 */
2178 static u8 mv_sff_check_status(struct ata_port *ap) 2178 static u8 mv_sff_check_status(struct ata_port *ap)
2179 { 2179 {
2180 u8 stat = ioread8(ap->ioaddr.status_addr); 2180 u8 stat = ioread8(ap->ioaddr.status_addr);
2181 struct mv_port_priv *pp = ap->private_data; 2181 struct mv_port_priv *pp = ap->private_data;
2182 2182
2183 if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) { 2183 if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
2184 if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR)) 2184 if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
2185 pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; 2185 pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
2186 else 2186 else
2187 stat = ATA_BUSY; 2187 stat = ATA_BUSY;
2188 } 2188 }
2189 return stat; 2189 return stat;
2190 } 2190 }
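
The fake-BUSY behaviour described above amounts to a one-shot latch: report BUSY until the device itself first shows BUSY, DRQ, or ERR, then step aside. A sketch (the bit values are the standard ATA status bits; the faking flag stands in for MV_PP_FLAG_FAKE_ATA_BUSY):

/* One-shot BUSY latch: while *faking is set, report BUSY until the
 * device first shows BUSY (0x80), DRQ (0x08), or ERR (0x01) itself. */
static unsigned char demo_check_status(unsigned char hw_stat, int *faking)
{
	if (*faking) {
		if (hw_stat & (0x80 | 0x08 | 0x01))
			*faking = 0;	/* device has caught up; stop faking */
		else
			return 0x80;	/* pretend ATA_BUSY */
	}
	return hw_stat;
}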
2191 2191
2192 /** 2192 /**
2193 * mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register 2193 * mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
2194 * @fis: fis to be sent 2194 * @fis: fis to be sent
2195 * @nwords: number of 32-bit words in the fis 2195 * @nwords: number of 32-bit words in the fis
2196 */ 2196 */
2197 static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords) 2197 static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
2198 { 2198 {
2199 void __iomem *port_mmio = mv_ap_base(ap); 2199 void __iomem *port_mmio = mv_ap_base(ap);
2200 u32 ifctl, old_ifctl, ifstat; 2200 u32 ifctl, old_ifctl, ifstat;
2201 int i, timeout = 200, final_word = nwords - 1; 2201 int i, timeout = 200, final_word = nwords - 1;
2202 2202
2203 /* Initiate FIS transmission mode */ 2203 /* Initiate FIS transmission mode */
2204 old_ifctl = readl(port_mmio + SATA_IFCTL); 2204 old_ifctl = readl(port_mmio + SATA_IFCTL);
2205 ifctl = 0x100 | (old_ifctl & 0xf); 2205 ifctl = 0x100 | (old_ifctl & 0xf);
2206 writelfl(ifctl, port_mmio + SATA_IFCTL); 2206 writelfl(ifctl, port_mmio + SATA_IFCTL);
2207 2207
2208 /* Send all words of the FIS except for the final word */ 2208 /* Send all words of the FIS except for the final word */
2209 for (i = 0; i < final_word; ++i) 2209 for (i = 0; i < final_word; ++i)
2210 writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS); 2210 writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);
2211 2211
2212 /* Flag end-of-transmission, and then send the final word */ 2212 /* Flag end-of-transmission, and then send the final word */
2213 writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL); 2213 writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
2214 writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS); 2214 writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);
2215 2215
2216 /* 2216 /*
2217 * Wait for FIS transmission to complete. 2217 * Wait for FIS transmission to complete.
2218 * This typically takes just a single iteration. 2218 * This typically takes just a single iteration.
2219 */ 2219 */
2220 do { 2220 do {
2221 ifstat = readl(port_mmio + SATA_IFSTAT); 2221 ifstat = readl(port_mmio + SATA_IFSTAT);
2222 } while (!(ifstat & 0x1000) && --timeout); 2222 } while (!(ifstat & 0x1000) && --timeout);
2223 2223
2224 /* Restore original port configuration */ 2224 /* Restore original port configuration */
2225 writelfl(old_ifctl, port_mmio + SATA_IFCTL); 2225 writelfl(old_ifctl, port_mmio + SATA_IFCTL);
2226 2226
2227 /* See if it worked */ 2227 /* See if it worked */
2228 if ((ifstat & 0x3000) != 0x1000) { 2228 if ((ifstat & 0x3000) != 0x1000) {
2229 ata_port_printk(ap, KERN_WARNING, 2229 ata_port_printk(ap, KERN_WARNING,
2230 "%s transmission error, ifstat=%08x\n", 2230 "%s transmission error, ifstat=%08x\n",
2231 __func__, ifstat); 2231 __func__, ifstat);
2232 return AC_ERR_OTHER; 2232 return AC_ERR_OTHER;
2233 } 2233 }
2234 return 0; 2234 return 0;
2235 } 2235 }
2236 2236
2237 /** 2237 /**
2238 * mv_qc_issue_fis - Issue a command directly as a FIS 2238 * mv_qc_issue_fis - Issue a command directly as a FIS
2239 * @qc: queued command to start 2239 * @qc: queued command to start
2240 * 2240 *
2241 * Note that the ATA shadow registers are not updated 2241 * Note that the ATA shadow registers are not updated
2242 * after command issue, so the device will appear "READY" 2242 * after command issue, so the device will appear "READY"
2243 * if polled, even while it is BUSY processing the command. 2243 * if polled, even while it is BUSY processing the command.
2244 * 2244 *
2245 * So we use a status hook to fake ATA_BUSY until the drive changes state. 2245 * So we use a status hook to fake ATA_BUSY until the drive changes state.
2246 * 2246 *
2247 * Note: we don't get updated shadow regs on *completion* 2247 * Note: we don't get updated shadow regs on *completion*
2248 * of non-data commands. So avoid sending them via this function, 2248 * of non-data commands. So avoid sending them via this function,
2249 * as they will appear to have completed immediately. 2249 * as they will appear to have completed immediately.
2250 * 2250 *
2251 * GEN_IIE has special registers that we could get the result tf from, 2251 * GEN_IIE has special registers that we could get the result tf from,
2252 * but earlier chipsets do not. For now, we ignore those registers. 2252 * but earlier chipsets do not. For now, we ignore those registers.
2253 */ 2253 */
2254 static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc) 2254 static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
2255 { 2255 {
2256 struct ata_port *ap = qc->ap; 2256 struct ata_port *ap = qc->ap;
2257 struct mv_port_priv *pp = ap->private_data; 2257 struct mv_port_priv *pp = ap->private_data;
2258 struct ata_link *link = qc->dev->link; 2258 struct ata_link *link = qc->dev->link;
2259 u32 fis[5]; 2259 u32 fis[5];
2260 int err = 0; 2260 int err = 0;
2261 2261
2262 ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis); 2262 ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
2263 err = mv_send_fis(ap, fis, ARRAY_SIZE(fis)); 2263 err = mv_send_fis(ap, fis, ARRAY_SIZE(fis));
2264 if (err) 2264 if (err)
2265 return err; 2265 return err;
2266 2266
2267 switch (qc->tf.protocol) { 2267 switch (qc->tf.protocol) {
2268 case ATAPI_PROT_PIO: 2268 case ATAPI_PROT_PIO:
2269 pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY; 2269 pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2270 /* fall through */ 2270 /* fall through */
2271 case ATAPI_PROT_NODATA: 2271 case ATAPI_PROT_NODATA:
2272 ap->hsm_task_state = HSM_ST_FIRST; 2272 ap->hsm_task_state = HSM_ST_FIRST;
2273 break; 2273 break;
2274 case ATA_PROT_PIO: 2274 case ATA_PROT_PIO:
2275 pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY; 2275 pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2276 if (qc->tf.flags & ATA_TFLAG_WRITE) 2276 if (qc->tf.flags & ATA_TFLAG_WRITE)
2277 ap->hsm_task_state = HSM_ST_FIRST; 2277 ap->hsm_task_state = HSM_ST_FIRST;
2278 else 2278 else
2279 ap->hsm_task_state = HSM_ST; 2279 ap->hsm_task_state = HSM_ST;
2280 break; 2280 break;
2281 default: 2281 default:
2282 ap->hsm_task_state = HSM_ST_LAST; 2282 ap->hsm_task_state = HSM_ST_LAST;
2283 break; 2283 break;
2284 } 2284 }
2285 2285
2286 if (qc->tf.flags & ATA_TFLAG_POLLING) 2286 if (qc->tf.flags & ATA_TFLAG_POLLING)
2287 ata_sff_queue_pio_task(ap, 0); 2287 ata_sff_queue_pio_task(link, 0);
2288 return 0; 2288 return 0;
2289 } 2289 }
2290 2290
2291 /** 2291 /**
2292 * mv_qc_issue - Initiate a command to the host 2292 * mv_qc_issue - Initiate a command to the host
2293 * @qc: queued command to start 2293 * @qc: queued command to start
2294 * 2294 *
2295 * This routine simply redirects to the general purpose routine 2295 * This routine simply redirects to the general purpose routine
2296 * if command is not DMA. Else, it sanity checks our local 2296 * if command is not DMA. Else, it sanity checks our local
2297 * caches of the request producer/consumer indices then enables 2297 * caches of the request producer/consumer indices then enables
2298 * DMA and bumps the request producer index. 2298 * DMA and bumps the request producer index.
2299 * 2299 *
2300 * LOCKING: 2300 * LOCKING:
2301 * Inherited from caller. 2301 * Inherited from caller.
2302 */ 2302 */
2303 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) 2303 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
2304 { 2304 {
2305 static int limit_warnings = 10; 2305 static int limit_warnings = 10;
2306 struct ata_port *ap = qc->ap; 2306 struct ata_port *ap = qc->ap;
2307 void __iomem *port_mmio = mv_ap_base(ap); 2307 void __iomem *port_mmio = mv_ap_base(ap);
2308 struct mv_port_priv *pp = ap->private_data; 2308 struct mv_port_priv *pp = ap->private_data;
2309 u32 in_index; 2309 u32 in_index;
2310 unsigned int port_irqs; 2310 unsigned int port_irqs;
2311 2311
2312 pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */ 2312 pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */
2313 2313
2314 switch (qc->tf.protocol) { 2314 switch (qc->tf.protocol) {
2315 case ATA_PROT_DMA: 2315 case ATA_PROT_DMA:
2316 if (qc->tf.command == ATA_CMD_DSM) { 2316 if (qc->tf.command == ATA_CMD_DSM) {
2317 if (!ap->ops->bmdma_setup) /* no bmdma on GEN_I */ 2317 if (!ap->ops->bmdma_setup) /* no bmdma on GEN_I */
2318 return AC_ERR_OTHER; 2318 return AC_ERR_OTHER;
2319 break; /* use bmdma for this */ 2319 break; /* use bmdma for this */
2320 } 2320 }
2321 /* fall thru */ 2321 /* fall thru */
2322 case ATA_PROT_NCQ: 2322 case ATA_PROT_NCQ:
2323 mv_start_edma(ap, port_mmio, pp, qc->tf.protocol); 2323 mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
2324 pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK; 2324 pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2325 in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT; 2325 in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
2326 2326
2327 /* Write the request in pointer to kick the EDMA to life */ 2327 /* Write the request in pointer to kick the EDMA to life */
2328 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index, 2328 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
2329 port_mmio + EDMA_REQ_Q_IN_PTR); 2329 port_mmio + EDMA_REQ_Q_IN_PTR);
2330 return 0; 2330 return 0;
2331 2331
2332 case ATA_PROT_PIO: 2332 case ATA_PROT_PIO:
2333 /* 2333 /*
2334 * Errata SATA#16, SATA#24: warn if multiple DRQs expected. 2334 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
2335 * 2335 *
2336 * Someday, we might implement special polling workarounds 2336 * Someday, we might implement special polling workarounds
2337 * for these, but it all seems rather unnecessary since we 2337 * for these, but it all seems rather unnecessary since we
2338 * normally use only DMA for commands which transfer more 2338 * normally use only DMA for commands which transfer more
2339 * than a single block of data. 2339 * than a single block of data.
2340 * 2340 *
2341 * Much of the time, this could just work regardless. 2341 * Much of the time, this could just work regardless.
2342 * So for now, just log the incident, and allow the attempt. 2342 * So for now, just log the incident, and allow the attempt.
2343 */ 2343 */
2344 if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) { 2344 if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
2345 --limit_warnings; 2345 --limit_warnings;
2346 ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME 2346 ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME
2347 ": attempting PIO w/multiple DRQ: " 2347 ": attempting PIO w/multiple DRQ: "
2348 "this may fail due to h/w errata\n"); 2348 "this may fail due to h/w errata\n");
2349 } 2349 }
2350 /* drop through */ 2350 /* drop through */
2351 case ATA_PROT_NODATA: 2351 case ATA_PROT_NODATA:
2352 case ATAPI_PROT_PIO: 2352 case ATAPI_PROT_PIO:
2353 case ATAPI_PROT_NODATA: 2353 case ATAPI_PROT_NODATA:
2354 if (ap->flags & ATA_FLAG_PIO_POLLING) 2354 if (ap->flags & ATA_FLAG_PIO_POLLING)
2355 qc->tf.flags |= ATA_TFLAG_POLLING; 2355 qc->tf.flags |= ATA_TFLAG_POLLING;
2356 break; 2356 break;
2357 } 2357 }
2358 2358
2359 if (qc->tf.flags & ATA_TFLAG_POLLING) 2359 if (qc->tf.flags & ATA_TFLAG_POLLING)
2360 port_irqs = ERR_IRQ; /* mask device interrupt when polling */ 2360 port_irqs = ERR_IRQ; /* mask device interrupt when polling */
2361 else 2361 else
2362 port_irqs = ERR_IRQ | DONE_IRQ; /* unmask all interrupts */ 2362 port_irqs = ERR_IRQ | DONE_IRQ; /* unmask all interrupts */
2363 2363
2364 /* 2364 /*
2365 * We're about to send a non-EDMA capable command to the 2365 * We're about to send a non-EDMA capable command to the
2366 * port. Turn off EDMA so there won't be problems accessing 2366 * port. Turn off EDMA so there won't be problems accessing
2367 * shadow block, etc registers. 2367 * shadow block, etc registers.
2368 */ 2368 */
2369 mv_stop_edma(ap); 2369 mv_stop_edma(ap);
2370 mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs); 2370 mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
2371 mv_pmp_select(ap, qc->dev->link->pmp); 2371 mv_pmp_select(ap, qc->dev->link->pmp);
2372 2372
2373 if (qc->tf.command == ATA_CMD_READ_LOG_EXT) { 2373 if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
2374 struct mv_host_priv *hpriv = ap->host->private_data; 2374 struct mv_host_priv *hpriv = ap->host->private_data;
2375 /* 2375 /*
2376 * Workaround for 88SX60x1 FEr SATA#25 (part 2). 2376 * Workaround for 88SX60x1 FEr SATA#25 (part 2).
2377 * 2377 *
2378 * After any NCQ error, the READ_LOG_EXT command 2378 * After any NCQ error, the READ_LOG_EXT command
2379 * from libata-eh *must* use mv_qc_issue_fis(). 2379 * from libata-eh *must* use mv_qc_issue_fis().
2380 * Otherwise it might fail, due to chip errata. 2380 * Otherwise it might fail, due to chip errata.
2381 * 2381 *
2382 * Rather than special-case it, we'll just *always* 2382 * Rather than special-case it, we'll just *always*
2383 * use this method here for READ_LOG_EXT, making for 2383 * use this method here for READ_LOG_EXT, making for
2384 * easier testing. 2384 * easier testing.
2385 */ 2385 */
2386 if (IS_GEN_II(hpriv)) 2386 if (IS_GEN_II(hpriv))
2387 return mv_qc_issue_fis(qc); 2387 return mv_qc_issue_fis(qc);
2388 } 2388 }
2389 return ata_bmdma_qc_issue(qc); 2389 return ata_bmdma_qc_issue(qc);
2390 } 2390 }
2391 2391
2392 static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap) 2392 static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
2393 { 2393 {
2394 struct mv_port_priv *pp = ap->private_data; 2394 struct mv_port_priv *pp = ap->private_data;
2395 struct ata_queued_cmd *qc; 2395 struct ata_queued_cmd *qc;
2396 2396
2397 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) 2397 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
2398 return NULL; 2398 return NULL;
2399 qc = ata_qc_from_tag(ap, ap->link.active_tag); 2399 qc = ata_qc_from_tag(ap, ap->link.active_tag);
2400 if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING)) 2400 if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
2401 return qc; 2401 return qc;
2402 return NULL; 2402 return NULL;
2403 } 2403 }
2404 2404
2405 static void mv_pmp_error_handler(struct ata_port *ap) 2405 static void mv_pmp_error_handler(struct ata_port *ap)
2406 { 2406 {
2407 unsigned int pmp, pmp_map; 2407 unsigned int pmp, pmp_map;
2408 struct mv_port_priv *pp = ap->private_data; 2408 struct mv_port_priv *pp = ap->private_data;
2409 2409
2410 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) { 2410 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
2411 /* 2411 /*
2412 * Perform NCQ error analysis on failed PMPs 2412 * Perform NCQ error analysis on failed PMPs
2413 * before we freeze the port entirely. 2413 * before we freeze the port entirely.
2414 * 2414 *
2415 * The failed PMPs are marked earlier by mv_pmp_eh_prep(). 2415 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
2416 */ 2416 */
2417 pmp_map = pp->delayed_eh_pmp_map; 2417 pmp_map = pp->delayed_eh_pmp_map;
2418 pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH; 2418 pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
2419 for (pmp = 0; pmp_map != 0; pmp++) { 2419 for (pmp = 0; pmp_map != 0; pmp++) {
2420 unsigned int this_pmp = (1 << pmp); 2420 unsigned int this_pmp = (1 << pmp);
2421 if (pmp_map & this_pmp) { 2421 if (pmp_map & this_pmp) {
2422 struct ata_link *link = &ap->pmp_link[pmp]; 2422 struct ata_link *link = &ap->pmp_link[pmp];
2423 pmp_map &= ~this_pmp; 2423 pmp_map &= ~this_pmp;
2424 ata_eh_analyze_ncq_error(link); 2424 ata_eh_analyze_ncq_error(link);
2425 } 2425 }
2426 } 2426 }
2427 ata_port_freeze(ap); 2427 ata_port_freeze(ap);
2428 } 2428 }
2429 sata_pmp_error_handler(ap); 2429 sata_pmp_error_handler(ap);
2430 } 2430 }
2431 2431
2432 static unsigned int mv_get_err_pmp_map(struct ata_port *ap) 2432 static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
2433 { 2433 {
2434 void __iomem *port_mmio = mv_ap_base(ap); 2434 void __iomem *port_mmio = mv_ap_base(ap);
2435 2435
2436 return readl(port_mmio + SATA_TESTCTL) >> 16; 2436 return readl(port_mmio + SATA_TESTCTL) >> 16;
2437 } 2437 }
2438 2438
2439 static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map) 2439 static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
2440 { 2440 {
2441 struct ata_eh_info *ehi; 2441 struct ata_eh_info *ehi;
2442 unsigned int pmp; 2442 unsigned int pmp;
2443 2443
2444 /* 2444 /*
2445 * Initialize EH info for PMPs which saw device errors 2445 * Initialize EH info for PMPs which saw device errors
2446 */ 2446 */
2447 ehi = &ap->link.eh_info; 2447 ehi = &ap->link.eh_info;
2448 for (pmp = 0; pmp_map != 0; pmp++) { 2448 for (pmp = 0; pmp_map != 0; pmp++) {
2449 unsigned int this_pmp = (1 << pmp); 2449 unsigned int this_pmp = (1 << pmp);
2450 if (pmp_map & this_pmp) { 2450 if (pmp_map & this_pmp) {
2451 struct ata_link *link = &ap->pmp_link[pmp]; 2451 struct ata_link *link = &ap->pmp_link[pmp];
2452 2452
2453 pmp_map &= ~this_pmp; 2453 pmp_map &= ~this_pmp;
2454 ehi = &link->eh_info; 2454 ehi = &link->eh_info;
2455 ata_ehi_clear_desc(ehi); 2455 ata_ehi_clear_desc(ehi);
2456 ata_ehi_push_desc(ehi, "dev err"); 2456 ata_ehi_push_desc(ehi, "dev err");
2457 ehi->err_mask |= AC_ERR_DEV; 2457 ehi->err_mask |= AC_ERR_DEV;
2458 ehi->action |= ATA_EH_RESET; 2458 ehi->action |= ATA_EH_RESET;
2459 ata_link_abort(link); 2459 ata_link_abort(link);
2460 } 2460 }
2461 } 2461 }
2462 } 2462 }
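
mv_pmp_eh_prep() above and mv_pmp_error_handler() earlier share the same clear-as-you-go bitmap walk. The pattern in isolation (a sketch; printf stands in for the per-link EH work):

#include <stdio.h>

/* Visit every set bit of a PMP bitmap, clearing each bit once handled,
 * so the loop terminates as soon as the map is empty. */
static void demo_for_each_pmp(unsigned int pmp_map)
{
	unsigned int pmp;

	for (pmp = 0; pmp_map != 0; pmp++) {
		unsigned int this_pmp = 1u << pmp;

		if (pmp_map & this_pmp) {
			pmp_map &= ~this_pmp;
			printf("handle PMP link %u\n", pmp);
		}
	}
}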
2463 2463
2464 static int mv_req_q_empty(struct ata_port *ap) 2464 static int mv_req_q_empty(struct ata_port *ap)
2465 { 2465 {
2466 void __iomem *port_mmio = mv_ap_base(ap); 2466 void __iomem *port_mmio = mv_ap_base(ap);
2467 u32 in_ptr, out_ptr; 2467 u32 in_ptr, out_ptr;
2468 2468
2469 in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR) 2469 in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
2470 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; 2470 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2471 out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR) 2471 out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
2472 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; 2472 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2473 return (in_ptr == out_ptr); /* 1 == queue_is_empty */ 2473 return (in_ptr == out_ptr); /* 1 == queue_is_empty */
2474 } 2474 }
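
The EDMA request queue is a power-of-two ring, so it is empty exactly when the masked producer and consumer indices coincide, as read back above. A minimal sketch (the depth value is illustrative; MV_MAX_Q_DEPTH plays this role in the driver):

#include <stdint.h>

#define DEMO_Q_DEPTH		32			/* power of two */
#define DEMO_Q_DEPTH_MASK	(DEMO_Q_DEPTH - 1)

/* Ring is empty when producer (in) and consumer (out) indices match
 * after masking, mirroring the register reads in mv_req_q_empty(). */
static int demo_ring_empty(uint32_t in_raw, uint32_t out_raw)
{
	return (in_raw & DEMO_Q_DEPTH_MASK) == (out_raw & DEMO_Q_DEPTH_MASK);
}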
2475 2475
2476 static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap) 2476 static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
2477 { 2477 {
2478 struct mv_port_priv *pp = ap->private_data; 2478 struct mv_port_priv *pp = ap->private_data;
2479 int failed_links; 2479 int failed_links;
2480 unsigned int old_map, new_map; 2480 unsigned int old_map, new_map;
2481 2481
2482 /* 2482 /*
2483 * Device error during FBS+NCQ operation: 2483 * Device error during FBS+NCQ operation:
2484 * 2484 *
2485 * Set a port flag to prevent further I/O being enqueued. 2485 * Set a port flag to prevent further I/O being enqueued.
2486 * Leave the EDMA running to drain outstanding commands from this port. 2486 * Leave the EDMA running to drain outstanding commands from this port.
2487 * Perform the post-mortem/EH only when all responses are complete. 2487 * Perform the post-mortem/EH only when all responses are complete.
2488 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2). 2488 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
2489 */ 2489 */
2490 if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) { 2490 if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
2491 pp->pp_flags |= MV_PP_FLAG_DELAYED_EH; 2491 pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
2492 pp->delayed_eh_pmp_map = 0; 2492 pp->delayed_eh_pmp_map = 0;
2493 } 2493 }
2494 old_map = pp->delayed_eh_pmp_map; 2494 old_map = pp->delayed_eh_pmp_map;
2495 new_map = old_map | mv_get_err_pmp_map(ap); 2495 new_map = old_map | mv_get_err_pmp_map(ap);
2496 2496
2497 if (old_map != new_map) { 2497 if (old_map != new_map) {
2498 pp->delayed_eh_pmp_map = new_map; 2498 pp->delayed_eh_pmp_map = new_map;
2499 mv_pmp_eh_prep(ap, new_map & ~old_map); 2499 mv_pmp_eh_prep(ap, new_map & ~old_map);
2500 } 2500 }
2501 failed_links = hweight16(new_map); 2501 failed_links = hweight16(new_map);
2502 2502
2503 ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x " 2503 ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
2504 "failed_links=%d nr_active_links=%d\n", 2504 "failed_links=%d nr_active_links=%d\n",
2505 __func__, pp->delayed_eh_pmp_map, 2505 __func__, pp->delayed_eh_pmp_map,
2506 ap->qc_active, failed_links, 2506 ap->qc_active, failed_links,
2507 ap->nr_active_links); 2507 ap->nr_active_links);
2508 2508
2509 if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) { 2509 if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
2510 mv_process_crpb_entries(ap, pp); 2510 mv_process_crpb_entries(ap, pp);
2511 mv_stop_edma(ap); 2511 mv_stop_edma(ap);
2512 mv_eh_freeze(ap); 2512 mv_eh_freeze(ap);
2513 ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__); 2513 ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__);
2514 return 1; /* handled */ 2514 return 1; /* handled */
2515 } 2515 }
2516 ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__); 2516 ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__);
2517 return 1; /* handled */ 2517 return 1; /* handled */
2518 } 2518 }
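
The delayed-EH scheme above accumulates a bitmap of failed PMP links across successive device errors, and only stops EDMA and freezes the port once every active link has failed and the request queue has drained. A sketch of that bookkeeping, with the kernel's hweight16() modeled as a portable popcount (all names below are hypothetical):

#include <stdint.h>

/* Portable stand-in for the kernel's hweight16(). */
static int popcount16(uint16_t x)
{
	int n = 0;
	while (x) {
		x &= x - 1;             /* clear lowest set bit */
		n++;
	}
	return n;
}

/*
 * Merge newly failed PMP links into the saved map; EH is ready to run
 * once the failed links cover all active links and the queue is empty.
 */
static int delayed_eh_ready(uint16_t *saved_map, uint16_t new_errors,
			    int nr_active_links, int req_q_empty)
{
	*saved_map |= new_errors;
	return nr_active_links <= popcount16(*saved_map) && req_q_empty;
}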
2519 2519
2520 static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap) 2520 static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
2521 { 2521 {
2522 /* 2522 /*
2523 * Possible future enhancement: 2523 * Possible future enhancement:
2524 * 2524 *
2525 * FBS+non-NCQ operation is not yet implemented. 2525 * FBS+non-NCQ operation is not yet implemented.
2526 * See related notes in mv_edma_cfg(). 2526 * See related notes in mv_edma_cfg().
2527 * 2527 *
2528 * Device error during FBS+non-NCQ operation: 2528 * Device error during FBS+non-NCQ operation:
2529 * 2529 *
2530 * We need to snapshot the shadow registers for each failed command. 2530 * We need to snapshot the shadow registers for each failed command.
2531 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3). 2531 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
2532 */ 2532 */
2533 return 0; /* not handled */ 2533 return 0; /* not handled */
2534 } 2534 }
2535 2535
2536 static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause) 2536 static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
2537 { 2537 {
2538 struct mv_port_priv *pp = ap->private_data; 2538 struct mv_port_priv *pp = ap->private_data;
2539 2539
2540 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) 2540 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
2541 return 0; /* EDMA was not active: not handled */ 2541 return 0; /* EDMA was not active: not handled */
2542 if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN)) 2542 if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
2543 return 0; /* FBS was not active: not handled */ 2543 return 0; /* FBS was not active: not handled */
2544 2544
2545 if (!(edma_err_cause & EDMA_ERR_DEV)) 2545 if (!(edma_err_cause & EDMA_ERR_DEV))
2546 return 0; /* non DEV error: not handled */ 2546 return 0; /* non DEV error: not handled */
2547 edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT; 2547 edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
2548 if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS)) 2548 if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
2549 return 0; /* other problems: not handled */ 2549 return 0; /* other problems: not handled */
2550 2550
2551 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) { 2551 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
2552 /* 2552 /*
2553 * EDMA should NOT have self-disabled for this case. 2553 * EDMA should NOT have self-disabled for this case.
2554 * If it did, then something is wrong elsewhere, 2554 * If it did, then something is wrong elsewhere,
2555 * and we cannot handle it here. 2555 * and we cannot handle it here.
2556 */ 2556 */
2557 if (edma_err_cause & EDMA_ERR_SELF_DIS) { 2557 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2558 ata_port_printk(ap, KERN_WARNING, 2558 ata_port_printk(ap, KERN_WARNING,
2559 "%s: err_cause=0x%x pp_flags=0x%x\n", 2559 "%s: err_cause=0x%x pp_flags=0x%x\n",
2560 __func__, edma_err_cause, pp->pp_flags); 2560 __func__, edma_err_cause, pp->pp_flags);
2561 return 0; /* not handled */ 2561 return 0; /* not handled */
2562 } 2562 }
2563 return mv_handle_fbs_ncq_dev_err(ap); 2563 return mv_handle_fbs_ncq_dev_err(ap);
2564 } else { 2564 } else {
2565 /* 2565 /*
2566 * EDMA should have self-disabled for this case. 2566 * EDMA should have self-disabled for this case.
2567 * If it did not, then something is wrong elsewhere, 2567 * If it did not, then something is wrong elsewhere,
2568 * and we cannot handle it here. 2568 * and we cannot handle it here.
2569 */ 2569 */
2570 if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) { 2570 if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
2571 ata_port_printk(ap, KERN_WARNING, 2571 ata_port_printk(ap, KERN_WARNING,
2572 "%s: err_cause=0x%x pp_flags=0x%x\n", 2572 "%s: err_cause=0x%x pp_flags=0x%x\n",
2573 __func__, edma_err_cause, pp->pp_flags); 2573 __func__, edma_err_cause, pp->pp_flags);
2574 return 0; /* not handled */ 2574 return 0; /* not handled */
2575 } 2575 }
2576 return mv_handle_fbs_non_ncq_dev_err(ap); 2576 return mv_handle_fbs_non_ncq_dev_err(ap);
2577 } 2577 }
2578 return 0; /* not handled */ 2578 return 0; /* not handled */
2579 } 2579 }
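
mv_handle_dev_err() uses a common cause-filtering idiom: mask off transient noise bits first, then refuse to handle the interrupt if anything beyond the expected bits remains set. A condensed sketch of that filter (bit positions here are hypothetical, not the driver's real EDMA_ERR_* values):

#include <stdint.h>

#define ERR_DEV       (1u << 2)         /* hypothetical bit positions */
#define ERR_SELF_DIS  (1u << 7)
#define ERR_TRANSIENT (1u << 30)

/* Drop transient noise, then require that nothing beyond the
 * expected DEV and SELF_DIS bits is set. */
static int only_expected_bits(uint32_t cause)
{
	cause &= ~ERR_TRANSIENT;
	return !(cause & ~(ERR_DEV | ERR_SELF_DIS));
}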
2580 2580
2581 static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled) 2581 static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
2582 { 2582 {
2583 struct ata_eh_info *ehi = &ap->link.eh_info; 2583 struct ata_eh_info *ehi = &ap->link.eh_info;
2584 char *when = "idle"; 2584 char *when = "idle";
2585 2585
2586 ata_ehi_clear_desc(ehi); 2586 ata_ehi_clear_desc(ehi);
2587 if (edma_was_enabled) { 2587 if (edma_was_enabled) {
2588 when = "EDMA enabled"; 2588 when = "EDMA enabled";
2589 } else { 2589 } else {
2590 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); 2590 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
2591 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING)) 2591 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
2592 when = "polling"; 2592 when = "polling";
2593 } 2593 }
2594 ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when); 2594 ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
2595 ehi->err_mask |= AC_ERR_OTHER; 2595 ehi->err_mask |= AC_ERR_OTHER;
2596 ehi->action |= ATA_EH_RESET; 2596 ehi->action |= ATA_EH_RESET;
2597 ata_port_freeze(ap); 2597 ata_port_freeze(ap);
2598 } 2598 }
2599 2599
2600 /** 2600 /**
2601 * mv_err_intr - Handle error interrupts on the port 2601 * mv_err_intr - Handle error interrupts on the port
2602 * @ap: ATA channel to manipulate 2602 * @ap: ATA channel to manipulate
2603 * 2603 *
2604 * Most cases require a full reset of the chip's state machine, 2604 * Most cases require a full reset of the chip's state machine,
2605 * which also performs a COMRESET. 2605 * which also performs a COMRESET.
2606 * Also, if the port disabled DMA, update our cached copy to match. 2606 * Also, if the port disabled DMA, update our cached copy to match.
2607 * 2607 *
2608 * LOCKING: 2608 * LOCKING:
2609 * Inherited from caller. 2609 * Inherited from caller.
2610 */ 2610 */
2611 static void mv_err_intr(struct ata_port *ap) 2611 static void mv_err_intr(struct ata_port *ap)
2612 { 2612 {
2613 void __iomem *port_mmio = mv_ap_base(ap); 2613 void __iomem *port_mmio = mv_ap_base(ap);
2614 u32 edma_err_cause, eh_freeze_mask, serr = 0; 2614 u32 edma_err_cause, eh_freeze_mask, serr = 0;
2615 u32 fis_cause = 0; 2615 u32 fis_cause = 0;
2616 struct mv_port_priv *pp = ap->private_data; 2616 struct mv_port_priv *pp = ap->private_data;
2617 struct mv_host_priv *hpriv = ap->host->private_data; 2617 struct mv_host_priv *hpriv = ap->host->private_data;
2618 unsigned int action = 0, err_mask = 0; 2618 unsigned int action = 0, err_mask = 0;
2619 struct ata_eh_info *ehi = &ap->link.eh_info; 2619 struct ata_eh_info *ehi = &ap->link.eh_info;
2620 struct ata_queued_cmd *qc; 2620 struct ata_queued_cmd *qc;
2621 int abort = 0; 2621 int abort = 0;
2622 2622
2623 /* 2623 /*
2624 * Read and clear the SError and err_cause bits. 2624 * Read and clear the SError and err_cause bits.
2625 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear 2625 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
2626 * the FIS_IRQ_CAUSE register before clearing edma_err_cause. 2626 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
2627 */ 2627 */
2628 sata_scr_read(&ap->link, SCR_ERROR, &serr); 2628 sata_scr_read(&ap->link, SCR_ERROR, &serr);
2629 sata_scr_write_flush(&ap->link, SCR_ERROR, serr); 2629 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
2630 2630
2631 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE); 2631 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
2632 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) { 2632 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2633 fis_cause = readl(port_mmio + FIS_IRQ_CAUSE); 2633 fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
2634 writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE); 2634 writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
2635 } 2635 }
2636 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE); 2636 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);
2637 2637
2638 if (edma_err_cause & EDMA_ERR_DEV) { 2638 if (edma_err_cause & EDMA_ERR_DEV) {
2639 /* 2639 /*
2640 * Device errors during FIS-based switching operation 2640 * Device errors during FIS-based switching operation
2641 * require special handling. 2641 * require special handling.
2642 */ 2642 */
2643 if (mv_handle_dev_err(ap, edma_err_cause)) 2643 if (mv_handle_dev_err(ap, edma_err_cause))
2644 return; 2644 return;
2645 } 2645 }
2646 2646
2647 qc = mv_get_active_qc(ap); 2647 qc = mv_get_active_qc(ap);
2648 ata_ehi_clear_desc(ehi); 2648 ata_ehi_clear_desc(ehi);
2649 ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x", 2649 ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
2650 edma_err_cause, pp->pp_flags); 2650 edma_err_cause, pp->pp_flags);
2651 2651
2652 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) { 2652 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2653 ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause); 2653 ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
2654 if (fis_cause & FIS_IRQ_CAUSE_AN) { 2654 if (fis_cause & FIS_IRQ_CAUSE_AN) {
2655 u32 ec = edma_err_cause & 2655 u32 ec = edma_err_cause &
2656 ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT); 2656 ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
2657 sata_async_notification(ap); 2657 sata_async_notification(ap);
2658 if (!ec) 2658 if (!ec)
2659 return; /* Just an AN; no need for the nukes */ 2659 return; /* Just an AN; no need for the nukes */
2660 ata_ehi_push_desc(ehi, "SDB notify"); 2660 ata_ehi_push_desc(ehi, "SDB notify");
2661 } 2661 }
2662 } 2662 }
2663 /* 2663 /*
2664 * All generations share these EDMA error cause bits: 2664 * All generations share these EDMA error cause bits:
2665 */ 2665 */
2666 if (edma_err_cause & EDMA_ERR_DEV) { 2666 if (edma_err_cause & EDMA_ERR_DEV) {
2667 err_mask |= AC_ERR_DEV; 2667 err_mask |= AC_ERR_DEV;
2668 action |= ATA_EH_RESET; 2668 action |= ATA_EH_RESET;
2669 ata_ehi_push_desc(ehi, "dev error"); 2669 ata_ehi_push_desc(ehi, "dev error");
2670 } 2670 }
2671 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | 2671 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
2672 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | 2672 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
2673 EDMA_ERR_INTRL_PAR)) { 2673 EDMA_ERR_INTRL_PAR)) {
2674 err_mask |= AC_ERR_ATA_BUS; 2674 err_mask |= AC_ERR_ATA_BUS;
2675 action |= ATA_EH_RESET; 2675 action |= ATA_EH_RESET;
2676 ata_ehi_push_desc(ehi, "parity error"); 2676 ata_ehi_push_desc(ehi, "parity error");
2677 } 2677 }
2678 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) { 2678 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
2679 ata_ehi_hotplugged(ehi); 2679 ata_ehi_hotplugged(ehi);
2680 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ? 2680 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
2681 "dev disconnect" : "dev connect"); 2681 "dev disconnect" : "dev connect");
2682 action |= ATA_EH_RESET; 2682 action |= ATA_EH_RESET;
2683 } 2683 }
2684 2684
2685 /* 2685 /*
2686 * Gen-I has a different SELF_DIS bit, 2686 * Gen-I has a different SELF_DIS bit,
2687 * different FREEZE bits, and no SERR bit: 2687 * different FREEZE bits, and no SERR bit:
2688 */ 2688 */
2689 if (IS_GEN_I(hpriv)) { 2689 if (IS_GEN_I(hpriv)) {
2690 eh_freeze_mask = EDMA_EH_FREEZE_5; 2690 eh_freeze_mask = EDMA_EH_FREEZE_5;
2691 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) { 2691 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
2692 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 2692 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2693 ata_ehi_push_desc(ehi, "EDMA self-disable"); 2693 ata_ehi_push_desc(ehi, "EDMA self-disable");
2694 } 2694 }
2695 } else { 2695 } else {
2696 eh_freeze_mask = EDMA_EH_FREEZE; 2696 eh_freeze_mask = EDMA_EH_FREEZE;
2697 if (edma_err_cause & EDMA_ERR_SELF_DIS) { 2697 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2698 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 2698 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2699 ata_ehi_push_desc(ehi, "EDMA self-disable"); 2699 ata_ehi_push_desc(ehi, "EDMA self-disable");
2700 } 2700 }
2701 if (edma_err_cause & EDMA_ERR_SERR) { 2701 if (edma_err_cause & EDMA_ERR_SERR) {
2702 ata_ehi_push_desc(ehi, "SError=%08x", serr); 2702 ata_ehi_push_desc(ehi, "SError=%08x", serr);
2703 err_mask |= AC_ERR_ATA_BUS; 2703 err_mask |= AC_ERR_ATA_BUS;
2704 action |= ATA_EH_RESET; 2704 action |= ATA_EH_RESET;
2705 } 2705 }
2706 } 2706 }
2707 2707
2708 if (!err_mask) { 2708 if (!err_mask) {
2709 err_mask = AC_ERR_OTHER; 2709 err_mask = AC_ERR_OTHER;
2710 action |= ATA_EH_RESET; 2710 action |= ATA_EH_RESET;
2711 } 2711 }
2712 2712
2713 ehi->serror |= serr; 2713 ehi->serror |= serr;
2714 ehi->action |= action; 2714 ehi->action |= action;
2715 2715
2716 if (qc) 2716 if (qc)
2717 qc->err_mask |= err_mask; 2717 qc->err_mask |= err_mask;
2718 else 2718 else
2719 ehi->err_mask |= err_mask; 2719 ehi->err_mask |= err_mask;
2720 2720
2721 if (err_mask == AC_ERR_DEV) { 2721 if (err_mask == AC_ERR_DEV) {
2722 /* 2722 /*
2723 * Cannot do ata_port_freeze() here, 2723 * Cannot do ata_port_freeze() here,
2724 * because it would kill PIO access, 2724 * because it would kill PIO access,
2725 * which is needed for further diagnosis. 2725 * which is needed for further diagnosis.
2726 */ 2726 */
2727 mv_eh_freeze(ap); 2727 mv_eh_freeze(ap);
2728 abort = 1; 2728 abort = 1;
2729 } else if (edma_err_cause & eh_freeze_mask) { 2729 } else if (edma_err_cause & eh_freeze_mask) {
2730 /* 2730 /*
2731 * Note to self: ata_port_freeze() calls ata_port_abort() 2731 * Note to self: ata_port_freeze() calls ata_port_abort()
2732 */ 2732 */
2733 ata_port_freeze(ap); 2733 ata_port_freeze(ap);
2734 } else { 2734 } else {
2735 abort = 1; 2735 abort = 1;
2736 } 2736 }
2737 2737
2738 if (abort) { 2738 if (abort) {
2739 if (qc) 2739 if (qc)
2740 ata_link_abort(qc->dev->link); 2740 ata_link_abort(qc->dev->link);
2741 else 2741 else
2742 ata_port_abort(ap); 2742 ata_port_abort(ap);
2743 } 2743 }
2744 } 2744 }
2745 2745
2746 static void mv_process_crpb_response(struct ata_port *ap, 2746 static void mv_process_crpb_response(struct ata_port *ap,
2747 struct mv_crpb *response, unsigned int tag, int ncq_enabled) 2747 struct mv_crpb *response, unsigned int tag, int ncq_enabled)
2748 { 2748 {
2749 u8 ata_status; 2749 u8 ata_status;
2750 u16 edma_status = le16_to_cpu(response->flags); 2750 u16 edma_status = le16_to_cpu(response->flags);
2751 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); 2751 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
2752 2752
2753 if (unlikely(!qc)) { 2753 if (unlikely(!qc)) {
2754 ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n", 2754 ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
2755 __func__, tag); 2755 __func__, tag);
2756 return; 2756 return;
2757 } 2757 }
2758 2758
2759 /* 2759 /*
2760 * edma_status from a response queue entry: 2760 * edma_status from a response queue entry:
2761 * LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only). 2761 * LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
2762 * MSB is saved ATA status from command completion. 2762 * MSB is saved ATA status from command completion.
2763 */ 2763 */
2764 if (!ncq_enabled) { 2764 if (!ncq_enabled) {
2765 u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV; 2765 u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
2766 if (err_cause) { 2766 if (err_cause) {
2767 /* 2767 /*
2768 * Error will be seen/handled by 2768 * Error will be seen/handled by
2769 * mv_err_intr(). So do nothing at all here. 2769 * mv_err_intr(). So do nothing at all here.
2770 */ 2770 */
2771 return; 2771 return;
2772 } 2772 }
2773 } 2773 }
2774 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT; 2774 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
2775 if (!ac_err_mask(ata_status)) 2775 if (!ac_err_mask(ata_status))
2776 ata_qc_complete(qc); 2776 ata_qc_complete(qc);
2777 /* else: leave it for mv_err_intr() */ 2777 /* else: leave it for mv_err_intr() */
2778 } 2778 }
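
As the comment notes, each CRPB response packs two fields into its 16-bit flags word: the low byte carries the EDMA error cause (meaningful only for non-NCQ commands) and the high byte carries the saved ATA status. A sketch of that split, assuming CRPB_FLAG_STATUS_SHIFT is 8:

#include <stdint.h>

#define CRPB_STATUS_SHIFT 8             /* assumed shift value */

static uint8_t crpb_ata_status(uint16_t edma_status)
{
	return edma_status >> CRPB_STATUS_SHIFT;    /* MSB: ATA status */
}

static uint8_t crpb_err_cause(uint16_t edma_status)
{
	return edma_status & 0xff;                  /* LSB: non-NCQ err cause */
}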
2779 2779
2780 static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp) 2780 static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
2781 { 2781 {
2782 void __iomem *port_mmio = mv_ap_base(ap); 2782 void __iomem *port_mmio = mv_ap_base(ap);
2783 struct mv_host_priv *hpriv = ap->host->private_data; 2783 struct mv_host_priv *hpriv = ap->host->private_data;
2784 u32 in_index; 2784 u32 in_index;
2785 bool work_done = false; 2785 bool work_done = false;
2786 int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN); 2786 int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
2787 2787
2788 /* Get the hardware queue position index */ 2788 /* Get the hardware queue position index */
2789 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR) 2789 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
2790 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; 2790 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2791 2791
2792 /* Process new responses since the last time we looked */ 2792 /* Process new responses since the last time we looked */
2793 while (in_index != pp->resp_idx) { 2793 while (in_index != pp->resp_idx) {
2794 unsigned int tag; 2794 unsigned int tag;
2795 struct mv_crpb *response = &pp->crpb[pp->resp_idx]; 2795 struct mv_crpb *response = &pp->crpb[pp->resp_idx];
2796 2796
2797 pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK; 2797 pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2798 2798
2799 if (IS_GEN_I(hpriv)) { 2799 if (IS_GEN_I(hpriv)) {
2800 /* 50xx: no NCQ, only one command active at a time */ 2800 /* 50xx: no NCQ, only one command active at a time */
2801 tag = ap->link.active_tag; 2801 tag = ap->link.active_tag;
2802 } else { 2802 } else {
2803 /* Gen II/IIE: get command tag from CRPB entry */ 2803 /* Gen II/IIE: get command tag from CRPB entry */
2804 tag = le16_to_cpu(response->id) & 0x1f; 2804 tag = le16_to_cpu(response->id) & 0x1f;
2805 } 2805 }
2806 mv_process_crpb_response(ap, response, tag, ncq_enabled); 2806 mv_process_crpb_response(ap, response, tag, ncq_enabled);
2807 work_done = true; 2807 work_done = true;
2808 } 2808 }
2809 2809
2810 /* Update the software queue position index in hardware */ 2810 /* Update the software queue position index in hardware */
2811 if (work_done) 2811 if (work_done)
2812 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | 2812 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
2813 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT), 2813 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
2814 port_mmio + EDMA_RSP_Q_OUT_PTR); 2814 port_mmio + EDMA_RSP_Q_OUT_PTR);
2815 } 2815 }
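
mv_process_crpb_entries() is a classic single-consumer ring drain: walk the software index forward until it catches the hardware in-pointer, and write the new out-pointer back to the controller only once, after the loop, if anything was consumed. A user-space shaped sketch of the same loop (ring depth assumed to be 32):

#include <stdint.h>

#define QMASK 0x1fu                     /* assumed 32-deep response ring */

struct resp_ring {
	uint32_t resp_idx;              /* software consumer index */
	uint16_t entries[QMASK + 1];
};

/* Consume everything the hardware has produced; the caller writes the
 * new out-pointer back to the controller only if we report work done. */
static int drain_responses(struct resp_ring *r, uint32_t hw_in_idx,
			   void (*handle)(uint16_t entry))
{
	int work_done = 0;

	while ((hw_in_idx & QMASK) != r->resp_idx) {
		handle(r->entries[r->resp_idx]);
		r->resp_idx = (r->resp_idx + 1) & QMASK;
		work_done = 1;
	}
	return work_done;
}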
2816 2816
2817 static void mv_port_intr(struct ata_port *ap, u32 port_cause) 2817 static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2818 { 2818 {
2819 struct mv_port_priv *pp; 2819 struct mv_port_priv *pp;
2820 int edma_was_enabled; 2820 int edma_was_enabled;
2821 2821
2822 /* 2822 /*
2823 * Grab a snapshot of the EDMA_EN flag setting, 2823 * Grab a snapshot of the EDMA_EN flag setting,
2824 * so that we have a consistent view for this port, 2824 * so that we have a consistent view for this port,
2825 * even if one of the routines we call changes it. 2825 * even if one of the routines we call changes it.
2826 */ 2826 */
2827 pp = ap->private_data; 2827 pp = ap->private_data;
2828 edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN); 2828 edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2829 /* 2829 /*
2830 * Process completed CRPB response(s) before other events. 2830 * Process completed CRPB response(s) before other events.
2831 */ 2831 */
2832 if (edma_was_enabled && (port_cause & DONE_IRQ)) { 2832 if (edma_was_enabled && (port_cause & DONE_IRQ)) {
2833 mv_process_crpb_entries(ap, pp); 2833 mv_process_crpb_entries(ap, pp);
2834 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) 2834 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
2835 mv_handle_fbs_ncq_dev_err(ap); 2835 mv_handle_fbs_ncq_dev_err(ap);
2836 } 2836 }
2837 /* 2837 /*
2838 * Handle chip-reported errors, or continue on to handle PIO. 2838 * Handle chip-reported errors, or continue on to handle PIO.
2839 */ 2839 */
2840 if (unlikely(port_cause & ERR_IRQ)) { 2840 if (unlikely(port_cause & ERR_IRQ)) {
2841 mv_err_intr(ap); 2841 mv_err_intr(ap);
2842 } else if (!edma_was_enabled) { 2842 } else if (!edma_was_enabled) {
2843 struct ata_queued_cmd *qc = mv_get_active_qc(ap); 2843 struct ata_queued_cmd *qc = mv_get_active_qc(ap);
2844 if (qc) 2844 if (qc)
2845 ata_bmdma_port_intr(ap, qc); 2845 ata_bmdma_port_intr(ap, qc);
2846 else 2846 else
2847 mv_unexpected_intr(ap, edma_was_enabled); 2847 mv_unexpected_intr(ap, edma_was_enabled);
2848 } 2848 }
2849 } 2849 }
2850 2850
2851 /** 2851 /**
2852 * mv_host_intr - Handle all interrupts on the given host controller 2852 * mv_host_intr - Handle all interrupts on the given host controller
2853 * @host: host specific structure 2853 * @host: host specific structure
2854 * @main_irq_cause: Main interrupt cause register for the chip. 2854 * @main_irq_cause: Main interrupt cause register for the chip.
2855 * 2855 *
2856 * LOCKING: 2856 * LOCKING:
2857 * Inherited from caller. 2857 * Inherited from caller.
2858 */ 2858 */
2859 static int mv_host_intr(struct ata_host *host, u32 main_irq_cause) 2859 static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
2860 { 2860 {
2861 struct mv_host_priv *hpriv = host->private_data; 2861 struct mv_host_priv *hpriv = host->private_data;
2862 void __iomem *mmio = hpriv->base, *hc_mmio; 2862 void __iomem *mmio = hpriv->base, *hc_mmio;
2863 unsigned int handled = 0, port; 2863 unsigned int handled = 0, port;
2864 2864
2865 /* If asserted, clear the "all ports" IRQ coalescing bit */ 2865 /* If asserted, clear the "all ports" IRQ coalescing bit */
2866 if (main_irq_cause & ALL_PORTS_COAL_DONE) 2866 if (main_irq_cause & ALL_PORTS_COAL_DONE)
2867 writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE); 2867 writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
2868 2868
2869 for (port = 0; port < hpriv->n_ports; port++) { 2869 for (port = 0; port < hpriv->n_ports; port++) {
2870 struct ata_port *ap = host->ports[port]; 2870 struct ata_port *ap = host->ports[port];
2871 unsigned int p, shift, hardport, port_cause; 2871 unsigned int p, shift, hardport, port_cause;
2872 2872
2873 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); 2873 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
2874 /* 2874 /*
2875 * Each hc within the host has its own hc_irq_cause register, 2875 * Each hc within the host has its own hc_irq_cause register,
2876 * where the interrupting ports bits get ack'd. 2876 * where the interrupting ports bits get ack'd.
2877 */ 2877 */
2878 if (hardport == 0) { /* first port on this hc ? */ 2878 if (hardport == 0) { /* first port on this hc ? */
2879 u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND; 2879 u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
2880 u32 port_mask, ack_irqs; 2880 u32 port_mask, ack_irqs;
2881 /* 2881 /*
2882 * Skip this entire hc if nothing is pending for any of its ports 2882 * Skip this entire hc if nothing is pending for any of its ports
2883 */ 2883 */
2884 if (!hc_cause) { 2884 if (!hc_cause) {
2885 port += MV_PORTS_PER_HC - 1; 2885 port += MV_PORTS_PER_HC - 1;
2886 continue; 2886 continue;
2887 } 2887 }
2888 /* 2888 /*
2889 * We don't need/want to read the hc_irq_cause register, 2889 * We don't need/want to read the hc_irq_cause register,
2890 * because doing so hurts performance, and 2890 * because doing so hurts performance, and
2891 * main_irq_cause already gives us everything we need. 2891 * main_irq_cause already gives us everything we need.
2892 * 2892 *
2893 * But we do have to *write* to the hc_irq_cause to ack 2893 * But we do have to *write* to the hc_irq_cause to ack
2894 * the ports that we are handling this time through. 2894 * the ports that we are handling this time through.
2895 * 2895 *
2896 * This requires that we create a bitmap for those 2896 * This requires that we create a bitmap for those
2897 * ports which interrupted us, and use that bitmap 2897 * ports which interrupted us, and use that bitmap
2898 * to ack (only) those ports via hc_irq_cause. 2898 * to ack (only) those ports via hc_irq_cause.
2899 */ 2899 */
2900 ack_irqs = 0; 2900 ack_irqs = 0;
2901 if (hc_cause & PORTS_0_3_COAL_DONE) 2901 if (hc_cause & PORTS_0_3_COAL_DONE)
2902 ack_irqs = HC_COAL_IRQ; 2902 ack_irqs = HC_COAL_IRQ;
2903 for (p = 0; p < MV_PORTS_PER_HC; ++p) { 2903 for (p = 0; p < MV_PORTS_PER_HC; ++p) {
2904 if ((port + p) >= hpriv->n_ports) 2904 if ((port + p) >= hpriv->n_ports)
2905 break; 2905 break;
2906 port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2); 2906 port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
2907 if (hc_cause & port_mask) 2907 if (hc_cause & port_mask)
2908 ack_irqs |= (DMA_IRQ | DEV_IRQ) << p; 2908 ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
2909 } 2909 }
2910 hc_mmio = mv_hc_base_from_port(mmio, port); 2910 hc_mmio = mv_hc_base_from_port(mmio, port);
2911 writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE); 2911 writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
2912 handled = 1; 2912 handled = 1;
2913 } 2913 }
2914 /* 2914 /*
2915 * Handle interrupts signalled for this port: 2915 * Handle interrupts signalled for this port:
2916 */ 2916 */
2917 port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ); 2917 port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
2918 if (port_cause) 2918 if (port_cause)
2919 mv_port_intr(ap, port_cause); 2919 mv_port_intr(ap, port_cause);
2920 } 2920 }
2921 return handled; 2921 return handled;
2922 } 2922 }
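
The ack-bitmap construction in mv_host_intr() translates between two encodings: the main cause register packs a DONE/ERR pair per port (two bits each), while hc_irq_cause wants a DMA bit and a DEV bit per hardport in separate byte lanes. A sketch of that translation; the bit layouts below are assumptions chosen to match the shifts in the loop, not values taken from the driver's headers:

#include <stdint.h>

#define DONE_IRQ (1u << 0)              /* main cause: 2 bits per port */
#define ERR_IRQ  (1u << 1)
#define DMA_IRQ  (1u << 0)              /* hc cause: DMA bits, low byte */
#define DEV_IRQ  (1u << 8)              /* hc cause: DEV bits, high byte */
#define PORTS_PER_HC 4

/* Build the hc_irq_cause ack mask from this hc's slice of the
 * main cause register. */
static uint32_t build_ack_mask(uint32_t hc_cause)
{
	uint32_t ack = 0;
	unsigned int p;

	for (p = 0; p < PORTS_PER_HC; p++) {
		uint32_t port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
		if (hc_cause & port_mask)
			ack |= (DMA_IRQ | DEV_IRQ) << p;
	}
	return ack;
}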
2923 2923
2924 static int mv_pci_error(struct ata_host *host, void __iomem *mmio) 2924 static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
2925 { 2925 {
2926 struct mv_host_priv *hpriv = host->private_data; 2926 struct mv_host_priv *hpriv = host->private_data;
2927 struct ata_port *ap; 2927 struct ata_port *ap;
2928 struct ata_queued_cmd *qc; 2928 struct ata_queued_cmd *qc;
2929 struct ata_eh_info *ehi; 2929 struct ata_eh_info *ehi;
2930 unsigned int i, err_mask, printed = 0; 2930 unsigned int i, err_mask, printed = 0;
2931 u32 err_cause; 2931 u32 err_cause;
2932 2932
2933 err_cause = readl(mmio + hpriv->irq_cause_offset); 2933 err_cause = readl(mmio + hpriv->irq_cause_offset);
2934 2934
2935 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", 2935 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
2936 err_cause); 2936 err_cause);
2937 2937
2938 DPRINTK("All regs @ PCI error\n"); 2938 DPRINTK("All regs @ PCI error\n");
2939 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev)); 2939 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
2940 2940
2941 writelfl(0, mmio + hpriv->irq_cause_offset); 2941 writelfl(0, mmio + hpriv->irq_cause_offset);
2942 2942
2943 for (i = 0; i < host->n_ports; i++) { 2943 for (i = 0; i < host->n_ports; i++) {
2944 ap = host->ports[i]; 2944 ap = host->ports[i];
2945 if (!ata_link_offline(&ap->link)) { 2945 if (!ata_link_offline(&ap->link)) {
2946 ehi = &ap->link.eh_info; 2946 ehi = &ap->link.eh_info;
2947 ata_ehi_clear_desc(ehi); 2947 ata_ehi_clear_desc(ehi);
2948 if (!printed++) 2948 if (!printed++)
2949 ata_ehi_push_desc(ehi, 2949 ata_ehi_push_desc(ehi,
2950 "PCI err cause 0x%08x", err_cause); 2950 "PCI err cause 0x%08x", err_cause);
2951 err_mask = AC_ERR_HOST_BUS; 2951 err_mask = AC_ERR_HOST_BUS;
2952 ehi->action = ATA_EH_RESET; 2952 ehi->action = ATA_EH_RESET;
2953 qc = ata_qc_from_tag(ap, ap->link.active_tag); 2953 qc = ata_qc_from_tag(ap, ap->link.active_tag);
2954 if (qc) 2954 if (qc)
2955 qc->err_mask |= err_mask; 2955 qc->err_mask |= err_mask;
2956 else 2956 else
2957 ehi->err_mask |= err_mask; 2957 ehi->err_mask |= err_mask;
2958 2958
2959 ata_port_freeze(ap); 2959 ata_port_freeze(ap);
2960 } 2960 }
2961 } 2961 }
2962 return 1; /* handled */ 2962 return 1; /* handled */
2963 } 2963 }
2964 2964
2965 /** 2965 /**
2966 * mv_interrupt - Main interrupt event handler 2966 * mv_interrupt - Main interrupt event handler
2967 * @irq: unused 2967 * @irq: unused
2968 * @dev_instance: private data; in this case the host structure 2968 * @dev_instance: private data; in this case the host structure
2969 * 2969 *
2970 * Read the read-only register to determine if any host 2970 * Read the read-only register to determine if any host
2971 * controllers have pending interrupts. If so, call lower level 2971 * controllers have pending interrupts. If so, call lower level
2972 * routine to handle. Also check for PCI errors which are only 2972 * routine to handle. Also check for PCI errors which are only
2973 * reported here. 2973 * reported here.
2974 * 2974 *
2975 * LOCKING: 2975 * LOCKING:
2976 * This routine holds the host lock while processing pending 2976 * This routine holds the host lock while processing pending
2977 * interrupts. 2977 * interrupts.
2978 */ 2978 */
2979 static irqreturn_t mv_interrupt(int irq, void *dev_instance) 2979 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
2980 { 2980 {
2981 struct ata_host *host = dev_instance; 2981 struct ata_host *host = dev_instance;
2982 struct mv_host_priv *hpriv = host->private_data; 2982 struct mv_host_priv *hpriv = host->private_data;
2983 unsigned int handled = 0; 2983 unsigned int handled = 0;
2984 int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI; 2984 int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
2985 u32 main_irq_cause, pending_irqs; 2985 u32 main_irq_cause, pending_irqs;
2986 2986
2987 spin_lock(&host->lock); 2987 spin_lock(&host->lock);
2988 2988
2989 /* for MSI: block new interrupts while in here */ 2989 /* for MSI: block new interrupts while in here */
2990 if (using_msi) 2990 if (using_msi)
2991 mv_write_main_irq_mask(0, hpriv); 2991 mv_write_main_irq_mask(0, hpriv);
2992 2992
2993 main_irq_cause = readl(hpriv->main_irq_cause_addr); 2993 main_irq_cause = readl(hpriv->main_irq_cause_addr);
2994 pending_irqs = main_irq_cause & hpriv->main_irq_mask; 2994 pending_irqs = main_irq_cause & hpriv->main_irq_mask;
2995 /* 2995 /*
2996 * Deal with cases where we either have nothing pending, or have read 2996 * Deal with cases where we either have nothing pending, or have read
2997 * a bogus register value which can indicate HW removal or PCI fault. 2997 * a bogus register value which can indicate HW removal or PCI fault.
2998 */ 2998 */
2999 if (pending_irqs && main_irq_cause != 0xffffffffU) { 2999 if (pending_irqs && main_irq_cause != 0xffffffffU) {
3000 if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv))) 3000 if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
3001 handled = mv_pci_error(host, hpriv->base); 3001 handled = mv_pci_error(host, hpriv->base);
3002 else 3002 else
3003 handled = mv_host_intr(host, pending_irqs); 3003 handled = mv_host_intr(host, pending_irqs);
3004 } 3004 }
3005 3005
3006 /* for MSI: unmask; interrupt cause bits will retrigger now */ 3006 /* for MSI: unmask; interrupt cause bits will retrigger now */
3007 if (using_msi) 3007 if (using_msi)
3008 mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv); 3008 mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);
3009 3009
3010 spin_unlock(&host->lock); 3010 spin_unlock(&host->lock);
3011 3011
3012 return IRQ_RETVAL(handled); 3012 return IRQ_RETVAL(handled);
3013 } 3013 }
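
Because MSIs are edge-triggered, mv_interrupt() masks all main interrupts on entry and restores the mask on exit, so any cause bits raised while the handler ran retrigger a fresh message; it also treats an all-ones read as a sign of hardware removal or a PCI fault. A condensed sketch of that pattern, with locking elided and all names hypothetical:

#include <stdint.h>

struct ctrl {
	uint32_t irq_mask;              /* cached main irq mask */
	volatile uint32_t *cause_reg;
	void (*write_mask)(struct ctrl *c, uint32_t mask);
	int using_msi;
};

static int isr(struct ctrl *c)
{
	int handled = 0;
	uint32_t cause, pending;

	if (c->using_msi)
		c->write_mask(c, 0);    /* block retriggers while in here */

	cause = *c->cause_reg;
	pending = cause & c->irq_mask;
	if (pending && cause != 0xffffffffu)    /* guard vs. dead device */
		handled = 1;                    /* ...dispatch here... */

	if (c->using_msi)
		c->write_mask(c, c->irq_mask);  /* unmask: pending bits refire */
	return handled;
}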
3014 3014
3015 static unsigned int mv5_scr_offset(unsigned int sc_reg_in) 3015 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
3016 { 3016 {
3017 unsigned int ofs; 3017 unsigned int ofs;
3018 3018
3019 switch (sc_reg_in) { 3019 switch (sc_reg_in) {
3020 case SCR_STATUS: 3020 case SCR_STATUS:
3021 case SCR_ERROR: 3021 case SCR_ERROR:
3022 case SCR_CONTROL: 3022 case SCR_CONTROL:
3023 ofs = sc_reg_in * sizeof(u32); 3023 ofs = sc_reg_in * sizeof(u32);
3024 break; 3024 break;
3025 default: 3025 default:
3026 ofs = 0xffffffffU; 3026 ofs = 0xffffffffU;
3027 break; 3027 break;
3028 } 3028 }
3029 return ofs; 3029 return ofs;
3030 } 3030 }
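
mv5_scr_offset() works because libata numbers SCR_STATUS, SCR_ERROR and SCR_CONTROL consecutively and the 50xx parts lay those three registers out back to back, four bytes apart, with an all-ones sentinel flagging anything else as unimplemented. A sketch of the same mapping:

#include <stdint.h>

enum { SCR_STATUS = 0, SCR_ERROR = 1, SCR_CONTROL = 2 };    /* libata order */
#define BAD_OFS 0xffffffffu             /* sentinel: not implemented */

/* The three supported SCRs sit back to back, sizeof(u32) apart. */
static uint32_t scr_offset(unsigned int scr)
{
	return (scr <= SCR_CONTROL) ? scr * sizeof(uint32_t) : BAD_OFS;
}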
3031 3031
3032 static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val) 3032 static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
3033 { 3033 {
3034 struct mv_host_priv *hpriv = link->ap->host->private_data; 3034 struct mv_host_priv *hpriv = link->ap->host->private_data;
3035 void __iomem *mmio = hpriv->base; 3035 void __iomem *mmio = hpriv->base;
3036 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no); 3036 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
3037 unsigned int ofs = mv5_scr_offset(sc_reg_in); 3037 unsigned int ofs = mv5_scr_offset(sc_reg_in);
3038 3038
3039 if (ofs != 0xffffffffU) { 3039 if (ofs != 0xffffffffU) {
3040 *val = readl(addr + ofs); 3040 *val = readl(addr + ofs);
3041 return 0; 3041 return 0;
3042 } else 3042 } else
3043 return -EINVAL; 3043 return -EINVAL;
3044 } 3044 }
3045 3045
3046 static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val) 3046 static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
3047 { 3047 {
3048 struct mv_host_priv *hpriv = link->ap->host->private_data; 3048 struct mv_host_priv *hpriv = link->ap->host->private_data;
3049 void __iomem *mmio = hpriv->base; 3049 void __iomem *mmio = hpriv->base;
3050 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no); 3050 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
3051 unsigned int ofs = mv5_scr_offset(sc_reg_in); 3051 unsigned int ofs = mv5_scr_offset(sc_reg_in);
3052 3052
3053 if (ofs != 0xffffffffU) { 3053 if (ofs != 0xffffffffU) {
3054 writelfl(val, addr + ofs); 3054 writelfl(val, addr + ofs);
3055 return 0; 3055 return 0;
3056 } else 3056 } else
3057 return -EINVAL; 3057 return -EINVAL;
3058 } 3058 }
3059 3059
3060 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio) 3060 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
3061 { 3061 {
3062 struct pci_dev *pdev = to_pci_dev(host->dev); 3062 struct pci_dev *pdev = to_pci_dev(host->dev);
3063 int early_5080; 3063 int early_5080;
3064 3064
3065 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0); 3065 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
3066 3066
3067 if (!early_5080) { 3067 if (!early_5080) {
3068 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL); 3068 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3069 tmp |= (1 << 0); 3069 tmp |= (1 << 0);
3070 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL); 3070 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3071 } 3071 }
3072 3072
3073 mv_reset_pci_bus(host, mmio); 3073 mv_reset_pci_bus(host, mmio);
3074 } 3074 }
3075 3075
3076 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) 3076 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3077 { 3077 {
3078 writel(0x0fcfffff, mmio + FLASH_CTL); 3078 writel(0x0fcfffff, mmio + FLASH_CTL);
3079 } 3079 }
3080 3080
3081 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, 3081 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
3082 void __iomem *mmio) 3082 void __iomem *mmio)
3083 { 3083 {
3084 void __iomem *phy_mmio = mv5_phy_base(mmio, idx); 3084 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
3085 u32 tmp; 3085 u32 tmp;
3086 3086
3087 tmp = readl(phy_mmio + MV5_PHY_MODE); 3087 tmp = readl(phy_mmio + MV5_PHY_MODE);
3088 3088
3089 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */ 3089 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
3090 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */ 3090 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
3091 } 3091 }
3092 3092
3093 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) 3093 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3094 { 3094 {
3095 u32 tmp; 3095 u32 tmp;
3096 3096
3097 writel(0, mmio + GPIO_PORT_CTL); 3097 writel(0, mmio + GPIO_PORT_CTL);
3098 3098
3099 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */ 3099 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
3100 3100
3101 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL); 3101 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3102 tmp |= ~(1 << 0); 3102 tmp |= ~(1 << 0);
3103 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL); 3103 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3104 } 3104 }
3105 3105
3106 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 3106 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3107 unsigned int port) 3107 unsigned int port)
3108 { 3108 {
3109 void __iomem *phy_mmio = mv5_phy_base(mmio, port); 3109 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
3110 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5); 3110 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
3111 u32 tmp; 3111 u32 tmp;
3112 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0); 3112 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
3113 3113
3114 if (fix_apm_sq) { 3114 if (fix_apm_sq) {
3115 tmp = readl(phy_mmio + MV5_LTMODE); 3115 tmp = readl(phy_mmio + MV5_LTMODE);
3116 tmp |= (1 << 19); 3116 tmp |= (1 << 19);
3117 writel(tmp, phy_mmio + MV5_LTMODE); 3117 writel(tmp, phy_mmio + MV5_LTMODE);
3118 3118
3119 tmp = readl(phy_mmio + MV5_PHY_CTL); 3119 tmp = readl(phy_mmio + MV5_PHY_CTL);
3120 tmp &= ~0x3; 3120 tmp &= ~0x3;
3121 tmp |= 0x1; 3121 tmp |= 0x1;
3122 writel(tmp, phy_mmio + MV5_PHY_CTL); 3122 writel(tmp, phy_mmio + MV5_PHY_CTL);
3123 } 3123 }
3124 3124
3125 tmp = readl(phy_mmio + MV5_PHY_MODE); 3125 tmp = readl(phy_mmio + MV5_PHY_MODE);
3126 tmp &= ~mask; 3126 tmp &= ~mask;
3127 tmp |= hpriv->signal[port].pre; 3127 tmp |= hpriv->signal[port].pre;
3128 tmp |= hpriv->signal[port].amps; 3128 tmp |= hpriv->signal[port].amps;
3129 writel(tmp, phy_mmio + MV5_PHY_MODE); 3129 writel(tmp, phy_mmio + MV5_PHY_MODE);
3130 } 3130 }
3131 3131
3132 3132
3133 #undef ZERO 3133 #undef ZERO
3134 #define ZERO(reg) writel(0, port_mmio + (reg)) 3134 #define ZERO(reg) writel(0, port_mmio + (reg))
3135 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio, 3135 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
3136 unsigned int port) 3136 unsigned int port)
3137 { 3137 {
3138 void __iomem *port_mmio = mv_port_base(mmio, port); 3138 void __iomem *port_mmio = mv_port_base(mmio, port);
3139 3139
3140 mv_reset_channel(hpriv, mmio, port); 3140 mv_reset_channel(hpriv, mmio, port);
3141 3141
3142 ZERO(0x028); /* command */ 3142 ZERO(0x028); /* command */
3143 writel(0x11f, port_mmio + EDMA_CFG); 3143 writel(0x11f, port_mmio + EDMA_CFG);
3144 ZERO(0x004); /* timer */ 3144 ZERO(0x004); /* timer */
3145 ZERO(0x008); /* irq err cause */ 3145 ZERO(0x008); /* irq err cause */
3146 ZERO(0x00c); /* irq err mask */ 3146 ZERO(0x00c); /* irq err mask */
3147 ZERO(0x010); /* rq bah */ 3147 ZERO(0x010); /* rq bah */
3148 ZERO(0x014); /* rq inp */ 3148 ZERO(0x014); /* rq inp */
3149 ZERO(0x018); /* rq outp */ 3149 ZERO(0x018); /* rq outp */
3150 ZERO(0x01c); /* respq bah */ 3150 ZERO(0x01c); /* respq bah */
3151 ZERO(0x024); /* respq outp */ 3151 ZERO(0x024); /* respq outp */
3152 ZERO(0x020); /* respq inp */ 3152 ZERO(0x020); /* respq inp */
3153 ZERO(0x02c); /* test control */ 3153 ZERO(0x02c); /* test control */
3154 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT); 3154 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
3155 } 3155 }
3156 #undef ZERO 3156 #undef ZERO
3157 3157
3158 #define ZERO(reg) writel(0, hc_mmio + (reg)) 3158 #define ZERO(reg) writel(0, hc_mmio + (reg))
3159 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 3159 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3160 unsigned int hc) 3160 unsigned int hc)
3161 { 3161 {
3162 void __iomem *hc_mmio = mv_hc_base(mmio, hc); 3162 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3163 u32 tmp; 3163 u32 tmp;
3164 3164
3165 ZERO(0x00c); 3165 ZERO(0x00c);
3166 ZERO(0x010); 3166 ZERO(0x010);
3167 ZERO(0x014); 3167 ZERO(0x014);
3168 ZERO(0x018); 3168 ZERO(0x018);
3169 3169
3170 tmp = readl(hc_mmio + 0x20); 3170 tmp = readl(hc_mmio + 0x20);
3171 tmp &= 0x1c1c1c1c; 3171 tmp &= 0x1c1c1c1c;
3172 tmp |= 0x03030303; 3172 tmp |= 0x03030303;
3173 writel(tmp, hc_mmio + 0x20); 3173 writel(tmp, hc_mmio + 0x20);
3174 } 3174 }
3175 #undef ZERO 3175 #undef ZERO
3176 3176
3177 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 3177 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3178 unsigned int n_hc) 3178 unsigned int n_hc)
3179 { 3179 {
3180 unsigned int hc, port; 3180 unsigned int hc, port;
3181 3181
3182 for (hc = 0; hc < n_hc; hc++) { 3182 for (hc = 0; hc < n_hc; hc++) {
3183 for (port = 0; port < MV_PORTS_PER_HC; port++) 3183 for (port = 0; port < MV_PORTS_PER_HC; port++)
3184 mv5_reset_hc_port(hpriv, mmio, 3184 mv5_reset_hc_port(hpriv, mmio,
3185 (hc * MV_PORTS_PER_HC) + port); 3185 (hc * MV_PORTS_PER_HC) + port);
3186 3186
3187 mv5_reset_one_hc(hpriv, mmio, hc); 3187 mv5_reset_one_hc(hpriv, mmio, hc);
3188 } 3188 }
3189 3189
3190 return 0; 3190 return 0;
3191 } 3191 }
3192 3192
3193 #undef ZERO 3193 #undef ZERO
3194 #define ZERO(reg) writel(0, mmio + (reg)) 3194 #define ZERO(reg) writel(0, mmio + (reg))
3195 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio) 3195 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
3196 { 3196 {
3197 struct mv_host_priv *hpriv = host->private_data; 3197 struct mv_host_priv *hpriv = host->private_data;
3198 u32 tmp; 3198 u32 tmp;
3199 3199
3200 tmp = readl(mmio + MV_PCI_MODE); 3200 tmp = readl(mmio + MV_PCI_MODE);
3201 tmp &= 0xff00ffff; 3201 tmp &= 0xff00ffff;
3202 writel(tmp, mmio + MV_PCI_MODE); 3202 writel(tmp, mmio + MV_PCI_MODE);
3203 3203
3204 ZERO(MV_PCI_DISC_TIMER); 3204 ZERO(MV_PCI_DISC_TIMER);
3205 ZERO(MV_PCI_MSI_TRIGGER); 3205 ZERO(MV_PCI_MSI_TRIGGER);
3206 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT); 3206 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
3207 ZERO(MV_PCI_SERR_MASK); 3207 ZERO(MV_PCI_SERR_MASK);
3208 ZERO(hpriv->irq_cause_offset); 3208 ZERO(hpriv->irq_cause_offset);
3209 ZERO(hpriv->irq_mask_offset); 3209 ZERO(hpriv->irq_mask_offset);
3210 ZERO(MV_PCI_ERR_LOW_ADDRESS); 3210 ZERO(MV_PCI_ERR_LOW_ADDRESS);
3211 ZERO(MV_PCI_ERR_HIGH_ADDRESS); 3211 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
3212 ZERO(MV_PCI_ERR_ATTRIBUTE); 3212 ZERO(MV_PCI_ERR_ATTRIBUTE);
3213 ZERO(MV_PCI_ERR_COMMAND); 3213 ZERO(MV_PCI_ERR_COMMAND);
3214 } 3214 }
3215 #undef ZERO 3215 #undef ZERO
3216 3216
3217 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) 3217 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3218 { 3218 {
3219 u32 tmp; 3219 u32 tmp;
3220 3220
3221 mv5_reset_flash(hpriv, mmio); 3221 mv5_reset_flash(hpriv, mmio);
3222 3222
3223 tmp = readl(mmio + GPIO_PORT_CTL); 3223 tmp = readl(mmio + GPIO_PORT_CTL);
3224 tmp &= 0x3; 3224 tmp &= 0x3;
3225 tmp |= (1 << 5) | (1 << 6); 3225 tmp |= (1 << 5) | (1 << 6);
3226 writel(tmp, mmio + GPIO_PORT_CTL); 3226 writel(tmp, mmio + GPIO_PORT_CTL);
3227 } 3227 }
3228 3228
3229 /** 3229 /**
3230 * mv6_reset_hc - Perform the 6xxx global soft reset 3230 * mv6_reset_hc - Perform the 6xxx global soft reset
3231 * @mmio: base address of the HBA 3231 * @mmio: base address of the HBA
3232 * 3232 *
3233 * This routine only applies to 6xxx parts. 3233 * This routine only applies to 6xxx parts.
3234 * 3234 *
3235 * LOCKING: 3235 * LOCKING:
3236 * Inherited from caller. 3236 * Inherited from caller.
3237 */ 3237 */
3238 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, 3238 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3239 unsigned int n_hc) 3239 unsigned int n_hc)
3240 { 3240 {
3241 void __iomem *reg = mmio + PCI_MAIN_CMD_STS; 3241 void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
3242 int i, rc = 0; 3242 int i, rc = 0;
3243 u32 t; 3243 u32 t;
3244 3244
3245 /* Follow the procedure defined in the PCI "main command and status 3245 /* Follow the procedure defined in the PCI "main command and status
3246 * register" table. 3246 * register" table.
3247 */ 3247 */
3248 t = readl(reg); 3248 t = readl(reg);
3249 writel(t | STOP_PCI_MASTER, reg); 3249 writel(t | STOP_PCI_MASTER, reg);
3250 3250
3251 for (i = 0; i < 1000; i++) { 3251 for (i = 0; i < 1000; i++) {
3252 udelay(1); 3252 udelay(1);
3253 t = readl(reg); 3253 t = readl(reg);
3254 if (PCI_MASTER_EMPTY & t) 3254 if (PCI_MASTER_EMPTY & t)
3255 break; 3255 break;
3256 } 3256 }
3257 if (!(PCI_MASTER_EMPTY & t)) { 3257 if (!(PCI_MASTER_EMPTY & t)) {
3258 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n"); 3258 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
3259 rc = 1; 3259 rc = 1;
3260 goto done; 3260 goto done;
3261 } 3261 }
3262 3262
3263 /* set reset */ 3263 /* set reset */
3264 i = 5; 3264 i = 5;
3265 do { 3265 do {
3266 writel(t | GLOB_SFT_RST, reg); 3266 writel(t | GLOB_SFT_RST, reg);
3267 t = readl(reg); 3267 t = readl(reg);
3268 udelay(1); 3268 udelay(1);
3269 } while (!(GLOB_SFT_RST & t) && (i-- > 0)); 3269 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
3270 3270
3271 if (!(GLOB_SFT_RST & t)) { 3271 if (!(GLOB_SFT_RST & t)) {
3272 printk(KERN_ERR DRV_NAME ": can't set global reset\n"); 3272 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
3273 rc = 1; 3273 rc = 1;
3274 goto done; 3274 goto done;
3275 } 3275 }
3276 3276
3277 /* clear reset and *reenable the PCI master* (not mentioned in spec) */ 3277 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
3278 i = 5; 3278 i = 5;
3279 do { 3279 do {
3280 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg); 3280 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
3281 t = readl(reg); 3281 t = readl(reg);
3282 udelay(1); 3282 udelay(1);
3283 } while ((GLOB_SFT_RST & t) && (i-- > 0)); 3283 } while ((GLOB_SFT_RST & t) && (i-- > 0));
3284 3284
3285 if (GLOB_SFT_RST & t) { 3285 if (GLOB_SFT_RST & t) {
3286 printk(KERN_ERR DRV_NAME ": can't clear global reset\n"); 3286 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
3287 rc = 1; 3287 rc = 1;
3288 } 3288 }
3289 done: 3289 done:
3290 return rc; 3290 return rc;
3291 } 3291 }
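
mv6_reset_hc() leans on a bounded-poll idiom twice: write a control bit, then spin with microsecond delays until the status reflects it or a small retry budget runs out, reporting failure rather than hanging. A generic sketch of that pattern (delay() stands in for the kernel's udelay()):

#include <stdint.h>

/* Poll until (*reg & bit) matches want_set, with a bounded retry
 * budget; returns 0 on success, -1 on timeout. */
static int poll_bit(volatile uint32_t *reg, uint32_t bit, int want_set,
		    int retries, void (*delay)(void))
{
	do {
		if (!!(*reg & bit) == !!want_set)
			return 0;
		delay();
	} while (retries-- > 0);
	return -1;
}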
3292 3292
3293 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, 3293 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
3294 void __iomem *mmio) 3294 void __iomem *mmio)
3295 { 3295 {
3296 void __iomem *port_mmio; 3296 void __iomem *port_mmio;
3297 u32 tmp; 3297 u32 tmp;
3298 3298
3299 tmp = readl(mmio + RESET_CFG); 3299 tmp = readl(mmio + RESET_CFG);
3300 if ((tmp & (1 << 0)) == 0) { 3300 if ((tmp & (1 << 0)) == 0) {
3301 hpriv->signal[idx].amps = 0x7 << 8; 3301 hpriv->signal[idx].amps = 0x7 << 8;
3302 hpriv->signal[idx].pre = 0x1 << 5; 3302 hpriv->signal[idx].pre = 0x1 << 5;
3303 return; 3303 return;
3304 } 3304 }
3305 3305
3306 port_mmio = mv_port_base(mmio, idx); 3306 port_mmio = mv_port_base(mmio, idx);
3307 tmp = readl(port_mmio + PHY_MODE2); 3307 tmp = readl(port_mmio + PHY_MODE2);
3308 3308
3309 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */ 3309 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
3310 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */ 3310 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
3311 } 3311 }
3312 3312
3313 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) 3313 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3314 { 3314 {
3315 writel(0x00000060, mmio + GPIO_PORT_CTL); 3315 writel(0x00000060, mmio + GPIO_PORT_CTL);
3316 } 3316 }
3317 3317
3318 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, 3318 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3319 unsigned int port) 3319 unsigned int port)
3320 { 3320 {
3321 void __iomem *port_mmio = mv_port_base(mmio, port); 3321 void __iomem *port_mmio = mv_port_base(mmio, port);
3322 3322
3323 u32 hp_flags = hpriv->hp_flags; 3323 u32 hp_flags = hpriv->hp_flags;
3324 int fix_phy_mode2 = 3324 int fix_phy_mode2 =
3325 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); 3325 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
3326 int fix_phy_mode4 = 3326 int fix_phy_mode4 =
3327 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); 3327 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
3328 u32 m2, m3; 3328 u32 m2, m3;
3329 3329
3330 if (fix_phy_mode2) { 3330 if (fix_phy_mode2) {
3331 m2 = readl(port_mmio + PHY_MODE2); 3331 m2 = readl(port_mmio + PHY_MODE2);
3332 m2 &= ~(1 << 16); 3332 m2 &= ~(1 << 16);
3333 m2 |= (1 << 31); 3333 m2 |= (1 << 31);
3334 writel(m2, port_mmio + PHY_MODE2); 3334 writel(m2, port_mmio + PHY_MODE2);
3335 3335
3336 udelay(200); 3336 udelay(200);
3337 3337
3338 m2 = readl(port_mmio + PHY_MODE2); 3338 m2 = readl(port_mmio + PHY_MODE2);
3339 m2 &= ~((1 << 16) | (1 << 31)); 3339 m2 &= ~((1 << 16) | (1 << 31));
3340 writel(m2, port_mmio + PHY_MODE2); 3340 writel(m2, port_mmio + PHY_MODE2);
3341 3341
3342 udelay(200); 3342 udelay(200);
3343 } 3343 }
3344 3344
3345 /* 3345 /*
3346 * Gen-II/IIe PHY_MODE3 errata RM#2: 3346 * Gen-II/IIe PHY_MODE3 errata RM#2:
3347 * Achieves better receiver noise performance than the h/w default: 3347 * Achieves better receiver noise performance than the h/w default:
3348 */ 3348 */
3349 m3 = readl(port_mmio + PHY_MODE3); 3349 m3 = readl(port_mmio + PHY_MODE3);
3350 m3 = (m3 & 0x1f) | (0x5555601 << 5); 3350 m3 = (m3 & 0x1f) | (0x5555601 << 5);
3351 3351
3352 /* Guideline 88F5182 (GL# SATA-S11) */ 3352 /* Guideline 88F5182 (GL# SATA-S11) */
3353 if (IS_SOC(hpriv)) 3353 if (IS_SOC(hpriv))
3354 m3 &= ~0x1c; 3354 m3 &= ~0x1c;
3355 3355
3356 if (fix_phy_mode4) { 3356 if (fix_phy_mode4) {
3357 u32 m4 = readl(port_mmio + PHY_MODE4); 3357 u32 m4 = readl(port_mmio + PHY_MODE4);
3358 /* 3358 /*
3359 * Enforce reserved-bit restrictions on GenIIe devices only. 3359 * Enforce reserved-bit restrictions on GenIIe devices only.
3360 * For earlier chipsets, force only the internal config field 3360 * For earlier chipsets, force only the internal config field
3361 * (workaround for errata FEr SATA#10 part 1). 3361 * (workaround for errata FEr SATA#10 part 1).
3362 */ 3362 */
3363 if (IS_GEN_IIE(hpriv)) 3363 if (IS_GEN_IIE(hpriv))
3364 m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES; 3364 m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
3365 else 3365 else
3366 m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE; 3366 m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
3367 writel(m4, port_mmio + PHY_MODE4); 3367 writel(m4, port_mmio + PHY_MODE4);
3368 } 3368 }
3369 /* 3369 /*
3370 * Workaround for 60x1-B2 errata SATA#13: 3370 * Workaround for 60x1-B2 errata SATA#13:
3371 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3, 3371 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
3372 * so we must always rewrite PHY_MODE3 after PHY_MODE4. 3372 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
3373 * Or ensure we use writelfl() when writing PHY_MODE4. 3373 * Or ensure we use writelfl() when writing PHY_MODE4.
3374 */ 3374 */
3375 writel(m3, port_mmio + PHY_MODE3); 3375 writel(m3, port_mmio + PHY_MODE3);
3376 3376
3377 /* Revert values of pre-emphasis and signal amps to the saved ones */ 3377 /* Revert values of pre-emphasis and signal amps to the saved ones */
3378 m2 = readl(port_mmio + PHY_MODE2); 3378 m2 = readl(port_mmio + PHY_MODE2);
3379 3379
3380 m2 &= ~MV_M2_PREAMP_MASK; 3380 m2 &= ~MV_M2_PREAMP_MASK;
3381 m2 |= hpriv->signal[port].amps; 3381 m2 |= hpriv->signal[port].amps;
3382 m2 |= hpriv->signal[port].pre; 3382 m2 |= hpriv->signal[port].pre;
3383 m2 &= ~(1 << 16); 3383 m2 &= ~(1 << 16);
3384 3384
3385 /* according to mvSata 3.6.1, some IIE values are fixed */ 3385 /* according to mvSata 3.6.1, some IIE values are fixed */
3386 if (IS_GEN_IIE(hpriv)) { 3386 if (IS_GEN_IIE(hpriv)) {
3387 m2 &= ~0xC30FF01F; 3387 m2 &= ~0xC30FF01F;
3388 m2 |= 0x0000900F; 3388 m2 |= 0x0000900F;
3389 } 3389 }
3390 3390
3391 writel(m2, port_mmio + PHY_MODE2); 3391 writel(m2, port_mmio + PHY_MODE2);
3392 } 3392 }
3393 3393
3394 /* TODO: use the generic LED interface to configure the SATA Presence */ 3394 /* TODO: use the generic LED interface to configure the SATA Presence */
3395 /* & Activity LEDs on the board */ 3395 /* & Activity LEDs on the board */
3396 static void mv_soc_enable_leds(struct mv_host_priv *hpriv, 3396 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
3397 void __iomem *mmio) 3397 void __iomem *mmio)
3398 { 3398 {
3399 return; 3399 return;
3400 } 3400 }
3401 3401
3402 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx, 3402 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
3403 void __iomem *mmio) 3403 void __iomem *mmio)
3404 { 3404 {
3405 void __iomem *port_mmio; 3405 void __iomem *port_mmio;
3406 u32 tmp; 3406 u32 tmp;
3407 3407
3408 port_mmio = mv_port_base(mmio, idx); 3408 port_mmio = mv_port_base(mmio, idx);
3409 tmp = readl(port_mmio + PHY_MODE2); 3409 tmp = readl(port_mmio + PHY_MODE2);
3410 3410
3411 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */ 3411 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
3412 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */ 3412 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
3413 } 3413 }
3414 3414
3415 #undef ZERO 3415 #undef ZERO
3416 #define ZERO(reg) writel(0, port_mmio + (reg)) 3416 #define ZERO(reg) writel(0, port_mmio + (reg))
3417 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv, 3417 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
3418 void __iomem *mmio, unsigned int port) 3418 void __iomem *mmio, unsigned int port)
3419 { 3419 {
3420 void __iomem *port_mmio = mv_port_base(mmio, port); 3420 void __iomem *port_mmio = mv_port_base(mmio, port);
3421 3421
3422 mv_reset_channel(hpriv, mmio, port); 3422 mv_reset_channel(hpriv, mmio, port);
3423 3423
3424 ZERO(0x028); /* command */ 3424 ZERO(0x028); /* command */
3425 writel(0x101f, port_mmio + EDMA_CFG); 3425 writel(0x101f, port_mmio + EDMA_CFG);
3426 ZERO(0x004); /* timer */ 3426 ZERO(0x004); /* timer */
3427 ZERO(0x008); /* irq err cause */ 3427 ZERO(0x008); /* irq err cause */
3428 ZERO(0x00c); /* irq err mask */ 3428 ZERO(0x00c); /* irq err mask */
3429 ZERO(0x010); /* rq bah */ 3429 ZERO(0x010); /* rq bah */
3430 ZERO(0x014); /* rq inp */ 3430 ZERO(0x014); /* rq inp */
3431 ZERO(0x018); /* rq outp */ 3431 ZERO(0x018); /* rq outp */
3432 ZERO(0x01c); /* respq bah */ 3432 ZERO(0x01c); /* respq bah */
3433 ZERO(0x024); /* respq outp */ 3433 ZERO(0x024); /* respq outp */
3434 ZERO(0x020); /* respq inp */ 3434 ZERO(0x020); /* respq inp */
3435 ZERO(0x02c); /* test control */ 3435 ZERO(0x02c); /* test control */
3436 writel(0x800, port_mmio + EDMA_IORDY_TMOUT); 3436 writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
3437 } 3437 }
3438 3438
3439 #undef ZERO 3439 #undef ZERO
3440 3440
3441 #define ZERO(reg) writel(0, hc_mmio + (reg)) 3441 #define ZERO(reg) writel(0, hc_mmio + (reg))
3442 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv, 3442 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
3443 void __iomem *mmio) 3443 void __iomem *mmio)
3444 { 3444 {
3445 void __iomem *hc_mmio = mv_hc_base(mmio, 0); 3445 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
3446 3446
3447 ZERO(0x00c); 3447 ZERO(0x00c);
3448 ZERO(0x010); 3448 ZERO(0x010);
3449 ZERO(0x014); 3449 ZERO(0x014);
3450 3450
3451 } 3451 }
3452 3452
3453 #undef ZERO 3453 #undef ZERO
3454 3454
3455 static int mv_soc_reset_hc(struct mv_host_priv *hpriv, 3455 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
3456 void __iomem *mmio, unsigned int n_hc) 3456 void __iomem *mmio, unsigned int n_hc)
3457 { 3457 {
3458 unsigned int port; 3458 unsigned int port;
3459 3459
3460 for (port = 0; port < hpriv->n_ports; port++) 3460 for (port = 0; port < hpriv->n_ports; port++)
3461 mv_soc_reset_hc_port(hpriv, mmio, port); 3461 mv_soc_reset_hc_port(hpriv, mmio, port);
3462 3462
3463 mv_soc_reset_one_hc(hpriv, mmio); 3463 mv_soc_reset_one_hc(hpriv, mmio);
3464 3464
3465 return 0; 3465 return 0;
3466 } 3466 }
3467 3467
3468 static void mv_soc_reset_flash(struct mv_host_priv *hpriv, 3468 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
3469 void __iomem *mmio) 3469 void __iomem *mmio)
3470 { 3470 {
3471 return; 3471 return;
3472 } 3472 }
3473 3473
3474 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio) 3474 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
3475 { 3475 {
3476 return; 3476 return;
3477 } 3477 }
3478 3478
3479 static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv, 3479 static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
3480 void __iomem *mmio, unsigned int port) 3480 void __iomem *mmio, unsigned int port)
3481 { 3481 {
3482 void __iomem *port_mmio = mv_port_base(mmio, port); 3482 void __iomem *port_mmio = mv_port_base(mmio, port);
3483 u32 reg; 3483 u32 reg;
3484 3484
3485 reg = readl(port_mmio + PHY_MODE3); 3485 reg = readl(port_mmio + PHY_MODE3);
3486 reg &= ~(0x3 << 27); /* SELMUPF (bits 28:27) to 1 */ 3486 reg &= ~(0x3 << 27); /* SELMUPF (bits 28:27) to 1 */
3487 reg |= (0x1 << 27); 3487 reg |= (0x1 << 27);
3488 reg &= ~(0x3 << 29); /* SELMUPI (bits 30:29) to 1 */ 3488 reg &= ~(0x3 << 29); /* SELMUPI (bits 30:29) to 1 */
3489 reg |= (0x1 << 29); 3489 reg |= (0x1 << 29);
3490 writel(reg, port_mmio + PHY_MODE3); 3490 writel(reg, port_mmio + PHY_MODE3);
3491 3491
3492 reg = readl(port_mmio + PHY_MODE4); 3492 reg = readl(port_mmio + PHY_MODE4);
3493 reg &= ~0x1; /* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */ 3493 reg &= ~0x1; /* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */
3494 reg |= (0x1 << 16); 3494 reg |= (0x1 << 16);
3495 writel(reg, port_mmio + PHY_MODE4); 3495 writel(reg, port_mmio + PHY_MODE4);
3496 3496
3497 reg = readl(port_mmio + PHY_MODE9_GEN2); 3497 reg = readl(port_mmio + PHY_MODE9_GEN2);
3498 reg &= ~0xf; /* TXAMP[3:0] (bits 3:0) to 8 */ 3498 reg &= ~0xf; /* TXAMP[3:0] (bits 3:0) to 8 */
3499 reg |= 0x8; 3499 reg |= 0x8;
3500 reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */ 3500 reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */
3501 writel(reg, port_mmio + PHY_MODE9_GEN2); 3501 writel(reg, port_mmio + PHY_MODE9_GEN2);
3502 3502
3503 reg = readl(port_mmio + PHY_MODE9_GEN1); 3503 reg = readl(port_mmio + PHY_MODE9_GEN1);
3504 reg &= ~0xf; /* TXAMP[3:0] (bits 3:0) to 8 */ 3504 reg &= ~0xf; /* TXAMP[3:0] (bits 3:0) to 8 */
3505 reg |= 0x8; 3505 reg |= 0x8;
3506 reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */ 3506 reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */
3507 writel(reg, port_mmio + PHY_MODE9_GEN1); 3507 writel(reg, port_mmio + PHY_MODE9_GEN1);
3508 } 3508 }
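/*
 * Each PHY fixup above follows the same read-modify-write pattern:
 * clear the whole field, then OR in the new value. A minimal sketch
 * for a 2-bit field at bits 28:27 being set to 1 (this is exactly the
 * SELMUPF update at the top of the function):
 *
 *	reg = readl(port_mmio + PHY_MODE3);
 *	reg = (reg & ~(0x3 << 27)) | (0x1 << 27);
 *	writel(reg, port_mmio + PHY_MODE3);
 */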
3509 3509
3510 /** 3510 /**
3511 * soc_is_65n - check if the SoC is a 65 nm device 3511 * soc_is_65n - check if the SoC is a 65 nm device
3512 * 3512 *
3513 * Detect the type of the SoC by reading the PHYCFG_OFS register. This 3513 * Detect the type of the SoC by reading the PHYCFG_OFS register. This
3514 * register exists only on the 65 nm devices and should contain a 3514 * register exists only on the 65 nm devices and should contain a
3515 * non-zero value; reading it on older devices returns 0. 3515 * non-zero value; reading it on older devices returns 0.
3516 */ 3516 */
3517 static bool soc_is_65n(struct mv_host_priv *hpriv) 3517 static bool soc_is_65n(struct mv_host_priv *hpriv)
3518 { 3518 {
3519 void __iomem *port0_mmio = mv_port_base(hpriv->base, 0); 3519 void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);
3520 3520
3521 if (readl(port0_mmio + PHYCFG_OFS)) 3521 if (readl(port0_mmio + PHYCFG_OFS))
3522 return true; 3522 return true;
3523 return false; 3523 return false;
3524 } 3524 }
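/*
 * soc_is_65n() is consulted once at chip-id time: the chip_soc case in
 * mv_chip_id() below selects mv_soc_65n_ops when it returns true and
 * mv_soc_ops otherwise, which is how the 65 nm PHY errata handler gets
 * wired in per SoC.
 */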
3525 3525
3526 static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i) 3526 static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
3527 { 3527 {
3528 u32 ifcfg = readl(port_mmio + SATA_IFCFG); 3528 u32 ifcfg = readl(port_mmio + SATA_IFCFG);
3529 3529
3530 ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */ 3530 ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */
3531 if (want_gen2i) 3531 if (want_gen2i)
3532 ifcfg |= (1 << 7); /* enable gen2i speed */ 3532 ifcfg |= (1 << 7); /* enable gen2i speed */
3533 writelfl(ifcfg, port_mmio + SATA_IFCFG); 3533 writelfl(ifcfg, port_mmio + SATA_IFCFG);
3534 } 3534 }
3535 3535
3536 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, 3536 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
3537 unsigned int port_no) 3537 unsigned int port_no)
3538 { 3538 {
3539 void __iomem *port_mmio = mv_port_base(mmio, port_no); 3539 void __iomem *port_mmio = mv_port_base(mmio, port_no);
3540 3540
3541 /* 3541 /*
3542 * The datasheet warns against setting EDMA_RESET when EDMA is active 3542 * The datasheet warns against setting EDMA_RESET when EDMA is active
3543 * (but doesn't say what the problem might be). So we first try 3543 * (but doesn't say what the problem might be). So we first try
3544 * to disable the EDMA engine before doing the EDMA_RESET operation. 3544 * to disable the EDMA engine before doing the EDMA_RESET operation.
3545 */ 3545 */
3546 mv_stop_edma_engine(port_mmio); 3546 mv_stop_edma_engine(port_mmio);
3547 writelfl(EDMA_RESET, port_mmio + EDMA_CMD); 3547 writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3548 3548
3549 if (!IS_GEN_I(hpriv)) { 3549 if (!IS_GEN_I(hpriv)) {
3550 /* Enable 3.0gb/s link speed: this survives EDMA_RESET */ 3550 /* Enable 3.0gb/s link speed: this survives EDMA_RESET */
3551 mv_setup_ifcfg(port_mmio, 1); 3551 mv_setup_ifcfg(port_mmio, 1);
3552 } 3552 }
3553 /* 3553 /*
3554 * Strobing EDMA_RESET here causes a hard reset of the SATA transport, 3554 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
3555 * link, and physical layers. It resets all SATA interface registers 3555 * link, and physical layers. It resets all SATA interface registers
3556 * (except for SATA_IFCFG), and issues a COMRESET to the dev. 3556 * (except for SATA_IFCFG), and issues a COMRESET to the dev.
3557 */ 3557 */
3558 writelfl(EDMA_RESET, port_mmio + EDMA_CMD); 3558 writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3559 udelay(25); /* allow reset propagation */ 3559 udelay(25); /* allow reset propagation */
3560 writelfl(0, port_mmio + EDMA_CMD); 3560 writelfl(0, port_mmio + EDMA_CMD);
3561 3561
3562 hpriv->ops->phy_errata(hpriv, mmio, port_no); 3562 hpriv->ops->phy_errata(hpriv, mmio, port_no);
3563 3563
3564 if (IS_GEN_I(hpriv)) 3564 if (IS_GEN_I(hpriv))
3565 mdelay(1); 3565 mdelay(1);
3566 } 3566 }
3567 3567
3568 static void mv_pmp_select(struct ata_port *ap, int pmp) 3568 static void mv_pmp_select(struct ata_port *ap, int pmp)
3569 { 3569 {
3570 if (sata_pmp_supported(ap)) { 3570 if (sata_pmp_supported(ap)) {
3571 void __iomem *port_mmio = mv_ap_base(ap); 3571 void __iomem *port_mmio = mv_ap_base(ap);
3572 u32 reg = readl(port_mmio + SATA_IFCTL); 3572 u32 reg = readl(port_mmio + SATA_IFCTL);
3573 int old = reg & 0xf; 3573 int old = reg & 0xf;
3574 3574
3575 if (old != pmp) { 3575 if (old != pmp) {
3576 reg = (reg & ~0xf) | pmp; 3576 reg = (reg & ~0xf) | pmp;
3577 writelfl(reg, port_mmio + SATA_IFCTL); 3577 writelfl(reg, port_mmio + SATA_IFCTL);
3578 } 3578 }
3579 } 3579 }
3580 } 3580 }
3581 3581
3582 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class, 3582 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
3583 unsigned long deadline) 3583 unsigned long deadline)
3584 { 3584 {
3585 mv_pmp_select(link->ap, sata_srst_pmp(link)); 3585 mv_pmp_select(link->ap, sata_srst_pmp(link));
3586 return sata_std_hardreset(link, class, deadline); 3586 return sata_std_hardreset(link, class, deadline);
3587 } 3587 }
3588 3588
3589 static int mv_softreset(struct ata_link *link, unsigned int *class, 3589 static int mv_softreset(struct ata_link *link, unsigned int *class,
3590 unsigned long deadline) 3590 unsigned long deadline)
3591 { 3591 {
3592 mv_pmp_select(link->ap, sata_srst_pmp(link)); 3592 mv_pmp_select(link->ap, sata_srst_pmp(link));
3593 return ata_sff_softreset(link, class, deadline); 3593 return ata_sff_softreset(link, class, deadline);
3594 } 3594 }
3595 3595
3596 static int mv_hardreset(struct ata_link *link, unsigned int *class, 3596 static int mv_hardreset(struct ata_link *link, unsigned int *class,
3597 unsigned long deadline) 3597 unsigned long deadline)
3598 { 3598 {
3599 struct ata_port *ap = link->ap; 3599 struct ata_port *ap = link->ap;
3600 struct mv_host_priv *hpriv = ap->host->private_data; 3600 struct mv_host_priv *hpriv = ap->host->private_data;
3601 struct mv_port_priv *pp = ap->private_data; 3601 struct mv_port_priv *pp = ap->private_data;
3602 void __iomem *mmio = hpriv->base; 3602 void __iomem *mmio = hpriv->base;
3603 int rc, attempts = 0, extra = 0; 3603 int rc, attempts = 0, extra = 0;
3604 u32 sstatus; 3604 u32 sstatus;
3605 bool online; 3605 bool online;
3606 3606
3607 mv_reset_channel(hpriv, mmio, ap->port_no); 3607 mv_reset_channel(hpriv, mmio, ap->port_no);
3608 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 3608 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
3609 pp->pp_flags &= 3609 pp->pp_flags &=
3610 ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY); 3610 ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
3611 3611
3612 /* Workaround for errata FEr SATA#10 (part 2) */ 3612 /* Workaround for errata FEr SATA#10 (part 2) */
3613 do { 3613 do {
3614 const unsigned long *timing = 3614 const unsigned long *timing =
3615 sata_ehc_deb_timing(&link->eh_context); 3615 sata_ehc_deb_timing(&link->eh_context);
3616 3616
3617 rc = sata_link_hardreset(link, timing, deadline + extra, 3617 rc = sata_link_hardreset(link, timing, deadline + extra,
3618 &online, NULL); 3618 &online, NULL);
3619 rc = online ? -EAGAIN : rc; 3619 rc = online ? -EAGAIN : rc;
3620 if (rc) 3620 if (rc)
3621 return rc; 3621 return rc;
3622 sata_scr_read(link, SCR_STATUS, &sstatus); 3622 sata_scr_read(link, SCR_STATUS, &sstatus);
3623 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) { 3623 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
3624 /* Force 1.5gb/s link speed and try again */ 3624 /* Force 1.5gb/s link speed and try again */
3625 mv_setup_ifcfg(mv_ap_base(ap), 0); 3625 mv_setup_ifcfg(mv_ap_base(ap), 0);
3626 if (time_after(jiffies + HZ, deadline)) 3626 if (time_after(jiffies + HZ, deadline))
3627 extra = HZ; /* only extend it once, max */ 3627 extra = HZ; /* only extend it once, max */
3628 } 3628 }
3629 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123); 3629 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
3630 mv_save_cached_regs(ap); 3630 mv_save_cached_regs(ap);
3631 mv_edma_cfg(ap, 0, 0); 3631 mv_edma_cfg(ap, 0, 0);
3632 3632
3633 return rc; 3633 return rc;
3634 } 3634 }
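/*
 * SStatus values tested by the retry loop above, decoded per the SATA
 * spec (DET = bits 3:0, SPD = bits 7:4, IPM = bits 11:8):
 *
 *	0x000	no device attached
 *	0x113	phy comm established at 1.5 Gb/s
 *	0x123	phy comm established at 3.0 Gb/s
 *	0x121	device detected but comm not established; after 5
 *		attempts the loop forces 1.5 Gb/s via mv_setup_ifcfg()
 *		and retries (errata FEr SATA#10, part 2)
 */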
3635 3635
3636 static void mv_eh_freeze(struct ata_port *ap) 3636 static void mv_eh_freeze(struct ata_port *ap)
3637 { 3637 {
3638 mv_stop_edma(ap); 3638 mv_stop_edma(ap);
3639 mv_enable_port_irqs(ap, 0); 3639 mv_enable_port_irqs(ap, 0);
3640 } 3640 }
3641 3641
3642 static void mv_eh_thaw(struct ata_port *ap) 3642 static void mv_eh_thaw(struct ata_port *ap)
3643 { 3643 {
3644 struct mv_host_priv *hpriv = ap->host->private_data; 3644 struct mv_host_priv *hpriv = ap->host->private_data;
3645 unsigned int port = ap->port_no; 3645 unsigned int port = ap->port_no;
3646 unsigned int hardport = mv_hardport_from_port(port); 3646 unsigned int hardport = mv_hardport_from_port(port);
3647 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port); 3647 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
3648 void __iomem *port_mmio = mv_ap_base(ap); 3648 void __iomem *port_mmio = mv_ap_base(ap);
3649 u32 hc_irq_cause; 3649 u32 hc_irq_cause;
3650 3650
3651 /* clear EDMA errors on this port */ 3651 /* clear EDMA errors on this port */
3652 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE); 3652 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
3653 3653
3654 /* clear pending irq events */ 3654 /* clear pending irq events */
3655 hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport); 3655 hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
3656 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE); 3656 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
3657 3657
3658 mv_enable_port_irqs(ap, ERR_IRQ); 3658 mv_enable_port_irqs(ap, ERR_IRQ);
3659 } 3659 }
3660 3660
3661 /** 3661 /**
3662 * mv_port_init - Perform some early initialization on a single port. 3662 * mv_port_init - Perform some early initialization on a single port.
3663 * @port: libata data structure storing shadow register addresses 3663 * @port: libata data structure storing shadow register addresses
3664 * @port_mmio: base address of the port 3664 * @port_mmio: base address of the port
3665 * 3665 *
3666 * Initialize shadow register mmio addresses, clear outstanding 3666 * Initialize shadow register mmio addresses, clear outstanding
3667 * interrupts on the port, and unmask interrupts for the future 3667 * interrupts on the port, and unmask interrupts for the future
3668 * start of the port. 3668 * start of the port.
3669 * 3669 *
3670 * LOCKING: 3670 * LOCKING:
3671 * Inherited from caller. 3671 * Inherited from caller.
3672 */ 3672 */
3673 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio) 3673 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
3674 { 3674 {
3675 void __iomem *serr, *shd_base = port_mmio + SHD_BLK; 3675 void __iomem *serr, *shd_base = port_mmio + SHD_BLK;
3676 3676
3677 /* PIO related setup 3677 /* PIO related setup
3678 */ 3678 */
3679 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA); 3679 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
3680 port->error_addr = 3680 port->error_addr =
3681 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR); 3681 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
3682 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT); 3682 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
3683 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL); 3683 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
3684 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM); 3684 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
3685 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH); 3685 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
3686 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE); 3686 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
3687 port->status_addr = 3687 port->status_addr =
3688 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS); 3688 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
3689 /* special case: control/altstatus doesn't have ATA_REG_ address */ 3689 /* special case: control/altstatus doesn't have ATA_REG_ address */
3690 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST; 3690 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;
3691 3691
3692 /* Clear any currently outstanding port interrupt conditions */ 3692 /* Clear any currently outstanding port interrupt conditions */
3693 serr = port_mmio + mv_scr_offset(SCR_ERROR); 3693 serr = port_mmio + mv_scr_offset(SCR_ERROR);
3694 writelfl(readl(serr), serr); 3694 writelfl(readl(serr), serr);
3695 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE); 3695 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
3696 3696
3697 /* unmask all non-transient EDMA error interrupts */ 3697 /* unmask all non-transient EDMA error interrupts */
3698 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK); 3698 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);
3699 3699
3700 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", 3700 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
3701 readl(port_mmio + EDMA_CFG), 3701 readl(port_mmio + EDMA_CFG),
3702 readl(port_mmio + EDMA_ERR_IRQ_CAUSE), 3702 readl(port_mmio + EDMA_ERR_IRQ_CAUSE),
3703 readl(port_mmio + EDMA_ERR_IRQ_MASK)); 3703 readl(port_mmio + EDMA_ERR_IRQ_MASK));
3704 } 3704 }
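/*
 * The shadow register block exposes the taskfile registers at u32
 * strides from SHD_BLK, so each address above is shd_base plus four
 * times the corresponding ATA_REG_* index from <linux/ata.h>. A sketch
 * for the sector-count register, assuming its index value of 2:
 *
 *	port->nsect_addr = shd_base + (sizeof(u32) * 2);   == shd_base + 8
 */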
3705 3705
3706 static unsigned int mv_in_pcix_mode(struct ata_host *host) 3706 static unsigned int mv_in_pcix_mode(struct ata_host *host)
3707 { 3707 {
3708 struct mv_host_priv *hpriv = host->private_data; 3708 struct mv_host_priv *hpriv = host->private_data;
3709 void __iomem *mmio = hpriv->base; 3709 void __iomem *mmio = hpriv->base;
3710 u32 reg; 3710 u32 reg;
3711 3711
3712 if (IS_SOC(hpriv) || !IS_PCIE(hpriv)) 3712 if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
3713 return 0; /* not PCI-X capable */ 3713 return 0; /* not PCI-X capable */
3714 reg = readl(mmio + MV_PCI_MODE); 3714 reg = readl(mmio + MV_PCI_MODE);
3715 if ((reg & MV_PCI_MODE_MASK) == 0) 3715 if ((reg & MV_PCI_MODE_MASK) == 0)
3716 return 0; /* conventional PCI mode */ 3716 return 0; /* conventional PCI mode */
3717 return 1; /* chip is in PCI-X mode */ 3717 return 1; /* chip is in PCI-X mode */
3718 } 3718 }
3719 3719
3720 static int mv_pci_cut_through_okay(struct ata_host *host) 3720 static int mv_pci_cut_through_okay(struct ata_host *host)
3721 { 3721 {
3722 struct mv_host_priv *hpriv = host->private_data; 3722 struct mv_host_priv *hpriv = host->private_data;
3723 void __iomem *mmio = hpriv->base; 3723 void __iomem *mmio = hpriv->base;
3724 u32 reg; 3724 u32 reg;
3725 3725
3726 if (!mv_in_pcix_mode(host)) { 3726 if (!mv_in_pcix_mode(host)) {
3727 reg = readl(mmio + MV_PCI_COMMAND); 3727 reg = readl(mmio + MV_PCI_COMMAND);
3728 if (reg & MV_PCI_COMMAND_MRDTRIG) 3728 if (reg & MV_PCI_COMMAND_MRDTRIG)
3729 return 0; /* not okay */ 3729 return 0; /* not okay */
3730 } 3730 }
3731 return 1; /* okay */ 3731 return 1; /* okay */
3732 } 3732 }
3733 3733
3734 static void mv_60x1b2_errata_pci7(struct ata_host *host) 3734 static void mv_60x1b2_errata_pci7(struct ata_host *host)
3735 { 3735 {
3736 struct mv_host_priv *hpriv = host->private_data; 3736 struct mv_host_priv *hpriv = host->private_data;
3737 void __iomem *mmio = hpriv->base; 3737 void __iomem *mmio = hpriv->base;
3738 3738
3739 /* workaround for 60x1-B2 errata PCI#7 */ 3739 /* workaround for 60x1-B2 errata PCI#7 */
3740 if (mv_in_pcix_mode(host)) { 3740 if (mv_in_pcix_mode(host)) {
3741 u32 reg = readl(mmio + MV_PCI_COMMAND); 3741 u32 reg = readl(mmio + MV_PCI_COMMAND);
3742 writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND); 3742 writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND);
3743 } 3743 }
3744 } 3744 }
3745 3745
3746 static int mv_chip_id(struct ata_host *host, unsigned int board_idx) 3746 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
3747 { 3747 {
3748 struct pci_dev *pdev = to_pci_dev(host->dev); 3748 struct pci_dev *pdev = to_pci_dev(host->dev);
3749 struct mv_host_priv *hpriv = host->private_data; 3749 struct mv_host_priv *hpriv = host->private_data;
3750 u32 hp_flags = hpriv->hp_flags; 3750 u32 hp_flags = hpriv->hp_flags;
3751 3751
3752 switch (board_idx) { 3752 switch (board_idx) {
3753 case chip_5080: 3753 case chip_5080:
3754 hpriv->ops = &mv5xxx_ops; 3754 hpriv->ops = &mv5xxx_ops;
3755 hp_flags |= MV_HP_GEN_I; 3755 hp_flags |= MV_HP_GEN_I;
3756 3756
3757 switch (pdev->revision) { 3757 switch (pdev->revision) {
3758 case 0x1: 3758 case 0x1:
3759 hp_flags |= MV_HP_ERRATA_50XXB0; 3759 hp_flags |= MV_HP_ERRATA_50XXB0;
3760 break; 3760 break;
3761 case 0x3: 3761 case 0x3:
3762 hp_flags |= MV_HP_ERRATA_50XXB2; 3762 hp_flags |= MV_HP_ERRATA_50XXB2;
3763 break; 3763 break;
3764 default: 3764 default:
3765 dev_printk(KERN_WARNING, &pdev->dev, 3765 dev_printk(KERN_WARNING, &pdev->dev,
3766 "Applying 50XXB2 workarounds to unknown rev\n"); 3766 "Applying 50XXB2 workarounds to unknown rev\n");
3767 hp_flags |= MV_HP_ERRATA_50XXB2; 3767 hp_flags |= MV_HP_ERRATA_50XXB2;
3768 break; 3768 break;
3769 } 3769 }
3770 break; 3770 break;
3771 3771
3772 case chip_504x: 3772 case chip_504x:
3773 case chip_508x: 3773 case chip_508x:
3774 hpriv->ops = &mv5xxx_ops; 3774 hpriv->ops = &mv5xxx_ops;
3775 hp_flags |= MV_HP_GEN_I; 3775 hp_flags |= MV_HP_GEN_I;
3776 3776
3777 switch (pdev->revision) { 3777 switch (pdev->revision) {
3778 case 0x0: 3778 case 0x0:
3779 hp_flags |= MV_HP_ERRATA_50XXB0; 3779 hp_flags |= MV_HP_ERRATA_50XXB0;
3780 break; 3780 break;
3781 case 0x3: 3781 case 0x3:
3782 hp_flags |= MV_HP_ERRATA_50XXB2; 3782 hp_flags |= MV_HP_ERRATA_50XXB2;
3783 break; 3783 break;
3784 default: 3784 default:
3785 dev_printk(KERN_WARNING, &pdev->dev, 3785 dev_printk(KERN_WARNING, &pdev->dev,
3786 "Applying B2 workarounds to unknown rev\n"); 3786 "Applying B2 workarounds to unknown rev\n");
3787 hp_flags |= MV_HP_ERRATA_50XXB2; 3787 hp_flags |= MV_HP_ERRATA_50XXB2;
3788 break; 3788 break;
3789 } 3789 }
3790 break; 3790 break;
3791 3791
3792 case chip_604x: 3792 case chip_604x:
3793 case chip_608x: 3793 case chip_608x:
3794 hpriv->ops = &mv6xxx_ops; 3794 hpriv->ops = &mv6xxx_ops;
3795 hp_flags |= MV_HP_GEN_II; 3795 hp_flags |= MV_HP_GEN_II;
3796 3796
3797 switch (pdev->revision) { 3797 switch (pdev->revision) {
3798 case 0x7: 3798 case 0x7:
3799 mv_60x1b2_errata_pci7(host); 3799 mv_60x1b2_errata_pci7(host);
3800 hp_flags |= MV_HP_ERRATA_60X1B2; 3800 hp_flags |= MV_HP_ERRATA_60X1B2;
3801 break; 3801 break;
3802 case 0x9: 3802 case 0x9:
3803 hp_flags |= MV_HP_ERRATA_60X1C0; 3803 hp_flags |= MV_HP_ERRATA_60X1C0;
3804 break; 3804 break;
3805 default: 3805 default:
3806 dev_printk(KERN_WARNING, &pdev->dev, 3806 dev_printk(KERN_WARNING, &pdev->dev,
3807 "Applying B2 workarounds to unknown rev\n"); 3807 "Applying B2 workarounds to unknown rev\n");
3808 hp_flags |= MV_HP_ERRATA_60X1B2; 3808 hp_flags |= MV_HP_ERRATA_60X1B2;
3809 break; 3809 break;
3810 } 3810 }
3811 break; 3811 break;
3812 3812
3813 case chip_7042: 3813 case chip_7042:
3814 hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH; 3814 hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
3815 if (pdev->vendor == PCI_VENDOR_ID_TTI && 3815 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
3816 (pdev->device == 0x2300 || pdev->device == 0x2310)) 3816 (pdev->device == 0x2300 || pdev->device == 0x2310))
3817 { 3817 {
3818 /* 3818 /*
3819 * Highpoint RocketRAID PCIe 23xx series cards: 3819 * Highpoint RocketRAID PCIe 23xx series cards:
3820 * 3820 *
3821 * Unconfigured drives are treated as "Legacy" 3821 * Unconfigured drives are treated as "Legacy"
3822 * by the BIOS, and it overwrites sector 8 with 3822 * by the BIOS, and it overwrites sector 8 with
3823 * a "Lgcy" metadata block prior to Linux boot. 3823 * a "Lgcy" metadata block prior to Linux boot.
3824 * 3824 *
3825 * Configured drives (RAID or JBOD) leave sector 8 3825 * Configured drives (RAID or JBOD) leave sector 8
3826 * alone, but instead overwrite a high numbered 3826 * alone, but instead overwrite a high numbered
3827 * sector for the RAID metadata. This sector can 3827 * sector for the RAID metadata. This sector can
3828 * be determined exactly, by truncating the physical 3828 * be determined exactly, by truncating the physical
3829 * drive capacity to a nice even GB value. 3829 * drive capacity to a nice even GB value.
3830 * 3830 *
3831 * RAID metadata is at: (dev->n_sectors & ~0xfffff) 3831 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
3832 * 3832 *
3833 * Warn the user, lest they think we're just buggy. 3833 * Warn the user, lest they think we're just buggy.
3834 */ 3834 */
3835 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID" 3835 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
3836 " BIOS CORRUPTS DATA on all attached drives," 3836 " BIOS CORRUPTS DATA on all attached drives,"
3837 " regardless of if/how they are configured." 3837 " regardless of if/how they are configured."
3838 " BEWARE!\n"); 3838 " BEWARE!\n");
3839 printk(KERN_WARNING DRV_NAME ": For data safety, do not" 3839 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
3840 " use sectors 8-9 on \"Legacy\" drives," 3840 " use sectors 8-9 on \"Legacy\" drives,"
3841 " and avoid the final two gigabytes on" 3841 " and avoid the final two gigabytes on"
3842 " all RocketRAID BIOS initialized drives.\n"); 3842 " all RocketRAID BIOS initialized drives.\n");
3843 } 3843 }
3844 /* fall through */ 3844 /* fall through */
3845 case chip_6042: 3845 case chip_6042:
3846 hpriv->ops = &mv6xxx_ops; 3846 hpriv->ops = &mv6xxx_ops;
3847 hp_flags |= MV_HP_GEN_IIE; 3847 hp_flags |= MV_HP_GEN_IIE;
3848 if (board_idx == chip_6042 && mv_pci_cut_through_okay(host)) 3848 if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
3849 hp_flags |= MV_HP_CUT_THROUGH; 3849 hp_flags |= MV_HP_CUT_THROUGH;
3850 3850
3851 switch (pdev->revision) { 3851 switch (pdev->revision) {
3852 case 0x2: /* Rev.B0: the first/only public release */ 3852 case 0x2: /* Rev.B0: the first/only public release */
3853 hp_flags |= MV_HP_ERRATA_60X1C0; 3853 hp_flags |= MV_HP_ERRATA_60X1C0;
3854 break; 3854 break;
3855 default: 3855 default:
3856 dev_printk(KERN_WARNING, &pdev->dev, 3856 dev_printk(KERN_WARNING, &pdev->dev,
3857 "Applying 60X1C0 workarounds to unknown rev\n"); 3857 "Applying 60X1C0 workarounds to unknown rev\n");
3858 hp_flags |= MV_HP_ERRATA_60X1C0; 3858 hp_flags |= MV_HP_ERRATA_60X1C0;
3859 break; 3859 break;
3860 } 3860 }
3861 break; 3861 break;
3862 case chip_soc: 3862 case chip_soc:
3863 if (soc_is_65n(hpriv)) 3863 if (soc_is_65n(hpriv))
3864 hpriv->ops = &mv_soc_65n_ops; 3864 hpriv->ops = &mv_soc_65n_ops;
3865 else 3865 else
3866 hpriv->ops = &mv_soc_ops; 3866 hpriv->ops = &mv_soc_ops;
3867 hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE | 3867 hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
3868 MV_HP_ERRATA_60X1C0; 3868 MV_HP_ERRATA_60X1C0;
3869 break; 3869 break;
3870 3870
3871 default: 3871 default:
3872 dev_printk(KERN_ERR, host->dev, 3872 dev_printk(KERN_ERR, host->dev,
3873 "BUG: invalid board index %u\n", board_idx); 3873 "BUG: invalid board index %u\n", board_idx);
3874 return 1; 3874 return 1;
3875 } 3875 }
3876 3876
3877 hpriv->hp_flags = hp_flags; 3877 hpriv->hp_flags = hp_flags;
3878 if (hp_flags & MV_HP_PCIE) { 3878 if (hp_flags & MV_HP_PCIE) {
3879 hpriv->irq_cause_offset = PCIE_IRQ_CAUSE; 3879 hpriv->irq_cause_offset = PCIE_IRQ_CAUSE;
3880 hpriv->irq_mask_offset = PCIE_IRQ_MASK; 3880 hpriv->irq_mask_offset = PCIE_IRQ_MASK;
3881 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS; 3881 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
3882 } else { 3882 } else {
3883 hpriv->irq_cause_offset = PCI_IRQ_CAUSE; 3883 hpriv->irq_cause_offset = PCI_IRQ_CAUSE;
3884 hpriv->irq_mask_offset = PCI_IRQ_MASK; 3884 hpriv->irq_mask_offset = PCI_IRQ_MASK;
3885 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS; 3885 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
3886 } 3886 }
3887 3887
3888 return 0; 3888 return 0;
3889 } 3889 }
3890 3890
3891 /** 3891 /**
3892 * mv_init_host - Perform some early initialization of the host. 3892 * mv_init_host - Perform some early initialization of the host.
3893 * @host: ATA host to initialize 3893 * @host: ATA host to initialize
3894 * 3894 *
3895 * If possible, do an early global reset of the host. Then do 3895 * If possible, do an early global reset of the host. Then do
3896 * our port init and clear/unmask all/relevant host interrupts. 3896 * our port init and clear/unmask all/relevant host interrupts.
3897 * 3897 *
3898 * LOCKING: 3898 * LOCKING:
3899 * Inherited from caller. 3899 * Inherited from caller.
3900 */ 3900 */
3901 static int mv_init_host(struct ata_host *host) 3901 static int mv_init_host(struct ata_host *host)
3902 { 3902 {
3903 int rc = 0, n_hc, port, hc; 3903 int rc = 0, n_hc, port, hc;
3904 struct mv_host_priv *hpriv = host->private_data; 3904 struct mv_host_priv *hpriv = host->private_data;
3905 void __iomem *mmio = hpriv->base; 3905 void __iomem *mmio = hpriv->base;
3906 3906
3907 rc = mv_chip_id(host, hpriv->board_idx); 3907 rc = mv_chip_id(host, hpriv->board_idx);
3908 if (rc) 3908 if (rc)
3909 goto done; 3909 goto done;
3910 3910
3911 if (IS_SOC(hpriv)) { 3911 if (IS_SOC(hpriv)) {
3912 hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE; 3912 hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
3913 hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK; 3913 hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK;
3914 } else { 3914 } else {
3915 hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE; 3915 hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
3916 hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK; 3916 hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK;
3917 } 3917 }
3918 3918
3919 /* initialize shadow irq mask with register's value */ 3919 /* initialize shadow irq mask with register's value */
3920 hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr); 3920 hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);
3921 3921
3922 /* global interrupt mask: 0 == mask everything */ 3922 /* global interrupt mask: 0 == mask everything */
3923 mv_set_main_irq_mask(host, ~0, 0); 3923 mv_set_main_irq_mask(host, ~0, 0);
3924 3924
3925 n_hc = mv_get_hc_count(host->ports[0]->flags); 3925 n_hc = mv_get_hc_count(host->ports[0]->flags);
3926 3926
3927 for (port = 0; port < host->n_ports; port++) 3927 for (port = 0; port < host->n_ports; port++)
3928 if (hpriv->ops->read_preamp) 3928 if (hpriv->ops->read_preamp)
3929 hpriv->ops->read_preamp(hpriv, port, mmio); 3929 hpriv->ops->read_preamp(hpriv, port, mmio);
3930 3930
3931 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc); 3931 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
3932 if (rc) 3932 if (rc)
3933 goto done; 3933 goto done;
3934 3934
3935 hpriv->ops->reset_flash(hpriv, mmio); 3935 hpriv->ops->reset_flash(hpriv, mmio);
3936 hpriv->ops->reset_bus(host, mmio); 3936 hpriv->ops->reset_bus(host, mmio);
3937 hpriv->ops->enable_leds(hpriv, mmio); 3937 hpriv->ops->enable_leds(hpriv, mmio);
3938 3938
3939 for (port = 0; port < host->n_ports; port++) { 3939 for (port = 0; port < host->n_ports; port++) {
3940 struct ata_port *ap = host->ports[port]; 3940 struct ata_port *ap = host->ports[port];
3941 void __iomem *port_mmio = mv_port_base(mmio, port); 3941 void __iomem *port_mmio = mv_port_base(mmio, port);
3942 3942
3943 mv_port_init(&ap->ioaddr, port_mmio); 3943 mv_port_init(&ap->ioaddr, port_mmio);
3944 } 3944 }
3945 3945
3946 for (hc = 0; hc < n_hc; hc++) { 3946 for (hc = 0; hc < n_hc; hc++) {
3947 void __iomem *hc_mmio = mv_hc_base(mmio, hc); 3947 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3948 3948
3949 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause " 3949 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
3950 "(before clear)=0x%08x\n", hc, 3950 "(before clear)=0x%08x\n", hc,
3951 readl(hc_mmio + HC_CFG), 3951 readl(hc_mmio + HC_CFG),
3952 readl(hc_mmio + HC_IRQ_CAUSE)); 3952 readl(hc_mmio + HC_IRQ_CAUSE));
3953 3953
3954 /* Clear any currently outstanding hc interrupt conditions */ 3954 /* Clear any currently outstanding hc interrupt conditions */
3955 writelfl(0, hc_mmio + HC_IRQ_CAUSE); 3955 writelfl(0, hc_mmio + HC_IRQ_CAUSE);
3956 } 3956 }
3957 3957
3958 if (!IS_SOC(hpriv)) { 3958 if (!IS_SOC(hpriv)) {
3959 /* Clear any currently outstanding host interrupt conditions */ 3959 /* Clear any currently outstanding host interrupt conditions */
3960 writelfl(0, mmio + hpriv->irq_cause_offset); 3960 writelfl(0, mmio + hpriv->irq_cause_offset);
3961 3961
3962 /* and unmask interrupt generation for host regs */ 3962 /* and unmask interrupt generation for host regs */
3963 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset); 3963 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
3964 } 3964 }
3965 3965
3966 /* 3966 /*
3967 * enable only global host interrupts for now. 3967 * enable only global host interrupts for now.
3968 * The per-port interrupts get done later as ports are set up. 3968 * The per-port interrupts get done later as ports are set up.
3969 */ 3969 */
3970 mv_set_main_irq_mask(host, 0, PCI_ERR); 3970 mv_set_main_irq_mask(host, 0, PCI_ERR);
3971 mv_set_irq_coalescing(host, irq_coalescing_io_count, 3971 mv_set_irq_coalescing(host, irq_coalescing_io_count,
3972 irq_coalescing_usecs); 3972 irq_coalescing_usecs);
3973 done: 3973 done:
3974 return rc; 3974 return rc;
3975 } 3975 }
3976 3976
3977 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev) 3977 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
3978 { 3978 {
3979 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ, 3979 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
3980 MV_CRQB_Q_SZ, 0); 3980 MV_CRQB_Q_SZ, 0);
3981 if (!hpriv->crqb_pool) 3981 if (!hpriv->crqb_pool)
3982 return -ENOMEM; 3982 return -ENOMEM;
3983 3983
3984 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ, 3984 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
3985 MV_CRPB_Q_SZ, 0); 3985 MV_CRPB_Q_SZ, 0);
3986 if (!hpriv->crpb_pool) 3986 if (!hpriv->crpb_pool)
3987 return -ENOMEM; 3987 return -ENOMEM;
3988 3988
3989 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ, 3989 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
3990 MV_SG_TBL_SZ, 0); 3990 MV_SG_TBL_SZ, 0);
3991 if (!hpriv->sg_tbl_pool) 3991 if (!hpriv->sg_tbl_pool)
3992 return -ENOMEM; 3992 return -ENOMEM;
3993 3993
3994 return 0; 3994 return 0;
3995 } 3995 }
3996 3996
3997 static void mv_conf_mbus_windows(struct mv_host_priv *hpriv, 3997 static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
3998 struct mbus_dram_target_info *dram) 3998 struct mbus_dram_target_info *dram)
3999 { 3999 {
4000 int i; 4000 int i;
4001 4001
4002 for (i = 0; i < 4; i++) { 4002 for (i = 0; i < 4; i++) {
4003 writel(0, hpriv->base + WINDOW_CTRL(i)); 4003 writel(0, hpriv->base + WINDOW_CTRL(i));
4004 writel(0, hpriv->base + WINDOW_BASE(i)); 4004 writel(0, hpriv->base + WINDOW_BASE(i));
4005 } 4005 }
4006 4006
4007 for (i = 0; i < dram->num_cs; i++) { 4007 for (i = 0; i < dram->num_cs; i++) {
4008 struct mbus_dram_window *cs = dram->cs + i; 4008 struct mbus_dram_window *cs = dram->cs + i;
4009 4009
4010 writel(((cs->size - 1) & 0xffff0000) | 4010 writel(((cs->size - 1) & 0xffff0000) |
4011 (cs->mbus_attr << 8) | 4011 (cs->mbus_attr << 8) |
4012 (dram->mbus_dram_target_id << 4) | 1, 4012 (dram->mbus_dram_target_id << 4) | 1,
4013 hpriv->base + WINDOW_CTRL(i)); 4013 hpriv->base + WINDOW_CTRL(i));
4014 writel(cs->base, hpriv->base + WINDOW_BASE(i)); 4014 writel(cs->base, hpriv->base + WINDOW_BASE(i));
4015 } 4015 }
4016 } 4016 }
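/*
 * WINDOW_CTRL field layout as implied by the expression above (the SoC
 * datasheet is authoritative):
 *
 *	bits 31:16	window size, encoded as (size - 1) & 0xffff0000
 *	bits 15:8	MBUS attribute of this chip select
 *	bits 7:4	MBUS DRAM target id
 *	bit 0		window enable
 */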
4017 4017
4018 /** 4018 /**
4019 * mv_platform_probe - handle a positive probe of an SoC Marvell 4019 * mv_platform_probe - handle a positive probe of an SoC Marvell
4020 * host 4020 * host
4021 * @pdev: platform device found 4021 * @pdev: platform device found
4022 * 4022 *
4023 * LOCKING: 4023 * LOCKING:
4024 * Inherited from caller. 4024 * Inherited from caller.
4025 */ 4025 */
4026 static int mv_platform_probe(struct platform_device *pdev) 4026 static int mv_platform_probe(struct platform_device *pdev)
4027 { 4027 {
4028 static int printed_version; 4028 static int printed_version;
4029 const struct mv_sata_platform_data *mv_platform_data; 4029 const struct mv_sata_platform_data *mv_platform_data;
4030 const struct ata_port_info *ppi[] = 4030 const struct ata_port_info *ppi[] =
4031 { &mv_port_info[chip_soc], NULL }; 4031 { &mv_port_info[chip_soc], NULL };
4032 struct ata_host *host; 4032 struct ata_host *host;
4033 struct mv_host_priv *hpriv; 4033 struct mv_host_priv *hpriv;
4034 struct resource *res; 4034 struct resource *res;
4035 int n_ports, rc; 4035 int n_ports, rc;
4036 4036
4037 if (!printed_version++) 4037 if (!printed_version++)
4038 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); 4038 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
4039 4039
4040 /* 4040 /*
4041 * Simple resource validation. 4041 * Simple resource validation.
4042 */ 4042 */
4043 if (unlikely(pdev->num_resources != 2)) { 4043 if (unlikely(pdev->num_resources != 2)) {
4044 dev_err(&pdev->dev, "invalid number of resources\n"); 4044 dev_err(&pdev->dev, "invalid number of resources\n");
4045 return -EINVAL; 4045 return -EINVAL;
4046 } 4046 }
4047 4047
4048 /* 4048 /*
4049 * Get the register base first 4049 * Get the register base first
4050 */ 4050 */
4051 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 4051 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4052 if (res == NULL) 4052 if (res == NULL)
4053 return -EINVAL; 4053 return -EINVAL;
4054 4054
4055 /* allocate host */ 4055 /* allocate host */
4056 mv_platform_data = pdev->dev.platform_data; 4056 mv_platform_data = pdev->dev.platform_data;
4057 n_ports = mv_platform_data->n_ports; 4057 n_ports = mv_platform_data->n_ports;
4058 4058
4059 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); 4059 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4060 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); 4060 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4061 4061
4062 if (!host || !hpriv) 4062 if (!host || !hpriv)
4063 return -ENOMEM; 4063 return -ENOMEM;
4064 host->private_data = hpriv; 4064 host->private_data = hpriv;
4065 hpriv->n_ports = n_ports; 4065 hpriv->n_ports = n_ports;
4066 hpriv->board_idx = chip_soc; 4066 hpriv->board_idx = chip_soc;
4067 4067
4068 host->iomap = NULL; 4068 host->iomap = NULL;
4069 hpriv->base = devm_ioremap(&pdev->dev, res->start, 4069 hpriv->base = devm_ioremap(&pdev->dev, res->start,
4070 resource_size(res)); 4070 resource_size(res));
4071 hpriv->base -= SATAHC0_REG_BASE; 4071 hpriv->base -= SATAHC0_REG_BASE;
4072 4072
4073 #if defined(CONFIG_HAVE_CLK) 4073 #if defined(CONFIG_HAVE_CLK)
4074 hpriv->clk = clk_get(&pdev->dev, NULL); 4074 hpriv->clk = clk_get(&pdev->dev, NULL);
4075 if (IS_ERR(hpriv->clk)) 4075 if (IS_ERR(hpriv->clk))
4076 dev_notice(&pdev->dev, "cannot get clkdev\n"); 4076 dev_notice(&pdev->dev, "cannot get clkdev\n");
4077 else 4077 else
4078 clk_enable(hpriv->clk); 4078 clk_enable(hpriv->clk);
4079 #endif 4079 #endif
4080 4080
4081 /* 4081 /*
4082 * (Re-)program MBUS remapping windows if we are asked to. 4082 * (Re-)program MBUS remapping windows if we are asked to.
4083 */ 4083 */
4084 if (mv_platform_data->dram != NULL) 4084 if (mv_platform_data->dram != NULL)
4085 mv_conf_mbus_windows(hpriv, mv_platform_data->dram); 4085 mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
4086 4086
4087 rc = mv_create_dma_pools(hpriv, &pdev->dev); 4087 rc = mv_create_dma_pools(hpriv, &pdev->dev);
4088 if (rc) 4088 if (rc)
4089 goto err; 4089 goto err;
4090 4090
4091 /* initialize adapter */ 4091 /* initialize adapter */
4092 rc = mv_init_host(host); 4092 rc = mv_init_host(host);
4093 if (rc) 4093 if (rc)
4094 goto err; 4094 goto err;
4095 4095
4096 dev_printk(KERN_INFO, &pdev->dev, 4096 dev_printk(KERN_INFO, &pdev->dev,
4097 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH, 4097 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
4098 host->n_ports); 4098 host->n_ports);
4099 4099
4100 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt, 4100 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
4101 IRQF_SHARED, &mv6_sht); 4101 IRQF_SHARED, &mv6_sht);
4102 err: 4102 err:
4103 #if defined(CONFIG_HAVE_CLK) 4103 #if defined(CONFIG_HAVE_CLK)
4104 if (!IS_ERR(hpriv->clk)) { 4104 if (!IS_ERR(hpriv->clk)) {
4105 clk_disable(hpriv->clk); 4105 clk_disable(hpriv->clk);
4106 clk_put(hpriv->clk); 4106 clk_put(hpriv->clk);
4107 } 4107 }
4108 #endif 4108 #endif
4109 4109
4110 return rc; 4110 return rc;
4111 } 4111 }
4112 4112
4113 /* 4113 /*
4114 * 4114 *
4115 * mv_platform_remove - unplug a platform interface 4115 * mv_platform_remove - unplug a platform interface
4116 * @pdev: platform device 4116 * @pdev: platform device
4117 * 4117 *
4118 * A platform bus SATA device has been unplugged. Perform the needed 4118 * A platform bus SATA device has been unplugged. Perform the needed
4119 * cleanup. Also called on module unload for any active devices. 4119 * cleanup. Also called on module unload for any active devices.
4120 */ 4120 */
4121 static int __devexit mv_platform_remove(struct platform_device *pdev) 4121 static int __devexit mv_platform_remove(struct platform_device *pdev)
4122 { 4122 {
4123 struct device *dev = &pdev->dev; 4123 struct device *dev = &pdev->dev;
4124 struct ata_host *host = dev_get_drvdata(dev); 4124 struct ata_host *host = dev_get_drvdata(dev);
4125 #if defined(CONFIG_HAVE_CLK) 4125 #if defined(CONFIG_HAVE_CLK)
4126 struct mv_host_priv *hpriv = host->private_data; 4126 struct mv_host_priv *hpriv = host->private_data;
4127 #endif 4127 #endif
4128 ata_host_detach(host); 4128 ata_host_detach(host);
4129 4129
4130 #if defined(CONFIG_HAVE_CLK) 4130 #if defined(CONFIG_HAVE_CLK)
4131 if (!IS_ERR(hpriv->clk)) { 4131 if (!IS_ERR(hpriv->clk)) {
4132 clk_disable(hpriv->clk); 4132 clk_disable(hpriv->clk);
4133 clk_put(hpriv->clk); 4133 clk_put(hpriv->clk);
4134 } 4134 }
4135 #endif 4135 #endif
4136 return 0; 4136 return 0;
4137 } 4137 }
4138 4138
4139 #ifdef CONFIG_PM 4139 #ifdef CONFIG_PM
4140 static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state) 4140 static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
4141 { 4141 {
4142 struct ata_host *host = dev_get_drvdata(&pdev->dev); 4142 struct ata_host *host = dev_get_drvdata(&pdev->dev);
4143 if (host) 4143 if (host)
4144 return ata_host_suspend(host, state); 4144 return ata_host_suspend(host, state);
4145 else 4145 else
4146 return 0; 4146 return 0;
4147 } 4147 }
4148 4148
4149 static int mv_platform_resume(struct platform_device *pdev) 4149 static int mv_platform_resume(struct platform_device *pdev)
4150 { 4150 {
4151 struct ata_host *host = dev_get_drvdata(&pdev->dev); 4151 struct ata_host *host = dev_get_drvdata(&pdev->dev);
4152 int ret; 4152 int ret;
4153 4153
4154 if (host) { 4154 if (host) {
4155 struct mv_host_priv *hpriv = host->private_data; 4155 struct mv_host_priv *hpriv = host->private_data;
4156 const struct mv_sata_platform_data *mv_platform_data = \ 4156 const struct mv_sata_platform_data *mv_platform_data = \
4157 pdev->dev.platform_data; 4157 pdev->dev.platform_data;
4158 /* 4158 /*
4159 * (Re-)program MBUS remapping windows if we are asked to. 4159 * (Re-)program MBUS remapping windows if we are asked to.
4160 */ 4160 */
4161 if (mv_platform_data->dram != NULL) 4161 if (mv_platform_data->dram != NULL)
4162 mv_conf_mbus_windows(hpriv, mv_platform_data->dram); 4162 mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
4163 4163
4164 /* initialize adapter */ 4164 /* initialize adapter */
4165 ret = mv_init_host(host); 4165 ret = mv_init_host(host);
4166 if (ret) { 4166 if (ret) {
4167 printk(KERN_ERR DRV_NAME ": Error during HW init\n"); 4167 printk(KERN_ERR DRV_NAME ": Error during HW init\n");
4168 return ret; 4168 return ret;
4169 } 4169 }
4170 ata_host_resume(host); 4170 ata_host_resume(host);
4171 } 4171 }
4172 4172
4173 return 0; 4173 return 0;
4174 } 4174 }
4175 #else 4175 #else
4176 #define mv_platform_suspend NULL 4176 #define mv_platform_suspend NULL
4177 #define mv_platform_resume NULL 4177 #define mv_platform_resume NULL
4178 #endif 4178 #endif
4179 4179
4180 static struct platform_driver mv_platform_driver = { 4180 static struct platform_driver mv_platform_driver = {
4181 .probe = mv_platform_probe, 4181 .probe = mv_platform_probe,
4182 .remove = __devexit_p(mv_platform_remove), 4182 .remove = __devexit_p(mv_platform_remove),
4183 .suspend = mv_platform_suspend, 4183 .suspend = mv_platform_suspend,
4184 .resume = mv_platform_resume, 4184 .resume = mv_platform_resume,
4185 .driver = { 4185 .driver = {
4186 .name = DRV_NAME, 4186 .name = DRV_NAME,
4187 .owner = THIS_MODULE, 4187 .owner = THIS_MODULE,
4188 }, 4188 },
4189 }; 4189 };
4190 4190
4191 4191
4192 #ifdef CONFIG_PCI 4192 #ifdef CONFIG_PCI
4193 static int mv_pci_init_one(struct pci_dev *pdev, 4193 static int mv_pci_init_one(struct pci_dev *pdev,
4194 const struct pci_device_id *ent); 4194 const struct pci_device_id *ent);
4195 #ifdef CONFIG_PM 4195 #ifdef CONFIG_PM
4196 static int mv_pci_device_resume(struct pci_dev *pdev); 4196 static int mv_pci_device_resume(struct pci_dev *pdev);
4197 #endif 4197 #endif
4198 4198
4199 4199
4200 static struct pci_driver mv_pci_driver = { 4200 static struct pci_driver mv_pci_driver = {
4201 .name = DRV_NAME, 4201 .name = DRV_NAME,
4202 .id_table = mv_pci_tbl, 4202 .id_table = mv_pci_tbl,
4203 .probe = mv_pci_init_one, 4203 .probe = mv_pci_init_one,
4204 .remove = ata_pci_remove_one, 4204 .remove = ata_pci_remove_one,
4205 #ifdef CONFIG_PM 4205 #ifdef CONFIG_PM
4206 .suspend = ata_pci_device_suspend, 4206 .suspend = ata_pci_device_suspend,
4207 .resume = mv_pci_device_resume, 4207 .resume = mv_pci_device_resume,
4208 #endif 4208 #endif
4209 4209
4210 }; 4210 };
4211 4211
4212 /* move to PCI layer or libata core? */ 4212 /* move to PCI layer or libata core? */
4213 static int pci_go_64(struct pci_dev *pdev) 4213 static int pci_go_64(struct pci_dev *pdev)
4214 { 4214 {
4215 int rc; 4215 int rc;
4216 4216
4217 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 4217 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4218 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 4218 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4219 if (rc) { 4219 if (rc) {
4220 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 4220 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4221 if (rc) { 4221 if (rc) {
4222 dev_printk(KERN_ERR, &pdev->dev, 4222 dev_printk(KERN_ERR, &pdev->dev,
4223 "64-bit DMA enable failed\n"); 4223 "64-bit DMA enable failed\n");
4224 return rc; 4224 return rc;
4225 } 4225 }
4226 } 4226 }
4227 } else { 4227 } else {
4228 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 4228 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4229 if (rc) { 4229 if (rc) {
4230 dev_printk(KERN_ERR, &pdev->dev, 4230 dev_printk(KERN_ERR, &pdev->dev,
4231 "32-bit DMA enable failed\n"); 4231 "32-bit DMA enable failed\n");
4232 return rc; 4232 return rc;
4233 } 4233 }
4234 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 4234 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4235 if (rc) { 4235 if (rc) {
4236 dev_printk(KERN_ERR, &pdev->dev, 4236 dev_printk(KERN_ERR, &pdev->dev,
4237 "32-bit consistent DMA enable failed\n"); 4237 "32-bit consistent DMA enable failed\n");
4238 return rc; 4238 return rc;
4239 } 4239 }
4240 } 4240 }
4241 4241
4242 return rc; 4242 return rc;
4243 } 4243 }
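/*
 * Summary of the fallback order implemented above: prefer a 64-bit
 * streaming DMA mask; if the matching 64-bit consistent mask is then
 * refused, drop the consistent mask to 32 bits; if even the 32-bit
 * streaming mask fails, give up. A hypothetical caller only sees:
 *
 *	rc = pci_go_64(pdev);
 *	if (rc)
 *		return rc;	no usable DMA configuration
 */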
4244 4244
4245 /** 4245 /**
4246 * mv_print_info - Dump key info to kernel log for perusal. 4246 * mv_print_info - Dump key info to kernel log for perusal.
4247 * @host: ATA host to print info about 4247 * @host: ATA host to print info about
4248 * 4248 *
4249 * FIXME: complete this. 4249 * FIXME: complete this.
4250 * 4250 *
4251 * LOCKING: 4251 * LOCKING:
4252 * Inherited from caller. 4252 * Inherited from caller.
4253 */ 4253 */
4254 static void mv_print_info(struct ata_host *host) 4254 static void mv_print_info(struct ata_host *host)
4255 { 4255 {
4256 struct pci_dev *pdev = to_pci_dev(host->dev); 4256 struct pci_dev *pdev = to_pci_dev(host->dev);
4257 struct mv_host_priv *hpriv = host->private_data; 4257 struct mv_host_priv *hpriv = host->private_data;
4258 u8 scc; 4258 u8 scc;
4259 const char *scc_s, *gen; 4259 const char *scc_s, *gen;
4260 4260
4261 /* Use this to determine the HW stepping of the chip so we know 4261 /* Use this to determine the HW stepping of the chip so we know
4262 * what errata to work around. 4262 * what errata to work around.
4263 */ 4263 */
4264 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc); 4264 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
4265 if (scc == 0) 4265 if (scc == 0)
4266 scc_s = "SCSI"; 4266 scc_s = "SCSI";
4267 else if (scc == 0x01) 4267 else if (scc == 0x01)
4268 scc_s = "RAID"; 4268 scc_s = "RAID";
4269 else 4269 else
4270 scc_s = "?"; 4270 scc_s = "?";
4271 4271
4272 if (IS_GEN_I(hpriv)) 4272 if (IS_GEN_I(hpriv))
4273 gen = "I"; 4273 gen = "I";
4274 else if (IS_GEN_II(hpriv)) 4274 else if (IS_GEN_II(hpriv))
4275 gen = "II"; 4275 gen = "II";
4276 else if (IS_GEN_IIE(hpriv)) 4276 else if (IS_GEN_IIE(hpriv))
4277 gen = "IIE"; 4277 gen = "IIE";
4278 else 4278 else
4279 gen = "?"; 4279 gen = "?";
4280 4280
4281 dev_printk(KERN_INFO, &pdev->dev, 4281 dev_printk(KERN_INFO, &pdev->dev,
4282 "Gen-%s %u slots %u ports %s mode IRQ via %s\n", 4282 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
4283 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports, 4283 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
4284 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx"); 4284 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
4285 } 4285 }
4286 4286
4287 /** 4287 /**
4288 * mv_pci_init_one - handle a positive probe of a PCI Marvell host 4288 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
4289 * @pdev: PCI device found 4289 * @pdev: PCI device found
4290 * @ent: PCI device ID entry for the matched host 4290 * @ent: PCI device ID entry for the matched host
4291 * 4291 *
4292 * LOCKING: 4292 * LOCKING:
4293 * Inherited from caller. 4293 * Inherited from caller.
4294 */ 4294 */
4295 static int mv_pci_init_one(struct pci_dev *pdev, 4295 static int mv_pci_init_one(struct pci_dev *pdev,
4296 const struct pci_device_id *ent) 4296 const struct pci_device_id *ent)
4297 { 4297 {
4298 static int printed_version; 4298 static int printed_version;
4299 unsigned int board_idx = (unsigned int)ent->driver_data; 4299 unsigned int board_idx = (unsigned int)ent->driver_data;
4300 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL }; 4300 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
4301 struct ata_host *host; 4301 struct ata_host *host;
4302 struct mv_host_priv *hpriv; 4302 struct mv_host_priv *hpriv;
4303 int n_ports, port, rc; 4303 int n_ports, port, rc;
4304 4304
4305 if (!printed_version++) 4305 if (!printed_version++)
4306 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); 4306 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
4307 4307
4308 /* allocate host */ 4308 /* allocate host */
4309 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC; 4309 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
4310 4310
4311 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); 4311 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4312 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); 4312 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4313 if (!host || !hpriv) 4313 if (!host || !hpriv)
4314 return -ENOMEM; 4314 return -ENOMEM;
4315 host->private_data = hpriv; 4315 host->private_data = hpriv;
4316 hpriv->n_ports = n_ports; 4316 hpriv->n_ports = n_ports;
4317 hpriv->board_idx = board_idx; 4317 hpriv->board_idx = board_idx;
4318 4318
4319 /* acquire resources */ 4319 /* acquire resources */
4320 rc = pcim_enable_device(pdev); 4320 rc = pcim_enable_device(pdev);
4321 if (rc) 4321 if (rc)
4322 return rc; 4322 return rc;
4323 4323
4324 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME); 4324 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
4325 if (rc == -EBUSY) 4325 if (rc == -EBUSY)
4326 pcim_pin_device(pdev); 4326 pcim_pin_device(pdev);
4327 if (rc) 4327 if (rc)
4328 return rc; 4328 return rc;
4329 host->iomap = pcim_iomap_table(pdev); 4329 host->iomap = pcim_iomap_table(pdev);
4330 hpriv->base = host->iomap[MV_PRIMARY_BAR]; 4330 hpriv->base = host->iomap[MV_PRIMARY_BAR];
4331 4331
4332 rc = pci_go_64(pdev); 4332 rc = pci_go_64(pdev);
4333 if (rc) 4333 if (rc)
4334 return rc; 4334 return rc;
4335 4335
4336 rc = mv_create_dma_pools(hpriv, &pdev->dev); 4336 rc = mv_create_dma_pools(hpriv, &pdev->dev);
4337 if (rc) 4337 if (rc)
4338 return rc; 4338 return rc;
4339 4339
4340 for (port = 0; port < host->n_ports; port++) { 4340 for (port = 0; port < host->n_ports; port++) {
4341 struct ata_port *ap = host->ports[port]; 4341 struct ata_port *ap = host->ports[port];
4342 void __iomem *port_mmio = mv_port_base(hpriv->base, port); 4342 void __iomem *port_mmio = mv_port_base(hpriv->base, port);
4343 unsigned int offset = port_mmio - hpriv->base; 4343 unsigned int offset = port_mmio - hpriv->base;
4344 4344
4345 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio"); 4345 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
4346 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port"); 4346 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
4347 } 4347 }
4348 4348
4349 /* initialize adapter */ 4349 /* initialize adapter */
4350 rc = mv_init_host(host); 4350 rc = mv_init_host(host);
4351 if (rc) 4351 if (rc)
4352 return rc; 4352 return rc;
4353 4353
4354 /* Enable message-switched interrupts, if requested */ 4354 /* Enable message-switched interrupts, if requested */
4355 if (msi && pci_enable_msi(pdev) == 0) 4355 if (msi && pci_enable_msi(pdev) == 0)
4356 hpriv->hp_flags |= MV_HP_FLAG_MSI; 4356 hpriv->hp_flags |= MV_HP_FLAG_MSI;
4357 4357
4358 mv_dump_pci_cfg(pdev, 0x68); 4358 mv_dump_pci_cfg(pdev, 0x68);
4359 mv_print_info(host); 4359 mv_print_info(host);
4360 4360
4361 pci_set_master(pdev); 4361 pci_set_master(pdev);
4362 pci_try_set_mwi(pdev); 4362 pci_try_set_mwi(pdev);
4363 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED, 4363 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
4364 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht); 4364 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
4365 } 4365 }
4366 4366
4367 #ifdef CONFIG_PM 4367 #ifdef CONFIG_PM
4368 static int mv_pci_device_resume(struct pci_dev *pdev) 4368 static int mv_pci_device_resume(struct pci_dev *pdev)
4369 { 4369 {
4370 struct ata_host *host = dev_get_drvdata(&pdev->dev); 4370 struct ata_host *host = dev_get_drvdata(&pdev->dev);
4371 int rc; 4371 int rc;
4372 4372
4373 rc = ata_pci_device_do_resume(pdev); 4373 rc = ata_pci_device_do_resume(pdev);
4374 if (rc) 4374 if (rc)
4375 return rc; 4375 return rc;
4376 4376
4377 /* initialize adapter */ 4377 /* initialize adapter */
4378 rc = mv_init_host(host); 4378 rc = mv_init_host(host);
4379 if (rc) 4379 if (rc)
4380 return rc; 4380 return rc;
4381 4381
4382 ata_host_resume(host); 4382 ata_host_resume(host);
4383 4383
4384 return 0; 4384 return 0;
4385 } 4385 }
4386 #endif 4386 #endif
4387 #endif 4387 #endif
4388 4388
4389 static int mv_platform_probe(struct platform_device *pdev); 4389 static int mv_platform_probe(struct platform_device *pdev);
4390 static int __devexit mv_platform_remove(struct platform_device *pdev); 4390 static int __devexit mv_platform_remove(struct platform_device *pdev);
4391 4391
4392 static int __init mv_init(void) 4392 static int __init mv_init(void)
4393 { 4393 {
4394 int rc = -ENODEV; 4394 int rc = -ENODEV;
4395 #ifdef CONFIG_PCI 4395 #ifdef CONFIG_PCI
4396 rc = pci_register_driver(&mv_pci_driver); 4396 rc = pci_register_driver(&mv_pci_driver);
4397 if (rc < 0) 4397 if (rc < 0)
4398 return rc; 4398 return rc;
4399 #endif 4399 #endif
4400 rc = platform_driver_register(&mv_platform_driver); 4400 rc = platform_driver_register(&mv_platform_driver);
4401 4401
4402 #ifdef CONFIG_PCI 4402 #ifdef CONFIG_PCI
4403 if (rc < 0) 4403 if (rc < 0)
4404 pci_unregister_driver(&mv_pci_driver); 4404 pci_unregister_driver(&mv_pci_driver);
4405 #endif 4405 #endif
4406 return rc; 4406 return rc;
4407 } 4407 }
4408 4408
4409 static void __exit mv_exit(void) 4409 static void __exit mv_exit(void)
4410 { 4410 {
4411 #ifdef CONFIG_PCI 4411 #ifdef CONFIG_PCI
4412 pci_unregister_driver(&mv_pci_driver); 4412 pci_unregister_driver(&mv_pci_driver);
4413 #endif 4413 #endif
4414 platform_driver_unregister(&mv_platform_driver); 4414 platform_driver_unregister(&mv_platform_driver);
4415 } 4415 }
4416 4416
4417 MODULE_AUTHOR("Brett Russ"); 4417 MODULE_AUTHOR("Brett Russ");
4418 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers"); 4418 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
4419 MODULE_LICENSE("GPL"); 4419 MODULE_LICENSE("GPL");
4420 MODULE_DEVICE_TABLE(pci, mv_pci_tbl); 4420 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
4421 MODULE_VERSION(DRV_VERSION); 4421 MODULE_VERSION(DRV_VERSION);
4422 MODULE_ALIAS("platform:" DRV_NAME); 4422 MODULE_ALIAS("platform:" DRV_NAME);
4423 4423
4424 module_init(mv_init); 4424 module_init(mv_init);
4425 module_exit(mv_exit); 4425 module_exit(mv_exit);
4426 4426
include/linux/libata.h
1 /* 1 /*
2 * Copyright 2003-2005 Red Hat, Inc. All rights reserved. 2 * Copyright 2003-2005 Red Hat, Inc. All rights reserved.
3 * Copyright 2003-2005 Jeff Garzik 3 * Copyright 2003-2005 Jeff Garzik
4 * 4 *
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option) 8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version. 9 * any later version.
10 * 10 *
11 * This program is distributed in the hope that it will be useful, 11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 * 15 *
16 * You should have received a copy of the GNU General Public License 16 * You should have received a copy of the GNU General Public License
17 * along with this program; see the file COPYING. If not, write to 17 * along with this program; see the file COPYING. If not, write to
18 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 18 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
19 * 19 *
20 * 20 *
21 * libata documentation is available via 'make {ps|pdf}docs', 21 * libata documentation is available via 'make {ps|pdf}docs',
22 * as Documentation/DocBook/libata.* 22 * as Documentation/DocBook/libata.*
23 * 23 *
24 */ 24 */
25 25
26 #ifndef __LINUX_LIBATA_H__ 26 #ifndef __LINUX_LIBATA_H__
27 #define __LINUX_LIBATA_H__ 27 #define __LINUX_LIBATA_H__
28 28
29 #include <linux/delay.h> 29 #include <linux/delay.h>
30 #include <linux/jiffies.h> 30 #include <linux/jiffies.h>
31 #include <linux/interrupt.h> 31 #include <linux/interrupt.h>
32 #include <linux/dma-mapping.h> 32 #include <linux/dma-mapping.h>
33 #include <linux/scatterlist.h> 33 #include <linux/scatterlist.h>
34 #include <linux/io.h> 34 #include <linux/io.h>
35 #include <linux/ata.h> 35 #include <linux/ata.h>
36 #include <linux/workqueue.h> 36 #include <linux/workqueue.h>
37 #include <scsi/scsi_host.h> 37 #include <scsi/scsi_host.h>
38 #include <linux/acpi.h> 38 #include <linux/acpi.h>
39 #include <linux/cdrom.h> 39 #include <linux/cdrom.h>
40 40
41 /* 41 /*
42 * Define if arch has non-standard setup. This is a _PCI_ standard 42 * Define if arch has non-standard setup. This is a _PCI_ standard
43 * not a legacy or ISA standard. 43 * not a legacy or ISA standard.
44 */ 44 */
45 #ifdef CONFIG_ATA_NONSTANDARD 45 #ifdef CONFIG_ATA_NONSTANDARD
46 #include <asm/libata-portmap.h> 46 #include <asm/libata-portmap.h>
47 #else 47 #else
48 #include <asm-generic/libata-portmap.h> 48 #include <asm-generic/libata-portmap.h>
49 #endif 49 #endif
50 50
51 /* 51 /*
52 * compile-time options: to be removed as soon as all the drivers are 52 * compile-time options: to be removed as soon as all the drivers are
53 * converted to the new debugging mechanism 53 * converted to the new debugging mechanism
54 */ 54 */
55 #undef ATA_DEBUG /* debugging output */ 55 #undef ATA_DEBUG /* debugging output */
56 #undef ATA_VERBOSE_DEBUG /* yet more debugging output */ 56 #undef ATA_VERBOSE_DEBUG /* yet more debugging output */
57 #undef ATA_IRQ_TRAP /* define to ack screaming irqs */ 57 #undef ATA_IRQ_TRAP /* define to ack screaming irqs */
58 #undef ATA_NDEBUG /* define to disable quick runtime checks */ 58 #undef ATA_NDEBUG /* define to disable quick runtime checks */
59 59
60 60
61 /* note: prints function name for you */ 61 /* note: prints function name for you */
62 #ifdef ATA_DEBUG 62 #ifdef ATA_DEBUG
63 #define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) 63 #define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
64 #ifdef ATA_VERBOSE_DEBUG 64 #ifdef ATA_VERBOSE_DEBUG
65 #define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) 65 #define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
66 #else 66 #else
67 #define VPRINTK(fmt, args...) 67 #define VPRINTK(fmt, args...)
68 #endif /* ATA_VERBOSE_DEBUG */ 68 #endif /* ATA_VERBOSE_DEBUG */
69 #else 69 #else
70 #define DPRINTK(fmt, args...) 70 #define DPRINTK(fmt, args...)
71 #define VPRINTK(fmt, args...) 71 #define VPRINTK(fmt, args...)
72 #endif /* ATA_DEBUG */ 72 #endif /* ATA_DEBUG */
73 73
74 #define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __func__, ## args) 74 #define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __func__, ## args)
75 75
76 /* NEW: debug levels */ 76 /* NEW: debug levels */
77 #define HAVE_LIBATA_MSG 1 77 #define HAVE_LIBATA_MSG 1
78 78
79 enum { 79 enum {
80 ATA_MSG_DRV = 0x0001, 80 ATA_MSG_DRV = 0x0001,
81 ATA_MSG_INFO = 0x0002, 81 ATA_MSG_INFO = 0x0002,
82 ATA_MSG_PROBE = 0x0004, 82 ATA_MSG_PROBE = 0x0004,
83 ATA_MSG_WARN = 0x0008, 83 ATA_MSG_WARN = 0x0008,
84 ATA_MSG_MALLOC = 0x0010, 84 ATA_MSG_MALLOC = 0x0010,
85 ATA_MSG_CTL = 0x0020, 85 ATA_MSG_CTL = 0x0020,
86 ATA_MSG_INTR = 0x0040, 86 ATA_MSG_INTR = 0x0040,
87 ATA_MSG_ERR = 0x0080, 87 ATA_MSG_ERR = 0x0080,
88 }; 88 };
89 89
90 #define ata_msg_drv(p) ((p)->msg_enable & ATA_MSG_DRV) 90 #define ata_msg_drv(p) ((p)->msg_enable & ATA_MSG_DRV)
91 #define ata_msg_info(p) ((p)->msg_enable & ATA_MSG_INFO) 91 #define ata_msg_info(p) ((p)->msg_enable & ATA_MSG_INFO)
92 #define ata_msg_probe(p) ((p)->msg_enable & ATA_MSG_PROBE) 92 #define ata_msg_probe(p) ((p)->msg_enable & ATA_MSG_PROBE)
93 #define ata_msg_warn(p) ((p)->msg_enable & ATA_MSG_WARN) 93 #define ata_msg_warn(p) ((p)->msg_enable & ATA_MSG_WARN)
94 #define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC) 94 #define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC)
95 #define ata_msg_ctl(p) ((p)->msg_enable & ATA_MSG_CTL) 95 #define ata_msg_ctl(p) ((p)->msg_enable & ATA_MSG_CTL)
96 #define ata_msg_intr(p) ((p)->msg_enable & ATA_MSG_INTR) 96 #define ata_msg_intr(p) ((p)->msg_enable & ATA_MSG_INTR)
97 #define ata_msg_err(p) ((p)->msg_enable & ATA_MSG_ERR) 97 #define ata_msg_err(p) ((p)->msg_enable & ATA_MSG_ERR)
98 98
99 static inline u32 ata_msg_init(int dval, int default_msg_enable_bits) 99 static inline u32 ata_msg_init(int dval, int default_msg_enable_bits)
100 { 100 {
101 if (dval < 0 || dval >= (sizeof(u32) * 8)) 101 if (dval < 0 || dval >= (sizeof(u32) * 8))
102 return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */ 102 return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */
103 if (!dval) 103 if (!dval)
104 return 0; 104 return 0;
105 return (1 << dval) - 1; 105 return (1 << dval) - 1;
106 } 106 }
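As a reading aid (editorial note, not part of the diff): the level-to-mask conversion in ata_msg_init() above is easy to misparse, so here is a minimal standalone sketch of the same arithmetic with toy copies of the ATA_MSG_* bits; all names and values below are illustrative only.

#include <stdio.h>

/* toy copies of the ATA_MSG_* bits above, for illustration only */
enum { MSG_DRV = 0x1, MSG_INFO = 0x2, MSG_PROBE = 0x4, MSG_WARN = 0x8 };

/* same arithmetic as ata_msg_init(): level N turns on the N lowest bits */
static unsigned int msg_init(int dval, int dfl)
{
	if (dval < 0 || dval >= 32)
		return dfl;		/* out of range: keep driver default */
	if (!dval)
		return 0;		/* level 0 silences everything */
	return (1u << dval) - 1;	/* e.g. 3 -> 0x7 = DRV|INFO|PROBE */
}

int main(void)
{
	unsigned int msg_enable = msg_init(3, MSG_DRV);

	if (msg_enable & MSG_PROBE)	/* cf. the ata_msg_probe() test above */
		printf("probe messages enabled, mask 0x%x\n", msg_enable);
	return 0;
}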
107 107
108 /* defines only for the constants which don't work well as enums */ 108 /* defines only for the constants which don't work well as enums */
109 #define ATA_TAG_POISON 0xfafbfcfdU 109 #define ATA_TAG_POISON 0xfafbfcfdU
110 110
111 enum { 111 enum {
112 /* various global constants */ 112 /* various global constants */
113 LIBATA_MAX_PRD = ATA_MAX_PRD / 2, 113 LIBATA_MAX_PRD = ATA_MAX_PRD / 2,
114 LIBATA_DUMB_MAX_PRD = ATA_MAX_PRD / 4, /* Worst case */ 114 LIBATA_DUMB_MAX_PRD = ATA_MAX_PRD / 4, /* Worst case */
115 ATA_DEF_QUEUE = 1, 115 ATA_DEF_QUEUE = 1,
116 /* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */ 116 /* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */
117 ATA_MAX_QUEUE = 32, 117 ATA_MAX_QUEUE = 32,
118 ATA_TAG_INTERNAL = ATA_MAX_QUEUE - 1, 118 ATA_TAG_INTERNAL = ATA_MAX_QUEUE - 1,
119 ATA_SHORT_PAUSE = 16, 119 ATA_SHORT_PAUSE = 16,
120 120
121 ATAPI_MAX_DRAIN = 16 << 10, 121 ATAPI_MAX_DRAIN = 16 << 10,
122 122
123 ATA_ALL_DEVICES = (1 << ATA_MAX_DEVICES) - 1, 123 ATA_ALL_DEVICES = (1 << ATA_MAX_DEVICES) - 1,
124 124
125 ATA_SHT_EMULATED = 1, 125 ATA_SHT_EMULATED = 1,
126 ATA_SHT_CMD_PER_LUN = 1, 126 ATA_SHT_CMD_PER_LUN = 1,
127 ATA_SHT_THIS_ID = -1, 127 ATA_SHT_THIS_ID = -1,
128 ATA_SHT_USE_CLUSTERING = 1, 128 ATA_SHT_USE_CLUSTERING = 1,
129 129
130 /* struct ata_device stuff */ 130 /* struct ata_device stuff */
131 ATA_DFLAG_LBA = (1 << 0), /* device supports LBA */ 131 ATA_DFLAG_LBA = (1 << 0), /* device supports LBA */
132 ATA_DFLAG_LBA48 = (1 << 1), /* device supports LBA48 */ 132 ATA_DFLAG_LBA48 = (1 << 1), /* device supports LBA48 */
133 ATA_DFLAG_CDB_INTR = (1 << 2), /* device asserts INTRQ when ready for CDB */ 133 ATA_DFLAG_CDB_INTR = (1 << 2), /* device asserts INTRQ when ready for CDB */
134 ATA_DFLAG_NCQ = (1 << 3), /* device supports NCQ */ 134 ATA_DFLAG_NCQ = (1 << 3), /* device supports NCQ */
135 ATA_DFLAG_FLUSH_EXT = (1 << 4), /* do FLUSH_EXT instead of FLUSH */ 135 ATA_DFLAG_FLUSH_EXT = (1 << 4), /* do FLUSH_EXT instead of FLUSH */
136 ATA_DFLAG_ACPI_PENDING = (1 << 5), /* ACPI resume action pending */ 136 ATA_DFLAG_ACPI_PENDING = (1 << 5), /* ACPI resume action pending */
137 ATA_DFLAG_ACPI_FAILED = (1 << 6), /* ACPI on devcfg has failed */ 137 ATA_DFLAG_ACPI_FAILED = (1 << 6), /* ACPI on devcfg has failed */
138 ATA_DFLAG_AN = (1 << 7), /* AN configured */ 138 ATA_DFLAG_AN = (1 << 7), /* AN configured */
139 ATA_DFLAG_HIPM = (1 << 8), /* device supports HIPM */ 139 ATA_DFLAG_HIPM = (1 << 8), /* device supports HIPM */
140 ATA_DFLAG_DIPM = (1 << 9), /* device supports DIPM */ 140 ATA_DFLAG_DIPM = (1 << 9), /* device supports DIPM */
141 ATA_DFLAG_DMADIR = (1 << 10), /* device requires DMADIR */ 141 ATA_DFLAG_DMADIR = (1 << 10), /* device requires DMADIR */
142 ATA_DFLAG_CFG_MASK = (1 << 12) - 1, 142 ATA_DFLAG_CFG_MASK = (1 << 12) - 1,
143 143
144 ATA_DFLAG_PIO = (1 << 12), /* device limited to PIO mode */ 144 ATA_DFLAG_PIO = (1 << 12), /* device limited to PIO mode */
145 ATA_DFLAG_NCQ_OFF = (1 << 13), /* device limited to non-NCQ mode */ 145 ATA_DFLAG_NCQ_OFF = (1 << 13), /* device limited to non-NCQ mode */
146 ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */ 146 ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */
147 ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */ 147 ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */
148 ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */ 148 ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */
149 ATA_DFLAG_UNLOCK_HPA = (1 << 18), /* unlock HPA */ 149 ATA_DFLAG_UNLOCK_HPA = (1 << 18), /* unlock HPA */
150 ATA_DFLAG_INIT_MASK = (1 << 24) - 1, 150 ATA_DFLAG_INIT_MASK = (1 << 24) - 1,
151 151
152 ATA_DFLAG_DETACH = (1 << 24), 152 ATA_DFLAG_DETACH = (1 << 24),
153 ATA_DFLAG_DETACHED = (1 << 25), 153 ATA_DFLAG_DETACHED = (1 << 25),
154 154
155 ATA_DEV_UNKNOWN = 0, /* unknown device */ 155 ATA_DEV_UNKNOWN = 0, /* unknown device */
156 ATA_DEV_ATA = 1, /* ATA device */ 156 ATA_DEV_ATA = 1, /* ATA device */
157 ATA_DEV_ATA_UNSUP = 2, /* ATA device (unsupported) */ 157 ATA_DEV_ATA_UNSUP = 2, /* ATA device (unsupported) */
158 ATA_DEV_ATAPI = 3, /* ATAPI device */ 158 ATA_DEV_ATAPI = 3, /* ATAPI device */
159 ATA_DEV_ATAPI_UNSUP = 4, /* ATAPI device (unsupported) */ 159 ATA_DEV_ATAPI_UNSUP = 4, /* ATAPI device (unsupported) */
160 ATA_DEV_PMP = 5, /* SATA port multiplier */ 160 ATA_DEV_PMP = 5, /* SATA port multiplier */
161 ATA_DEV_PMP_UNSUP = 6, /* SATA port multiplier (unsupported) */ 161 ATA_DEV_PMP_UNSUP = 6, /* SATA port multiplier (unsupported) */
162 ATA_DEV_SEMB = 7, /* SEMB */ 162 ATA_DEV_SEMB = 7, /* SEMB */
163 ATA_DEV_SEMB_UNSUP = 8, /* SEMB (unsupported) */ 163 ATA_DEV_SEMB_UNSUP = 8, /* SEMB (unsupported) */
164 ATA_DEV_NONE = 9, /* no device */ 164 ATA_DEV_NONE = 9, /* no device */
165 165
166 /* struct ata_link flags */ 166 /* struct ata_link flags */
167 ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */ 167 ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */
168 ATA_LFLAG_NO_SRST = (1 << 2), /* avoid softreset */ 168 ATA_LFLAG_NO_SRST = (1 << 2), /* avoid softreset */
169 ATA_LFLAG_ASSUME_ATA = (1 << 3), /* assume ATA class */ 169 ATA_LFLAG_ASSUME_ATA = (1 << 3), /* assume ATA class */
170 ATA_LFLAG_ASSUME_SEMB = (1 << 4), /* assume SEMB class */ 170 ATA_LFLAG_ASSUME_SEMB = (1 << 4), /* assume SEMB class */
171 ATA_LFLAG_ASSUME_CLASS = ATA_LFLAG_ASSUME_ATA | ATA_LFLAG_ASSUME_SEMB, 171 ATA_LFLAG_ASSUME_CLASS = ATA_LFLAG_ASSUME_ATA | ATA_LFLAG_ASSUME_SEMB,
172 ATA_LFLAG_NO_RETRY = (1 << 5), /* don't retry this link */ 172 ATA_LFLAG_NO_RETRY = (1 << 5), /* don't retry this link */
173 ATA_LFLAG_DISABLED = (1 << 6), /* link is disabled */ 173 ATA_LFLAG_DISABLED = (1 << 6), /* link is disabled */
174 ATA_LFLAG_SW_ACTIVITY = (1 << 7), /* keep activity stats */ 174 ATA_LFLAG_SW_ACTIVITY = (1 << 7), /* keep activity stats */
175 175
176 /* struct ata_port flags */ 176 /* struct ata_port flags */
177 ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ 177 ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
178 /* (doesn't imply presence) */ 178 /* (doesn't imply presence) */
179 ATA_FLAG_SATA = (1 << 1), 179 ATA_FLAG_SATA = (1 << 1),
180 ATA_FLAG_NO_LEGACY = (1 << 2), /* no legacy mode check */ 180 ATA_FLAG_NO_LEGACY = (1 << 2), /* no legacy mode check */
181 ATA_FLAG_MMIO = (1 << 3), /* use MMIO, not PIO */ 181 ATA_FLAG_MMIO = (1 << 3), /* use MMIO, not PIO */
182 ATA_FLAG_SRST = (1 << 4), /* (obsolete) use ATA SRST, not E.D.D. */ 182 ATA_FLAG_SRST = (1 << 4), /* (obsolete) use ATA SRST, not E.D.D. */
183 ATA_FLAG_SATA_RESET = (1 << 5), /* (obsolete) use COMRESET */ 183 ATA_FLAG_SATA_RESET = (1 << 5), /* (obsolete) use COMRESET */
184 ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */ 184 ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */
185 ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */ 185 ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */
186 ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */ 186 ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */
187 ATA_FLAG_PIO_POLLING = (1 << 9), /* use polling PIO if LLD 187 ATA_FLAG_PIO_POLLING = (1 << 9), /* use polling PIO if LLD
188 * doesn't handle PIO interrupts */ 188 * doesn't handle PIO interrupts */
189 ATA_FLAG_NCQ = (1 << 10), /* host supports NCQ */ 189 ATA_FLAG_NCQ = (1 << 10), /* host supports NCQ */
190 ATA_FLAG_NO_POWEROFF_SPINDOWN = (1 << 11), /* don't spindown before poweroff */ 190 ATA_FLAG_NO_POWEROFF_SPINDOWN = (1 << 11), /* don't spindown before poweroff */
191 ATA_FLAG_NO_HIBERNATE_SPINDOWN = (1 << 12), /* don't spindown before hibernation */ 191 ATA_FLAG_NO_HIBERNATE_SPINDOWN = (1 << 12), /* don't spindown before hibernation */
192 ATA_FLAG_DEBUGMSG = (1 << 13), 192 ATA_FLAG_DEBUGMSG = (1 << 13),
193 ATA_FLAG_FPDMA_AA = (1 << 14), /* driver supports Auto-Activate */ 193 ATA_FLAG_FPDMA_AA = (1 << 14), /* driver supports Auto-Activate */
194 ATA_FLAG_IGN_SIMPLEX = (1 << 15), /* ignore SIMPLEX */ 194 ATA_FLAG_IGN_SIMPLEX = (1 << 15), /* ignore SIMPLEX */
195 ATA_FLAG_NO_IORDY = (1 << 16), /* controller lacks iordy */ 195 ATA_FLAG_NO_IORDY = (1 << 16), /* controller lacks iordy */
196 ATA_FLAG_ACPI_SATA = (1 << 17), /* need native SATA ACPI layout */ 196 ATA_FLAG_ACPI_SATA = (1 << 17), /* need native SATA ACPI layout */
197 ATA_FLAG_AN = (1 << 18), /* controller supports AN */ 197 ATA_FLAG_AN = (1 << 18), /* controller supports AN */
198 ATA_FLAG_PMP = (1 << 19), /* controller supports PMP */ 198 ATA_FLAG_PMP = (1 << 19), /* controller supports PMP */
199 ATA_FLAG_IPM = (1 << 20), /* driver can handle IPM */ 199 ATA_FLAG_IPM = (1 << 20), /* driver can handle IPM */
200 ATA_FLAG_EM = (1 << 21), /* driver supports enclosure 200 ATA_FLAG_EM = (1 << 21), /* driver supports enclosure
201 * management */ 201 * management */
202 ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity 202 ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity
203 * led */ 203 * led */
204 204
205 /* bits 24:31 of ap->flags are reserved for LLD specific flags */ 205 /* bits 24:31 of ap->flags are reserved for LLD specific flags */
206 206
207 207
208 /* struct ata_port pflags */ 208 /* struct ata_port pflags */
209 ATA_PFLAG_EH_PENDING = (1 << 0), /* EH pending */ 209 ATA_PFLAG_EH_PENDING = (1 << 0), /* EH pending */
210 ATA_PFLAG_EH_IN_PROGRESS = (1 << 1), /* EH in progress */ 210 ATA_PFLAG_EH_IN_PROGRESS = (1 << 1), /* EH in progress */
211 ATA_PFLAG_FROZEN = (1 << 2), /* port is frozen */ 211 ATA_PFLAG_FROZEN = (1 << 2), /* port is frozen */
212 ATA_PFLAG_RECOVERED = (1 << 3), /* recovery action performed */ 212 ATA_PFLAG_RECOVERED = (1 << 3), /* recovery action performed */
213 ATA_PFLAG_LOADING = (1 << 4), /* boot/loading probe */ 213 ATA_PFLAG_LOADING = (1 << 4), /* boot/loading probe */
214 ATA_PFLAG_SCSI_HOTPLUG = (1 << 6), /* SCSI hotplug scheduled */ 214 ATA_PFLAG_SCSI_HOTPLUG = (1 << 6), /* SCSI hotplug scheduled */
215 ATA_PFLAG_INITIALIZING = (1 << 7), /* being initialized, don't touch */ 215 ATA_PFLAG_INITIALIZING = (1 << 7), /* being initialized, don't touch */
216 ATA_PFLAG_RESETTING = (1 << 8), /* reset in progress */ 216 ATA_PFLAG_RESETTING = (1 << 8), /* reset in progress */
217 ATA_PFLAG_UNLOADING = (1 << 9), /* driver is being unloaded */ 217 ATA_PFLAG_UNLOADING = (1 << 9), /* driver is being unloaded */
218 ATA_PFLAG_UNLOADED = (1 << 10), /* driver is unloaded */ 218 ATA_PFLAG_UNLOADED = (1 << 10), /* driver is unloaded */
219 219
220 ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */ 220 ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */
221 ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */ 221 ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */
222 ATA_PFLAG_INIT_GTM_VALID = (1 << 19), /* initial gtm data valid */ 222 ATA_PFLAG_INIT_GTM_VALID = (1 << 19), /* initial gtm data valid */
223 223
224 ATA_PFLAG_PIO32 = (1 << 20), /* 32bit PIO */ 224 ATA_PFLAG_PIO32 = (1 << 20), /* 32bit PIO */
225 ATA_PFLAG_PIO32CHANGE = (1 << 21), /* 32bit PIO can be turned on/off */ 225 ATA_PFLAG_PIO32CHANGE = (1 << 21), /* 32bit PIO can be turned on/off */
226 226
227 /* struct ata_queued_cmd flags */ 227 /* struct ata_queued_cmd flags */
228 ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi layer */ 228 ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi layer */
229 ATA_QCFLAG_DMAMAP = (1 << 1), /* SG table is DMA mapped */ 229 ATA_QCFLAG_DMAMAP = (1 << 1), /* SG table is DMA mapped */
230 ATA_QCFLAG_IO = (1 << 3), /* standard IO command */ 230 ATA_QCFLAG_IO = (1 << 3), /* standard IO command */
231 ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */ 231 ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */
232 ATA_QCFLAG_CLEAR_EXCL = (1 << 5), /* clear excl_link on completion */ 232 ATA_QCFLAG_CLEAR_EXCL = (1 << 5), /* clear excl_link on completion */
233 ATA_QCFLAG_QUIET = (1 << 6), /* don't report device error */ 233 ATA_QCFLAG_QUIET = (1 << 6), /* don't report device error */
234 ATA_QCFLAG_RETRY = (1 << 7), /* retry after failure */ 234 ATA_QCFLAG_RETRY = (1 << 7), /* retry after failure */
235 235
236 ATA_QCFLAG_FAILED = (1 << 16), /* cmd failed and is owned by EH */ 236 ATA_QCFLAG_FAILED = (1 << 16), /* cmd failed and is owned by EH */
237 ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */ 237 ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */
238 ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */ 238 ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */
239 239
240 /* host set flags */ 240 /* host set flags */
241 ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host only */ 241 ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host only */
242 ATA_HOST_STARTED = (1 << 1), /* Host started */ 242 ATA_HOST_STARTED = (1 << 1), /* Host started */
243 ATA_HOST_PARALLEL_SCAN = (1 << 2), /* Ports on this host can be scanned in parallel */ 243 ATA_HOST_PARALLEL_SCAN = (1 << 2), /* Ports on this host can be scanned in parallel */
244 244
245 /* bits 24:31 of host->flags are reserved for LLD specific flags */ 245 /* bits 24:31 of host->flags are reserved for LLD specific flags */
246 246
247 /* various lengths of time */ 247 /* various lengths of time */
248 ATA_TMOUT_BOOT = 30000, /* heuristic */ 248 ATA_TMOUT_BOOT = 30000, /* heuristic */
249 ATA_TMOUT_BOOT_QUICK = 7000, /* heuristic */ 249 ATA_TMOUT_BOOT_QUICK = 7000, /* heuristic */
250 ATA_TMOUT_INTERNAL_QUICK = 5000, 250 ATA_TMOUT_INTERNAL_QUICK = 5000,
251 ATA_TMOUT_MAX_PARK = 30000, 251 ATA_TMOUT_MAX_PARK = 30000,
252 252
253 /* 253 /*
254 * GoVault needs 2s and iVDR disk HHD424020F7SV00 800ms. 2s 254 * GoVault needs 2s and iVDR disk HHD424020F7SV00 800ms. 2s
255 * is too much without parallel probing. Use 2s if parallel 255 * is too much without parallel probing. Use 2s if parallel
256 * probing is available, 800ms otherwise. 256 * probing is available, 800ms otherwise.
257 */ 257 */
258 ATA_TMOUT_FF_WAIT_LONG = 2000, 258 ATA_TMOUT_FF_WAIT_LONG = 2000,
259 ATA_TMOUT_FF_WAIT = 800, 259 ATA_TMOUT_FF_WAIT = 800,
260 260
261 /* Spec mandates waiting ">= 2ms" before checking status 261 /* Spec mandates waiting ">= 2ms" before checking status
262 * after reset. We wait 150ms, because that was the magic 262 * after reset. We wait 150ms, because that was the magic
263 * delay used for ATAPI devices in Hale Landis's ATADRVR, for 263 * delay used for ATAPI devices in Hale Landis's ATADRVR, for
264 * the period of time between when the ATA command register is 264 * the period of time between when the ATA command register is
265 * written, and then status is checked. Because waiting for 265 * written, and then status is checked. Because waiting for
266 * "a while" before checking status is fine, post SRST, we 266 * "a while" before checking status is fine, post SRST, we
267 * perform this magic delay here as well. 267 * perform this magic delay here as well.
268 * 268 *
269 * Old drivers/ide uses the 2ms rule and then waits for ready. 270 * Old drivers/ide uses the 2ms rule and then waits for ready.
270 */ 270 */
271 ATA_WAIT_AFTER_RESET = 150, 271 ATA_WAIT_AFTER_RESET = 150,
272 272
273 /* If PMP is supported, we have to do follow-up SRST. As some 273 /* If PMP is supported, we have to do follow-up SRST. As some
274 * PMPs don't send D2H Reg FIS after hardreset, LLDs are 274 * PMPs don't send D2H Reg FIS after hardreset, LLDs are
275 * advised to wait only for the following duration before 275 * advised to wait only for the following duration before
276 * doing SRST. 276 * doing SRST.
277 */ 277 */
278 ATA_TMOUT_PMP_SRST_WAIT = 5000, 278 ATA_TMOUT_PMP_SRST_WAIT = 5000,
279 279
280 /* ATA bus states */ 280 /* ATA bus states */
281 BUS_UNKNOWN = 0, 281 BUS_UNKNOWN = 0,
282 BUS_DMA = 1, 282 BUS_DMA = 1,
283 BUS_IDLE = 2, 283 BUS_IDLE = 2,
284 BUS_NOINTR = 3, 284 BUS_NOINTR = 3,
285 BUS_NODATA = 4, 285 BUS_NODATA = 4,
286 BUS_TIMER = 5, 286 BUS_TIMER = 5,
287 BUS_PIO = 6, 287 BUS_PIO = 6,
288 BUS_EDD = 7, 288 BUS_EDD = 7,
289 BUS_IDENTIFY = 8, 289 BUS_IDENTIFY = 8,
290 BUS_PACKET = 9, 290 BUS_PACKET = 9,
291 291
292 /* SATA port states */ 292 /* SATA port states */
293 PORT_UNKNOWN = 0, 293 PORT_UNKNOWN = 0,
294 PORT_ENABLED = 1, 294 PORT_ENABLED = 1,
295 PORT_DISABLED = 2, 295 PORT_DISABLED = 2,
296 296
297 /* encoding various smaller bitmaps into a single 297 /* encoding various smaller bitmaps into a single
298 * unsigned long bitmap 298 * unsigned long bitmap
299 */ 299 */
300 ATA_NR_PIO_MODES = 7, 300 ATA_NR_PIO_MODES = 7,
301 ATA_NR_MWDMA_MODES = 5, 301 ATA_NR_MWDMA_MODES = 5,
302 ATA_NR_UDMA_MODES = 8, 302 ATA_NR_UDMA_MODES = 8,
303 303
304 ATA_SHIFT_PIO = 0, 304 ATA_SHIFT_PIO = 0,
305 ATA_SHIFT_MWDMA = ATA_SHIFT_PIO + ATA_NR_PIO_MODES, 305 ATA_SHIFT_MWDMA = ATA_SHIFT_PIO + ATA_NR_PIO_MODES,
306 ATA_SHIFT_UDMA = ATA_SHIFT_MWDMA + ATA_NR_MWDMA_MODES, 306 ATA_SHIFT_UDMA = ATA_SHIFT_MWDMA + ATA_NR_MWDMA_MODES,
307 307
308 /* size of buffer to pad xfers ending on unaligned boundaries */ 308 /* size of buffer to pad xfers ending on unaligned boundaries */
309 ATA_DMA_PAD_SZ = 4, 309 ATA_DMA_PAD_SZ = 4,
310 310
311 /* ering size */ 311 /* ering size */
312 ATA_ERING_SIZE = 32, 312 ATA_ERING_SIZE = 32,
313 313
314 /* return values for ->qc_defer */ 314 /* return values for ->qc_defer */
315 ATA_DEFER_LINK = 1, 315 ATA_DEFER_LINK = 1,
316 ATA_DEFER_PORT = 2, 316 ATA_DEFER_PORT = 2,
317 317
318 /* desc_len for ata_eh_info and context */ 318 /* desc_len for ata_eh_info and context */
319 ATA_EH_DESC_LEN = 80, 319 ATA_EH_DESC_LEN = 80,
320 320
321 /* reset / recovery action types */ 321 /* reset / recovery action types */
322 ATA_EH_REVALIDATE = (1 << 0), 322 ATA_EH_REVALIDATE = (1 << 0),
323 ATA_EH_SOFTRESET = (1 << 1), /* meaningful only in ->prereset */ 323 ATA_EH_SOFTRESET = (1 << 1), /* meaningful only in ->prereset */
324 ATA_EH_HARDRESET = (1 << 2), /* meaningful only in ->prereset */ 324 ATA_EH_HARDRESET = (1 << 2), /* meaningful only in ->prereset */
325 ATA_EH_RESET = ATA_EH_SOFTRESET | ATA_EH_HARDRESET, 325 ATA_EH_RESET = ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
326 ATA_EH_ENABLE_LINK = (1 << 3), 326 ATA_EH_ENABLE_LINK = (1 << 3),
327 ATA_EH_LPM = (1 << 4), /* link power management action */ 327 ATA_EH_LPM = (1 << 4), /* link power management action */
328 ATA_EH_PARK = (1 << 5), /* unload heads and stop I/O */ 328 ATA_EH_PARK = (1 << 5), /* unload heads and stop I/O */
329 329
330 ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK, 330 ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK,
331 ATA_EH_ALL_ACTIONS = ATA_EH_REVALIDATE | ATA_EH_RESET | 331 ATA_EH_ALL_ACTIONS = ATA_EH_REVALIDATE | ATA_EH_RESET |
332 ATA_EH_ENABLE_LINK | ATA_EH_LPM, 332 ATA_EH_ENABLE_LINK | ATA_EH_LPM,
333 333
334 /* ata_eh_info->flags */ 334 /* ata_eh_info->flags */
335 ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ 335 ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */
336 ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ 336 ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */
337 ATA_EHI_QUIET = (1 << 3), /* be quiet */ 337 ATA_EHI_QUIET = (1 << 3), /* be quiet */
338 ATA_EHI_NO_RECOVERY = (1 << 4), /* no recovery */
338 339
339 ATA_EHI_DID_SOFTRESET = (1 << 16), /* already soft-reset this port */ 340 ATA_EHI_DID_SOFTRESET = (1 << 16), /* already soft-reset this port */
340 ATA_EHI_DID_HARDRESET = (1 << 17), /* already hard-reset this port */ 341 ATA_EHI_DID_HARDRESET = (1 << 17), /* already hard-reset this port */
341 ATA_EHI_PRINTINFO = (1 << 18), /* print configuration info */ 342 ATA_EHI_PRINTINFO = (1 << 18), /* print configuration info */
342 ATA_EHI_SETMODE = (1 << 19), /* configure transfer mode */ 343 ATA_EHI_SETMODE = (1 << 19), /* configure transfer mode */
343 ATA_EHI_POST_SETMODE = (1 << 20), /* revalidating after setmode */ 344 ATA_EHI_POST_SETMODE = (1 << 20), /* revalidating after setmode */
344 345
345 ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET, 346 ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET,
346 347
347 /* mask of flags to transfer *to* the slave link */ 348 /* mask of flags to transfer *to* the slave link */
348 ATA_EHI_TO_SLAVE_MASK = ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 349 ATA_EHI_TO_SLAVE_MASK = ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET,
349 350
350 /* max tries if error condition is still set after ->error_handler */ 351 /* max tries if error condition is still set after ->error_handler */
351 ATA_EH_MAX_TRIES = 5, 352 ATA_EH_MAX_TRIES = 5,
352 353
353 /* sometimes resuming a link requires several retries */ 354 /* sometimes resuming a link requires several retries */
354 ATA_LINK_RESUME_TRIES = 5, 355 ATA_LINK_RESUME_TRIES = 5,
355 356
356 /* how hard are we gonna try to probe/recover devices */ 357 /* how hard are we gonna try to probe/recover devices */
357 ATA_PROBE_MAX_TRIES = 3, 358 ATA_PROBE_MAX_TRIES = 3,
358 ATA_EH_DEV_TRIES = 3, 359 ATA_EH_DEV_TRIES = 3,
359 ATA_EH_PMP_TRIES = 5, 360 ATA_EH_PMP_TRIES = 5,
360 ATA_EH_PMP_LINK_TRIES = 3, 361 ATA_EH_PMP_LINK_TRIES = 3,
361 362
362 SATA_PMP_RW_TIMEOUT = 3000, /* PMP read/write timeout */ 363 SATA_PMP_RW_TIMEOUT = 3000, /* PMP read/write timeout */
363 364
364 /* This should match the actual table size of 365 /* This should match the actual table size of
365 * ata_eh_cmd_timeout_table in libata-eh.c. 366 * ata_eh_cmd_timeout_table in libata-eh.c.
366 */ 367 */
367 ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 6, 368 ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 6,
368 369
369 /* Horkage types. May be set by libata or controller on drives 370 /* Horkage types. May be set by libata or controller on drives
370 (some horkage may be drive/controller pair dependent) */ 371 (some horkage may be drive/controller pair dependent) */
371 372
372 ATA_HORKAGE_DIAGNOSTIC = (1 << 0), /* Failed boot diag */ 373 ATA_HORKAGE_DIAGNOSTIC = (1 << 0), /* Failed boot diag */
373 ATA_HORKAGE_NODMA = (1 << 1), /* DMA problems */ 374 ATA_HORKAGE_NODMA = (1 << 1), /* DMA problems */
374 ATA_HORKAGE_NONCQ = (1 << 2), /* Don't use NCQ */ 375 ATA_HORKAGE_NONCQ = (1 << 2), /* Don't use NCQ */
375 ATA_HORKAGE_MAX_SEC_128 = (1 << 3), /* Limit max sects to 128 */ 376 ATA_HORKAGE_MAX_SEC_128 = (1 << 3), /* Limit max sects to 128 */
376 ATA_HORKAGE_BROKEN_HPA = (1 << 4), /* Broken HPA */ 377 ATA_HORKAGE_BROKEN_HPA = (1 << 4), /* Broken HPA */
377 ATA_HORKAGE_DISABLE = (1 << 5), /* Disable it */ 378 ATA_HORKAGE_DISABLE = (1 << 5), /* Disable it */
378 ATA_HORKAGE_HPA_SIZE = (1 << 6), /* native size off by one */ 379 ATA_HORKAGE_HPA_SIZE = (1 << 6), /* native size off by one */
379 ATA_HORKAGE_IPM = (1 << 7), /* Link PM problems */ 380 ATA_HORKAGE_IPM = (1 << 7), /* Link PM problems */
380 ATA_HORKAGE_IVB = (1 << 8), /* cbl det validity bit bugs */ 381 ATA_HORKAGE_IVB = (1 << 8), /* cbl det validity bit bugs */
381 ATA_HORKAGE_STUCK_ERR = (1 << 9), /* stuck ERR on next PACKET */ 382 ATA_HORKAGE_STUCK_ERR = (1 << 9), /* stuck ERR on next PACKET */
382 ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */ 383 ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */
383 ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands 384 ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands
384 not multiple of 16 bytes */ 385 not multiple of 16 bytes */
385 ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */ 386 ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */
386 ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */ 387 ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */
387 ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */ 388 ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */
388 ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */ 389 ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */
389 ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */ 390 ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */
390 391
391 /* DMA mask for user DMA control: User visible values; DO NOT 392 /* DMA mask for user DMA control: User visible values; DO NOT
392 renumber */ 393 renumber */
393 ATA_DMA_MASK_ATA = (1 << 0), /* DMA on ATA Disk */ 394 ATA_DMA_MASK_ATA = (1 << 0), /* DMA on ATA Disk */
394 ATA_DMA_MASK_ATAPI = (1 << 1), /* DMA on ATAPI */ 395 ATA_DMA_MASK_ATAPI = (1 << 1), /* DMA on ATAPI */
395 ATA_DMA_MASK_CFA = (1 << 2), /* DMA on CF Card */ 396 ATA_DMA_MASK_CFA = (1 << 2), /* DMA on CF Card */
396 397
397 /* ATAPI command types */ 398 /* ATAPI command types */
398 ATAPI_READ = 0, /* READs */ 399 ATAPI_READ = 0, /* READs */
399 ATAPI_WRITE = 1, /* WRITEs */ 400 ATAPI_WRITE = 1, /* WRITEs */
400 ATAPI_READ_CD = 2, /* READ CD [MSF] */ 401 ATAPI_READ_CD = 2, /* READ CD [MSF] */
401 ATAPI_PASS_THRU = 3, /* SAT pass-thru */ 402 ATAPI_PASS_THRU = 3, /* SAT pass-thru */
402 ATAPI_MISC = 4, /* the rest */ 403 ATAPI_MISC = 4, /* the rest */
403 404
404 /* Timing constants */ 405 /* Timing constants */
405 ATA_TIMING_SETUP = (1 << 0), 406 ATA_TIMING_SETUP = (1 << 0),
406 ATA_TIMING_ACT8B = (1 << 1), 407 ATA_TIMING_ACT8B = (1 << 1),
407 ATA_TIMING_REC8B = (1 << 2), 408 ATA_TIMING_REC8B = (1 << 2),
408 ATA_TIMING_CYC8B = (1 << 3), 409 ATA_TIMING_CYC8B = (1 << 3),
409 ATA_TIMING_8BIT = ATA_TIMING_ACT8B | ATA_TIMING_REC8B | 410 ATA_TIMING_8BIT = ATA_TIMING_ACT8B | ATA_TIMING_REC8B |
410 ATA_TIMING_CYC8B, 411 ATA_TIMING_CYC8B,
411 ATA_TIMING_ACTIVE = (1 << 4), 412 ATA_TIMING_ACTIVE = (1 << 4),
412 ATA_TIMING_RECOVER = (1 << 5), 413 ATA_TIMING_RECOVER = (1 << 5),
413 ATA_TIMING_DMACK_HOLD = (1 << 6), 414 ATA_TIMING_DMACK_HOLD = (1 << 6),
414 ATA_TIMING_CYCLE = (1 << 7), 415 ATA_TIMING_CYCLE = (1 << 7),
415 ATA_TIMING_UDMA = (1 << 8), 416 ATA_TIMING_UDMA = (1 << 8),
416 ATA_TIMING_ALL = ATA_TIMING_SETUP | ATA_TIMING_ACT8B | 417 ATA_TIMING_ALL = ATA_TIMING_SETUP | ATA_TIMING_ACT8B |
417 ATA_TIMING_REC8B | ATA_TIMING_CYC8B | 418 ATA_TIMING_REC8B | ATA_TIMING_CYC8B |
418 ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER | 419 ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER |
419 ATA_TIMING_DMACK_HOLD | ATA_TIMING_CYCLE | 420 ATA_TIMING_DMACK_HOLD | ATA_TIMING_CYCLE |
420 ATA_TIMING_UDMA, 421 ATA_TIMING_UDMA,
421 422
422 /* ACPI constants */ 423 /* ACPI constants */
423 ATA_ACPI_FILTER_SETXFER = 1 << 0, 424 ATA_ACPI_FILTER_SETXFER = 1 << 0,
424 ATA_ACPI_FILTER_LOCK = 1 << 1, 425 ATA_ACPI_FILTER_LOCK = 1 << 1,
425 ATA_ACPI_FILTER_DIPM = 1 << 2, 426 ATA_ACPI_FILTER_DIPM = 1 << 2,
426 ATA_ACPI_FILTER_FPDMA_OFFSET = 1 << 3, /* FPDMA non-zero offset */ 427 ATA_ACPI_FILTER_FPDMA_OFFSET = 1 << 3, /* FPDMA non-zero offset */
427 ATA_ACPI_FILTER_FPDMA_AA = 1 << 4, /* FPDMA auto activate */ 428 ATA_ACPI_FILTER_FPDMA_AA = 1 << 4, /* FPDMA auto activate */
428 429
429 ATA_ACPI_FILTER_DEFAULT = ATA_ACPI_FILTER_SETXFER | 430 ATA_ACPI_FILTER_DEFAULT = ATA_ACPI_FILTER_SETXFER |
430 ATA_ACPI_FILTER_LOCK | 431 ATA_ACPI_FILTER_LOCK |
431 ATA_ACPI_FILTER_DIPM, 432 ATA_ACPI_FILTER_DIPM,
432 }; 433 };
433 434
434 enum ata_xfer_mask { 435 enum ata_xfer_mask {
435 ATA_MASK_PIO = ((1LU << ATA_NR_PIO_MODES) - 1) 436 ATA_MASK_PIO = ((1LU << ATA_NR_PIO_MODES) - 1)
436 << ATA_SHIFT_PIO, 437 << ATA_SHIFT_PIO,
437 ATA_MASK_MWDMA = ((1LU << ATA_NR_MWDMA_MODES) - 1) 438 ATA_MASK_MWDMA = ((1LU << ATA_NR_MWDMA_MODES) - 1)
438 << ATA_SHIFT_MWDMA, 439 << ATA_SHIFT_MWDMA,
439 ATA_MASK_UDMA = ((1LU << ATA_NR_UDMA_MODES) - 1) 440 ATA_MASK_UDMA = ((1LU << ATA_NR_UDMA_MODES) - 1)
440 << ATA_SHIFT_UDMA, 441 << ATA_SHIFT_UDMA,
441 }; 442 };
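For orientation (an editorial sketch, not diff content): plugging the ATA_NR_* and ATA_SHIFT_* values from the big enum above into these mask expressions shows how the three transfer classes pack into disjoint bit ranges of a single unsigned long.

#include <stdio.h>

/* constants copied from the enums above */
#define NR_PIO    7
#define NR_MWDMA  5
#define NR_UDMA   8
#define SHIFT_PIO    0
#define SHIFT_MWDMA  (SHIFT_PIO + NR_PIO)	/* 7 */
#define SHIFT_UDMA   (SHIFT_MWDMA + NR_MWDMA)	/* 12 */

int main(void)
{
	unsigned long pio   = ((1UL << NR_PIO)   - 1) << SHIFT_PIO;
	unsigned long mwdma = ((1UL << NR_MWDMA) - 1) << SHIFT_MWDMA;
	unsigned long udma  = ((1UL << NR_UDMA)  - 1) << SHIFT_UDMA;

	/* PIO0-6 -> bits 0..6, MWDMA0-4 -> bits 7..11, UDMA0-7 -> bits 12..19 */
	printf("ATA_MASK_PIO   = 0x%05lx\n", pio);	/* 0x0007f */
	printf("ATA_MASK_MWDMA = 0x%05lx\n", mwdma);	/* 0x00f80 */
	printf("ATA_MASK_UDMA  = 0x%05lx\n", udma);	/* 0xff000 */
	return 0;
}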
442 443
443 enum hsm_task_states { 444 enum hsm_task_states {
444 HSM_ST_IDLE, /* no command ongoing */ 445 HSM_ST_IDLE, /* no command ongoing */
445 HSM_ST_FIRST, /* (waiting for the device to) 446 HSM_ST_FIRST, /* (waiting for the device to)
446 write CDB or first data block */ 447 write CDB or first data block */
447 HSM_ST, /* (waiting for the device to) transfer data */ 448 HSM_ST, /* (waiting for the device to) transfer data */
448 HSM_ST_LAST, /* (waiting for the device to) complete command */ 449 HSM_ST_LAST, /* (waiting for the device to) complete command */
449 HSM_ST_ERR, /* error */ 450 HSM_ST_ERR, /* error */
450 }; 451 };
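A rough orientation aid (editorial, hedged): reading the comments above, a typical successful ATAPI command driven by the SFF state machine would visit the states roughly as IDLE -> FIRST (write CDB) -> ST (move data) -> LAST (collect completion) -> IDLE. A toy walk just to fix that ordering in mind; this is an assumed happy path, not libata's actual dispatcher, and error transitions into HSM_ST_ERR are omitted.

#include <stdio.h>

/* toy mirror of the hsm_task_states above, happy path only */
enum toy_st { T_IDLE, T_FIRST, T_DATA, T_LAST };

int main(void)
{
	static const char * const name[] = {
		"HSM_ST_IDLE", "HSM_ST_FIRST", "HSM_ST", "HSM_ST_LAST"
	};
	/* assumed flow for one ATAPI command: CDB, data, completion, idle */
	enum toy_st flow[] = { T_IDLE, T_FIRST, T_DATA, T_LAST, T_IDLE };
	unsigned int i, n = sizeof(flow) / sizeof(flow[0]);

	for (i = 0; i < n; i++)
		printf("%s%s", name[flow[i]], i + 1 < n ? " -> " : "\n");
	return 0;
}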
451 452
452 enum ata_completion_errors { 453 enum ata_completion_errors {
453 AC_ERR_DEV = (1 << 0), /* device reported error */ 454 AC_ERR_DEV = (1 << 0), /* device reported error */
454 AC_ERR_HSM = (1 << 1), /* host state machine violation */ 455 AC_ERR_HSM = (1 << 1), /* host state machine violation */
455 AC_ERR_TIMEOUT = (1 << 2), /* timeout */ 456 AC_ERR_TIMEOUT = (1 << 2), /* timeout */
456 AC_ERR_MEDIA = (1 << 3), /* media error */ 457 AC_ERR_MEDIA = (1 << 3), /* media error */
457 AC_ERR_ATA_BUS = (1 << 4), /* ATA bus error */ 458 AC_ERR_ATA_BUS = (1 << 4), /* ATA bus error */
458 AC_ERR_HOST_BUS = (1 << 5), /* host bus error */ 459 AC_ERR_HOST_BUS = (1 << 5), /* host bus error */
459 AC_ERR_SYSTEM = (1 << 6), /* system error */ 460 AC_ERR_SYSTEM = (1 << 6), /* system error */
460 AC_ERR_INVALID = (1 << 7), /* invalid argument */ 461 AC_ERR_INVALID = (1 << 7), /* invalid argument */
461 AC_ERR_OTHER = (1 << 8), /* unknown */ 462 AC_ERR_OTHER = (1 << 8), /* unknown */
462 AC_ERR_NODEV_HINT = (1 << 9), /* polling device detection hint */ 463 AC_ERR_NODEV_HINT = (1 << 9), /* polling device detection hint */
463 AC_ERR_NCQ = (1 << 10), /* marker for offending NCQ qc */ 464 AC_ERR_NCQ = (1 << 10), /* marker for offending NCQ qc */
464 }; 465 };
465 466
466 /* forward declarations */ 467 /* forward declarations */
467 struct scsi_device; 468 struct scsi_device;
468 struct ata_port_operations; 469 struct ata_port_operations;
469 struct ata_port; 470 struct ata_port;
470 struct ata_link; 471 struct ata_link;
471 struct ata_queued_cmd; 472 struct ata_queued_cmd;
472 473
473 /* typedefs */ 474 /* typedefs */
474 typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc); 475 typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc);
475 typedef int (*ata_prereset_fn_t)(struct ata_link *link, unsigned long deadline); 476 typedef int (*ata_prereset_fn_t)(struct ata_link *link, unsigned long deadline);
476 typedef int (*ata_reset_fn_t)(struct ata_link *link, unsigned int *classes, 477 typedef int (*ata_reset_fn_t)(struct ata_link *link, unsigned int *classes,
477 unsigned long deadline); 478 unsigned long deadline);
478 typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes); 479 typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes);
479 480
480 /* 481 /*
481 * host pm policy: If you alter this, you also need to alter libata-scsi.c 482 * host pm policy: If you alter this, you also need to alter libata-scsi.c
482 * (for the ascii descriptions) 483 * (for the ascii descriptions)
483 */ 484 */
484 enum link_pm { 485 enum link_pm {
485 NOT_AVAILABLE, 486 NOT_AVAILABLE,
486 MIN_POWER, 487 MIN_POWER,
487 MAX_PERFORMANCE, 488 MAX_PERFORMANCE,
488 MEDIUM_POWER, 489 MEDIUM_POWER,
489 }; 490 };
490 extern struct device_attribute dev_attr_link_power_management_policy; 491 extern struct device_attribute dev_attr_link_power_management_policy;
491 extern struct device_attribute dev_attr_unload_heads; 492 extern struct device_attribute dev_attr_unload_heads;
492 extern struct device_attribute dev_attr_em_message_type; 493 extern struct device_attribute dev_attr_em_message_type;
493 extern struct device_attribute dev_attr_em_message; 494 extern struct device_attribute dev_attr_em_message;
494 extern struct device_attribute dev_attr_sw_activity; 495 extern struct device_attribute dev_attr_sw_activity;
495 496
496 enum sw_activity { 497 enum sw_activity {
497 OFF, 498 OFF,
498 BLINK_ON, 499 BLINK_ON,
499 BLINK_OFF, 500 BLINK_OFF,
500 }; 501 };
501 502
502 #ifdef CONFIG_ATA_SFF 503 #ifdef CONFIG_ATA_SFF
503 struct ata_ioports { 504 struct ata_ioports {
504 void __iomem *cmd_addr; 505 void __iomem *cmd_addr;
505 void __iomem *data_addr; 506 void __iomem *data_addr;
506 void __iomem *error_addr; 507 void __iomem *error_addr;
507 void __iomem *feature_addr; 508 void __iomem *feature_addr;
508 void __iomem *nsect_addr; 509 void __iomem *nsect_addr;
509 void __iomem *lbal_addr; 510 void __iomem *lbal_addr;
510 void __iomem *lbam_addr; 511 void __iomem *lbam_addr;
511 void __iomem *lbah_addr; 512 void __iomem *lbah_addr;
512 void __iomem *device_addr; 513 void __iomem *device_addr;
513 void __iomem *status_addr; 514 void __iomem *status_addr;
514 void __iomem *command_addr; 515 void __iomem *command_addr;
515 void __iomem *altstatus_addr; 516 void __iomem *altstatus_addr;
516 void __iomem *ctl_addr; 517 void __iomem *ctl_addr;
517 #ifdef CONFIG_ATA_BMDMA 518 #ifdef CONFIG_ATA_BMDMA
518 void __iomem *bmdma_addr; 519 void __iomem *bmdma_addr;
519 #endif /* CONFIG_ATA_BMDMA */ 520 #endif /* CONFIG_ATA_BMDMA */
520 void __iomem *scr_addr; 521 void __iomem *scr_addr;
521 }; 522 };
522 #endif /* CONFIG_ATA_SFF */ 523 #endif /* CONFIG_ATA_SFF */
523 524
524 struct ata_host { 525 struct ata_host {
525 spinlock_t lock; 526 spinlock_t lock;
526 struct device *dev; 527 struct device *dev;
527 void __iomem * const *iomap; 528 void __iomem * const *iomap;
528 unsigned int n_ports; 529 unsigned int n_ports;
529 void *private_data; 530 void *private_data;
530 struct ata_port_operations *ops; 531 struct ata_port_operations *ops;
531 unsigned long flags; 532 unsigned long flags;
532 #ifdef CONFIG_ATA_ACPI 533 #ifdef CONFIG_ATA_ACPI
533 acpi_handle acpi_handle; 534 acpi_handle acpi_handle;
534 #endif 535 #endif
535 struct ata_port *simplex_claimed; /* channel owning the DMA */ 536 struct ata_port *simplex_claimed; /* channel owning the DMA */
536 struct ata_port *ports[0]; 537 struct ata_port *ports[0];
537 }; 538 };
538 539
539 struct ata_queued_cmd { 540 struct ata_queued_cmd {
540 struct ata_port *ap; 541 struct ata_port *ap;
541 struct ata_device *dev; 542 struct ata_device *dev;
542 543
543 struct scsi_cmnd *scsicmd; 544 struct scsi_cmnd *scsicmd;
544 void (*scsidone)(struct scsi_cmnd *); 545 void (*scsidone)(struct scsi_cmnd *);
545 546
546 struct ata_taskfile tf; 547 struct ata_taskfile tf;
547 u8 cdb[ATAPI_CDB_LEN]; 548 u8 cdb[ATAPI_CDB_LEN];
548 549
549 unsigned long flags; /* ATA_QCFLAG_xxx */ 550 unsigned long flags; /* ATA_QCFLAG_xxx */
550 unsigned int tag; 551 unsigned int tag;
551 unsigned int n_elem; 552 unsigned int n_elem;
552 unsigned int orig_n_elem; 553 unsigned int orig_n_elem;
553 554
554 int dma_dir; 555 int dma_dir;
555 556
556 unsigned int sect_size; 557 unsigned int sect_size;
557 558
558 unsigned int nbytes; 559 unsigned int nbytes;
559 unsigned int extrabytes; 560 unsigned int extrabytes;
560 unsigned int curbytes; 561 unsigned int curbytes;
561 562
562 struct scatterlist *cursg; 563 struct scatterlist *cursg;
563 unsigned int cursg_ofs; 564 unsigned int cursg_ofs;
564 565
565 struct scatterlist sgent; 566 struct scatterlist sgent;
566 567
567 struct scatterlist *sg; 568 struct scatterlist *sg;
568 569
569 unsigned int err_mask; 570 unsigned int err_mask;
570 struct ata_taskfile result_tf; 571 struct ata_taskfile result_tf;
571 ata_qc_cb_t complete_fn; 572 ata_qc_cb_t complete_fn;
572 573
573 void *private_data; 574 void *private_data;
574 void *lldd_task; 575 void *lldd_task;
575 }; 576 };
576 577
577 struct ata_port_stats { 578 struct ata_port_stats {
578 unsigned long unhandled_irq; 579 unsigned long unhandled_irq;
579 unsigned long idle_irq; 580 unsigned long idle_irq;
580 unsigned long rw_reqbuf; 581 unsigned long rw_reqbuf;
581 }; 582 };
582 583
583 struct ata_ering_entry { 584 struct ata_ering_entry {
584 unsigned int eflags; 585 unsigned int eflags;
585 unsigned int err_mask; 586 unsigned int err_mask;
586 u64 timestamp; 587 u64 timestamp;
587 }; 588 };
588 589
589 struct ata_ering { 590 struct ata_ering {
590 int cursor; 591 int cursor;
591 struct ata_ering_entry ring[ATA_ERING_SIZE]; 592 struct ata_ering_entry ring[ATA_ERING_SIZE];
592 }; 593 };
593 594
594 struct ata_device { 595 struct ata_device {
595 struct ata_link *link; 596 struct ata_link *link;
596 unsigned int devno; /* 0 or 1 */ 597 unsigned int devno; /* 0 or 1 */
597 unsigned int horkage; /* List of broken features */ 598 unsigned int horkage; /* List of broken features */
598 unsigned long flags; /* ATA_DFLAG_xxx */ 599 unsigned long flags; /* ATA_DFLAG_xxx */
599 struct scsi_device *sdev; /* attached SCSI device */ 600 struct scsi_device *sdev; /* attached SCSI device */
600 void *private_data; 601 void *private_data;
601 #ifdef CONFIG_ATA_ACPI 602 #ifdef CONFIG_ATA_ACPI
602 acpi_handle acpi_handle; 603 acpi_handle acpi_handle;
603 union acpi_object *gtf_cache; 604 union acpi_object *gtf_cache;
604 unsigned int gtf_filter; 605 unsigned int gtf_filter;
605 #endif 606 #endif
606 /* n_sectors is CLEAR_BEGIN, read comment above CLEAR_BEGIN */ 607 /* n_sectors is CLEAR_BEGIN, read comment above CLEAR_BEGIN */
607 u64 n_sectors; /* size of device, if ATA */ 608 u64 n_sectors; /* size of device, if ATA */
608 u64 n_native_sectors; /* native size, if ATA */ 609 u64 n_native_sectors; /* native size, if ATA */
609 unsigned int class; /* ATA_DEV_xxx */ 610 unsigned int class; /* ATA_DEV_xxx */
610 unsigned long unpark_deadline; 611 unsigned long unpark_deadline;
611 612
612 u8 pio_mode; 613 u8 pio_mode;
613 u8 dma_mode; 614 u8 dma_mode;
614 u8 xfer_mode; 615 u8 xfer_mode;
615 unsigned int xfer_shift; /* ATA_SHIFT_xxx */ 616 unsigned int xfer_shift; /* ATA_SHIFT_xxx */
616 617
617 unsigned int multi_count; /* sectors count for 618 unsigned int multi_count; /* sectors count for
618 READ/WRITE MULTIPLE */ 619 READ/WRITE MULTIPLE */
619 unsigned int max_sectors; /* per-device max sectors */ 620 unsigned int max_sectors; /* per-device max sectors */
620 unsigned int cdb_len; 621 unsigned int cdb_len;
621 622
622 /* per-dev xfer mask */ 623 /* per-dev xfer mask */
623 unsigned long pio_mask; 624 unsigned long pio_mask;
624 unsigned long mwdma_mask; 625 unsigned long mwdma_mask;
625 unsigned long udma_mask; 626 unsigned long udma_mask;
626 627
627 /* for CHS addressing */ 628 /* for CHS addressing */
628 u16 cylinders; /* Number of cylinders */ 629 u16 cylinders; /* Number of cylinders */
629 u16 heads; /* Number of heads */ 630 u16 heads; /* Number of heads */
630 u16 sectors; /* Number of sectors per track */ 631 u16 sectors; /* Number of sectors per track */
631 632
632 union { 633 union {
633 u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */ 634 u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
634 u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */ 635 u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
635 }; 636 };
636 637
637 /* error history */ 638 /* error history */
638 int spdn_cnt; 639 int spdn_cnt;
639 /* ering is CLEAR_END, read comment above CLEAR_END */ 640 /* ering is CLEAR_END, read comment above CLEAR_END */
640 struct ata_ering ering; 641 struct ata_ering ering;
641 }; 642 };
642 643
643 /* Fields between ATA_DEVICE_CLEAR_BEGIN and ATA_DEVICE_CLEAR_END are 644 /* Fields between ATA_DEVICE_CLEAR_BEGIN and ATA_DEVICE_CLEAR_END are
644 * cleared to zero on ata_dev_init(). 645 * cleared to zero on ata_dev_init().
645 */ 646 */
646 #define ATA_DEVICE_CLEAR_BEGIN offsetof(struct ata_device, n_sectors) 647 #define ATA_DEVICE_CLEAR_BEGIN offsetof(struct ata_device, n_sectors)
647 #define ATA_DEVICE_CLEAR_END offsetof(struct ata_device, ering) 648 #define ATA_DEVICE_CLEAR_END offsetof(struct ata_device, ering)
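An editorial sketch of how such a begin/end offset pair is typically consumed (toy struct; per the comment above, the real wipe happens in ata_dev_init(), which is not shown in this hunk): one memset clears exactly the window between the two offsets and leaves the surrounding fields intact.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct toy_dev {			/* toy stand-in for struct ata_device */
	int link_id;			/* before the window: preserved */
	unsigned long n_sectors;	/* CLEAR_BEGIN starts here */
	unsigned int dev_class;
	char ering[32];			/* CLEAR_END: not itself cleared */
};

#define CLEAR_BEGIN offsetof(struct toy_dev, n_sectors)
#define CLEAR_END   offsetof(struct toy_dev, ering)

int main(void)
{
	struct toy_dev d = { .link_id = 1, .n_sectors = 42, .dev_class = 3 };

	/* wipe only [CLEAR_BEGIN, CLEAR_END); cf. the defines above */
	memset((char *)&d + CLEAR_BEGIN, 0, CLEAR_END - CLEAR_BEGIN);
	printf("link=%d n_sectors=%lu class=%u\n",
	       d.link_id, d.n_sectors, d.dev_class);	/* link=1, 0, 0 */
	return 0;
}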
648 649
649 struct ata_eh_info { 650 struct ata_eh_info {
650 struct ata_device *dev; /* offending device */ 651 struct ata_device *dev; /* offending device */
651 u32 serror; /* SError from LLDD */ 652 u32 serror; /* SError from LLDD */
652 unsigned int err_mask; /* port-wide err_mask */ 653 unsigned int err_mask; /* port-wide err_mask */
653 unsigned int action; /* ATA_EH_* action mask */ 654 unsigned int action; /* ATA_EH_* action mask */
654 unsigned int dev_action[ATA_MAX_DEVICES]; /* dev EH action */ 655 unsigned int dev_action[ATA_MAX_DEVICES]; /* dev EH action */
655 unsigned int flags; /* ATA_EHI_* flags */ 656 unsigned int flags; /* ATA_EHI_* flags */
656 657
657 unsigned int probe_mask; 658 unsigned int probe_mask;
658 659
659 char desc[ATA_EH_DESC_LEN]; 660 char desc[ATA_EH_DESC_LEN];
660 int desc_len; 661 int desc_len;
661 }; 662 };
662 663
663 struct ata_eh_context { 664 struct ata_eh_context {
664 struct ata_eh_info i; 665 struct ata_eh_info i;
665 int tries[ATA_MAX_DEVICES]; 666 int tries[ATA_MAX_DEVICES];
666 int cmd_timeout_idx[ATA_MAX_DEVICES] 667 int cmd_timeout_idx[ATA_MAX_DEVICES]
667 [ATA_EH_CMD_TIMEOUT_TABLE_SIZE]; 668 [ATA_EH_CMD_TIMEOUT_TABLE_SIZE];
668 unsigned int classes[ATA_MAX_DEVICES]; 669 unsigned int classes[ATA_MAX_DEVICES];
669 unsigned int did_probe_mask; 670 unsigned int did_probe_mask;
670 unsigned int unloaded_mask; 671 unsigned int unloaded_mask;
671 unsigned int saved_ncq_enabled; 672 unsigned int saved_ncq_enabled;
672 u8 saved_xfer_mode[ATA_MAX_DEVICES]; 673 u8 saved_xfer_mode[ATA_MAX_DEVICES];
673 /* timestamp for the last reset attempt or success */ 674 /* timestamp for the last reset attempt or success */
674 unsigned long last_reset; 675 unsigned long last_reset;
675 }; 676 };
676 677
677 struct ata_acpi_drive 678 struct ata_acpi_drive
678 { 679 {
679 u32 pio; 680 u32 pio;
680 u32 dma; 681 u32 dma;
681 } __packed; 682 } __packed;
682 683
683 struct ata_acpi_gtm { 684 struct ata_acpi_gtm {
684 struct ata_acpi_drive drive[2]; 685 struct ata_acpi_drive drive[2];
685 u32 flags; 686 u32 flags;
686 } __packed; 687 } __packed;
687 688
688 struct ata_link { 689 struct ata_link {
689 struct ata_port *ap; 690 struct ata_port *ap;
690 int pmp; /* port multiplier port # */ 691 int pmp; /* port multiplier port # */
691 692
692 unsigned int active_tag; /* active tag on this link */ 693 unsigned int active_tag; /* active tag on this link */
693 u32 sactive; /* active NCQ commands */ 694 u32 sactive; /* active NCQ commands */
694 695
695 unsigned int flags; /* ATA_LFLAG_xxx */ 696 unsigned int flags; /* ATA_LFLAG_xxx */
696 697
697 u32 saved_scontrol; /* SControl on probe */ 698 u32 saved_scontrol; /* SControl on probe */
698 unsigned int hw_sata_spd_limit; 699 unsigned int hw_sata_spd_limit;
699 unsigned int sata_spd_limit; 700 unsigned int sata_spd_limit;
700 unsigned int sata_spd; /* current SATA PHY speed */ 701 unsigned int sata_spd; /* current SATA PHY speed */
701 702
702 /* record runtime error info, protected by host_set lock */ 703 /* record runtime error info, protected by host_set lock */
703 struct ata_eh_info eh_info; 704 struct ata_eh_info eh_info;
704 /* EH context */ 705 /* EH context */
705 struct ata_eh_context eh_context; 706 struct ata_eh_context eh_context;
706 707
707 struct ata_device device[ATA_MAX_DEVICES]; 708 struct ata_device device[ATA_MAX_DEVICES];
708 }; 709 };
709 710
710 struct ata_port { 711 struct ata_port {
711 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */ 712 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
712 struct ata_port_operations *ops; 713 struct ata_port_operations *ops;
713 spinlock_t *lock; 714 spinlock_t *lock;
714 /* Flags owned by the EH context. Only EH should touch these once the 715 /* Flags owned by the EH context. Only EH should touch these once the
715 port is active */ 716 port is active */
716 unsigned long flags; /* ATA_FLAG_xxx */ 717 unsigned long flags; /* ATA_FLAG_xxx */
717 /* Flags that change dynamically, protected by ap->lock */ 718 /* Flags that change dynamically, protected by ap->lock */
718 unsigned int pflags; /* ATA_PFLAG_xxx */ 719 unsigned int pflags; /* ATA_PFLAG_xxx */
719 unsigned int print_id; /* user visible unique port ID */ 720 unsigned int print_id; /* user visible unique port ID */
720 unsigned int port_no; /* 0 based port no. inside the host */ 721 unsigned int port_no; /* 0 based port no. inside the host */
721 722
722 #ifdef CONFIG_ATA_SFF 723 #ifdef CONFIG_ATA_SFF
723 struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */ 724 struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */
724 u8 ctl; /* cache of ATA control register */ 725 u8 ctl; /* cache of ATA control register */
725 u8 last_ctl; /* Cache last written value */ 726 u8 last_ctl; /* Cache last written value */
727 struct ata_link* sff_pio_task_link; /* link currently used */
726 struct delayed_work sff_pio_task; 728 struct delayed_work sff_pio_task;
727 #ifdef CONFIG_ATA_BMDMA 729 #ifdef CONFIG_ATA_BMDMA
728 struct ata_bmdma_prd *bmdma_prd; /* BMDMA SG list */ 730 struct ata_bmdma_prd *bmdma_prd; /* BMDMA SG list */
729 dma_addr_t bmdma_prd_dma; /* and its DMA mapping */ 731 dma_addr_t bmdma_prd_dma; /* and its DMA mapping */
730 #endif /* CONFIG_ATA_BMDMA */ 732 #endif /* CONFIG_ATA_BMDMA */
731 #endif /* CONFIG_ATA_SFF */ 733 #endif /* CONFIG_ATA_SFF */
732 734
733 unsigned int pio_mask; 735 unsigned int pio_mask;
734 unsigned int mwdma_mask; 736 unsigned int mwdma_mask;
735 unsigned int udma_mask; 737 unsigned int udma_mask;
736 unsigned int cbl; /* cable type; ATA_CBL_xxx */ 738 unsigned int cbl; /* cable type; ATA_CBL_xxx */
737 739
738 struct ata_queued_cmd qcmd[ATA_MAX_QUEUE]; 740 struct ata_queued_cmd qcmd[ATA_MAX_QUEUE];
739 unsigned long qc_allocated; 741 unsigned long qc_allocated;
740 unsigned int qc_active; 742 unsigned int qc_active;
741 int nr_active_links; /* #links with active qcs */ 743 int nr_active_links; /* #links with active qcs */
742 744
743 struct ata_link link; /* host default link */ 745 struct ata_link link; /* host default link */
744 struct ata_link *slave_link; /* see ata_slave_link_init() */ 746 struct ata_link *slave_link; /* see ata_slave_link_init() */
745 747
746 int nr_pmp_links; /* nr of available PMP links */ 748 int nr_pmp_links; /* nr of available PMP links */
747 struct ata_link *pmp_link; /* array of PMP links */ 749 struct ata_link *pmp_link; /* array of PMP links */
748 struct ata_link *excl_link; /* for PMP qc exclusion */ 750 struct ata_link *excl_link; /* for PMP qc exclusion */
749 751
750 struct ata_port_stats stats; 752 struct ata_port_stats stats;
751 struct ata_host *host; 753 struct ata_host *host;
752 struct device *dev; 754 struct device *dev;
753 755
754 struct mutex scsi_scan_mutex; 756 struct mutex scsi_scan_mutex;
755 struct delayed_work hotplug_task; 757 struct delayed_work hotplug_task;
756 struct work_struct scsi_rescan_task; 758 struct work_struct scsi_rescan_task;
757 759
758 unsigned int hsm_task_state; 760 unsigned int hsm_task_state;
759 761
760 u32 msg_enable; 762 u32 msg_enable;
761 struct list_head eh_done_q; 763 struct list_head eh_done_q;
762 wait_queue_head_t eh_wait_q; 764 wait_queue_head_t eh_wait_q;
763 int eh_tries; 765 int eh_tries;
764 struct completion park_req_pending; 766 struct completion park_req_pending;
765 767
766 pm_message_t pm_mesg; 768 pm_message_t pm_mesg;
767 int *pm_result; 769 int *pm_result;
768 enum link_pm pm_policy; 770 enum link_pm pm_policy;
769 771
770 struct timer_list fastdrain_timer; 772 struct timer_list fastdrain_timer;
771 unsigned long fastdrain_cnt; 773 unsigned long fastdrain_cnt;
772 774
773 int em_message_type; 775 int em_message_type;
774 void *private_data; 776 void *private_data;
775 777
776 #ifdef CONFIG_ATA_ACPI 778 #ifdef CONFIG_ATA_ACPI
777 acpi_handle acpi_handle; 779 acpi_handle acpi_handle;
778 struct ata_acpi_gtm __acpi_init_gtm; /* use ata_acpi_init_gtm() */ 780 struct ata_acpi_gtm __acpi_init_gtm; /* use ata_acpi_init_gtm() */
779 #endif 781 #endif
780 /* owned by EH */ 782 /* owned by EH */
781 u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned; 783 u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned;
782 }; 784 };
783 785
784 /* The following initializer overrides a method to NULL whether or not 786 /* The following initializer overrides a method to NULL whether or not
785 * one of its parents has the method defined. This is equivalent to 787 * one of its parents has the method defined. This is equivalent to
786 * ERR_PTR(-ENOENT). Unfortunately, ERR_PTR doesn't render a constant 788 * ERR_PTR(-ENOENT). Unfortunately, ERR_PTR doesn't render a constant
787 * expression and thus can't be used as an initializer. 789 * expression and thus can't be used as an initializer.
788 */ 790 */
789 #define ATA_OP_NULL (void *)(unsigned long)(-ENOENT) 791 #define ATA_OP_NULL (void *)(unsigned long)(-ENOENT)
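To illustrate the comment above (editorial sketch; libata's real resolution happens during port-ops finalization, which is not part of this hunk): with inherited ops tables, NULL means "look in the parent", while the ATA_OP_NULL sentinel means "explicitly no method". A toy chain walk, with methods stored as plain pointers for simplicity:

#include <stdio.h>

#define OP_NULL ((void *)(unsigned long)(-2))	/* shape of ATA_OP_NULL (-ENOENT) */

struct toy_ops {			/* toy ops table, methods as void * */
	void *freeze;
	const struct toy_ops *inherits;
};

static const struct toy_ops base  = { .freeze = (void *)0x1 /* pretend fn */ };
/* NULL here would fall through to base; the sentinel suppresses the method */
static const struct toy_ops child = { .freeze = OP_NULL, .inherits = &base };

int main(void)
{
	const struct toy_ops *o;
	void *fn = NULL;

	for (o = &child; o; o = o->inherits)	/* walk the inheritance chain */
		if (o->freeze) {
			fn = o->freeze;
			break;
		}
	if (fn == OP_NULL)			/* constant-comparable sentinel */
		fn = NULL;
	puts(fn ? "freeze resolved from an ancestor" : "freeze explicitly absent");
	return 0;
}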
790 792
791 struct ata_port_operations { 793 struct ata_port_operations {
792 /* 794 /*
793 * Command execution 795 * Command execution
794 */ 796 */
795 int (*qc_defer)(struct ata_queued_cmd *qc); 797 int (*qc_defer)(struct ata_queued_cmd *qc);
796 int (*check_atapi_dma)(struct ata_queued_cmd *qc); 798 int (*check_atapi_dma)(struct ata_queued_cmd *qc);
797 void (*qc_prep)(struct ata_queued_cmd *qc); 799 void (*qc_prep)(struct ata_queued_cmd *qc);
798 unsigned int (*qc_issue)(struct ata_queued_cmd *qc); 800 unsigned int (*qc_issue)(struct ata_queued_cmd *qc);
799 bool (*qc_fill_rtf)(struct ata_queued_cmd *qc); 801 bool (*qc_fill_rtf)(struct ata_queued_cmd *qc);
800 802
801 /* 803 /*
802 * Configuration and exception handling 804 * Configuration and exception handling
803 */ 805 */
804 int (*cable_detect)(struct ata_port *ap); 806 int (*cable_detect)(struct ata_port *ap);
805 unsigned long (*mode_filter)(struct ata_device *dev, unsigned long xfer_mask); 807 unsigned long (*mode_filter)(struct ata_device *dev, unsigned long xfer_mask);
806 void (*set_piomode)(struct ata_port *ap, struct ata_device *dev); 808 void (*set_piomode)(struct ata_port *ap, struct ata_device *dev);
807 void (*set_dmamode)(struct ata_port *ap, struct ata_device *dev); 809 void (*set_dmamode)(struct ata_port *ap, struct ata_device *dev);
808 int (*set_mode)(struct ata_link *link, struct ata_device **r_failed_dev); 810 int (*set_mode)(struct ata_link *link, struct ata_device **r_failed_dev);
809 unsigned int (*read_id)(struct ata_device *dev, struct ata_taskfile *tf, u16 *id); 811 unsigned int (*read_id)(struct ata_device *dev, struct ata_taskfile *tf, u16 *id);
810 812
811 void (*dev_config)(struct ata_device *dev); 813 void (*dev_config)(struct ata_device *dev);
812 814
813 void (*freeze)(struct ata_port *ap); 815 void (*freeze)(struct ata_port *ap);
814 void (*thaw)(struct ata_port *ap); 816 void (*thaw)(struct ata_port *ap);
815 ata_prereset_fn_t prereset; 817 ata_prereset_fn_t prereset;
816 ata_reset_fn_t softreset; 818 ata_reset_fn_t softreset;
817 ata_reset_fn_t hardreset; 819 ata_reset_fn_t hardreset;
818 ata_postreset_fn_t postreset; 820 ata_postreset_fn_t postreset;
819 ata_prereset_fn_t pmp_prereset; 821 ata_prereset_fn_t pmp_prereset;
820 ata_reset_fn_t pmp_softreset; 822 ata_reset_fn_t pmp_softreset;
821 ata_reset_fn_t pmp_hardreset; 823 ata_reset_fn_t pmp_hardreset;
822 ata_postreset_fn_t pmp_postreset; 824 ata_postreset_fn_t pmp_postreset;
823 void (*error_handler)(struct ata_port *ap); 825 void (*error_handler)(struct ata_port *ap);
824 void (*lost_interrupt)(struct ata_port *ap); 826 void (*lost_interrupt)(struct ata_port *ap);
825 void (*post_internal_cmd)(struct ata_queued_cmd *qc); 827 void (*post_internal_cmd)(struct ata_queued_cmd *qc);
826 828
827 /* 829 /*
828 * Optional features 830 * Optional features
829 */ 831 */
830 int (*scr_read)(struct ata_link *link, unsigned int sc_reg, u32 *val); 832 int (*scr_read)(struct ata_link *link, unsigned int sc_reg, u32 *val);
831 int (*scr_write)(struct ata_link *link, unsigned int sc_reg, u32 val); 833 int (*scr_write)(struct ata_link *link, unsigned int sc_reg, u32 val);
832 void (*pmp_attach)(struct ata_port *ap); 834 void (*pmp_attach)(struct ata_port *ap);
833 void (*pmp_detach)(struct ata_port *ap); 835 void (*pmp_detach)(struct ata_port *ap);
834 int (*enable_pm)(struct ata_port *ap, enum link_pm policy); 836 int (*enable_pm)(struct ata_port *ap, enum link_pm policy);
835 void (*disable_pm)(struct ata_port *ap); 837 void (*disable_pm)(struct ata_port *ap);
836 838
837 /* 839 /*
838 * Start, stop, suspend and resume 840 * Start, stop, suspend and resume
839 */ 841 */
840 int (*port_suspend)(struct ata_port *ap, pm_message_t mesg); 842 int (*port_suspend)(struct ata_port *ap, pm_message_t mesg);
841 int (*port_resume)(struct ata_port *ap); 843 int (*port_resume)(struct ata_port *ap);
842 int (*port_start)(struct ata_port *ap); 844 int (*port_start)(struct ata_port *ap);
843 void (*port_stop)(struct ata_port *ap); 845 void (*port_stop)(struct ata_port *ap);
844 void (*host_stop)(struct ata_host *host); 846 void (*host_stop)(struct ata_host *host);
845 847
846 #ifdef CONFIG_ATA_SFF 848 #ifdef CONFIG_ATA_SFF
847 /* 849 /*
848 * SFF / taskfile oriented ops 850 * SFF / taskfile oriented ops
849 */ 851 */
850 void (*sff_dev_select)(struct ata_port *ap, unsigned int device); 852 void (*sff_dev_select)(struct ata_port *ap, unsigned int device);
851 void (*sff_set_devctl)(struct ata_port *ap, u8 ctl); 853 void (*sff_set_devctl)(struct ata_port *ap, u8 ctl);
852 u8 (*sff_check_status)(struct ata_port *ap); 854 u8 (*sff_check_status)(struct ata_port *ap);
853 u8 (*sff_check_altstatus)(struct ata_port *ap); 855 u8 (*sff_check_altstatus)(struct ata_port *ap);
854 void (*sff_tf_load)(struct ata_port *ap, const struct ata_taskfile *tf); 856 void (*sff_tf_load)(struct ata_port *ap, const struct ata_taskfile *tf);
855 void (*sff_tf_read)(struct ata_port *ap, struct ata_taskfile *tf); 857 void (*sff_tf_read)(struct ata_port *ap, struct ata_taskfile *tf);
856 void (*sff_exec_command)(struct ata_port *ap, 858 void (*sff_exec_command)(struct ata_port *ap,
857 const struct ata_taskfile *tf); 859 const struct ata_taskfile *tf);
858 unsigned int (*sff_data_xfer)(struct ata_device *dev, 860 unsigned int (*sff_data_xfer)(struct ata_device *dev,
859 unsigned char *buf, unsigned int buflen, int rw); 861 unsigned char *buf, unsigned int buflen, int rw);
860 void (*sff_irq_on)(struct ata_port *); 862 void (*sff_irq_on)(struct ata_port *);
861 bool (*sff_irq_check)(struct ata_port *); 863 bool (*sff_irq_check)(struct ata_port *);
862 void (*sff_irq_clear)(struct ata_port *); 864 void (*sff_irq_clear)(struct ata_port *);
863 void (*sff_drain_fifo)(struct ata_queued_cmd *qc); 865 void (*sff_drain_fifo)(struct ata_queued_cmd *qc);
864 866
865 #ifdef CONFIG_ATA_BMDMA 867 #ifdef CONFIG_ATA_BMDMA
866 void (*bmdma_setup)(struct ata_queued_cmd *qc); 868 void (*bmdma_setup)(struct ata_queued_cmd *qc);
867 void (*bmdma_start)(struct ata_queued_cmd *qc); 869 void (*bmdma_start)(struct ata_queued_cmd *qc);
868 void (*bmdma_stop)(struct ata_queued_cmd *qc); 870 void (*bmdma_stop)(struct ata_queued_cmd *qc);
869 u8 (*bmdma_status)(struct ata_port *ap); 871 u8 (*bmdma_status)(struct ata_port *ap);
870 #endif /* CONFIG_ATA_BMDMA */ 872 #endif /* CONFIG_ATA_BMDMA */
871 #endif /* CONFIG_ATA_SFF */ 873 #endif /* CONFIG_ATA_SFF */
872 874
873 ssize_t (*em_show)(struct ata_port *ap, char *buf); 875 ssize_t (*em_show)(struct ata_port *ap, char *buf);
874 ssize_t (*em_store)(struct ata_port *ap, const char *message, 876 ssize_t (*em_store)(struct ata_port *ap, const char *message,
875 size_t size); 877 size_t size);
876 ssize_t (*sw_activity_show)(struct ata_device *dev, char *buf); 878 ssize_t (*sw_activity_show)(struct ata_device *dev, char *buf);
877 ssize_t (*sw_activity_store)(struct ata_device *dev, 879 ssize_t (*sw_activity_store)(struct ata_device *dev,
878 enum sw_activity val); 880 enum sw_activity val);
879 /* 881 /*
880 * Obsolete 882 * Obsolete
881 */ 883 */
882 void (*phy_reset)(struct ata_port *ap); 884 void (*phy_reset)(struct ata_port *ap);
883 void (*eng_timeout)(struct ata_port *ap); 885 void (*eng_timeout)(struct ata_port *ap);
884 886
885 /* 887 /*
886 * ->inherits must be the last field and all the preceding 888 * ->inherits must be the last field and all the preceding
887 * fields must be pointers. 889 * fields must be pointers.
888 */ 890 */
889 const struct ata_port_operations *inherits; 891 const struct ata_port_operations *inherits;
890 }; 892 };
891 893
892 struct ata_port_info { 894 struct ata_port_info {
893 unsigned long flags; 895 unsigned long flags;
894 unsigned long link_flags; 896 unsigned long link_flags;
895 unsigned long pio_mask; 897 unsigned long pio_mask;
896 unsigned long mwdma_mask; 898 unsigned long mwdma_mask;
897 unsigned long udma_mask; 899 unsigned long udma_mask;
898 struct ata_port_operations *port_ops; 900 struct ata_port_operations *port_ops;
899 void *private_data; 901 void *private_data;
900 }; 902 };
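A sketch of how a driver typically fills this structure in, building on the hypothetical foo_port_ops above; the flags and the ATA_PIO*/ATA_UDMA* mask shorthands come from the libata headers, while the concrete values are illustrative:

    static const struct ata_port_info foo_port_info = {
            .flags          = ATA_FLAG_SATA | ATA_FLAG_NCQ,
            .pio_mask       = ATA_PIO4,             /* PIO0-4 */
            .udma_mask      = ATA_UDMA6,            /* up to UDMA/133 */
            .port_ops       = &foo_port_ops,
    };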
901 903
902 struct ata_timing { 904 struct ata_timing {
903 unsigned short mode; /* ATA mode */ 905 unsigned short mode; /* ATA mode */
904 unsigned short setup; /* t1 */ 906 unsigned short setup; /* t1 */
905 unsigned short act8b; /* t2 for 8-bit I/O */ 907 unsigned short act8b; /* t2 for 8-bit I/O */
906 unsigned short rec8b; /* t2i for 8-bit I/O */ 908 unsigned short rec8b; /* t2i for 8-bit I/O */
907 unsigned short cyc8b; /* t0 for 8-bit I/O */ 909 unsigned short cyc8b; /* t0 for 8-bit I/O */
908 unsigned short active; /* t2 or tD */ 910 unsigned short active; /* t2 or tD */
909 unsigned short recover; /* t2i or tK */ 911 unsigned short recover; /* t2i or tK */
910 unsigned short dmack_hold; /* tj */ 912 unsigned short dmack_hold; /* tj */
911 unsigned short cycle; /* t0 */ 913 unsigned short cycle; /* t0 */
912 unsigned short udma; /* t2CYCTYP/2 */ 914 unsigned short udma; /* t2CYCTYP/2 */
913 }; 915 };
914 916
915 /* 917 /*
916 * Core layer - drivers/ata/libata-core.c 918 * Core layer - drivers/ata/libata-core.c
917 */ 919 */
918 extern const unsigned long sata_deb_timing_normal[]; 920 extern const unsigned long sata_deb_timing_normal[];
919 extern const unsigned long sata_deb_timing_hotplug[]; 921 extern const unsigned long sata_deb_timing_hotplug[];
920 extern const unsigned long sata_deb_timing_long[]; 922 extern const unsigned long sata_deb_timing_long[];
921 923
922 extern struct ata_port_operations ata_dummy_port_ops; 924 extern struct ata_port_operations ata_dummy_port_ops;
923 extern const struct ata_port_info ata_dummy_port_info; 925 extern const struct ata_port_info ata_dummy_port_info;
924 926
925 static inline const unsigned long * 927 static inline const unsigned long *
926 sata_ehc_deb_timing(struct ata_eh_context *ehc) 928 sata_ehc_deb_timing(struct ata_eh_context *ehc)
927 { 929 {
928 if (ehc->i.flags & ATA_EHI_HOTPLUGGED) 930 if (ehc->i.flags & ATA_EHI_HOTPLUGGED)
929 return sata_deb_timing_hotplug; 931 return sata_deb_timing_hotplug;
930 else 932 else
931 return sata_deb_timing_normal; 933 return sata_deb_timing_normal;
932 } 934 }
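The debounce-timing selector above is essentially the whole policy behind sata_std_hardreset(); a driver-local hardreset built on these helpers reduces to the following sketch, which mirrors the standard implementation (returning -EAGAIN when the link comes up online so EH follows up with a softreset):

    static int foo_hardreset(struct ata_link *link, unsigned int *class,
                             unsigned long deadline)
    {
            const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
            bool online;
            int rc;

            rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
            return online ? -EAGAIN : rc;
    }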
933 935
934 static inline int ata_port_is_dummy(struct ata_port *ap) 936 static inline int ata_port_is_dummy(struct ata_port *ap)
935 { 937 {
936 return ap->ops == &ata_dummy_port_ops; 938 return ap->ops == &ata_dummy_port_ops;
937 } 939 }
938 940
939 extern int sata_set_spd(struct ata_link *link); 941 extern int sata_set_spd(struct ata_link *link);
940 extern int ata_std_prereset(struct ata_link *link, unsigned long deadline); 942 extern int ata_std_prereset(struct ata_link *link, unsigned long deadline);
941 extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, 943 extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
942 int (*check_ready)(struct ata_link *link)); 944 int (*check_ready)(struct ata_link *link));
943 extern int sata_link_debounce(struct ata_link *link, 945 extern int sata_link_debounce(struct ata_link *link,
944 const unsigned long *params, unsigned long deadline); 946 const unsigned long *params, unsigned long deadline);
945 extern int sata_link_resume(struct ata_link *link, const unsigned long *params, 947 extern int sata_link_resume(struct ata_link *link, const unsigned long *params,
946 unsigned long deadline); 948 unsigned long deadline);
947 extern int sata_link_hardreset(struct ata_link *link, 949 extern int sata_link_hardreset(struct ata_link *link,
948 const unsigned long *timing, unsigned long deadline, 950 const unsigned long *timing, unsigned long deadline,
949 bool *online, int (*check_ready)(struct ata_link *)); 951 bool *online, int (*check_ready)(struct ata_link *));
950 extern int sata_std_hardreset(struct ata_link *link, unsigned int *class, 952 extern int sata_std_hardreset(struct ata_link *link, unsigned int *class,
951 unsigned long deadline); 953 unsigned long deadline);
952 extern void ata_std_postreset(struct ata_link *link, unsigned int *classes); 954 extern void ata_std_postreset(struct ata_link *link, unsigned int *classes);
953 955
954 extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports); 956 extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports);
955 extern struct ata_host *ata_host_alloc_pinfo(struct device *dev, 957 extern struct ata_host *ata_host_alloc_pinfo(struct device *dev,
956 const struct ata_port_info * const * ppi, int n_ports); 958 const struct ata_port_info * const * ppi, int n_ports);
957 extern int ata_slave_link_init(struct ata_port *ap); 959 extern int ata_slave_link_init(struct ata_port *ap);
958 extern int ata_host_start(struct ata_host *host); 960 extern int ata_host_start(struct ata_host *host);
959 extern int ata_host_register(struct ata_host *host, 961 extern int ata_host_register(struct ata_host *host,
960 struct scsi_host_template *sht); 962 struct scsi_host_template *sht);
961 extern int ata_host_activate(struct ata_host *host, int irq, 963 extern int ata_host_activate(struct ata_host *host, int irq,
962 irq_handler_t irq_handler, unsigned long irq_flags, 964 irq_handler_t irq_handler, unsigned long irq_flags,
963 struct scsi_host_template *sht); 965 struct scsi_host_template *sht);
964 extern void ata_host_detach(struct ata_host *host); 966 extern void ata_host_detach(struct ata_host *host);
965 extern void ata_host_init(struct ata_host *, struct device *, 967 extern void ata_host_init(struct ata_host *, struct device *,
966 unsigned long, struct ata_port_operations *); 968 unsigned long, struct ata_port_operations *);
967 extern int ata_scsi_detect(struct scsi_host_template *sht); 969 extern int ata_scsi_detect(struct scsi_host_template *sht);
968 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); 970 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
969 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)); 971 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
970 extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev, 972 extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev,
971 int cmd, void __user *arg); 973 int cmd, void __user *arg);
972 extern void ata_sas_port_destroy(struct ata_port *); 974 extern void ata_sas_port_destroy(struct ata_port *);
973 extern struct ata_port *ata_sas_port_alloc(struct ata_host *, 975 extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
974 struct ata_port_info *, struct Scsi_Host *); 976 struct ata_port_info *, struct Scsi_Host *);
975 extern int ata_sas_port_init(struct ata_port *); 977 extern int ata_sas_port_init(struct ata_port *);
976 extern int ata_sas_port_start(struct ata_port *ap); 978 extern int ata_sas_port_start(struct ata_port *ap);
977 extern void ata_sas_port_stop(struct ata_port *ap); 979 extern void ata_sas_port_stop(struct ata_port *ap);
978 extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *); 980 extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
979 extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), 981 extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
980 struct ata_port *ap); 982 struct ata_port *ap);
981 extern int sata_scr_valid(struct ata_link *link); 983 extern int sata_scr_valid(struct ata_link *link);
982 extern int sata_scr_read(struct ata_link *link, int reg, u32 *val); 984 extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
983 extern int sata_scr_write(struct ata_link *link, int reg, u32 val); 985 extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
984 extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val); 986 extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val);
985 extern bool ata_link_online(struct ata_link *link); 987 extern bool ata_link_online(struct ata_link *link);
986 extern bool ata_link_offline(struct ata_link *link); 988 extern bool ata_link_offline(struct ata_link *link);
987 #ifdef CONFIG_PM 989 #ifdef CONFIG_PM
988 extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg); 990 extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg);
989 extern void ata_host_resume(struct ata_host *host); 991 extern void ata_host_resume(struct ata_host *host);
990 #endif 992 #endif
991 extern int ata_ratelimit(void); 993 extern int ata_ratelimit(void);
992 extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, 994 extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
993 unsigned long interval, unsigned long timeout); 995 unsigned long interval, unsigned long timeout);
994 extern int atapi_cmd_type(u8 opcode); 996 extern int atapi_cmd_type(u8 opcode);
995 extern void ata_tf_to_fis(const struct ata_taskfile *tf, 997 extern void ata_tf_to_fis(const struct ata_taskfile *tf,
996 u8 pmp, int is_cmd, u8 *fis); 998 u8 pmp, int is_cmd, u8 *fis);
997 extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf); 999 extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
998 extern unsigned long ata_pack_xfermask(unsigned long pio_mask, 1000 extern unsigned long ata_pack_xfermask(unsigned long pio_mask,
999 unsigned long mwdma_mask, unsigned long udma_mask); 1001 unsigned long mwdma_mask, unsigned long udma_mask);
1000 extern void ata_unpack_xfermask(unsigned long xfer_mask, 1002 extern void ata_unpack_xfermask(unsigned long xfer_mask,
1001 unsigned long *pio_mask, unsigned long *mwdma_mask, 1003 unsigned long *pio_mask, unsigned long *mwdma_mask,
1002 unsigned long *udma_mask); 1004 unsigned long *udma_mask);
1003 extern u8 ata_xfer_mask2mode(unsigned long xfer_mask); 1005 extern u8 ata_xfer_mask2mode(unsigned long xfer_mask);
1004 extern unsigned long ata_xfer_mode2mask(u8 xfer_mode); 1006 extern unsigned long ata_xfer_mode2mask(u8 xfer_mode);
1005 extern int ata_xfer_mode2shift(unsigned long xfer_mode); 1007 extern int ata_xfer_mode2shift(unsigned long xfer_mode);
1006 extern const char *ata_mode_string(unsigned long xfer_mask); 1008 extern const char *ata_mode_string(unsigned long xfer_mask);
1007 extern unsigned long ata_id_xfermask(const u16 *id); 1009 extern unsigned long ata_id_xfermask(const u16 *id);
1008 extern int ata_std_qc_defer(struct ata_queued_cmd *qc); 1010 extern int ata_std_qc_defer(struct ata_queued_cmd *qc);
1009 extern void ata_noop_qc_prep(struct ata_queued_cmd *qc); 1011 extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
1010 extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 1012 extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
1011 unsigned int n_elem); 1013 unsigned int n_elem);
1012 extern unsigned int ata_dev_classify(const struct ata_taskfile *tf); 1014 extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
1013 extern void ata_dev_disable(struct ata_device *adev); 1015 extern void ata_dev_disable(struct ata_device *adev);
1014 extern void ata_id_string(const u16 *id, unsigned char *s, 1016 extern void ata_id_string(const u16 *id, unsigned char *s,
1015 unsigned int ofs, unsigned int len); 1017 unsigned int ofs, unsigned int len);
1016 extern void ata_id_c_string(const u16 *id, unsigned char *s, 1018 extern void ata_id_c_string(const u16 *id, unsigned char *s,
1017 unsigned int ofs, unsigned int len); 1019 unsigned int ofs, unsigned int len);
1018 extern unsigned int ata_do_dev_read_id(struct ata_device *dev, 1020 extern unsigned int ata_do_dev_read_id(struct ata_device *dev,
1019 struct ata_taskfile *tf, u16 *id); 1021 struct ata_taskfile *tf, u16 *id);
1020 extern void ata_qc_complete(struct ata_queued_cmd *qc); 1022 extern void ata_qc_complete(struct ata_queued_cmd *qc);
1021 extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active); 1023 extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active);
1022 extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, 1024 extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
1023 void (*done)(struct scsi_cmnd *)); 1025 void (*done)(struct scsi_cmnd *));
1024 extern int ata_std_bios_param(struct scsi_device *sdev, 1026 extern int ata_std_bios_param(struct scsi_device *sdev,
1025 struct block_device *bdev, 1027 struct block_device *bdev,
1026 sector_t capacity, int geom[]); 1028 sector_t capacity, int geom[]);
1027 extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev); 1029 extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev);
1028 extern int ata_scsi_slave_config(struct scsi_device *sdev); 1030 extern int ata_scsi_slave_config(struct scsi_device *sdev);
1029 extern void ata_scsi_slave_destroy(struct scsi_device *sdev); 1031 extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
1030 extern int ata_scsi_change_queue_depth(struct scsi_device *sdev, 1032 extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
1031 int queue_depth, int reason); 1033 int queue_depth, int reason);
1032 extern struct ata_device *ata_dev_pair(struct ata_device *adev); 1034 extern struct ata_device *ata_dev_pair(struct ata_device *adev);
1033 extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); 1035 extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
1034 1036
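The host registration helpers above compose into one canonical probe sequence: allocate ports from port_info templates, start them, then request the IRQ and register the SCSI hosts in a single activate call. A minimal sketch for a hypothetical platform driver follows; foo_interrupt is an assumed handler, foo_sht uses the sht initializers further down, and the early returns rely on the devres-managed allocation:

    static int foo_probe(struct platform_device *pdev)
    {
            const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
            struct ata_host *host;
            int rc;

            host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
            if (!host)
                    return -ENOMEM;

            rc = ata_host_start(host);      /* invokes ->port_start() */
            if (rc)
                    return rc;

            return ata_host_activate(host, platform_get_irq(pdev, 0),
                                     foo_interrupt, 0, &foo_sht);
    }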
1035 extern int ata_cable_40wire(struct ata_port *ap); 1037 extern int ata_cable_40wire(struct ata_port *ap);
1036 extern int ata_cable_80wire(struct ata_port *ap); 1038 extern int ata_cable_80wire(struct ata_port *ap);
1037 extern int ata_cable_sata(struct ata_port *ap); 1039 extern int ata_cable_sata(struct ata_port *ap);
1038 extern int ata_cable_ignore(struct ata_port *ap); 1040 extern int ata_cable_ignore(struct ata_port *ap);
1039 extern int ata_cable_unknown(struct ata_port *ap); 1041 extern int ata_cable_unknown(struct ata_port *ap);
1040 1042
1041 /* Timing helpers */ 1043 /* Timing helpers */
1042 extern unsigned int ata_pio_need_iordy(const struct ata_device *); 1044 extern unsigned int ata_pio_need_iordy(const struct ata_device *);
1043 extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode); 1045 extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode);
1044 extern int ata_timing_compute(struct ata_device *, unsigned short, 1046 extern int ata_timing_compute(struct ata_device *, unsigned short,
1045 struct ata_timing *, int, int); 1047 struct ata_timing *, int, int);
1046 extern void ata_timing_merge(const struct ata_timing *, 1048 extern void ata_timing_merge(const struct ata_timing *,
1047 const struct ata_timing *, struct ata_timing *, 1049 const struct ata_timing *, struct ata_timing *,
1048 unsigned int); 1050 unsigned int);
1049 extern u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle); 1051 extern u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle);
1050 1052
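A sketch of the intended use of the timing helpers in a ->set_piomode() hook: compute timings for the requested mode, merge with the channel partner where the two devices share wiring, and program the result. The 33 MHz clock value and the foo_program_timings() register writer are assumptions; in-tree PATA drivers follow this same compute/merge shape:

    static void foo_set_piomode(struct ata_port *ap, struct ata_device *adev)
    {
            struct ata_timing t, p;
            struct ata_device *pair = ata_dev_pair(adev);
            int T = 1000000000 / 33333;     /* clock period for a 33 MHz bus */

            ata_timing_compute(adev, adev->pio_mode, &t, T, T);
            if (pair && !ata_timing_compute(pair, pair->pio_mode, &p, T, T))
                    ata_timing_merge(&p, &t, &t, ATA_TIMING_ALL);

            foo_program_timings(ap, adev, &t);      /* hypothetical */
    }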
1051 /* PCI */ 1053 /* PCI */
1052 #ifdef CONFIG_PCI 1054 #ifdef CONFIG_PCI
1053 struct pci_dev; 1055 struct pci_dev;
1054 1056
1055 struct pci_bits { 1057 struct pci_bits {
1056 unsigned int reg; /* PCI config register to read */ 1058 unsigned int reg; /* PCI config register to read */
1057 unsigned int width; /* 1 (8 bit), 2 (16 bit), 4 (32 bit) */ 1059 unsigned int width; /* 1 (8 bit), 2 (16 bit), 4 (32 bit) */
1058 unsigned long mask; 1060 unsigned long mask;
1059 unsigned long val; 1061 unsigned long val;
1060 }; 1062 };
1061 1063
1062 extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits); 1064 extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
1063 extern void ata_pci_remove_one(struct pci_dev *pdev); 1065 extern void ata_pci_remove_one(struct pci_dev *pdev);
1064 1066
1065 #ifdef CONFIG_PM 1067 #ifdef CONFIG_PM
1066 extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg); 1068 extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg);
1067 extern int __must_check ata_pci_device_do_resume(struct pci_dev *pdev); 1069 extern int __must_check ata_pci_device_do_resume(struct pci_dev *pdev);
1068 extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); 1070 extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
1069 extern int ata_pci_device_resume(struct pci_dev *pdev); 1071 extern int ata_pci_device_resume(struct pci_dev *pdev);
1070 #endif /* CONFIG_PM */ 1072 #endif /* CONFIG_PM */
1071 #endif /* CONFIG_PCI */ 1073 #endif /* CONFIG_PCI */
1072 1074
1073 /* 1075 /*
1074 * ACPI - drivers/ata/libata-acpi.c 1076 * ACPI - drivers/ata/libata-acpi.c
1075 */ 1077 */
1076 #ifdef CONFIG_ATA_ACPI 1078 #ifdef CONFIG_ATA_ACPI
1077 static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap) 1079 static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap)
1078 { 1080 {
1079 if (ap->pflags & ATA_PFLAG_INIT_GTM_VALID) 1081 if (ap->pflags & ATA_PFLAG_INIT_GTM_VALID)
1080 return &ap->__acpi_init_gtm; 1082 return &ap->__acpi_init_gtm;
1081 return NULL; 1083 return NULL;
1082 } 1084 }
1083 int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm); 1085 int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm);
1084 int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *stm); 1086 int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *stm);
1085 unsigned long ata_acpi_gtm_xfermask(struct ata_device *dev, 1087 unsigned long ata_acpi_gtm_xfermask(struct ata_device *dev,
1086 const struct ata_acpi_gtm *gtm); 1088 const struct ata_acpi_gtm *gtm);
1087 int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm); 1089 int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm);
1088 #else 1090 #else
1089 static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap) 1091 static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap)
1090 { 1092 {
1091 return NULL; 1093 return NULL;
1092 } 1094 }
1093 1095
1094 static inline int ata_acpi_stm(const struct ata_port *ap, 1096 static inline int ata_acpi_stm(const struct ata_port *ap,
1095 struct ata_acpi_gtm *stm) 1097 struct ata_acpi_gtm *stm)
1096 { 1098 {
1097 return -ENOSYS; 1099 return -ENOSYS;
1098 } 1100 }
1099 1101
1100 static inline int ata_acpi_gtm(const struct ata_port *ap, 1102 static inline int ata_acpi_gtm(const struct ata_port *ap,
1101 struct ata_acpi_gtm *stm) 1103 struct ata_acpi_gtm *stm)
1102 { 1104 {
1103 return -ENOSYS; 1105 return -ENOSYS;
1104 } 1106 }
1105 1107
1106 static inline unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev, 1108 static inline unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev,
1107 const struct ata_acpi_gtm *gtm) 1109 const struct ata_acpi_gtm *gtm)
1108 { 1110 {
1109 return 0; 1111 return 0;
1110 } 1112 }
1111 1113
1112 static inline int ata_acpi_cbl_80wire(struct ata_port *ap, 1114 static inline int ata_acpi_cbl_80wire(struct ata_port *ap,
1113 const struct ata_acpi_gtm *gtm) 1115 const struct ata_acpi_gtm *gtm)
1114 { 1116 {
1115 return 0; 1117 return 0;
1116 } 1118 }
1117 #endif 1119 #endif
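One hedged sketch of how these fit together (assuming, as the -ENOSYS stubs suggest, that ata_acpi_gtm() returns 0 on success and ap is in scope): compare the live _GTM against the snapshot taken at init to detect firmware reprogramming the channel, e.g. across suspend/resume:

    const struct ata_acpi_gtm *igtm = ata_acpi_init_gtm(ap);
    struct ata_acpi_gtm cur;

    if (igtm && ata_acpi_gtm(ap, &cur) == 0 &&
        memcmp(igtm, &cur, sizeof(cur)) != 0)
            ata_port_printk(ap, KERN_DEBUG, "ACPI timings changed\n");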
1118 1120
1119 /* 1121 /*
1120 * EH - drivers/ata/libata-eh.c 1122 * EH - drivers/ata/libata-eh.c
1121 */ 1123 */
1122 extern void ata_port_schedule_eh(struct ata_port *ap); 1124 extern void ata_port_schedule_eh(struct ata_port *ap);
1123 extern int ata_link_abort(struct ata_link *link); 1125 extern int ata_link_abort(struct ata_link *link);
1124 extern int ata_port_abort(struct ata_port *ap); 1126 extern int ata_port_abort(struct ata_port *ap);
1125 extern int ata_port_freeze(struct ata_port *ap); 1127 extern int ata_port_freeze(struct ata_port *ap);
1126 extern int sata_async_notification(struct ata_port *ap); 1128 extern int sata_async_notification(struct ata_port *ap);
1127 1129
1128 extern void ata_eh_freeze_port(struct ata_port *ap); 1130 extern void ata_eh_freeze_port(struct ata_port *ap);
1129 extern void ata_eh_thaw_port(struct ata_port *ap); 1131 extern void ata_eh_thaw_port(struct ata_port *ap);
1130 1132
1131 extern void ata_eh_qc_complete(struct ata_queued_cmd *qc); 1133 extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
1132 extern void ata_eh_qc_retry(struct ata_queued_cmd *qc); 1134 extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
1133 extern void ata_eh_analyze_ncq_error(struct ata_link *link); 1135 extern void ata_eh_analyze_ncq_error(struct ata_link *link);
1134 1136
1135 extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, 1137 extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
1136 ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 1138 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
1137 ata_postreset_fn_t postreset); 1139 ata_postreset_fn_t postreset);
1138 extern void ata_std_error_handler(struct ata_port *ap); 1140 extern void ata_std_error_handler(struct ata_port *ap);
1139 1141
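Most drivers point ->error_handler at ata_std_error_handler() (or the SFF/PMP variants elsewhere in this header) and express their policy purely through the prereset/softreset/hardreset/postreset slots of ata_port_operations. A driver that does need its own handler typically just wraps ata_do_eh() with its chosen reset methods, as in this sketch (softreset omitted by passing NULL):

    static void foo_error_handler(struct ata_port *ap)
    {
            ata_do_eh(ap, ata_std_prereset, NULL, sata_std_hardreset,
                      ata_std_postreset);
    }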
1140 /* 1142 /*
1141 * Base operations to inherit from and initializers for sht 1143 * Base operations to inherit from and initializers for sht
1142 * 1144 *
1143 * Operations 1145 * Operations
1144 * 1146 *
1145 * base : Common to all libata drivers. 1147 * base : Common to all libata drivers.
1146 * sata : SATA controllers w/ native interface. 1148 * sata : SATA controllers w/ native interface.
1147 * pmp : SATA controllers w/ PMP support. 1149 * pmp : SATA controllers w/ PMP support.
1148 * sff : SFF ATA controllers w/o BMDMA support. 1150 * sff : SFF ATA controllers w/o BMDMA support.
1149 * bmdma : SFF ATA controllers w/ BMDMA support. 1151 * bmdma : SFF ATA controllers w/ BMDMA support.
1150 * 1152 *
1151 * sht initializers 1153 * sht initializers
1152 * 1154 *
1153 * BASE : Common to all libata drivers. The user must set 1155 * BASE : Common to all libata drivers. The user must set
1154 * sg_tablesize and dma_boundary. 1156 * sg_tablesize and dma_boundary.
1155 * PIO : SFF ATA controllers w/ only PIO support. 1157 * PIO : SFF ATA controllers w/ only PIO support.
1156 * BMDMA : SFF ATA controllers w/ BMDMA support. sg_tablesize and 1158 * BMDMA : SFF ATA controllers w/ BMDMA support. sg_tablesize and
1157 * dma_boundary are set to BMDMA limits. 1159 * dma_boundary are set to BMDMA limits.
1158 * NCQ : SATA controllers supporting NCQ. The user must set 1160 * NCQ : SATA controllers supporting NCQ. The user must set
1159 * sg_tablesize, dma_boundary and can_queue. 1161 * sg_tablesize, dma_boundary and can_queue.
1160 */ 1162 */
1161 extern const struct ata_port_operations ata_base_port_ops; 1163 extern const struct ata_port_operations ata_base_port_ops;
1162 extern const struct ata_port_operations sata_port_ops; 1164 extern const struct ata_port_operations sata_port_ops;
1163 extern struct device_attribute *ata_common_sdev_attrs[]; 1165 extern struct device_attribute *ata_common_sdev_attrs[];
1164 1166
1165 #define ATA_BASE_SHT(drv_name) \ 1167 #define ATA_BASE_SHT(drv_name) \
1166 .module = THIS_MODULE, \ 1168 .module = THIS_MODULE, \
1167 .name = drv_name, \ 1169 .name = drv_name, \
1168 .ioctl = ata_scsi_ioctl, \ 1170 .ioctl = ata_scsi_ioctl, \
1169 .queuecommand = ata_scsi_queuecmd, \ 1171 .queuecommand = ata_scsi_queuecmd, \
1170 .can_queue = ATA_DEF_QUEUE, \ 1172 .can_queue = ATA_DEF_QUEUE, \
1171 .this_id = ATA_SHT_THIS_ID, \ 1173 .this_id = ATA_SHT_THIS_ID, \
1172 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, \ 1174 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, \
1173 .emulated = ATA_SHT_EMULATED, \ 1175 .emulated = ATA_SHT_EMULATED, \
1174 .use_clustering = ATA_SHT_USE_CLUSTERING, \ 1176 .use_clustering = ATA_SHT_USE_CLUSTERING, \
1175 .proc_name = drv_name, \ 1177 .proc_name = drv_name, \
1176 .slave_configure = ata_scsi_slave_config, \ 1178 .slave_configure = ata_scsi_slave_config, \
1177 .slave_destroy = ata_scsi_slave_destroy, \ 1179 .slave_destroy = ata_scsi_slave_destroy, \
1178 .bios_param = ata_std_bios_param, \ 1180 .bios_param = ata_std_bios_param, \
1179 .unlock_native_capacity = ata_scsi_unlock_native_capacity, \ 1181 .unlock_native_capacity = ata_scsi_unlock_native_capacity, \
1180 .sdev_attrs = ata_common_sdev_attrs 1182 .sdev_attrs = ata_common_sdev_attrs
1181 1183
1182 #define ATA_NCQ_SHT(drv_name) \ 1184 #define ATA_NCQ_SHT(drv_name) \
1183 ATA_BASE_SHT(drv_name), \ 1185 ATA_BASE_SHT(drv_name), \
1184 .change_queue_depth = ata_scsi_change_queue_depth 1186 .change_queue_depth = ata_scsi_change_queue_depth
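Combined with the comment block above, an NCQ-capable driver's host template reduces to the initializer plus the fields NCQ leaves to the user; the concrete limits below are illustrative only (ahci, for instance, derives its values from controller capabilities):

    static struct scsi_host_template foo_sht = {
            ATA_NCQ_SHT("foo"),
            .can_queue      = ATA_MAX_QUEUE - 1,    /* reserve the internal tag */
            .sg_tablesize   = 128,
            .dma_boundary   = 0xffffffffUL,
    };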
1185 1187
1186 /* 1188 /*
1187 * PMP helpers 1189 * PMP helpers
1188 */ 1190 */
1189 #ifdef CONFIG_SATA_PMP 1191 #ifdef CONFIG_SATA_PMP
1190 static inline bool sata_pmp_supported(struct ata_port *ap) 1192 static inline bool sata_pmp_supported(struct ata_port *ap)
1191 { 1193 {
1192 return ap->flags & ATA_FLAG_PMP; 1194 return ap->flags & ATA_FLAG_PMP;
1193 } 1195 }
1194 1196
1195 static inline bool sata_pmp_attached(struct ata_port *ap) 1197 static inline bool sata_pmp_attached(struct ata_port *ap)
1196 { 1198 {
1197 return ap->nr_pmp_links != 0; 1199 return ap->nr_pmp_links != 0;
1198 } 1200 }
1199 1201
1200 static inline int ata_is_host_link(const struct ata_link *link) 1202 static inline int ata_is_host_link(const struct ata_link *link)
1201 { 1203 {
1202 return link == &link->ap->link || link == link->ap->slave_link; 1204 return link == &link->ap->link || link == link->ap->slave_link;
1203 } 1205 }
1204 #else /* CONFIG_SATA_PMP */ 1206 #else /* CONFIG_SATA_PMP */
1205 static inline bool sata_pmp_supported(struct ata_port *ap) 1207 static inline bool sata_pmp_supported(struct ata_port *ap)
1206 { 1208 {
1207 return false; 1209 return false;
1208 } 1210 }
1209 1211
1210 static inline bool sata_pmp_attached(struct ata_port *ap) 1212 static inline bool sata_pmp_attached(struct ata_port *ap)
1211 { 1213 {
1212 return false; 1214 return false;
1213 } 1215 }
1214 1216
1215 static inline int ata_is_host_link(const struct ata_link *link) 1217 static inline int ata_is_host_link(const struct ata_link *link)
1216 { 1218 {
1217 return 1; 1219 return 1;
1218 } 1220 }
1219 #endif /* CONFIG_SATA_PMP */ 1221 #endif /* CONFIG_SATA_PMP */
1220 1222
1221 static inline int sata_srst_pmp(struct ata_link *link) 1223 static inline int sata_srst_pmp(struct ata_link *link)
1222 { 1224 {
1223 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) 1225 if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
1224 return SATA_PMP_CTRL_PORT; 1226 return SATA_PMP_CTRL_PORT;
1225 return link->pmp; 1227 return link->pmp;
1226 } 1228 }
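The helper above answers the one question a softreset on a PMP-capable controller has to ask: whether to address the PMP control port (15) or the device's own PMP number. A driver softreset then reads as in this sketch, with foo_do_softreset() standing in for the controller-specific reset sequence (ahci is structured this way):

    static int foo_softreset(struct ata_link *link, unsigned int *class,
                             unsigned long deadline)
    {
            int pmp = sata_srst_pmp(link);

            return foo_do_softreset(link, class, pmp, deadline);
    }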
1227 1229
1228 /* 1230 /*
1229 * printk helpers 1231 * printk helpers
1230 */ 1232 */
1231 #define ata_port_printk(ap, lv, fmt, args...) \ 1233 #define ata_port_printk(ap, lv, fmt, args...) \
1232 printk("%sata%u: "fmt, lv, (ap)->print_id , ##args) 1234 printk("%sata%u: "fmt, lv, (ap)->print_id , ##args)
1233 1235
1234 #define ata_link_printk(link, lv, fmt, args...) do { \ 1236 #define ata_link_printk(link, lv, fmt, args...) do { \
1235 if (sata_pmp_attached((link)->ap) || (link)->ap->slave_link) \ 1237 if (sata_pmp_attached((link)->ap) || (link)->ap->slave_link) \
1236 printk("%sata%u.%02u: "fmt, lv, (link)->ap->print_id, \ 1238 printk("%sata%u.%02u: "fmt, lv, (link)->ap->print_id, \
1237 (link)->pmp , ##args); \ 1239 (link)->pmp , ##args); \
1238 else \ 1240 else \
1239 printk("%sata%u: "fmt, lv, (link)->ap->print_id , ##args); \ 1241 printk("%sata%u: "fmt, lv, (link)->ap->print_id , ##args); \
1240 } while(0) 1242 } while(0)
1241 1243
1242 #define ata_dev_printk(dev, lv, fmt, args...) \ 1244 #define ata_dev_printk(dev, lv, fmt, args...) \
1243 printk("%sata%u.%02u: "fmt, lv, (dev)->link->ap->print_id, \ 1245 printk("%sata%u.%02u: "fmt, lv, (dev)->link->ap->print_id, \
1244 (dev)->link->pmp + (dev)->devno , ##args) 1246 (dev)->link->pmp + (dev)->devno , ##args)
1245 1247
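Usage is plain printk() with the "ataP[.LL]:" topology prefix handled for you; the log level goes in as the first string argument. Snippets only, with ap/link/dev, buf and err_mask assumed in scope:

    ata_port_printk(ap, KERN_INFO, "slave link attached\n");
    ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n", buf);
    ata_dev_printk(dev, KERN_ERR, "failed to IDENTIFY (err_mask=0x%x)\n",
                   err_mask);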
1246 /* 1248 /*
1247 * ata_eh_info helpers 1249 * ata_eh_info helpers
1248 */ 1250 */
1249 extern void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) 1251 extern void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
1250 __attribute__ ((format (printf, 2, 3))); 1252 __attribute__ ((format (printf, 2, 3)));
1251 extern void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) 1253 extern void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
1252 __attribute__ ((format (printf, 2, 3))); 1254 __attribute__ ((format (printf, 2, 3)));
1253 extern void ata_ehi_clear_desc(struct ata_eh_info *ehi); 1255 extern void ata_ehi_clear_desc(struct ata_eh_info *ehi);
1254 1256
1255 static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi) 1257 static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi)
1256 { 1258 {
1257 ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1; 1259 ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
1258 ehi->flags |= ATA_EHI_HOTPLUGGED; 1260 ehi->flags |= ATA_EHI_HOTPLUGGED;
1259 ehi->action |= ATA_EH_RESET | ATA_EH_ENABLE_LINK; 1261 ehi->action |= ATA_EH_RESET | ATA_EH_ENABLE_LINK;
1260 ehi->err_mask |= AC_ERR_ATA_BUS; 1262 ehi->err_mask |= AC_ERR_ATA_BUS;
1261 } 1263 }
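A typical caller is a driver interrupt handler that has just observed a PHY ready-status change: record a description, mark the EH info hotplugged, and freeze the port so EH takes over. A sketch, assuming ap is in scope:

    struct ata_eh_info *ehi = &ap->link.eh_info;

    ata_ehi_clear_desc(ehi);
    ata_ehi_push_desc(ehi, "PHY RDY changed");
    ata_ehi_hotplugged(ehi);
    ata_port_freeze(ap);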
1262 1264
1263 /* 1265 /*
1264 * port description helpers 1266 * port description helpers
1265 */ 1267 */
1266 extern void ata_port_desc(struct ata_port *ap, const char *fmt, ...) 1268 extern void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
1267 __attribute__ ((format (printf, 2, 3))); 1269 __attribute__ ((format (printf, 2, 3)));
1268 #ifdef CONFIG_PCI 1270 #ifdef CONFIG_PCI
1269 extern void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset, 1271 extern void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
1270 const char *name); 1272 const char *name);
1271 #endif 1273 #endif
1272 1274
1273 static inline unsigned int ata_tag_valid(unsigned int tag) 1275 static inline unsigned int ata_tag_valid(unsigned int tag)
1274 { 1276 {
1275 return (tag < ATA_MAX_QUEUE) ? 1 : 0; 1277 return (tag < ATA_MAX_QUEUE) ? 1 : 0;
1276 } 1278 }
1277 1279
1278 static inline unsigned int ata_tag_internal(unsigned int tag) 1280 static inline unsigned int ata_tag_internal(unsigned int tag)
1279 { 1281 {
1280 return tag == ATA_TAG_INTERNAL; 1282 return tag == ATA_TAG_INTERNAL;
1281 } 1283 }
1282 1284
1283 /* 1285 /*
1284 * device helpers 1286 * device helpers
1285 */ 1287 */
1286 static inline unsigned int ata_class_enabled(unsigned int class) 1288 static inline unsigned int ata_class_enabled(unsigned int class)
1287 { 1289 {
1288 return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI || 1290 return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI ||
1289 class == ATA_DEV_PMP || class == ATA_DEV_SEMB; 1291 class == ATA_DEV_PMP || class == ATA_DEV_SEMB;
1290 } 1292 }
1291 1293
1292 static inline unsigned int ata_class_disabled(unsigned int class) 1294 static inline unsigned int ata_class_disabled(unsigned int class)
1293 { 1295 {
1294 return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP || 1296 return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP ||
1295 class == ATA_DEV_PMP_UNSUP || class == ATA_DEV_SEMB_UNSUP; 1297 class == ATA_DEV_PMP_UNSUP || class == ATA_DEV_SEMB_UNSUP;
1296 } 1298 }
1297 1299
1298 static inline unsigned int ata_class_absent(unsigned int class) 1300 static inline unsigned int ata_class_absent(unsigned int class)
1299 { 1301 {
1300 return !ata_class_enabled(class) && !ata_class_disabled(class); 1302 return !ata_class_enabled(class) && !ata_class_disabled(class);
1301 } 1303 }
1302 1304
1303 static inline unsigned int ata_dev_enabled(const struct ata_device *dev) 1305 static inline unsigned int ata_dev_enabled(const struct ata_device *dev)
1304 { 1306 {
1305 return ata_class_enabled(dev->class); 1307 return ata_class_enabled(dev->class);
1306 } 1308 }
1307 1309
1308 static inline unsigned int ata_dev_disabled(const struct ata_device *dev) 1310 static inline unsigned int ata_dev_disabled(const struct ata_device *dev)
1309 { 1311 {
1310 return ata_class_disabled(dev->class); 1312 return ata_class_disabled(dev->class);
1311 } 1313 }
1312 1314
1313 static inline unsigned int ata_dev_absent(const struct ata_device *dev) 1315 static inline unsigned int ata_dev_absent(const struct ata_device *dev)
1314 { 1316 {
1315 return ata_class_absent(dev->class); 1317 return ata_class_absent(dev->class);
1316 } 1318 }
1317 1319
1318 /* 1320 /*
1319 * link helpers 1321 * link helpers
1320 */ 1322 */
1321 static inline int ata_link_max_devices(const struct ata_link *link) 1323 static inline int ata_link_max_devices(const struct ata_link *link)
1322 { 1324 {
1323 if (ata_is_host_link(link) && link->ap->flags & ATA_FLAG_SLAVE_POSS) 1325 if (ata_is_host_link(link) && link->ap->flags & ATA_FLAG_SLAVE_POSS)
1324 return 2; 1326 return 2;
1325 return 1; 1327 return 1;
1326 } 1328 }
1327 1329
1328 static inline int ata_link_active(struct ata_link *link) 1330 static inline int ata_link_active(struct ata_link *link)
1329 { 1331 {
1330 return ata_tag_valid(link->active_tag) || link->sactive; 1332 return ata_tag_valid(link->active_tag) || link->sactive;
1331 } 1333 }
1332 1334
1333 /* 1335 /*
1334 * Iterators 1336 * Iterators
1335 * 1337 *
1336 * ATA_LITER_* constants are used to select link iteration mode and 1338 * ATA_LITER_* constants are used to select link iteration mode and
1337 * ATA_DITER_* device iteration mode. 1339 * ATA_DITER_* device iteration mode.
1338 * 1340 *
1339 * For a custom iteration directly using ata_{link|dev}_next(), if 1341 * For a custom iteration directly using ata_{link|dev}_next(), if
1340 * @link or @dev, respectively, is NULL, the first element is 1342 * @link or @dev, respectively, is NULL, the first element is
1341 * returned. @dev and @link can be any valid device or link and the 1343 * returned. @dev and @link can be any valid device or link and the
1342 * next element according to the iteration mode will be returned. 1344 * next element according to the iteration mode will be returned.
1343 * After the last element, NULL is returned. 1345 * After the last element, NULL is returned.
1344 */ 1346 */
1345 enum ata_link_iter_mode { 1347 enum ata_link_iter_mode {
1346 ATA_LITER_EDGE, /* if present, PMP links only; otherwise, 1348 ATA_LITER_EDGE, /* if present, PMP links only; otherwise,
1347 * host link. no slave link */ 1349 * host link. no slave link */
1348 ATA_LITER_HOST_FIRST, /* host link followed by PMP or slave links */ 1350 ATA_LITER_HOST_FIRST, /* host link followed by PMP or slave links */
1349 ATA_LITER_PMP_FIRST, /* PMP links followed by host link, 1351 ATA_LITER_PMP_FIRST, /* PMP links followed by host link,
1350 * slave link still comes after host link */ 1352 * slave link still comes after host link */
1351 }; 1353 };
1352 1354
1353 enum ata_dev_iter_mode { 1355 enum ata_dev_iter_mode {
1354 ATA_DITER_ENABLED, 1356 ATA_DITER_ENABLED,
1355 ATA_DITER_ENABLED_REVERSE, 1357 ATA_DITER_ENABLED_REVERSE,
1356 ATA_DITER_ALL, 1358 ATA_DITER_ALL,
1357 ATA_DITER_ALL_REVERSE, 1359 ATA_DITER_ALL_REVERSE,
1358 }; 1360 };
1359 1361
1360 extern struct ata_link *ata_link_next(struct ata_link *link, 1362 extern struct ata_link *ata_link_next(struct ata_link *link,
1361 struct ata_port *ap, 1363 struct ata_port *ap,
1362 enum ata_link_iter_mode mode); 1364 enum ata_link_iter_mode mode);
1363 1365
1364 extern struct ata_device *ata_dev_next(struct ata_device *dev, 1366 extern struct ata_device *ata_dev_next(struct ata_device *dev,
1365 struct ata_link *link, 1367 struct ata_link *link,
1366 enum ata_dev_iter_mode mode); 1368 enum ata_dev_iter_mode mode);
1367 1369
1368 /* 1370 /*
1369 * Shortcut notation for iterations 1371 * Shortcut notation for iterations
1370 * 1372 *
1371 * ata_for_each_link() iterates over each link of @ap according to 1373 * ata_for_each_link() iterates over each link of @ap according to
1372 * @mode. @link points to the current link in the loop. @link is 1374 * @mode. @link points to the current link in the loop. @link is
1373 * NULL after loop termination. ata_for_each_dev() works the same way 1375 * NULL after loop termination. ata_for_each_dev() works the same way
1374 * except that it iterates over each device of @link. 1376 * except that it iterates over each device of @link.
1375 * 1377 *
1376 * Note that the mode prefixes ATA_{L|D}ITER_ shouldn't need to be 1378 * Note that the mode prefixes ATA_{L|D}ITER_ shouldn't need to be
1377 * specified when using the following shorthand notations. Only the 1379 * specified when using the following shorthand notations. Only the
1378 * mode itself (EDGE, HOST_FIRST, ENABLED, etc...) should be 1380 * mode itself (EDGE, HOST_FIRST, ENABLED, etc...) should be
1379 * specified. This not only increases brevity but also makes it 1381 * specified. This not only increases brevity but also makes it
1380 * impossible to use ATA_LITER_* for device iteration or vice-versa. 1382 * impossible to use ATA_LITER_* for device iteration or vice-versa.
1381 */ 1383 */
1382 #define ata_for_each_link(link, ap, mode) \ 1384 #define ata_for_each_link(link, ap, mode) \
1383 for ((link) = ata_link_next(NULL, (ap), ATA_LITER_##mode); (link); \ 1385 for ((link) = ata_link_next(NULL, (ap), ATA_LITER_##mode); (link); \
1384 (link) = ata_link_next((link), (ap), ATA_LITER_##mode)) 1386 (link) = ata_link_next((link), (ap), ATA_LITER_##mode))
1385 1387
1386 #define ata_for_each_dev(dev, link, mode) \ 1388 #define ata_for_each_dev(dev, link, mode) \
1387 for ((dev) = ata_dev_next(NULL, (link), ATA_DITER_##mode); (dev); \ 1389 for ((dev) = ata_dev_next(NULL, (link), ATA_DITER_##mode); (dev); \
1388 (dev) = ata_dev_next((dev), (link), ATA_DITER_##mode)) 1390 (dev) = ata_dev_next((dev), (link), ATA_DITER_##mode))
1389 1391
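For example, walking every enabled device behind a port, including devices hanging off PMP fan-out links, takes two nested shorthands (sketch; ap assumed in scope):

    struct ata_link *link;
    struct ata_device *dev;

    ata_for_each_link(link, ap, EDGE)
            ata_for_each_dev(dev, link, ENABLED)
                    dev->flags |= ATA_DFLAG_NCQ_OFF;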
1390 /** 1392 /**
1391 * ata_ncq_enabled - Test whether NCQ is enabled 1393 * ata_ncq_enabled - Test whether NCQ is enabled
1392 * @dev: ATA device to test for 1394 * @dev: ATA device to test for
1393 * 1395 *
1394 * LOCKING: 1396 * LOCKING:
1395 * spin_lock_irqsave(host lock) 1397 * spin_lock_irqsave(host lock)
1396 * 1398 *
1397 * RETURNS: 1399 * RETURNS:
1398 * 1 if NCQ is enabled for @dev, 0 otherwise. 1400 * 1 if NCQ is enabled for @dev, 0 otherwise.
1399 */ 1401 */
1400 static inline int ata_ncq_enabled(struct ata_device *dev) 1402 static inline int ata_ncq_enabled(struct ata_device *dev)
1401 { 1403 {
1402 return (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF | 1404 return (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
1403 ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ; 1405 ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ;
1404 } 1406 }
1405 1407
1406 static inline void ata_qc_set_polling(struct ata_queued_cmd *qc) 1408 static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
1407 { 1409 {
1408 qc->tf.ctl |= ATA_NIEN; 1410 qc->tf.ctl |= ATA_NIEN;
1409 } 1411 }
1410 1412
1411 static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap, 1413 static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
1412 unsigned int tag) 1414 unsigned int tag)
1413 { 1415 {
1414 if (likely(ata_tag_valid(tag))) 1416 if (likely(ata_tag_valid(tag)))
1415 return &ap->qcmd[tag]; 1417 return &ap->qcmd[tag];
1416 return NULL; 1418 return NULL;
1417 } 1419 }
1418 1420
1419 static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap, 1421 static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap,
1420 unsigned int tag) 1422 unsigned int tag)
1421 { 1423 {
1422 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 1424 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
1423 1425
1424 if (unlikely(!qc) || !ap->ops->error_handler) 1426 if (unlikely(!qc) || !ap->ops->error_handler)
1425 return qc; 1427 return qc;
1426 1428
1427 if ((qc->flags & (ATA_QCFLAG_ACTIVE | 1429 if ((qc->flags & (ATA_QCFLAG_ACTIVE |
1428 ATA_QCFLAG_FAILED)) == ATA_QCFLAG_ACTIVE) 1430 ATA_QCFLAG_FAILED)) == ATA_QCFLAG_ACTIVE)
1429 return qc; 1431 return qc;
1430 1432
1431 return NULL; 1433 return NULL;
1432 } 1434 }
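The distinction between the two lookups matters in interrupt context: __ata_qc_from_tag() returns whatever occupies the slot, while ata_qc_from_tag() filters out commands EH has already claimed. A typical non-NCQ interrupt path therefore looks like this sketch (ap and handled assumed in scope):

    struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

    if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
            handled |= ata_sff_port_intr(ap, qc);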
1433 1435
1434 static inline unsigned int ata_qc_raw_nbytes(struct ata_queued_cmd *qc) 1436 static inline unsigned int ata_qc_raw_nbytes(struct ata_queued_cmd *qc)
1435 { 1437 {
1436 return qc->nbytes - min(qc->extrabytes, qc->nbytes); 1438 return qc->nbytes - min(qc->extrabytes, qc->nbytes);
1437 } 1439 }
1438 1440
1439 static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf) 1441 static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf)
1440 { 1442 {
1441 memset(tf, 0, sizeof(*tf)); 1443 memset(tf, 0, sizeof(*tf));
1442 1444
1443 #ifdef CONFIG_ATA_SFF 1445 #ifdef CONFIG_ATA_SFF
1444 tf->ctl = dev->link->ap->ctl; 1446 tf->ctl = dev->link->ap->ctl;
1445 #else 1447 #else
1446 tf->ctl = ATA_DEVCTL_OBS; 1448 tf->ctl = ATA_DEVCTL_OBS;
1447 #endif 1449 #endif
1448 if (dev->devno == 0) 1450 if (dev->devno == 0)
1449 tf->device = ATA_DEVICE_OBS; 1451 tf->device = ATA_DEVICE_OBS;
1450 else 1452 else
1451 tf->device = ATA_DEVICE_OBS | ATA_DEV1; 1453 tf->device = ATA_DEVICE_OBS | ATA_DEV1;
1452 } 1454 }
1453 1455
1454 static inline void ata_qc_reinit(struct ata_queued_cmd *qc) 1456 static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
1455 { 1457 {
1456 qc->dma_dir = DMA_NONE; 1458 qc->dma_dir = DMA_NONE;
1457 qc->sg = NULL; 1459 qc->sg = NULL;
1458 qc->flags = 0; 1460 qc->flags = 0;
1459 qc->cursg = NULL; 1461 qc->cursg = NULL;
1460 qc->cursg_ofs = 0; 1462 qc->cursg_ofs = 0;
1461 qc->nbytes = qc->extrabytes = qc->curbytes = 0; 1463 qc->nbytes = qc->extrabytes = qc->curbytes = 0;
1462 qc->n_elem = 0; 1464 qc->n_elem = 0;
1463 qc->err_mask = 0; 1465 qc->err_mask = 0;
1464 qc->sect_size = ATA_SECT_SIZE; 1466 qc->sect_size = ATA_SECT_SIZE;
1465 1467
1466 ata_tf_init(qc->dev, &qc->tf); 1468 ata_tf_init(qc->dev, &qc->tf);
1467 1469
1468 /* init result_tf such that it indicates normal completion */ 1470 /* init result_tf such that it indicates normal completion */
1469 qc->result_tf.command = ATA_DRDY; 1471 qc->result_tf.command = ATA_DRDY;
1470 qc->result_tf.feature = 0; 1472 qc->result_tf.feature = 0;
1471 } 1473 }
1472 1474
1473 static inline int ata_try_flush_cache(const struct ata_device *dev) 1475 static inline int ata_try_flush_cache(const struct ata_device *dev)
1474 { 1476 {
1475 return ata_id_wcache_enabled(dev->id) || 1477 return ata_id_wcache_enabled(dev->id) ||
1476 ata_id_has_flush(dev->id) || 1478 ata_id_has_flush(dev->id) ||
1477 ata_id_has_flush_ext(dev->id); 1479 ata_id_has_flush_ext(dev->id);
1478 } 1480 }
1479 1481
1480 static inline unsigned int ac_err_mask(u8 status) 1482 static inline unsigned int ac_err_mask(u8 status)
1481 { 1483 {
1482 if (status & (ATA_BUSY | ATA_DRQ)) 1484 if (status & (ATA_BUSY | ATA_DRQ))
1483 return AC_ERR_HSM; 1485 return AC_ERR_HSM;
1484 if (status & (ATA_ERR | ATA_DF)) 1486 if (status & (ATA_ERR | ATA_DF))
1485 return AC_ERR_DEV; 1487 return AC_ERR_DEV;
1486 return 0; 1488 return 0;
1487 } 1489 }
1488 1490
1489 static inline unsigned int __ac_err_mask(u8 status) 1491 static inline unsigned int __ac_err_mask(u8 status)
1490 { 1492 {
1491 unsigned int mask = ac_err_mask(status); 1493 unsigned int mask = ac_err_mask(status);
1492 if (mask == 0) 1494 if (mask == 0)
1493 return AC_ERR_OTHER; 1495 return AC_ERR_OTHER;
1494 return mask; 1496 return mask;
1495 } 1497 }
1496 1498
1497 static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host) 1499 static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host)
1498 { 1500 {
1499 return *(struct ata_port **)&host->hostdata[0]; 1501 return *(struct ata_port **)&host->hostdata[0];
1500 } 1502 }
1501 1503
1502 static inline int ata_check_ready(u8 status) 1504 static inline int ata_check_ready(u8 status)
1503 { 1505 {
1504 if (!(status & ATA_BUSY)) 1506 if (!(status & ATA_BUSY))
1505 return 1; 1507 return 1;
1506 1508
1507 /* 0xff indicates either no device or device not ready */ 1509 /* 0xff indicates either no device or device not ready */
1508 if (status == 0xff) 1510 if (status == 0xff)
1509 return -ENODEV; 1511 return -ENODEV;
1510 1512
1511 return 0; 1513 return 0;
1512 } 1514 }
1513 1515
1514 static inline unsigned long ata_deadline(unsigned long from_jiffies, 1516 static inline unsigned long ata_deadline(unsigned long from_jiffies,
1515 unsigned long timeout_msecs) 1517 unsigned long timeout_msecs)
1516 { 1518 {
1517 return from_jiffies + msecs_to_jiffies(timeout_msecs); 1519 return from_jiffies + msecs_to_jiffies(timeout_msecs);
1518 } 1520 }
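Reset and wait helpers throughout this header take absolute deadlines in jiffies rather than relative timeouts; ata_deadline() is the conversion point. Sketch, where ATA_TMOUT_BOOT is one of the millisecond timeout constants and foo_check_ready is a hypothetical readiness test:

    unsigned long deadline = ata_deadline(jiffies, ATA_TMOUT_BOOT);
    int rc = ata_wait_after_reset(link, deadline, foo_check_ready);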
1519 1521
1520 /* Don't open code these in drivers as there are traps. Firstly the range may 1522 /* Don't open code these in drivers as there are traps. Firstly the range may
1521 change in future hardware and specs, secondly 0xFF means 'no DMA' but is 1523 change in future hardware and specs, secondly 0xFF means 'no DMA' but is
1522 > UDMA_0. Here be dragons. */ 1524 > UDMA_0. Here be dragons. */
1523 1525
1524 static inline int ata_using_mwdma(struct ata_device *adev) 1526 static inline int ata_using_mwdma(struct ata_device *adev)
1525 { 1527 {
1526 if (adev->dma_mode >= XFER_MW_DMA_0 && adev->dma_mode <= XFER_MW_DMA_4) 1528 if (adev->dma_mode >= XFER_MW_DMA_0 && adev->dma_mode <= XFER_MW_DMA_4)
1527 return 1; 1529 return 1;
1528 return 0; 1530 return 0;
1529 } 1531 }
1530 1532
1531 static inline int ata_using_udma(struct ata_device *adev) 1533 static inline int ata_using_udma(struct ata_device *adev)
1532 { 1534 {
1533 if (adev->dma_mode >= XFER_UDMA_0 && adev->dma_mode <= XFER_UDMA_7) 1535 if (adev->dma_mode >= XFER_UDMA_0 && adev->dma_mode <= XFER_UDMA_7)
1534 return 1; 1536 return 1;
1535 return 0; 1537 return 0;
1536 } 1538 }
1537 1539
1538 static inline int ata_dma_enabled(struct ata_device *adev) 1540 static inline int ata_dma_enabled(struct ata_device *adev)
1539 { 1541 {
1540 return (adev->dma_mode == 0xFF ? 0 : 1); 1542 return (adev->dma_mode == 0xFF ? 0 : 1);
1541 } 1543 }
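The range checks these helpers encapsulate are exactly the traps the comment above warns about; a timing-programming path should branch on them rather than compare dma_mode directly. Sketch, with the register writers hypothetical:

    if (ata_using_udma(adev))
            foo_write_udma_timing(ap, adev);
    else if (ata_using_mwdma(adev))
            foo_write_mwdma_timing(ap, adev);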
1542 1544
1543 /************************************************************************** 1545 /**************************************************************************
1544 * PMP - drivers/ata/libata-pmp.c 1546 * PMP - drivers/ata/libata-pmp.c
1545 */ 1547 */
1546 #ifdef CONFIG_SATA_PMP 1548 #ifdef CONFIG_SATA_PMP
1547 1549
1548 extern const struct ata_port_operations sata_pmp_port_ops; 1550 extern const struct ata_port_operations sata_pmp_port_ops;
1549 1551
1550 extern int sata_pmp_qc_defer_cmd_switch(struct ata_queued_cmd *qc); 1552 extern int sata_pmp_qc_defer_cmd_switch(struct ata_queued_cmd *qc);
1551 extern void sata_pmp_error_handler(struct ata_port *ap); 1553 extern void sata_pmp_error_handler(struct ata_port *ap);
1552 1554
1553 #else /* CONFIG_SATA_PMP */ 1555 #else /* CONFIG_SATA_PMP */
1554 1556
1555 #define sata_pmp_port_ops sata_port_ops 1557 #define sata_pmp_port_ops sata_port_ops
1556 #define sata_pmp_qc_defer_cmd_switch ata_std_qc_defer 1558 #define sata_pmp_qc_defer_cmd_switch ata_std_qc_defer
1557 #define sata_pmp_error_handler ata_std_error_handler 1559 #define sata_pmp_error_handler ata_std_error_handler
1558 1560
1559 #endif /* CONFIG_SATA_PMP */ 1561 #endif /* CONFIG_SATA_PMP */
1560 1562
1561 1563
1562 /************************************************************************** 1564 /**************************************************************************
1563 * SFF - drivers/ata/libata-sff.c 1565 * SFF - drivers/ata/libata-sff.c
1564 */ 1566 */
1565 #ifdef CONFIG_ATA_SFF 1567 #ifdef CONFIG_ATA_SFF
1566 1568
1567 extern const struct ata_port_operations ata_sff_port_ops; 1569 extern const struct ata_port_operations ata_sff_port_ops;
1568 extern const struct ata_port_operations ata_bmdma32_port_ops; 1570 extern const struct ata_port_operations ata_bmdma32_port_ops;
1569 1571
1570 /* PIO only, sg_tablesize and dma_boundary limits can be removed */ 1572 /* PIO only, sg_tablesize and dma_boundary limits can be removed */
1571 #define ATA_PIO_SHT(drv_name) \ 1573 #define ATA_PIO_SHT(drv_name) \
1572 ATA_BASE_SHT(drv_name), \ 1574 ATA_BASE_SHT(drv_name), \
1573 .sg_tablesize = LIBATA_MAX_PRD, \ 1575 .sg_tablesize = LIBATA_MAX_PRD, \
	.dma_boundary		= ATA_DMA_BOUNDARY

extern void ata_sff_dev_select(struct ata_port *ap, unsigned int device);
extern u8 ata_sff_check_status(struct ata_port *ap);
extern void ata_sff_pause(struct ata_port *ap);
extern void ata_sff_dma_pause(struct ata_port *ap);
extern int ata_sff_busy_sleep(struct ata_port *ap,
			      unsigned long timeout_pat, unsigned long timeout);
extern int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline);
extern void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
extern void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
extern void ata_sff_exec_command(struct ata_port *ap,
				 const struct ata_taskfile *tf);
extern unsigned int ata_sff_data_xfer(struct ata_device *dev,
			unsigned char *buf, unsigned int buflen, int rw);
extern unsigned int ata_sff_data_xfer32(struct ata_device *dev,
			unsigned char *buf, unsigned int buflen, int rw);
extern unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev,
			unsigned char *buf, unsigned int buflen, int rw);
extern void ata_sff_irq_on(struct ata_port *ap);
extern void ata_sff_irq_clear(struct ata_port *ap);
extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
			    u8 status, int in_wq);
-extern void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay);
+extern void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay);
extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc);
extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
extern unsigned int ata_sff_port_intr(struct ata_port *ap,
				      struct ata_queued_cmd *qc);
extern irqreturn_t ata_sff_interrupt(int irq, void *dev_instance);
extern void ata_sff_lost_interrupt(struct ata_port *ap);
extern void ata_sff_freeze(struct ata_port *ap);
extern void ata_sff_thaw(struct ata_port *ap);
extern int ata_sff_prereset(struct ata_link *link, unsigned long deadline);
extern unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
					 u8 *r_err);
extern int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
				    unsigned long deadline);
extern int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
			     unsigned long deadline);
extern int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline);
extern void ata_sff_postreset(struct ata_link *link, unsigned int *classes);
extern void ata_sff_drain_fifo(struct ata_queued_cmd *qc);
extern void ata_sff_error_handler(struct ata_port *ap);
extern void ata_sff_std_ports(struct ata_ioports *ioaddr);
#ifdef CONFIG_PCI
extern int ata_pci_sff_init_host(struct ata_host *host);
extern int ata_pci_sff_prepare_host(struct pci_dev *pdev,
				    const struct ata_port_info * const * ppi,
				    struct ata_host **r_host);
extern int ata_pci_sff_activate_host(struct ata_host *host,
				     irq_handler_t irq_handler,
				     struct scsi_host_template *sht);
extern int ata_pci_sff_init_one(struct pci_dev *pdev,
		const struct ata_port_info * const * ppi,
		struct scsi_host_template *sht, void *host_priv, int hflags);
#endif /* CONFIG_PCI */
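/*
 * A minimal sketch, assuming a hypothetical "pata_example" driver, of
 * how the SFF helpers above are typically wired together for a
 * PIO-only PCI controller; the flags and mode mask are illustrative,
 * and the struct pci_driver registration boilerplate is omitted.
 */
static struct scsi_host_template pata_example_sht = {
	ATA_PIO_SHT("pata_example"),
};

static struct ata_port_operations pata_example_port_ops = {
	.inherits	= &ata_sff_port_ops,
};

static int pata_example_init_one(struct pci_dev *pdev,
				 const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.port_ops	= &pata_example_port_ops,
	};
	const struct ata_port_info *ppi[] = { &info, NULL };

	/* Allocates the host, maps the BARs, requests the IRQ and
	 * registers with SCSI in a single step. */
	return ata_pci_sff_init_one(pdev, ppi, &pata_example_sht, NULL, 0);
}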

#ifdef CONFIG_ATA_BMDMA

extern const struct ata_port_operations ata_bmdma_port_ops;

#define ATA_BMDMA_SHT(drv_name)					\
	ATA_BASE_SHT(drv_name),					\
	.sg_tablesize		= LIBATA_MAX_PRD,		\
	.dma_boundary		= ATA_DMA_BOUNDARY

extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc);
extern unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc);
extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc);
extern unsigned int ata_bmdma_port_intr(struct ata_port *ap,
					struct ata_queued_cmd *qc);
extern irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance);
extern void ata_bmdma_error_handler(struct ata_port *ap);
extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc);
extern void ata_bmdma_irq_clear(struct ata_port *ap);
extern void ata_bmdma_setup(struct ata_queued_cmd *qc);
extern void ata_bmdma_start(struct ata_queued_cmd *qc);
extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
extern u8 ata_bmdma_status(struct ata_port *ap);
extern int ata_bmdma_port_start(struct ata_port *ap);
extern int ata_bmdma_port_start32(struct ata_port *ap);

#ifdef CONFIG_PCI
extern int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev);
extern void ata_pci_bmdma_init(struct ata_host *host);
extern int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
				      const struct ata_port_info * const * ppi,
				      struct ata_host **r_host);
extern int ata_pci_bmdma_init_one(struct pci_dev *pdev,
				  const struct ata_port_info * const * ppi,
				  struct scsi_host_template *sht,
				  void *host_priv, int hflags);
#endif /* CONFIG_PCI */
#endif /* CONFIG_ATA_BMDMA */
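/*
 * The BMDMA variant follows the same pattern; a hedged sketch with the
 * same hypothetical driver naming, using ATA_BMDMA_SHT and the stock
 * ata_bmdma_port_ops declared above.
 */
static struct scsi_host_template pata_example_dma_sht = {
	ATA_BMDMA_SHT("pata_example_dma"),
};

static int pata_example_dma_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &ata_bmdma_port_ops,
	};
	const struct ata_port_info *ppi[] = { &info, NULL };

	/* Same one-step init as the SFF case, plus BMDMA register setup. */
	return ata_pci_bmdma_init_one(pdev, ppi, &pata_example_dma_sht,
				      NULL, 0);
}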

/**
 *	ata_sff_busy_wait - Wait for a port status register
 *	@ap: Port to wait for.
 *	@bits: bits that must be clear
 *	@max: number of 10us waits to perform
 *
 *	Waits up to max*10 microseconds for the selected bits in the
 *	port's status register to be cleared.
 *	Returns final value of status register.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static inline u8 ata_sff_busy_wait(struct ata_port *ap, unsigned int bits,
				   unsigned int max)
{
	u8 status;

	do {
		udelay(10);
		status = ap->ops->sff_check_status(ap);
		max--;
	} while (status != 0xff && (status & bits) && (max > 0));

	return status;
}

/**
 *	ata_wait_idle - Wait for a port to be idle.
 *	@ap: Port to wait for.
 *
 *	Waits up to 10ms for port's BUSY and DRQ signals to clear.
 *	Returns final value of status register.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static inline u8 ata_wait_idle(struct ata_port *ap)
{
	u8 status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);

#ifdef ATA_DEBUG
	if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ)))
		ata_port_printk(ap, KERN_DEBUG, "abnormal Status 0x%X\n",
				status);
#endif

	return status;
}
#endif /* CONFIG_ATA_SFF */

#endif /* __LINUX_LIBATA_H__ */
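/*
 * A simplified sketch (assumption: a hypothetical example_tf_load that
 * skips the ctl and HOB registers) of why ata_wait_idle() matters: a
 * taskfile-load hook conventionally ends by polling the port idle so
 * the register writes have settled before the command is issued.
 */
static void example_tf_load(struct ata_port *ap,
			    const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	if (tf->flags & ATA_TFLAG_ISADDR) {
		iowrite8(tf->feature, ioaddr->feature_addr);
		iowrite8(tf->nsect, ioaddr->nsect_addr);
		iowrite8(tf->lbal, ioaddr->lbal_addr);
		iowrite8(tf->lbam, ioaddr->lbam_addr);
		iowrite8(tf->lbah, ioaddr->lbah_addr);
	}
	if (tf->flags & ATA_TFLAG_DEVICE)
		iowrite8(tf->device, ioaddr->device_addr);

	/* Wait for BSY and DRQ to clear before the caller proceeds. */
	ata_wait_idle(ap);
}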