Commit e5005b15c91f3362036067bde5210d5c78af2f0d

Authored by Tejun Heo
Committed by Jeff Garzik
1 parent f08dc1ac6b

libata: issue DIPM enable commands with LPM state updated

Low level drivers may behave differently depending on the current
link->lpm_policy.  During ata_eh_set_lpm(), DIPM enable commands are
issued after the successful completion of ap->ops->set_lpm(), which
means that the controller is already in the target state.  This causes
DIPM enable commands to be processed with mismatching controller power
state and link->lpm_policy value.

In ahci, link->lpm_policy is used to ignore certain PHY events if LPM
is enabled; however, as DIPM commands are issued with a stale
link->lpm_policy, they sometimes end up triggering these conditions
and get aborted, leading to LPM configuration failure.

Fix it by updating link->lpm_policy before issuing DIPM enable
commands.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Kyle McMartin <kyle@mcmartin.ca>
Cc: stable@kernel.org
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
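
The changed hunk itself falls outside the excerpt below (the listing is cut
off well before ata_eh_set_lpm()), so as orientation, here is a rough sketch
of the shape of the fix: the link->lpm_policy update is moved ahead of the
DIPM enable loop instead of trailing it. This is reconstructed from the
commit message, not copied from the patch; the surrounding context lines and
exact placement are assumptions, though the identifiers (ata_eh_set_lpm(),
ata_dev_set_feature(), SETFEATURES_SATA_ENABLE, SATA_DIPM) are the ones this
code path actually uses.

 static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
 			   struct ata_device **r_failed_dev)
 {
 	...
+	/*
+	 * Sketch, not the verbatim hunk: update lpm_policy before issuing
+	 * the DIPM enable commands so the LLD sees the new policy.
+	 */
+	link->lpm_policy = policy;
+	if (ap && ap->slave_link)
+		ap->slave_link->lpm_policy = policy;
+
 	/* host config updated, enable DIPM if transitioning to MIN_POWER */
 	ata_for_each_dev(dev, link, ENABLED) {
 		if (policy == ATA_LPM_MIN_POWER && ata_id_has_dipm(dev->id)) {
 			err_mask = ata_dev_set_feature(dev,
 					SETFEATURES_SATA_ENABLE, SATA_DIPM);
 			...
 		}
 	}

-	link->lpm_policy = policy;
-	if (ap && ap->slave_link)
-		ap->slave_link->lpm_policy = policy;
 	return 0;
 }

With the assignment hoisted, the SET FEATURES (DIPM enable) commands reach
the driver with link->lpm_policy already matching the controller's power
state, so ahci's LPM-aware PHY event filtering applies while they are in
flight.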

Showing 1 changed file with 14 additions and 3 deletions

drivers/ata/libata-eh.c
1 /* 1 /*
2 * libata-eh.c - libata error handling 2 * libata-eh.c - libata error handling
3 * 3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org 5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails. 6 * on emails.
7 * 7 *
8 * Copyright 2006 Tejun Heo <htejun@gmail.com> 8 * Copyright 2006 Tejun Heo <htejun@gmail.com>
9 * 9 *
10 * 10 *
11 * This program is free software; you can redistribute it and/or 11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as 12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; either version 2, or 13 * published by the Free Software Foundation; either version 2, or
14 * (at your option) any later version. 14 * (at your option) any later version.
15 * 15 *
16 * This program is distributed in the hope that it will be useful, 16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details. 19 * General Public License for more details.
20 * 20 *
21 * You should have received a copy of the GNU General Public License 21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to 22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, 23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
24 * USA. 24 * USA.
25 * 25 *
26 * 26 *
27 * libata documentation is available via 'make {ps|pdf}docs', 27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.* 28 * as Documentation/DocBook/libata.*
29 * 29 *
30 * Hardware documentation available from http://www.t13.org/ and 30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/ 31 * http://www.sata-io.org/
32 * 32 *
33 */ 33 */
34 34
35 #include <linux/kernel.h> 35 #include <linux/kernel.h>
36 #include <linux/blkdev.h> 36 #include <linux/blkdev.h>
37 #include <linux/pci.h> 37 #include <linux/pci.h>
38 #include <scsi/scsi.h> 38 #include <scsi/scsi.h>
39 #include <scsi/scsi_host.h> 39 #include <scsi/scsi_host.h>
40 #include <scsi/scsi_eh.h> 40 #include <scsi/scsi_eh.h>
41 #include <scsi/scsi_device.h> 41 #include <scsi/scsi_device.h>
42 #include <scsi/scsi_cmnd.h> 42 #include <scsi/scsi_cmnd.h>
43 #include <scsi/scsi_dbg.h> 43 #include <scsi/scsi_dbg.h>
44 #include "../scsi/scsi_transport_api.h" 44 #include "../scsi/scsi_transport_api.h"
45 45
46 #include <linux/libata.h> 46 #include <linux/libata.h>
47 47
48 #include "libata.h" 48 #include "libata.h"
49 49
50 enum { 50 enum {
51 /* speed down verdicts */ 51 /* speed down verdicts */
52 ATA_EH_SPDN_NCQ_OFF = (1 << 0), 52 ATA_EH_SPDN_NCQ_OFF = (1 << 0),
53 ATA_EH_SPDN_SPEED_DOWN = (1 << 1), 53 ATA_EH_SPDN_SPEED_DOWN = (1 << 1),
54 ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2), 54 ATA_EH_SPDN_FALLBACK_TO_PIO = (1 << 2),
55 ATA_EH_SPDN_KEEP_ERRORS = (1 << 3), 55 ATA_EH_SPDN_KEEP_ERRORS = (1 << 3),
56 56
57 /* error flags */ 57 /* error flags */
58 ATA_EFLAG_IS_IO = (1 << 0), 58 ATA_EFLAG_IS_IO = (1 << 0),
59 ATA_EFLAG_DUBIOUS_XFER = (1 << 1), 59 ATA_EFLAG_DUBIOUS_XFER = (1 << 1),
60 ATA_EFLAG_OLD_ER = (1 << 31), 60 ATA_EFLAG_OLD_ER = (1 << 31),
61 61
62 /* error categories */ 62 /* error categories */
63 ATA_ECAT_NONE = 0, 63 ATA_ECAT_NONE = 0,
64 ATA_ECAT_ATA_BUS = 1, 64 ATA_ECAT_ATA_BUS = 1,
65 ATA_ECAT_TOUT_HSM = 2, 65 ATA_ECAT_TOUT_HSM = 2,
66 ATA_ECAT_UNK_DEV = 3, 66 ATA_ECAT_UNK_DEV = 3,
67 ATA_ECAT_DUBIOUS_NONE = 4, 67 ATA_ECAT_DUBIOUS_NONE = 4,
68 ATA_ECAT_DUBIOUS_ATA_BUS = 5, 68 ATA_ECAT_DUBIOUS_ATA_BUS = 5,
69 ATA_ECAT_DUBIOUS_TOUT_HSM = 6, 69 ATA_ECAT_DUBIOUS_TOUT_HSM = 6,
70 ATA_ECAT_DUBIOUS_UNK_DEV = 7, 70 ATA_ECAT_DUBIOUS_UNK_DEV = 7,
71 ATA_ECAT_NR = 8, 71 ATA_ECAT_NR = 8,
72 72
73 ATA_EH_CMD_DFL_TIMEOUT = 5000, 73 ATA_EH_CMD_DFL_TIMEOUT = 5000,
74 74
75 /* always put at least this amount of time between resets */ 75 /* always put at least this amount of time between resets */
76 ATA_EH_RESET_COOL_DOWN = 5000, 76 ATA_EH_RESET_COOL_DOWN = 5000,
77 77
78 /* Waiting in ->prereset can never be reliable. It's 78 /* Waiting in ->prereset can never be reliable. It's
79 * sometimes nice to wait there but it can't be depended upon; 79 * sometimes nice to wait there but it can't be depended upon;
80 * otherwise, we wouldn't be resetting. Just give it enough 80 * otherwise, we wouldn't be resetting. Just give it enough
81 * time for most drives to spin up. 81 * time for most drives to spin up.
82 */ 82 */
83 ATA_EH_PRERESET_TIMEOUT = 10000, 83 ATA_EH_PRERESET_TIMEOUT = 10000,
84 ATA_EH_FASTDRAIN_INTERVAL = 3000, 84 ATA_EH_FASTDRAIN_INTERVAL = 3000,
85 85
86 ATA_EH_UA_TRIES = 5, 86 ATA_EH_UA_TRIES = 5,
87 87
88 /* probe speed down parameters, see ata_eh_schedule_probe() */ 88 /* probe speed down parameters, see ata_eh_schedule_probe() */
89 ATA_EH_PROBE_TRIAL_INTERVAL = 60000, /* 1 min */ 89 ATA_EH_PROBE_TRIAL_INTERVAL = 60000, /* 1 min */
90 ATA_EH_PROBE_TRIALS = 2, 90 ATA_EH_PROBE_TRIALS = 2,
91 }; 91 };
92 92
93 /* The following table determines how we sequence resets. Each entry 93 /* The following table determines how we sequence resets. Each entry
94 * represents timeout for that try. The first try can be soft or 94 * represents timeout for that try. The first try can be soft or
95 * hardreset. All others are hardreset if available. In most cases 95 * hardreset. All others are hardreset if available. In most cases
96 * the first reset w/ 10sec timeout should succeed. Following entries 96 * the first reset w/ 10sec timeout should succeed. Following entries
97 * are mostly for error handling, hotplug and retarded devices. 97 * are mostly for error handling, hotplug and retarded devices.
98 */ 98 */
99 static const unsigned long ata_eh_reset_timeouts[] = { 99 static const unsigned long ata_eh_reset_timeouts[] = {
100 10000, /* most drives spin up by 10sec */ 100 10000, /* most drives spin up by 10sec */
101 10000, /* > 99% working drives spin up before 20sec */ 101 10000, /* > 99% working drives spin up before 20sec */
102 35000, /* give > 30 secs of idleness for retarded devices */ 102 35000, /* give > 30 secs of idleness for retarded devices */
103 5000, /* and sweet one last chance */ 103 5000, /* and sweet one last chance */
104 ULONG_MAX, /* > 1 min has elapsed, give up */ 104 ULONG_MAX, /* > 1 min has elapsed, give up */
105 }; 105 };
106 106
107 static const unsigned long ata_eh_identify_timeouts[] = { 107 static const unsigned long ata_eh_identify_timeouts[] = {
108 5000, /* covers > 99% of successes and not too boring on failures */ 108 5000, /* covers > 99% of successes and not too boring on failures */
109 10000, /* combined time till here is enough even for media access */ 109 10000, /* combined time till here is enough even for media access */
110 30000, /* for true idiots */ 110 30000, /* for true idiots */
111 ULONG_MAX, 111 ULONG_MAX,
112 }; 112 };
113 113
114 static const unsigned long ata_eh_flush_timeouts[] = { 114 static const unsigned long ata_eh_flush_timeouts[] = {
115 15000, /* be generous with flush */ 115 15000, /* be generous with flush */
116 15000, /* ditto */ 116 15000, /* ditto */
117 30000, /* and even more generous */ 117 30000, /* and even more generous */
118 ULONG_MAX, 118 ULONG_MAX,
119 }; 119 };
120 120
121 static const unsigned long ata_eh_other_timeouts[] = { 121 static const unsigned long ata_eh_other_timeouts[] = {
122 5000, /* same rationale as identify timeout */ 122 5000, /* same rationale as identify timeout */
123 10000, /* ditto */ 123 10000, /* ditto */
124 /* but no merciful 30sec for other commands, it just isn't worth it */ 124 /* but no merciful 30sec for other commands, it just isn't worth it */
125 ULONG_MAX, 125 ULONG_MAX,
126 }; 126 };
127 127
128 struct ata_eh_cmd_timeout_ent { 128 struct ata_eh_cmd_timeout_ent {
129 const u8 *commands; 129 const u8 *commands;
130 const unsigned long *timeouts; 130 const unsigned long *timeouts;
131 }; 131 };
132 132
133 /* The following table determines timeouts to use for EH internal 133 /* The following table determines timeouts to use for EH internal
134 * commands. Each table entry is a command class and matches the 134 * commands. Each table entry is a command class and matches the
135 * commands the entry applies to and the timeout table to use. 135 * commands the entry applies to and the timeout table to use.
136 * 136 *
137 * On the retry after a command timed out, the next timeout value from 137 * On the retry after a command timed out, the next timeout value from
138 * the table is used. If the table doesn't contain further entries, 138 * the table is used. If the table doesn't contain further entries,
139 * the last value is used. 139 * the last value is used.
140 * 140 *
141 * ehc->cmd_timeout_idx keeps track of which timeout to use per 141 * ehc->cmd_timeout_idx keeps track of which timeout to use per
142 * command class, so if SET_FEATURES times out on the first try, the 142 * command class, so if SET_FEATURES times out on the first try, the
143 * next try will use the second timeout value only for that class. 143 * next try will use the second timeout value only for that class.
144 */ 144 */
145 #define CMDS(cmds...) (const u8 []){ cmds, 0 } 145 #define CMDS(cmds...) (const u8 []){ cmds, 0 }
146 static const struct ata_eh_cmd_timeout_ent 146 static const struct ata_eh_cmd_timeout_ent
147 ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = { 147 ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
148 { .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI), 148 { .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
149 .timeouts = ata_eh_identify_timeouts, }, 149 .timeouts = ata_eh_identify_timeouts, },
150 { .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT), 150 { .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
151 .timeouts = ata_eh_other_timeouts, }, 151 .timeouts = ata_eh_other_timeouts, },
152 { .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT), 152 { .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
153 .timeouts = ata_eh_other_timeouts, }, 153 .timeouts = ata_eh_other_timeouts, },
154 { .commands = CMDS(ATA_CMD_SET_FEATURES), 154 { .commands = CMDS(ATA_CMD_SET_FEATURES),
155 .timeouts = ata_eh_other_timeouts, }, 155 .timeouts = ata_eh_other_timeouts, },
156 { .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS), 156 { .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
157 .timeouts = ata_eh_other_timeouts, }, 157 .timeouts = ata_eh_other_timeouts, },
158 { .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT), 158 { .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
159 .timeouts = ata_eh_flush_timeouts }, 159 .timeouts = ata_eh_flush_timeouts },
160 }; 160 };
161 #undef CMDS 161 #undef CMDS
162 162
163 static void __ata_port_freeze(struct ata_port *ap); 163 static void __ata_port_freeze(struct ata_port *ap);
164 #ifdef CONFIG_PM 164 #ifdef CONFIG_PM
165 static void ata_eh_handle_port_suspend(struct ata_port *ap); 165 static void ata_eh_handle_port_suspend(struct ata_port *ap);
166 static void ata_eh_handle_port_resume(struct ata_port *ap); 166 static void ata_eh_handle_port_resume(struct ata_port *ap);
167 #else /* CONFIG_PM */ 167 #else /* CONFIG_PM */
168 static void ata_eh_handle_port_suspend(struct ata_port *ap) 168 static void ata_eh_handle_port_suspend(struct ata_port *ap)
169 { } 169 { }
170 170
171 static void ata_eh_handle_port_resume(struct ata_port *ap) 171 static void ata_eh_handle_port_resume(struct ata_port *ap)
172 { } 172 { }
173 #endif /* CONFIG_PM */ 173 #endif /* CONFIG_PM */
174 174
175 static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt, 175 static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
176 va_list args) 176 va_list args)
177 { 177 {
178 ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len, 178 ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
179 ATA_EH_DESC_LEN - ehi->desc_len, 179 ATA_EH_DESC_LEN - ehi->desc_len,
180 fmt, args); 180 fmt, args);
181 } 181 }
182 182
183 /** 183 /**
184 * __ata_ehi_push_desc - push error description without adding separator 184 * __ata_ehi_push_desc - push error description without adding separator
185 * @ehi: target EHI 185 * @ehi: target EHI
186 * @fmt: printf format string 186 * @fmt: printf format string
187 * 187 *
188 * Format string according to @fmt and append it to @ehi->desc. 188 * Format string according to @fmt and append it to @ehi->desc.
189 * 189 *
190 * LOCKING: 190 * LOCKING:
191 * spin_lock_irqsave(host lock) 191 * spin_lock_irqsave(host lock)
192 */ 192 */
193 void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) 193 void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
194 { 194 {
195 va_list args; 195 va_list args;
196 196
197 va_start(args, fmt); 197 va_start(args, fmt);
198 __ata_ehi_pushv_desc(ehi, fmt, args); 198 __ata_ehi_pushv_desc(ehi, fmt, args);
199 va_end(args); 199 va_end(args);
200 } 200 }
201 201
202 /** 202 /**
203 * ata_ehi_push_desc - push error description with separator 203 * ata_ehi_push_desc - push error description with separator
204 * @ehi: target EHI 204 * @ehi: target EHI
205 * @fmt: printf format string 205 * @fmt: printf format string
206 * 206 *
207 * Format string according to @fmt and append it to @ehi->desc. 207 * Format string according to @fmt and append it to @ehi->desc.
208 * If @ehi->desc is not empty, ", " is added in-between. 208 * If @ehi->desc is not empty, ", " is added in-between.
209 * 209 *
210 * LOCKING: 210 * LOCKING:
211 * spin_lock_irqsave(host lock) 211 * spin_lock_irqsave(host lock)
212 */ 212 */
213 void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...) 213 void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
214 { 214 {
215 va_list args; 215 va_list args;
216 216
217 if (ehi->desc_len) 217 if (ehi->desc_len)
218 __ata_ehi_push_desc(ehi, ", "); 218 __ata_ehi_push_desc(ehi, ", ");
219 219
220 va_start(args, fmt); 220 va_start(args, fmt);
221 __ata_ehi_pushv_desc(ehi, fmt, args); 221 __ata_ehi_pushv_desc(ehi, fmt, args);
222 va_end(args); 222 va_end(args);
223 } 223 }
224 224
225 /** 225 /**
226 * ata_ehi_clear_desc - clean error description 226 * ata_ehi_clear_desc - clean error description
227 * @ehi: target EHI 227 * @ehi: target EHI
228 * 228 *
229 * Clear @ehi->desc. 229 * Clear @ehi->desc.
230 * 230 *
231 * LOCKING: 231 * LOCKING:
232 * spin_lock_irqsave(host lock) 232 * spin_lock_irqsave(host lock)
233 */ 233 */
234 void ata_ehi_clear_desc(struct ata_eh_info *ehi) 234 void ata_ehi_clear_desc(struct ata_eh_info *ehi)
235 { 235 {
236 ehi->desc[0] = '\0'; 236 ehi->desc[0] = '\0';
237 ehi->desc_len = 0; 237 ehi->desc_len = 0;
238 } 238 }
239 239
240 /** 240 /**
241 * ata_port_desc - append port description 241 * ata_port_desc - append port description
242 * @ap: target ATA port 242 * @ap: target ATA port
243 * @fmt: printf format string 243 * @fmt: printf format string
244 * 244 *
245 * Format string according to @fmt and append it to port 245 * Format string according to @fmt and append it to port
246 * description. If port description is not empty, " " is added 246 * description. If port description is not empty, " " is added
247 * in-between. This function is to be used while initializing 247 * in-between. This function is to be used while initializing
248 * ata_host. The description is printed on host registration. 248 * ata_host. The description is printed on host registration.
249 * 249 *
250 * LOCKING: 250 * LOCKING:
251 * None. 251 * None.
252 */ 252 */
253 void ata_port_desc(struct ata_port *ap, const char *fmt, ...) 253 void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
254 { 254 {
255 va_list args; 255 va_list args;
256 256
257 WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING)); 257 WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));
258 258
259 if (ap->link.eh_info.desc_len) 259 if (ap->link.eh_info.desc_len)
260 __ata_ehi_push_desc(&ap->link.eh_info, " "); 260 __ata_ehi_push_desc(&ap->link.eh_info, " ");
261 261
262 va_start(args, fmt); 262 va_start(args, fmt);
263 __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args); 263 __ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
264 va_end(args); 264 va_end(args);
265 } 265 }
266 266
267 #ifdef CONFIG_PCI 267 #ifdef CONFIG_PCI
268 268
269 /** 269 /**
270 * ata_port_pbar_desc - append PCI BAR description 270 * ata_port_pbar_desc - append PCI BAR description
271 * @ap: target ATA port 271 * @ap: target ATA port
272 * @bar: target PCI BAR 272 * @bar: target PCI BAR
273 * @offset: offset into PCI BAR 273 * @offset: offset into PCI BAR
274 * @name: name of the area 274 * @name: name of the area
275 * 275 *
276 * If @offset is negative, this function formats a string which 276 * If @offset is negative, this function formats a string which
277 * contains the name, address, size and type of the BAR and 277 * contains the name, address, size and type of the BAR and
278 * appends it to the port description. If @offset is zero or 278 * appends it to the port description. If @offset is zero or
279 * positive, only name and offsetted address is appended. 279 * positive, only name and offsetted address is appended.
280 * 280 *
281 * LOCKING: 281 * LOCKING:
282 * None. 282 * None.
283 */ 283 */
284 void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset, 284 void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
285 const char *name) 285 const char *name)
286 { 286 {
287 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 287 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
288 char *type = ""; 288 char *type = "";
289 unsigned long long start, len; 289 unsigned long long start, len;
290 290
291 if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) 291 if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
292 type = "m"; 292 type = "m";
293 else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) 293 else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
294 type = "i"; 294 type = "i";
295 295
296 start = (unsigned long long)pci_resource_start(pdev, bar); 296 start = (unsigned long long)pci_resource_start(pdev, bar);
297 len = (unsigned long long)pci_resource_len(pdev, bar); 297 len = (unsigned long long)pci_resource_len(pdev, bar);
298 298
299 if (offset < 0) 299 if (offset < 0)
300 ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start); 300 ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
301 else 301 else
302 ata_port_desc(ap, "%s 0x%llx", name, 302 ata_port_desc(ap, "%s 0x%llx", name,
303 start + (unsigned long long)offset); 303 start + (unsigned long long)offset);
304 } 304 }
305 305
306 #endif /* CONFIG_PCI */ 306 #endif /* CONFIG_PCI */
307 307
308 static int ata_lookup_timeout_table(u8 cmd) 308 static int ata_lookup_timeout_table(u8 cmd)
309 { 309 {
310 int i; 310 int i;
311 311
312 for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) { 312 for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
313 const u8 *cur; 313 const u8 *cur;
314 314
315 for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++) 315 for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
316 if (*cur == cmd) 316 if (*cur == cmd)
317 return i; 317 return i;
318 } 318 }
319 319
320 return -1; 320 return -1;
321 } 321 }
322 322
323 /** 323 /**
324 * ata_internal_cmd_timeout - determine timeout for an internal command 324 * ata_internal_cmd_timeout - determine timeout for an internal command
325 * @dev: target device 325 * @dev: target device
326 * @cmd: internal command to be issued 326 * @cmd: internal command to be issued
327 * 327 *
328 * Determine timeout for internal command @cmd for @dev. 328 * Determine timeout for internal command @cmd for @dev.
329 * 329 *
330 * LOCKING: 330 * LOCKING:
331 * EH context. 331 * EH context.
332 * 332 *
333 * RETURNS: 333 * RETURNS:
334 * Determined timeout. 334 * Determined timeout.
335 */ 335 */
336 unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd) 336 unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
337 { 337 {
338 struct ata_eh_context *ehc = &dev->link->eh_context; 338 struct ata_eh_context *ehc = &dev->link->eh_context;
339 int ent = ata_lookup_timeout_table(cmd); 339 int ent = ata_lookup_timeout_table(cmd);
340 int idx; 340 int idx;
341 341
342 if (ent < 0) 342 if (ent < 0)
343 return ATA_EH_CMD_DFL_TIMEOUT; 343 return ATA_EH_CMD_DFL_TIMEOUT;
344 344
345 idx = ehc->cmd_timeout_idx[dev->devno][ent]; 345 idx = ehc->cmd_timeout_idx[dev->devno][ent];
346 return ata_eh_cmd_timeout_table[ent].timeouts[idx]; 346 return ata_eh_cmd_timeout_table[ent].timeouts[idx];
347 } 347 }
348 348
349 /** 349 /**
350 * ata_internal_cmd_timed_out - notification for internal command timeout 350 * ata_internal_cmd_timed_out - notification for internal command timeout
351 * @dev: target device 351 * @dev: target device
352 * @cmd: internal command which timed out 352 * @cmd: internal command which timed out
353 * 353 *
354 * Notify EH that internal command @cmd for @dev timed out. This 354 * Notify EH that internal command @cmd for @dev timed out. This
355 * function should be called only for commands whose timeouts are 355 * function should be called only for commands whose timeouts are
356 * determined using ata_internal_cmd_timeout(). 356 * determined using ata_internal_cmd_timeout().
357 * 357 *
358 * LOCKING: 358 * LOCKING:
359 * EH context. 359 * EH context.
360 */ 360 */
361 void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd) 361 void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
362 { 362 {
363 struct ata_eh_context *ehc = &dev->link->eh_context; 363 struct ata_eh_context *ehc = &dev->link->eh_context;
364 int ent = ata_lookup_timeout_table(cmd); 364 int ent = ata_lookup_timeout_table(cmd);
365 int idx; 365 int idx;
366 366
367 if (ent < 0) 367 if (ent < 0)
368 return; 368 return;
369 369
370 idx = ehc->cmd_timeout_idx[dev->devno][ent]; 370 idx = ehc->cmd_timeout_idx[dev->devno][ent];
371 if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX) 371 if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
372 ehc->cmd_timeout_idx[dev->devno][ent]++; 372 ehc->cmd_timeout_idx[dev->devno][ent]++;
373 } 373 }
374 374
375 static void ata_ering_record(struct ata_ering *ering, unsigned int eflags, 375 static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
376 unsigned int err_mask) 376 unsigned int err_mask)
377 { 377 {
378 struct ata_ering_entry *ent; 378 struct ata_ering_entry *ent;
379 379
380 WARN_ON(!err_mask); 380 WARN_ON(!err_mask);
381 381
382 ering->cursor++; 382 ering->cursor++;
383 ering->cursor %= ATA_ERING_SIZE; 383 ering->cursor %= ATA_ERING_SIZE;
384 384
385 ent = &ering->ring[ering->cursor]; 385 ent = &ering->ring[ering->cursor];
386 ent->eflags = eflags; 386 ent->eflags = eflags;
387 ent->err_mask = err_mask; 387 ent->err_mask = err_mask;
388 ent->timestamp = get_jiffies_64(); 388 ent->timestamp = get_jiffies_64();
389 } 389 }
390 390
391 static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering) 391 static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
392 { 392 {
393 struct ata_ering_entry *ent = &ering->ring[ering->cursor]; 393 struct ata_ering_entry *ent = &ering->ring[ering->cursor];
394 394
395 if (ent->err_mask) 395 if (ent->err_mask)
396 return ent; 396 return ent;
397 return NULL; 397 return NULL;
398 } 398 }
399 399
400 int ata_ering_map(struct ata_ering *ering, 400 int ata_ering_map(struct ata_ering *ering,
401 int (*map_fn)(struct ata_ering_entry *, void *), 401 int (*map_fn)(struct ata_ering_entry *, void *),
402 void *arg) 402 void *arg)
403 { 403 {
404 int idx, rc = 0; 404 int idx, rc = 0;
405 struct ata_ering_entry *ent; 405 struct ata_ering_entry *ent;
406 406
407 idx = ering->cursor; 407 idx = ering->cursor;
408 do { 408 do {
409 ent = &ering->ring[idx]; 409 ent = &ering->ring[idx];
410 if (!ent->err_mask) 410 if (!ent->err_mask)
411 break; 411 break;
412 rc = map_fn(ent, arg); 412 rc = map_fn(ent, arg);
413 if (rc) 413 if (rc)
414 break; 414 break;
415 idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE; 415 idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
416 } while (idx != ering->cursor); 416 } while (idx != ering->cursor);
417 417
418 return rc; 418 return rc;
419 } 419 }
420 420
421 int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg) 421 int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
422 { 422 {
423 ent->eflags |= ATA_EFLAG_OLD_ER; 423 ent->eflags |= ATA_EFLAG_OLD_ER;
424 return 0; 424 return 0;
425 } 425 }
426 426
427 static void ata_ering_clear(struct ata_ering *ering) 427 static void ata_ering_clear(struct ata_ering *ering)
428 { 428 {
429 ata_ering_map(ering, ata_ering_clear_cb, NULL); 429 ata_ering_map(ering, ata_ering_clear_cb, NULL);
430 } 430 }
431 431
432 static unsigned int ata_eh_dev_action(struct ata_device *dev) 432 static unsigned int ata_eh_dev_action(struct ata_device *dev)
433 { 433 {
434 struct ata_eh_context *ehc = &dev->link->eh_context; 434 struct ata_eh_context *ehc = &dev->link->eh_context;
435 435
436 return ehc->i.action | ehc->i.dev_action[dev->devno]; 436 return ehc->i.action | ehc->i.dev_action[dev->devno];
437 } 437 }
438 438
439 static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev, 439 static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
440 struct ata_eh_info *ehi, unsigned int action) 440 struct ata_eh_info *ehi, unsigned int action)
441 { 441 {
442 struct ata_device *tdev; 442 struct ata_device *tdev;
443 443
444 if (!dev) { 444 if (!dev) {
445 ehi->action &= ~action; 445 ehi->action &= ~action;
446 ata_for_each_dev(tdev, link, ALL) 446 ata_for_each_dev(tdev, link, ALL)
447 ehi->dev_action[tdev->devno] &= ~action; 447 ehi->dev_action[tdev->devno] &= ~action;
448 } else { 448 } else {
449 /* doesn't make sense for port-wide EH actions */ 449 /* doesn't make sense for port-wide EH actions */
450 WARN_ON(!(action & ATA_EH_PERDEV_MASK)); 450 WARN_ON(!(action & ATA_EH_PERDEV_MASK));
451 451
452 /* break ehi->action into ehi->dev_action */ 452 /* break ehi->action into ehi->dev_action */
453 if (ehi->action & action) { 453 if (ehi->action & action) {
454 ata_for_each_dev(tdev, link, ALL) 454 ata_for_each_dev(tdev, link, ALL)
455 ehi->dev_action[tdev->devno] |= 455 ehi->dev_action[tdev->devno] |=
456 ehi->action & action; 456 ehi->action & action;
457 ehi->action &= ~action; 457 ehi->action &= ~action;
458 } 458 }
459 459
460 /* turn off the specified per-dev action */ 460 /* turn off the specified per-dev action */
461 ehi->dev_action[dev->devno] &= ~action; 461 ehi->dev_action[dev->devno] &= ~action;
462 } 462 }
463 } 463 }
464 464
465 /** 465 /**
466 * ata_eh_acquire - acquire EH ownership 466 * ata_eh_acquire - acquire EH ownership
467 * @ap: ATA port to acquire EH ownership for 467 * @ap: ATA port to acquire EH ownership for
468 * 468 *
469 * Acquire EH ownership for @ap. This is the basic exclusion 469 * Acquire EH ownership for @ap. This is the basic exclusion
470 * mechanism for ports sharing a host. Only one port hanging off 470 * mechanism for ports sharing a host. Only one port hanging off
471 * the same host can claim the ownership of EH. 471 * the same host can claim the ownership of EH.
472 * 472 *
473 * LOCKING: 473 * LOCKING:
474 * EH context. 474 * EH context.
475 */ 475 */
476 void ata_eh_acquire(struct ata_port *ap) 476 void ata_eh_acquire(struct ata_port *ap)
477 { 477 {
478 mutex_lock(&ap->host->eh_mutex); 478 mutex_lock(&ap->host->eh_mutex);
479 WARN_ON_ONCE(ap->host->eh_owner); 479 WARN_ON_ONCE(ap->host->eh_owner);
480 ap->host->eh_owner = current; 480 ap->host->eh_owner = current;
481 } 481 }
482 482
483 /** 483 /**
484 * ata_eh_release - release EH ownership 484 * ata_eh_release - release EH ownership
485 * @ap: ATA port to release EH ownership for 485 * @ap: ATA port to release EH ownership for
486 * 486 *
487 * Release EH ownership for @ap if the caller. The caller must 487 * Release EH ownership for @ap if the caller. The caller must
488 * have acquired EH ownership using ata_eh_acquire() previously. 488 * have acquired EH ownership using ata_eh_acquire() previously.
489 * 489 *
490 * LOCKING: 490 * LOCKING:
491 * EH context. 491 * EH context.
492 */ 492 */
493 void ata_eh_release(struct ata_port *ap) 493 void ata_eh_release(struct ata_port *ap)
494 { 494 {
495 WARN_ON_ONCE(ap->host->eh_owner != current); 495 WARN_ON_ONCE(ap->host->eh_owner != current);
496 ap->host->eh_owner = NULL; 496 ap->host->eh_owner = NULL;
497 mutex_unlock(&ap->host->eh_mutex); 497 mutex_unlock(&ap->host->eh_mutex);
498 } 498 }
499 499
500 /** 500 /**
501 * ata_scsi_timed_out - SCSI layer time out callback 501 * ata_scsi_timed_out - SCSI layer time out callback
502 * @cmd: timed out SCSI command 502 * @cmd: timed out SCSI command
503 * 503 *
504 * Handles SCSI layer timeout. We race with normal completion of 504 * Handles SCSI layer timeout. We race with normal completion of
505 * the qc for @cmd. If the qc is already gone, we lose and let 505 * the qc for @cmd. If the qc is already gone, we lose and let
506 * the scsi command finish (EH_HANDLED). Otherwise, the qc has 506 * the scsi command finish (EH_HANDLED). Otherwise, the qc has
507 * timed out and EH should be invoked. Prevent ata_qc_complete() 507 * timed out and EH should be invoked. Prevent ata_qc_complete()
508 * from finishing it by setting EH_SCHEDULED and return 508 * from finishing it by setting EH_SCHEDULED and return
509 * EH_NOT_HANDLED. 509 * EH_NOT_HANDLED.
510 * 510 *
511 * TODO: kill this function once old EH is gone. 511 * TODO: kill this function once old EH is gone.
512 * 512 *
513 * LOCKING: 513 * LOCKING:
514 * Called from timer context 514 * Called from timer context
515 * 515 *
516 * RETURNS: 516 * RETURNS:
517 * EH_HANDLED or EH_NOT_HANDLED 517 * EH_HANDLED or EH_NOT_HANDLED
518 */ 518 */
519 enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd) 519 enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
520 { 520 {
521 struct Scsi_Host *host = cmd->device->host; 521 struct Scsi_Host *host = cmd->device->host;
522 struct ata_port *ap = ata_shost_to_port(host); 522 struct ata_port *ap = ata_shost_to_port(host);
523 unsigned long flags; 523 unsigned long flags;
524 struct ata_queued_cmd *qc; 524 struct ata_queued_cmd *qc;
525 enum blk_eh_timer_return ret; 525 enum blk_eh_timer_return ret;
526 526
527 DPRINTK("ENTER\n"); 527 DPRINTK("ENTER\n");
528 528
529 if (ap->ops->error_handler) { 529 if (ap->ops->error_handler) {
530 ret = BLK_EH_NOT_HANDLED; 530 ret = BLK_EH_NOT_HANDLED;
531 goto out; 531 goto out;
532 } 532 }
533 533
534 ret = BLK_EH_HANDLED; 534 ret = BLK_EH_HANDLED;
535 spin_lock_irqsave(ap->lock, flags); 535 spin_lock_irqsave(ap->lock, flags);
536 qc = ata_qc_from_tag(ap, ap->link.active_tag); 536 qc = ata_qc_from_tag(ap, ap->link.active_tag);
537 if (qc) { 537 if (qc) {
538 WARN_ON(qc->scsicmd != cmd); 538 WARN_ON(qc->scsicmd != cmd);
539 qc->flags |= ATA_QCFLAG_EH_SCHEDULED; 539 qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
540 qc->err_mask |= AC_ERR_TIMEOUT; 540 qc->err_mask |= AC_ERR_TIMEOUT;
541 ret = BLK_EH_NOT_HANDLED; 541 ret = BLK_EH_NOT_HANDLED;
542 } 542 }
543 spin_unlock_irqrestore(ap->lock, flags); 543 spin_unlock_irqrestore(ap->lock, flags);
544 544
545 out: 545 out:
546 DPRINTK("EXIT, ret=%d\n", ret); 546 DPRINTK("EXIT, ret=%d\n", ret);
547 return ret; 547 return ret;
548 } 548 }
549 549
550 static void ata_eh_unload(struct ata_port *ap) 550 static void ata_eh_unload(struct ata_port *ap)
551 { 551 {
552 struct ata_link *link; 552 struct ata_link *link;
553 struct ata_device *dev; 553 struct ata_device *dev;
554 unsigned long flags; 554 unsigned long flags;
555 555
556 /* Restore SControl IPM and SPD for the next driver and 556 /* Restore SControl IPM and SPD for the next driver and
557 * disable attached devices. 557 * disable attached devices.
558 */ 558 */
559 ata_for_each_link(link, ap, PMP_FIRST) { 559 ata_for_each_link(link, ap, PMP_FIRST) {
560 sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0); 560 sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
561 ata_for_each_dev(dev, link, ALL) 561 ata_for_each_dev(dev, link, ALL)
562 ata_dev_disable(dev); 562 ata_dev_disable(dev);
563 } 563 }
564 564
565 /* freeze and set UNLOADED */ 565 /* freeze and set UNLOADED */
566 spin_lock_irqsave(ap->lock, flags); 566 spin_lock_irqsave(ap->lock, flags);
567 567
568 ata_port_freeze(ap); /* won't be thawed */ 568 ata_port_freeze(ap); /* won't be thawed */
569 ap->pflags &= ~ATA_PFLAG_EH_PENDING; /* clear pending from freeze */ 569 ap->pflags &= ~ATA_PFLAG_EH_PENDING; /* clear pending from freeze */
570 ap->pflags |= ATA_PFLAG_UNLOADED; 570 ap->pflags |= ATA_PFLAG_UNLOADED;
571 571
572 spin_unlock_irqrestore(ap->lock, flags); 572 spin_unlock_irqrestore(ap->lock, flags);
573 } 573 }
574 574
575 /** 575 /**
576 * ata_scsi_error - SCSI layer error handler callback 576 * ata_scsi_error - SCSI layer error handler callback
577 * @host: SCSI host on which error occurred 577 * @host: SCSI host on which error occurred
578 * 578 *
579 * Handles SCSI-layer-thrown error events. 579 * Handles SCSI-layer-thrown error events.
580 * 580 *
581 * LOCKING: 581 * LOCKING:
582 * Inherited from SCSI layer (none, can sleep) 582 * Inherited from SCSI layer (none, can sleep)
583 * 583 *
584 * RETURNS: 584 * RETURNS:
585 * Zero. 585 * Zero.
586 */ 586 */
587 void ata_scsi_error(struct Scsi_Host *host) 587 void ata_scsi_error(struct Scsi_Host *host)
588 { 588 {
589 struct ata_port *ap = ata_shost_to_port(host); 589 struct ata_port *ap = ata_shost_to_port(host);
590 int i; 590 int i;
591 unsigned long flags; 591 unsigned long flags;
592 592
593 DPRINTK("ENTER\n"); 593 DPRINTK("ENTER\n");
594 594
595 /* make sure sff pio task is not running */ 595 /* make sure sff pio task is not running */
596 ata_sff_flush_pio_task(ap); 596 ata_sff_flush_pio_task(ap);
597 597
598 /* synchronize with host lock and sort out timeouts */ 598 /* synchronize with host lock and sort out timeouts */
599 599
600 /* For new EH, all qcs are finished in one of three ways - 600 /* For new EH, all qcs are finished in one of three ways -
601 * normal completion, error completion, and SCSI timeout. 601 * normal completion, error completion, and SCSI timeout.
602 * Both completions can race against SCSI timeout. When normal 602 * Both completions can race against SCSI timeout. When normal
603 * completion wins, the qc never reaches EH. When error 603 * completion wins, the qc never reaches EH. When error
604 * completion wins, the qc has ATA_QCFLAG_FAILED set. 604 * completion wins, the qc has ATA_QCFLAG_FAILED set.
605 * 605 *
606 * When SCSI timeout wins, things are a bit more complex. 606 * When SCSI timeout wins, things are a bit more complex.
607 * Normal or error completion can occur after the timeout but 607 * Normal or error completion can occur after the timeout but
608 * before this point. In such cases, both types of 608 * before this point. In such cases, both types of
609 * completions are honored. A scmd is determined to have 609 * completions are honored. A scmd is determined to have
610 * timed out iff its associated qc is active and not failed. 610 * timed out iff its associated qc is active and not failed.
611 */ 611 */
612 if (ap->ops->error_handler) { 612 if (ap->ops->error_handler) {
613 struct scsi_cmnd *scmd, *tmp; 613 struct scsi_cmnd *scmd, *tmp;
614 int nr_timedout = 0; 614 int nr_timedout = 0;
615 615
616 spin_lock_irqsave(ap->lock, flags); 616 spin_lock_irqsave(ap->lock, flags);
617 617
618 /* This must occur under the ap->lock as we don't want 618 /* This must occur under the ap->lock as we don't want
619 a polled recovery to race the real interrupt handler 619 a polled recovery to race the real interrupt handler
620 620
621 The lost_interrupt handler checks for any completed but 621 The lost_interrupt handler checks for any completed but
622 non-notified command and completes much like an IRQ handler. 622 non-notified command and completes much like an IRQ handler.
623 623
624 We then fall into the error recovery code which will treat 624 We then fall into the error recovery code which will treat
625 this as if normal completion won the race */ 625 this as if normal completion won the race */
626 626
627 if (ap->ops->lost_interrupt) 627 if (ap->ops->lost_interrupt)
628 ap->ops->lost_interrupt(ap); 628 ap->ops->lost_interrupt(ap);
629 629
630 list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) { 630 list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
631 struct ata_queued_cmd *qc; 631 struct ata_queued_cmd *qc;
632 632
633 for (i = 0; i < ATA_MAX_QUEUE; i++) { 633 for (i = 0; i < ATA_MAX_QUEUE; i++) {
634 qc = __ata_qc_from_tag(ap, i); 634 qc = __ata_qc_from_tag(ap, i);
635 if (qc->flags & ATA_QCFLAG_ACTIVE && 635 if (qc->flags & ATA_QCFLAG_ACTIVE &&
636 qc->scsicmd == scmd) 636 qc->scsicmd == scmd)
637 break; 637 break;
638 } 638 }
639 639
640 if (i < ATA_MAX_QUEUE) { 640 if (i < ATA_MAX_QUEUE) {
641 /* the scmd has an associated qc */ 641 /* the scmd has an associated qc */
642 if (!(qc->flags & ATA_QCFLAG_FAILED)) { 642 if (!(qc->flags & ATA_QCFLAG_FAILED)) {
643 /* which hasn't failed yet, timeout */ 643 /* which hasn't failed yet, timeout */
644 qc->err_mask |= AC_ERR_TIMEOUT; 644 qc->err_mask |= AC_ERR_TIMEOUT;
645 qc->flags |= ATA_QCFLAG_FAILED; 645 qc->flags |= ATA_QCFLAG_FAILED;
646 nr_timedout++; 646 nr_timedout++;
647 } 647 }
648 } else { 648 } else {
649 /* Normal completion occurred after 649 /* Normal completion occurred after
650 * SCSI timeout but before this point. 650 * SCSI timeout but before this point.
651 * Successfully complete it. 651 * Successfully complete it.
652 */ 652 */
653 scmd->retries = scmd->allowed; 653 scmd->retries = scmd->allowed;
654 scsi_eh_finish_cmd(scmd, &ap->eh_done_q); 654 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
655 } 655 }
656 } 656 }
657 657
658 /* If we have timed out qcs. They belong to EH from 658 /* If we have timed out qcs. They belong to EH from
659 * this point but the state of the controller is 659 * this point but the state of the controller is
660 * unknown. Freeze the port to make sure the IRQ 660 * unknown. Freeze the port to make sure the IRQ
661 * handler doesn't diddle with those qcs. This must 661 * handler doesn't diddle with those qcs. This must
662 * be done atomically w.r.t. setting QCFLAG_FAILED. 662 * be done atomically w.r.t. setting QCFLAG_FAILED.
663 */ 663 */
664 if (nr_timedout) 664 if (nr_timedout)
665 __ata_port_freeze(ap); 665 __ata_port_freeze(ap);
666 666
667 spin_unlock_irqrestore(ap->lock, flags); 667 spin_unlock_irqrestore(ap->lock, flags);
668 668
669 /* initialize eh_tries */ 669 /* initialize eh_tries */
670 ap->eh_tries = ATA_EH_MAX_TRIES; 670 ap->eh_tries = ATA_EH_MAX_TRIES;
671 } else 671 } else
672 spin_unlock_wait(ap->lock); 672 spin_unlock_wait(ap->lock);
673 673
674 /* If we timed raced normal completion and there is nothing to 674 /* If we timed raced normal completion and there is nothing to
675 recover nr_timedout == 0 why exactly are we doing error recovery ? */ 675 recover nr_timedout == 0 why exactly are we doing error recovery ? */
676 676
677 /* invoke error handler */ 677 /* invoke error handler */
678 if (ap->ops->error_handler) { 678 if (ap->ops->error_handler) {
679 struct ata_link *link; 679 struct ata_link *link;
680 680
681 /* acquire EH ownership */ 681 /* acquire EH ownership */
682 ata_eh_acquire(ap); 682 ata_eh_acquire(ap);
683 repeat: 683 repeat:
684 /* kill fast drain timer */ 684 /* kill fast drain timer */
685 del_timer_sync(&ap->fastdrain_timer); 685 del_timer_sync(&ap->fastdrain_timer);
686 686
687 /* process port resume request */ 687 /* process port resume request */
688 ata_eh_handle_port_resume(ap); 688 ata_eh_handle_port_resume(ap);
689 689
690 /* fetch & clear EH info */ 690 /* fetch & clear EH info */
691 spin_lock_irqsave(ap->lock, flags); 691 spin_lock_irqsave(ap->lock, flags);
692 692
693 ata_for_each_link(link, ap, HOST_FIRST) { 693 ata_for_each_link(link, ap, HOST_FIRST) {
694 struct ata_eh_context *ehc = &link->eh_context; 694 struct ata_eh_context *ehc = &link->eh_context;
695 struct ata_device *dev; 695 struct ata_device *dev;
696 696
697 memset(&link->eh_context, 0, sizeof(link->eh_context)); 697 memset(&link->eh_context, 0, sizeof(link->eh_context));
698 link->eh_context.i = link->eh_info; 698 link->eh_context.i = link->eh_info;
699 memset(&link->eh_info, 0, sizeof(link->eh_info)); 699 memset(&link->eh_info, 0, sizeof(link->eh_info));
700 700
701 ata_for_each_dev(dev, link, ENABLED) { 701 ata_for_each_dev(dev, link, ENABLED) {
702 int devno = dev->devno; 702 int devno = dev->devno;
703 703
704 ehc->saved_xfer_mode[devno] = dev->xfer_mode; 704 ehc->saved_xfer_mode[devno] = dev->xfer_mode;
705 if (ata_ncq_enabled(dev)) 705 if (ata_ncq_enabled(dev))
706 ehc->saved_ncq_enabled |= 1 << devno; 706 ehc->saved_ncq_enabled |= 1 << devno;
707 } 707 }
708 } 708 }
709 709
710 ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS; 710 ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
711 ap->pflags &= ~ATA_PFLAG_EH_PENDING; 711 ap->pflags &= ~ATA_PFLAG_EH_PENDING;
712 ap->excl_link = NULL; /* don't maintain exclusion over EH */ 712 ap->excl_link = NULL; /* don't maintain exclusion over EH */
713 713
714 spin_unlock_irqrestore(ap->lock, flags); 714 spin_unlock_irqrestore(ap->lock, flags);
715 715
716 /* invoke EH, skip if unloading or suspended */ 716 /* invoke EH, skip if unloading or suspended */
717 if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED))) 717 if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
718 ap->ops->error_handler(ap); 718 ap->ops->error_handler(ap);
719 else { 719 else {
720 /* if unloading, commence suicide */ 720 /* if unloading, commence suicide */
721 if ((ap->pflags & ATA_PFLAG_UNLOADING) && 721 if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
722 !(ap->pflags & ATA_PFLAG_UNLOADED)) 722 !(ap->pflags & ATA_PFLAG_UNLOADED))
723 ata_eh_unload(ap); 723 ata_eh_unload(ap);
724 ata_eh_finish(ap); 724 ata_eh_finish(ap);
725 } 725 }
726 726
727 /* process port suspend request */ 727 /* process port suspend request */
728 ata_eh_handle_port_suspend(ap); 728 ata_eh_handle_port_suspend(ap);
729 729
730 /* Exception might have happend after ->error_handler 730 /* Exception might have happend after ->error_handler
731 * recovered the port but before this point. Repeat 731 * recovered the port but before this point. Repeat
732 * EH in such case. 732 * EH in such case.
733 */ 733 */
734 spin_lock_irqsave(ap->lock, flags); 734 spin_lock_irqsave(ap->lock, flags);
735 735
736 if (ap->pflags & ATA_PFLAG_EH_PENDING) { 736 if (ap->pflags & ATA_PFLAG_EH_PENDING) {
737 if (--ap->eh_tries) { 737 if (--ap->eh_tries) {
738 spin_unlock_irqrestore(ap->lock, flags); 738 spin_unlock_irqrestore(ap->lock, flags);
739 goto repeat; 739 goto repeat;
740 } 740 }
741 ata_port_printk(ap, KERN_ERR, "EH pending after %d " 741 ata_port_printk(ap, KERN_ERR, "EH pending after %d "
742 "tries, giving up\n", ATA_EH_MAX_TRIES); 742 "tries, giving up\n", ATA_EH_MAX_TRIES);
743 ap->pflags &= ~ATA_PFLAG_EH_PENDING; 743 ap->pflags &= ~ATA_PFLAG_EH_PENDING;
744 } 744 }
745 745
746 /* this run is complete, make sure EH info is clear */ 746 /* this run is complete, make sure EH info is clear */
747 ata_for_each_link(link, ap, HOST_FIRST) 747 ata_for_each_link(link, ap, HOST_FIRST)
748 memset(&link->eh_info, 0, sizeof(link->eh_info)); 748 memset(&link->eh_info, 0, sizeof(link->eh_info));
749 749
750 /* Clear host_eh_scheduled while holding ap->lock such 750 /* Clear host_eh_scheduled while holding ap->lock such
751 * that if exception occurs after this point but 751 * that if exception occurs after this point but
752 * before EH completion, SCSI midlayer will 752 * before EH completion, SCSI midlayer will
753 * re-initiate EH. 753 * re-initiate EH.
754 */ 754 */
755 host->host_eh_scheduled = 0; 755 host->host_eh_scheduled = 0;
756 756
757 spin_unlock_irqrestore(ap->lock, flags); 757 spin_unlock_irqrestore(ap->lock, flags);
758 ata_eh_release(ap); 758 ata_eh_release(ap);
759 } else { 759 } else {
760 WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL); 760 WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
761 ap->ops->eng_timeout(ap); 761 ap->ops->eng_timeout(ap);
762 } 762 }
763 763
764 /* finish or retry handled scmd's and clean up */ 764 /* finish or retry handled scmd's and clean up */
765 WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q)); 765 WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
766 766
767 scsi_eh_flush_done_q(&ap->eh_done_q); 767 scsi_eh_flush_done_q(&ap->eh_done_q);
768 768
769 /* clean up */ 769 /* clean up */
770 spin_lock_irqsave(ap->lock, flags); 770 spin_lock_irqsave(ap->lock, flags);
771 771
772 if (ap->pflags & ATA_PFLAG_LOADING) 772 if (ap->pflags & ATA_PFLAG_LOADING)
773 ap->pflags &= ~ATA_PFLAG_LOADING; 773 ap->pflags &= ~ATA_PFLAG_LOADING;
774 else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) 774 else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
775 schedule_delayed_work(&ap->hotplug_task, 0); 775 schedule_delayed_work(&ap->hotplug_task, 0);
776 776
777 if (ap->pflags & ATA_PFLAG_RECOVERED) 777 if (ap->pflags & ATA_PFLAG_RECOVERED)
778 ata_port_printk(ap, KERN_INFO, "EH complete\n"); 778 ata_port_printk(ap, KERN_INFO, "EH complete\n");
779 779
780 ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED); 780 ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);
781 781
782 /* tell wait_eh that we're done */ 782 /* tell wait_eh that we're done */
783 ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS; 783 ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
784 wake_up_all(&ap->eh_wait_q); 784 wake_up_all(&ap->eh_wait_q);
785 785
786 spin_unlock_irqrestore(ap->lock, flags); 786 spin_unlock_irqrestore(ap->lock, flags);
787 787
788 DPRINTK("EXIT\n"); 788 DPRINTK("EXIT\n");
789 } 789 }
790 790
791 /** 791 /**
792 * ata_port_wait_eh - Wait for the currently pending EH to complete 792 * ata_port_wait_eh - Wait for the currently pending EH to complete
793 * @ap: Port to wait EH for 793 * @ap: Port to wait EH for
794 * 794 *
795 * Wait until the currently pending EH is complete. 795 * Wait until the currently pending EH is complete.
796 * 796 *
797 * LOCKING: 797 * LOCKING:
798 * Kernel thread context (may sleep). 798 * Kernel thread context (may sleep).
799 */ 799 */
800 void ata_port_wait_eh(struct ata_port *ap) 800 void ata_port_wait_eh(struct ata_port *ap)
801 { 801 {
802 unsigned long flags; 802 unsigned long flags;
803 DEFINE_WAIT(wait); 803 DEFINE_WAIT(wait);
804 804
805 retry: 805 retry:
806 spin_lock_irqsave(ap->lock, flags); 806 spin_lock_irqsave(ap->lock, flags);
807 807
808 while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) { 808 while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
809 prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE); 809 prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
810 spin_unlock_irqrestore(ap->lock, flags); 810 spin_unlock_irqrestore(ap->lock, flags);
811 schedule(); 811 schedule();
812 spin_lock_irqsave(ap->lock, flags); 812 spin_lock_irqsave(ap->lock, flags);
813 } 813 }
814 finish_wait(&ap->eh_wait_q, &wait); 814 finish_wait(&ap->eh_wait_q, &wait);
815 815
816 spin_unlock_irqrestore(ap->lock, flags); 816 spin_unlock_irqrestore(ap->lock, flags);
817 817
818 /* make sure SCSI EH is complete */ 818 /* make sure SCSI EH is complete */
819 if (scsi_host_in_recovery(ap->scsi_host)) { 819 if (scsi_host_in_recovery(ap->scsi_host)) {
820 ata_msleep(ap, 10); 820 ata_msleep(ap, 10);
821 goto retry; 821 goto retry;
822 } 822 }
823 } 823 }
824 824
825 static int ata_eh_nr_in_flight(struct ata_port *ap) 825 static int ata_eh_nr_in_flight(struct ata_port *ap)
826 { 826 {
827 unsigned int tag; 827 unsigned int tag;
828 int nr = 0; 828 int nr = 0;
829 829
830 /* count only non-internal commands */ 830 /* count only non-internal commands */
831 for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) 831 for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
832 if (ata_qc_from_tag(ap, tag)) 832 if (ata_qc_from_tag(ap, tag))
833 nr++; 833 nr++;
834 834
835 return nr; 835 return nr;
836 } 836 }
837 837
838 void ata_eh_fastdrain_timerfn(unsigned long arg) 838 void ata_eh_fastdrain_timerfn(unsigned long arg)
839 { 839 {
840 struct ata_port *ap = (void *)arg; 840 struct ata_port *ap = (void *)arg;
841 unsigned long flags; 841 unsigned long flags;
842 int cnt; 842 int cnt;
843 843
844 spin_lock_irqsave(ap->lock, flags); 844 spin_lock_irqsave(ap->lock, flags);
845 845
846 cnt = ata_eh_nr_in_flight(ap); 846 cnt = ata_eh_nr_in_flight(ap);
847 847
848 /* are we done? */ 848 /* are we done? */
849 if (!cnt) 849 if (!cnt)
850 goto out_unlock; 850 goto out_unlock;
851 851
852 if (cnt == ap->fastdrain_cnt) { 852 if (cnt == ap->fastdrain_cnt) {
853 unsigned int tag; 853 unsigned int tag;
854 854
855 /* No progress during the last interval, tag all 855 /* No progress during the last interval, tag all
856 * in-flight qcs as timed out and freeze the port. 856 * in-flight qcs as timed out and freeze the port.
857 */ 857 */
858 for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) { 858 for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
859 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); 859 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
860 if (qc) 860 if (qc)
861 qc->err_mask |= AC_ERR_TIMEOUT; 861 qc->err_mask |= AC_ERR_TIMEOUT;
862 } 862 }
863 863
864 ata_port_freeze(ap); 864 ata_port_freeze(ap);
865 } else { 865 } else {
866 /* some qcs have finished, give it another chance */ 866 /* some qcs have finished, give it another chance */
867 ap->fastdrain_cnt = cnt; 867 ap->fastdrain_cnt = cnt;
868 ap->fastdrain_timer.expires = 868 ap->fastdrain_timer.expires =
869 ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL); 869 ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
870 add_timer(&ap->fastdrain_timer); 870 add_timer(&ap->fastdrain_timer);
871 } 871 }
872 872
873 out_unlock: 873 out_unlock:
874 spin_unlock_irqrestore(ap->lock, flags); 874 spin_unlock_irqrestore(ap->lock, flags);
875 } 875 }
876 876
877 /** 877 /**
878 * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain 878 * ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
879 * @ap: target ATA port 879 * @ap: target ATA port
880 * @fastdrain: activate fast drain 880 * @fastdrain: activate fast drain
881 * 881 *
882 * Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain 882 * Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
883 * is non-zero and EH wasn't pending before. Fast drain ensures 883 * is non-zero and EH wasn't pending before. Fast drain ensures
884 * that EH kicks in in timely manner. 884 * that EH kicks in in timely manner.
885 * 885 *
886 * LOCKING: 886 * LOCKING:
887 * spin_lock_irqsave(host lock) 887 * spin_lock_irqsave(host lock)
888 */ 888 */
889 static void ata_eh_set_pending(struct ata_port *ap, int fastdrain) 889 static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
890 { 890 {
891 int cnt; 891 int cnt;
892 892
893 /* already scheduled? */ 893 /* already scheduled? */
894 if (ap->pflags & ATA_PFLAG_EH_PENDING) 894 if (ap->pflags & ATA_PFLAG_EH_PENDING)
895 return; 895 return;
896 896
897 ap->pflags |= ATA_PFLAG_EH_PENDING; 897 ap->pflags |= ATA_PFLAG_EH_PENDING;
898 898
899 if (!fastdrain) 899 if (!fastdrain)
900 return; 900 return;
901 901
902 /* do we have in-flight qcs? */ 902 /* do we have in-flight qcs? */
903 cnt = ata_eh_nr_in_flight(ap); 903 cnt = ata_eh_nr_in_flight(ap);
904 if (!cnt) 904 if (!cnt)
905 return; 905 return;
906 906
907 /* activate fast drain */ 907 /* activate fast drain */
908 ap->fastdrain_cnt = cnt; 908 ap->fastdrain_cnt = cnt;
909 ap->fastdrain_timer.expires = 909 ap->fastdrain_timer.expires =
910 ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL); 910 ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
911 add_timer(&ap->fastdrain_timer); 911 add_timer(&ap->fastdrain_timer);
912 } 912 }
913 913
914 /** 914 /**
915 * ata_qc_schedule_eh - schedule qc for error handling 915 * ata_qc_schedule_eh - schedule qc for error handling
916 * @qc: command to schedule error handling for 916 * @qc: command to schedule error handling for
917 * 917 *
918 * Schedule error handling for @qc. EH will kick in as soon as 918 * Schedule error handling for @qc. EH will kick in as soon as
919 * other commands are drained. 919 * other commands are drained.
920 * 920 *
921 * LOCKING: 921 * LOCKING:
922 * spin_lock_irqsave(host lock) 922 * spin_lock_irqsave(host lock)
923 */ 923 */
924 void ata_qc_schedule_eh(struct ata_queued_cmd *qc) 924 void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
925 { 925 {
926 struct ata_port *ap = qc->ap; 926 struct ata_port *ap = qc->ap;
927 struct request_queue *q = qc->scsicmd->device->request_queue; 927 struct request_queue *q = qc->scsicmd->device->request_queue;
928 unsigned long flags; 928 unsigned long flags;
929 929
930 WARN_ON(!ap->ops->error_handler); 930 WARN_ON(!ap->ops->error_handler);
931 931
932 qc->flags |= ATA_QCFLAG_FAILED; 932 qc->flags |= ATA_QCFLAG_FAILED;
933 ata_eh_set_pending(ap, 1); 933 ata_eh_set_pending(ap, 1);
934 934
935 /* The following will fail if timeout has already expired. 935 /* The following will fail if timeout has already expired.
936 * ata_scsi_error() takes care of such scmds on EH entry. 936 * ata_scsi_error() takes care of such scmds on EH entry.
937 * Note that ATA_QCFLAG_FAILED is unconditionally set after 937 * Note that ATA_QCFLAG_FAILED is unconditionally set after
938 * this function completes. 938 * this function completes.
939 */ 939 */
940 spin_lock_irqsave(q->queue_lock, flags); 940 spin_lock_irqsave(q->queue_lock, flags);
941 blk_abort_request(qc->scsicmd->request); 941 blk_abort_request(qc->scsicmd->request);
942 spin_unlock_irqrestore(q->queue_lock, flags); 942 spin_unlock_irqrestore(q->queue_lock, flags);
943 } 943 }
944 944
945 /** 945 /**
946 * ata_port_schedule_eh - schedule error handling without a qc 946 * ata_port_schedule_eh - schedule error handling without a qc
947 * @ap: ATA port to schedule EH for 947 * @ap: ATA port to schedule EH for
948 * 948 *
949 * Schedule error handling for @ap. EH will kick in as soon as 949 * Schedule error handling for @ap. EH will kick in as soon as
950 * all commands are drained. 950 * all commands are drained.
951 * 951 *
952 * LOCKING: 952 * LOCKING:
953 * spin_lock_irqsave(host lock) 953 * spin_lock_irqsave(host lock)
954 */ 954 */
955 void ata_port_schedule_eh(struct ata_port *ap) 955 void ata_port_schedule_eh(struct ata_port *ap)
956 { 956 {
957 WARN_ON(!ap->ops->error_handler); 957 WARN_ON(!ap->ops->error_handler);
958 958
959 if (ap->pflags & ATA_PFLAG_INITIALIZING) 959 if (ap->pflags & ATA_PFLAG_INITIALIZING)
960 return; 960 return;
961 961
962 ata_eh_set_pending(ap, 1); 962 ata_eh_set_pending(ap, 1);
963 scsi_schedule_eh(ap->scsi_host); 963 scsi_schedule_eh(ap->scsi_host);
964 964
965 DPRINTK("port EH scheduled\n"); 965 DPRINTK("port EH scheduled\n");
966 } 966 }
967 967
968 static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link) 968 static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
969 { 969 {
970 int tag, nr_aborted = 0; 970 int tag, nr_aborted = 0;
971 971
972 WARN_ON(!ap->ops->error_handler); 972 WARN_ON(!ap->ops->error_handler);
973 973
974 /* we're gonna abort all commands, no need for fast drain */ 974 /* we're gonna abort all commands, no need for fast drain */
975 ata_eh_set_pending(ap, 0); 975 ata_eh_set_pending(ap, 0);
976 976
977 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 977 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
978 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); 978 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
979 979
980 if (qc && (!link || qc->dev->link == link)) { 980 if (qc && (!link || qc->dev->link == link)) {
981 qc->flags |= ATA_QCFLAG_FAILED; 981 qc->flags |= ATA_QCFLAG_FAILED;
982 ata_qc_complete(qc); 982 ata_qc_complete(qc);
983 nr_aborted++; 983 nr_aborted++;
984 } 984 }
985 } 985 }
986 986
987 if (!nr_aborted) 987 if (!nr_aborted)
988 ata_port_schedule_eh(ap); 988 ata_port_schedule_eh(ap);
989 989
990 return nr_aborted; 990 return nr_aborted;
991 } 991 }
992 992
993 /** 993 /**
994 * ata_link_abort - abort all qc's on the link 994 * ata_link_abort - abort all qc's on the link
995 * @link: ATA link to abort qc's for 995 * @link: ATA link to abort qc's for
996 * 996 *
997 * Abort all active qc's on @link and schedule EH. 997 * Abort all active qc's on @link and schedule EH.
998 * 998 *
999 * LOCKING: 999 * LOCKING:
1000 * spin_lock_irqsave(host lock) 1000 * spin_lock_irqsave(host lock)
1001 * 1001 *
1002 * RETURNS: 1002 * RETURNS:
1003 * Number of aborted qc's. 1003 * Number of aborted qc's.
1004 */ 1004 */
1005 int ata_link_abort(struct ata_link *link) 1005 int ata_link_abort(struct ata_link *link)
1006 { 1006 {
1007 return ata_do_link_abort(link->ap, link); 1007 return ata_do_link_abort(link->ap, link);
1008 } 1008 }
1009 1009
1010 /** 1010 /**
1011 * ata_port_abort - abort all qc's on the port 1011 * ata_port_abort - abort all qc's on the port
1012 * @ap: ATA port to abort qc's for 1012 * @ap: ATA port to abort qc's for
1013 * 1013 *
1014 * Abort all active qc's of @ap and schedule EH. 1014 * Abort all active qc's of @ap and schedule EH.
1015 * 1015 *
1016 * LOCKING: 1016 * LOCKING:
1017 * spin_lock_irqsave(host lock) 1017 * spin_lock_irqsave(host lock)
1018 * 1018 *
1019 * RETURNS: 1019 * RETURNS:
1020 * Number of aborted qc's. 1020 * Number of aborted qc's.
1021 */ 1021 */
1022 int ata_port_abort(struct ata_port *ap) 1022 int ata_port_abort(struct ata_port *ap)
1023 { 1023 {
1024 return ata_do_link_abort(ap, NULL); 1024 return ata_do_link_abort(ap, NULL);
1025 } 1025 }
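
The two wrappers above differ only in the scope passed to
ata_do_link_abort(). A sketch of how a PMP-aware error path might pick
between them (the choice logic is illustrative):

static int example_abort(struct ata_port *ap, struct ata_link *link)
{
	/* if nothing is aborted, ata_do_link_abort() has already
	 * scheduled port-level EH, so no further action is needed
	 */
	if (link)
		return ata_link_abort(link);	/* this link's qc's only */
	return ata_port_abort(ap);		/* every qc on the port */
}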
1026 1026
1027 /** 1027 /**
1028 * __ata_port_freeze - freeze port 1028 * __ata_port_freeze - freeze port
1029 * @ap: ATA port to freeze 1029 * @ap: ATA port to freeze
1030 * 1030 *
1031 * This function is called when HSM violation or some other 1031 * This function is called when HSM violation or some other
1032 * condition disrupts normal operation of the port. Frozen port 1032 * condition disrupts normal operation of the port. Frozen port
1033 * is not allowed to perform any operation until the port is 1033 * is not allowed to perform any operation until the port is
1034 * thawed, which usually follows a successful reset. 1034 * thawed, which usually follows a successful reset.
1035 * 1035 *
1036 * ap->ops->freeze() callback can be used for freezing the port 1036 * ap->ops->freeze() callback can be used for freezing the port
1037 * hardware-wise (e.g. mask interrupt and stop DMA engine). If a 1037 * hardware-wise (e.g. mask interrupt and stop DMA engine). If a
1038 * port cannot be frozen hardware-wise, the interrupt handler 1038 * port cannot be frozen hardware-wise, the interrupt handler
1039 * must ack and clear interrupts unconditionally while the port 1039 * must ack and clear interrupts unconditionally while the port
1040 * is frozen. 1040 * is frozen.
1041 * 1041 *
1042 * LOCKING: 1042 * LOCKING:
1043 * spin_lock_irqsave(host lock) 1043 * spin_lock_irqsave(host lock)
1044 */ 1044 */
1045 static void __ata_port_freeze(struct ata_port *ap) 1045 static void __ata_port_freeze(struct ata_port *ap)
1046 { 1046 {
1047 WARN_ON(!ap->ops->error_handler); 1047 WARN_ON(!ap->ops->error_handler);
1048 1048
1049 if (ap->ops->freeze) 1049 if (ap->ops->freeze)
1050 ap->ops->freeze(ap); 1050 ap->ops->freeze(ap);
1051 1051
1052 ap->pflags |= ATA_PFLAG_FROZEN; 1052 ap->pflags |= ATA_PFLAG_FROZEN;
1053 1053
1054 DPRINTK("ata%u port frozen\n", ap->print_id); 1054 DPRINTK("ata%u port frozen\n", ap->print_id);
1055 } 1055 }
1056 1056
1057 /** 1057 /**
1058 * ata_port_freeze - abort & freeze port 1058 * ata_port_freeze - abort & freeze port
1059 * @ap: ATA port to freeze 1059 * @ap: ATA port to freeze
1060 * 1060 *
1061 * Abort and freeze @ap. The freeze operation must be called 1061 * Abort and freeze @ap. The freeze operation must be called
1062 * first, because some hardware requires special operations 1062 * first, because some hardware requires special operations
1063 * before the taskfile registers are accessible. 1063 * before the taskfile registers are accessible.
1064 * 1064 *
1065 * LOCKING: 1065 * LOCKING:
1066 * spin_lock_irqsave(host lock) 1066 * spin_lock_irqsave(host lock)
1067 * 1067 *
1068 * RETURNS: 1068 * RETURNS:
1069 * Number of aborted commands. 1069 * Number of aborted commands.
1070 */ 1070 */
1071 int ata_port_freeze(struct ata_port *ap) 1071 int ata_port_freeze(struct ata_port *ap)
1072 { 1072 {
1073 int nr_aborted; 1073 int nr_aborted;
1074 1074
1075 WARN_ON(!ap->ops->error_handler); 1075 WARN_ON(!ap->ops->error_handler);
1076 1076
1077 __ata_port_freeze(ap); 1077 __ata_port_freeze(ap);
1078 nr_aborted = ata_port_abort(ap); 1078 nr_aborted = ata_port_abort(ap);
1079 1079
1080 return nr_aborted; 1080 return nr_aborted;
1081 } 1081 }
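
A common use is the fatal-interrupt pattern: freeze so the hardware stops
generating events, abort everything in flight, and let EH recover.
ata_port_freeze() bundles both steps; the status bit tested in this
sketch is made up:

static void example_irq_error(struct ata_port *ap, u32 irq_stat)
{
	if (irq_stat & (1 << 31))	/* hypothetical fatal-error bit */
		ata_port_freeze(ap);	/* freeze + abort all qc's */
}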
1082 1082
1083 /** 1083 /**
1084 * sata_async_notification - SATA async notification handler 1084 * sata_async_notification - SATA async notification handler
1085 * @ap: ATA port where async notification is received 1085 * @ap: ATA port where async notification is received
1086 * 1086 *
1087 * Handler to be called when async notification via SDB FIS is 1087 * Handler to be called when async notification via SDB FIS is
1088 * received. This function schedules EH if necessary. 1088 * received. This function schedules EH if necessary.
1089 * 1089 *
1090 * LOCKING: 1090 * LOCKING:
1091 * spin_lock_irqsave(host lock) 1091 * spin_lock_irqsave(host lock)
1092 * 1092 *
1093 * RETURNS: 1093 * RETURNS:
1094 * 1 if EH is scheduled, 0 otherwise. 1094 * 1 if EH is scheduled, 0 otherwise.
1095 */ 1095 */
1096 int sata_async_notification(struct ata_port *ap) 1096 int sata_async_notification(struct ata_port *ap)
1097 { 1097 {
1098 u32 sntf; 1098 u32 sntf;
1099 int rc; 1099 int rc;
1100 1100
1101 if (!(ap->flags & ATA_FLAG_AN)) 1101 if (!(ap->flags & ATA_FLAG_AN))
1102 return 0; 1102 return 0;
1103 1103
1104 rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf); 1104 rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
1105 if (rc == 0) 1105 if (rc == 0)
1106 sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf); 1106 sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);
1107 1107
1108 if (!sata_pmp_attached(ap) || rc) { 1108 if (!sata_pmp_attached(ap) || rc) {
1109 /* PMP is not attached or SNTF is not available */ 1109 /* PMP is not attached or SNTF is not available */
1110 if (!sata_pmp_attached(ap)) { 1110 if (!sata_pmp_attached(ap)) {
1111 /* PMP is not attached. Check whether ATAPI 1111 /* PMP is not attached. Check whether ATAPI
1112 * AN is configured. If so, notify media 1112 * AN is configured. If so, notify media
1113 * change. 1113 * change.
1114 */ 1114 */
1115 struct ata_device *dev = ap->link.device; 1115 struct ata_device *dev = ap->link.device;
1116 1116
1117 if ((dev->class == ATA_DEV_ATAPI) && 1117 if ((dev->class == ATA_DEV_ATAPI) &&
1118 (dev->flags & ATA_DFLAG_AN)) 1118 (dev->flags & ATA_DFLAG_AN))
1119 ata_scsi_media_change_notify(dev); 1119 ata_scsi_media_change_notify(dev);
1120 return 0; 1120 return 0;
1121 } else { 1121 } else {
1122 /* PMP is attached but SNTF is not available. 1122 /* PMP is attached but SNTF is not available.
1123 * ATAPI async media change notification is 1123 * ATAPI async media change notification is
1124 * not used. The PMP must be reporting PHY 1124 * not used. The PMP must be reporting PHY
1125 * status change, schedule EH. 1125 * status change, schedule EH.
1126 */ 1126 */
1127 ata_port_schedule_eh(ap); 1127 ata_port_schedule_eh(ap);
1128 return 1; 1128 return 1;
1129 } 1129 }
1130 } else { 1130 } else {
1131 /* PMP is attached and SNTF is available */ 1131 /* PMP is attached and SNTF is available */
1132 struct ata_link *link; 1132 struct ata_link *link;
1133 1133
1134 /* check and notify ATAPI AN */ 1134 /* check and notify ATAPI AN */
1135 ata_for_each_link(link, ap, EDGE) { 1135 ata_for_each_link(link, ap, EDGE) {
1136 if (!(sntf & (1 << link->pmp))) 1136 if (!(sntf & (1 << link->pmp)))
1137 continue; 1137 continue;
1138 1138
1139 if ((link->device->class == ATA_DEV_ATAPI) && 1139 if ((link->device->class == ATA_DEV_ATAPI) &&
1140 (link->device->flags & ATA_DFLAG_AN)) 1140 (link->device->flags & ATA_DFLAG_AN))
1141 ata_scsi_media_change_notify(link->device); 1141 ata_scsi_media_change_notify(link->device);
1142 } 1142 }
1143 1143
1144 /* If PMP is reporting that PHY status of some 1144 /* If PMP is reporting that PHY status of some
1145 * downstream ports has changed, schedule EH. 1145 * downstream ports has changed, schedule EH.
1146 */ 1146 */
1147 if (sntf & (1 << SATA_PMP_CTRL_PORT)) { 1147 if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
1148 ata_port_schedule_eh(ap); 1148 ata_port_schedule_eh(ap);
1149 return 1; 1149 return 1;
1150 } 1150 }
1151 1151
1152 return 0; 1152 return 0;
1153 } 1153 }
1154 } 1154 }
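
On the driver side this usually reduces to a one-line call in the Set
Device Bits FIS receive path. A sketch, with the FIS byte and bit
position stated as an assumption rather than fact:

static void example_sdb_fis_rx(struct ata_port *ap, const u8 *fis)
{
	/* assumed location of the notification ('N') bit in the FIS */
	if (fis[1] & 0x80)
		sata_async_notification(ap);
}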
1155 1155
1156 /** 1156 /**
1157 * ata_eh_freeze_port - EH helper to freeze port 1157 * ata_eh_freeze_port - EH helper to freeze port
1158 * @ap: ATA port to freeze 1158 * @ap: ATA port to freeze
1159 * 1159 *
1160 * Freeze @ap. 1160 * Freeze @ap.
1161 * 1161 *
1162 * LOCKING: 1162 * LOCKING:
1163 * None. 1163 * None.
1164 */ 1164 */
1165 void ata_eh_freeze_port(struct ata_port *ap) 1165 void ata_eh_freeze_port(struct ata_port *ap)
1166 { 1166 {
1167 unsigned long flags; 1167 unsigned long flags;
1168 1168
1169 if (!ap->ops->error_handler) 1169 if (!ap->ops->error_handler)
1170 return; 1170 return;
1171 1171
1172 spin_lock_irqsave(ap->lock, flags); 1172 spin_lock_irqsave(ap->lock, flags);
1173 __ata_port_freeze(ap); 1173 __ata_port_freeze(ap);
1174 spin_unlock_irqrestore(ap->lock, flags); 1174 spin_unlock_irqrestore(ap->lock, flags);
1175 } 1175 }
1176 1176
1177 /** 1177 /**
1178 * ata_eh_thaw_port - EH helper to thaw port 1178 * ata_eh_thaw_port - EH helper to thaw port
1179 * @ap: ATA port to thaw 1179 * @ap: ATA port to thaw
1180 * 1180 *
1181 * Thaw frozen port @ap. 1181 * Thaw frozen port @ap.
1182 * 1182 *
1183 * LOCKING: 1183 * LOCKING:
1184 * None. 1184 * None.
1185 */ 1185 */
1186 void ata_eh_thaw_port(struct ata_port *ap) 1186 void ata_eh_thaw_port(struct ata_port *ap)
1187 { 1187 {
1188 unsigned long flags; 1188 unsigned long flags;
1189 1189
1190 if (!ap->ops->error_handler) 1190 if (!ap->ops->error_handler)
1191 return; 1191 return;
1192 1192
1193 spin_lock_irqsave(ap->lock, flags); 1193 spin_lock_irqsave(ap->lock, flags);
1194 1194
1195 ap->pflags &= ~ATA_PFLAG_FROZEN; 1195 ap->pflags &= ~ATA_PFLAG_FROZEN;
1196 1196
1197 if (ap->ops->thaw) 1197 if (ap->ops->thaw)
1198 ap->ops->thaw(ap); 1198 ap->ops->thaw(ap);
1199 1199
1200 spin_unlock_irqrestore(ap->lock, flags); 1200 spin_unlock_irqrestore(ap->lock, flags);
1201 1201
1202 DPRINTK("ata%u port thawed\n", ap->print_id); 1202 DPRINTK("ata%u port thawed\n", ap->print_id);
1203 } 1203 }
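
The ops->freeze/ops->thaw callbacks invoked by the two helpers above
usually amount to interrupt-mask manipulation. A sketch with invented
register offsets and a placeholder use of private_data; real drivers use
their own MMIO layouts:

static void example_op_freeze(struct ata_port *ap)
{
	void __iomem *port_mmio = ap->private_data;	/* placeholder */

	iowrite32(0, port_mmio + 0x04);		/* mask all port IRQs */
}

static void example_op_thaw(struct ata_port *ap)
{
	void __iomem *port_mmio = ap->private_data;	/* placeholder */

	iowrite32(~0U, port_mmio + 0x00);	/* clear stale status */
	iowrite32(0x3, port_mmio + 0x04);	/* unmask wanted IRQs */
}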
1204 1204
1205 static void ata_eh_scsidone(struct scsi_cmnd *scmd) 1205 static void ata_eh_scsidone(struct scsi_cmnd *scmd)
1206 { 1206 {
1207 /* nada */ 1207 /* nada */
1208 } 1208 }
1209 1209
1210 static void __ata_eh_qc_complete(struct ata_queued_cmd *qc) 1210 static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
1211 { 1211 {
1212 struct ata_port *ap = qc->ap; 1212 struct ata_port *ap = qc->ap;
1213 struct scsi_cmnd *scmd = qc->scsicmd; 1213 struct scsi_cmnd *scmd = qc->scsicmd;
1214 unsigned long flags; 1214 unsigned long flags;
1215 1215
1216 spin_lock_irqsave(ap->lock, flags); 1216 spin_lock_irqsave(ap->lock, flags);
1217 qc->scsidone = ata_eh_scsidone; 1217 qc->scsidone = ata_eh_scsidone;
1218 __ata_qc_complete(qc); 1218 __ata_qc_complete(qc);
1219 WARN_ON(ata_tag_valid(qc->tag)); 1219 WARN_ON(ata_tag_valid(qc->tag));
1220 spin_unlock_irqrestore(ap->lock, flags); 1220 spin_unlock_irqrestore(ap->lock, flags);
1221 1221
1222 scsi_eh_finish_cmd(scmd, &ap->eh_done_q); 1222 scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
1223 } 1223 }
1224 1224
1225 /** 1225 /**
1226 * ata_eh_qc_complete - Complete an active ATA command from EH 1226 * ata_eh_qc_complete - Complete an active ATA command from EH
1227 * @qc: Command to complete 1227 * @qc: Command to complete
1228 * 1228 *
1229 * Indicate to the mid and upper layers that an ATA command has 1229 * Indicate to the mid and upper layers that an ATA command has
1230 * completed. To be used from EH. 1230 * completed. To be used from EH.
1231 */ 1231 */
1232 void ata_eh_qc_complete(struct ata_queued_cmd *qc) 1232 void ata_eh_qc_complete(struct ata_queued_cmd *qc)
1233 { 1233 {
1234 struct scsi_cmnd *scmd = qc->scsicmd; 1234 struct scsi_cmnd *scmd = qc->scsicmd;
1235 scmd->retries = scmd->allowed; 1235 scmd->retries = scmd->allowed;
1236 __ata_eh_qc_complete(qc); 1236 __ata_eh_qc_complete(qc);
1237 } 1237 }
1238 1238
1239 /** 1239 /**
1240 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH 1240 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
1241 * @qc: Command to retry 1241 * @qc: Command to retry
1242 * 1242 *
1243 * Indicate to the mid and upper layers that an ATA command 1243 * Indicate to the mid and upper layers that an ATA command
1244 * should be retried. To be used from EH. 1244 * should be retried. To be used from EH.
1245 * 1245 *
1246 * SCSI midlayer limits the number of retries to scmd->allowed. 1246 * SCSI midlayer limits the number of retries to scmd->allowed.
1247 * scmd->retries is decremented for commands which get retried 1247 * scmd->retries is decremented for commands which get retried
1248 * due to unrelated failures (qc->err_mask is zero). 1248 * due to unrelated failures (qc->err_mask is zero).
1249 */ 1249 */
1250 void ata_eh_qc_retry(struct ata_queued_cmd *qc) 1250 void ata_eh_qc_retry(struct ata_queued_cmd *qc)
1251 { 1251 {
1252 struct scsi_cmnd *scmd = qc->scsicmd; 1252 struct scsi_cmnd *scmd = qc->scsicmd;
1253 if (!qc->err_mask && scmd->retries) 1253 if (!qc->err_mask && scmd->retries)
1254 scmd->retries--; 1254 scmd->retries--;
1255 __ata_eh_qc_complete(qc); 1255 __ata_eh_qc_complete(qc);
1256 } 1256 }
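
Taken together, the pair gives an EH routine a simple disposition loop:
commands that actually failed are completed, commands merely caught up in
the storm are retried without consuming a retry. A sketch; note that
__ata_qc_from_tag() is used because failed qc's are hidden from
ata_qc_from_tag() once EH owns them:

static void example_finish(struct ata_port *ap)
{
	unsigned int tag;

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask)
			ata_eh_qc_complete(qc);
		else
			ata_eh_qc_retry(qc);
	}
}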
1257 1257
1258 /** 1258 /**
1259 * ata_dev_disable - disable ATA device 1259 * ata_dev_disable - disable ATA device
1260 * @dev: ATA device to disable 1260 * @dev: ATA device to disable
1261 * 1261 *
1262 * Disable @dev. 1262 * Disable @dev.
1263 * 1263 *
1264 * Locking: 1264 * Locking:
1265 * EH context. 1265 * EH context.
1266 */ 1266 */
1267 void ata_dev_disable(struct ata_device *dev) 1267 void ata_dev_disable(struct ata_device *dev)
1268 { 1268 {
1269 if (!ata_dev_enabled(dev)) 1269 if (!ata_dev_enabled(dev))
1270 return; 1270 return;
1271 1271
1272 if (ata_msg_drv(dev->link->ap)) 1272 if (ata_msg_drv(dev->link->ap))
1273 ata_dev_printk(dev, KERN_WARNING, "disabled\n"); 1273 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
1274 ata_acpi_on_disable(dev); 1274 ata_acpi_on_disable(dev);
1275 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET); 1275 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
1276 dev->class++; 1276 dev->class++;
1277 1277
1278 /* From now till the next successful probe, ering is used to 1278 /* From now till the next successful probe, ering is used to
1279 * track probe failures. Clear accumulated device error info. 1279 * track probe failures. Clear accumulated device error info.
1280 */ 1280 */
1281 ata_ering_clear(&dev->ering); 1281 ata_ering_clear(&dev->ering);
1282 } 1282 }
1283 1283
1284 /** 1284 /**
1285 * ata_eh_detach_dev - detach ATA device 1285 * ata_eh_detach_dev - detach ATA device
1286 * @dev: ATA device to detach 1286 * @dev: ATA device to detach
1287 * 1287 *
1288 * Detach @dev. 1288 * Detach @dev.
1289 * 1289 *
1290 * LOCKING: 1290 * LOCKING:
1291 * None. 1291 * None.
1292 */ 1292 */
1293 void ata_eh_detach_dev(struct ata_device *dev) 1293 void ata_eh_detach_dev(struct ata_device *dev)
1294 { 1294 {
1295 struct ata_link *link = dev->link; 1295 struct ata_link *link = dev->link;
1296 struct ata_port *ap = link->ap; 1296 struct ata_port *ap = link->ap;
1297 struct ata_eh_context *ehc = &link->eh_context; 1297 struct ata_eh_context *ehc = &link->eh_context;
1298 unsigned long flags; 1298 unsigned long flags;
1299 1299
1300 ata_dev_disable(dev); 1300 ata_dev_disable(dev);
1301 1301
1302 spin_lock_irqsave(ap->lock, flags); 1302 spin_lock_irqsave(ap->lock, flags);
1303 1303
1304 dev->flags &= ~ATA_DFLAG_DETACH; 1304 dev->flags &= ~ATA_DFLAG_DETACH;
1305 1305
1306 if (ata_scsi_offline_dev(dev)) { 1306 if (ata_scsi_offline_dev(dev)) {
1307 dev->flags |= ATA_DFLAG_DETACHED; 1307 dev->flags |= ATA_DFLAG_DETACHED;
1308 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 1308 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
1309 } 1309 }
1310 1310
1311 /* clear per-dev EH info */ 1311 /* clear per-dev EH info */
1312 ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK); 1312 ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
1313 ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK); 1313 ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
1314 ehc->saved_xfer_mode[dev->devno] = 0; 1314 ehc->saved_xfer_mode[dev->devno] = 0;
1315 ehc->saved_ncq_enabled &= ~(1 << dev->devno); 1315 ehc->saved_ncq_enabled &= ~(1 << dev->devno);
1316 1316
1317 spin_unlock_irqrestore(ap->lock, flags); 1317 spin_unlock_irqrestore(ap->lock, flags);
1318 } 1318 }
1319 1319
1320 /** 1320 /**
1321 * ata_eh_about_to_do - about to perform eh_action 1321 * ata_eh_about_to_do - about to perform eh_action
1322 * @link: target ATA link 1322 * @link: target ATA link
1323 * @dev: target ATA dev for per-dev action (can be NULL) 1323 * @dev: target ATA dev for per-dev action (can be NULL)
1324 * @action: action about to be performed 1324 * @action: action about to be performed
1325 * 1325 *
1326 * Called just before performing EH actions to clear related bits 1326 * Called just before performing EH actions to clear related bits
1327 * in @link->eh_info such that EH actions are not unnecessarily 1327 * in @link->eh_info such that EH actions are not unnecessarily
1328 * repeated. 1328 * repeated.
1329 * 1329 *
1330 * LOCKING: 1330 * LOCKING:
1331 * None. 1331 * None.
1332 */ 1332 */
1333 void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev, 1333 void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
1334 unsigned int action) 1334 unsigned int action)
1335 { 1335 {
1336 struct ata_port *ap = link->ap; 1336 struct ata_port *ap = link->ap;
1337 struct ata_eh_info *ehi = &link->eh_info; 1337 struct ata_eh_info *ehi = &link->eh_info;
1338 struct ata_eh_context *ehc = &link->eh_context; 1338 struct ata_eh_context *ehc = &link->eh_context;
1339 unsigned long flags; 1339 unsigned long flags;
1340 1340
1341 spin_lock_irqsave(ap->lock, flags); 1341 spin_lock_irqsave(ap->lock, flags);
1342 1342
1343 ata_eh_clear_action(link, dev, ehi, action); 1343 ata_eh_clear_action(link, dev, ehi, action);
1344 1344
1345 /* About to take EH action, set RECOVERED. Ignore actions on 1345 /* About to take EH action, set RECOVERED. Ignore actions on
1346 * slave links as master will do them again. 1346 * slave links as master will do them again.
1347 */ 1347 */
1348 if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link) 1348 if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
1349 ap->pflags |= ATA_PFLAG_RECOVERED; 1349 ap->pflags |= ATA_PFLAG_RECOVERED;
1350 1350
1351 spin_unlock_irqrestore(ap->lock, flags); 1351 spin_unlock_irqrestore(ap->lock, flags);
1352 } 1352 }
1353 1353
1354 /** 1354 /**
1355 * ata_eh_done - EH action complete 1355 * ata_eh_done - EH action complete
1356 * @link: target ATA link 1356 * @link: target ATA link
1357 * @dev: target ATA dev for per-dev action (can be NULL) 1357 * @dev: target ATA dev for per-dev action (can be NULL)
1358 * @action: action just completed 1358 * @action: action just completed
1359 * 1359 *
1360 * Called right after performing EH actions to clear related bits 1360 * Called right after performing EH actions to clear related bits
1361 * in @link->eh_context. 1361 * in @link->eh_context.
1362 * 1362 *
1363 * LOCKING: 1363 * LOCKING:
1364 * None. 1364 * None.
1365 */ 1365 */
1366 void ata_eh_done(struct ata_link *link, struct ata_device *dev, 1366 void ata_eh_done(struct ata_link *link, struct ata_device *dev,
1367 unsigned int action) 1367 unsigned int action)
1368 { 1368 {
1369 struct ata_eh_context *ehc = &link->eh_context; 1369 struct ata_eh_context *ehc = &link->eh_context;
1370 1370
1371 ata_eh_clear_action(link, dev, &ehc->i, action); 1371 ata_eh_clear_action(link, dev, &ehc->i, action);
1372 } 1372 }
1373 1373
1374 /** 1374 /**
1375 * ata_err_string - convert err_mask to descriptive string 1375 * ata_err_string - convert err_mask to descriptive string
1376 * @err_mask: error mask to convert to string 1376 * @err_mask: error mask to convert to string
1377 * 1377 *
1378 * Convert @err_mask to descriptive string. Errors are 1378 * Convert @err_mask to descriptive string. Errors are
1379 * prioritized according to severity and only the most severe 1379 * prioritized according to severity and only the most severe
1380 * error is reported. 1380 * error is reported.
1381 * 1381 *
1382 * LOCKING: 1382 * LOCKING:
1383 * None. 1383 * None.
1384 * 1384 *
1385 * RETURNS: 1385 * RETURNS:
1386 * Descriptive string for @err_mask 1386 * Descriptive string for @err_mask
1387 */ 1387 */
1388 static const char *ata_err_string(unsigned int err_mask) 1388 static const char *ata_err_string(unsigned int err_mask)
1389 { 1389 {
1390 if (err_mask & AC_ERR_HOST_BUS) 1390 if (err_mask & AC_ERR_HOST_BUS)
1391 return "host bus error"; 1391 return "host bus error";
1392 if (err_mask & AC_ERR_ATA_BUS) 1392 if (err_mask & AC_ERR_ATA_BUS)
1393 return "ATA bus error"; 1393 return "ATA bus error";
1394 if (err_mask & AC_ERR_TIMEOUT) 1394 if (err_mask & AC_ERR_TIMEOUT)
1395 return "timeout"; 1395 return "timeout";
1396 if (err_mask & AC_ERR_HSM) 1396 if (err_mask & AC_ERR_HSM)
1397 return "HSM violation"; 1397 return "HSM violation";
1398 if (err_mask & AC_ERR_SYSTEM) 1398 if (err_mask & AC_ERR_SYSTEM)
1399 return "internal error"; 1399 return "internal error";
1400 if (err_mask & AC_ERR_MEDIA) 1400 if (err_mask & AC_ERR_MEDIA)
1401 return "media error"; 1401 return "media error";
1402 if (err_mask & AC_ERR_INVALID) 1402 if (err_mask & AC_ERR_INVALID)
1403 return "invalid argument"; 1403 return "invalid argument";
1404 if (err_mask & AC_ERR_DEV) 1404 if (err_mask & AC_ERR_DEV)
1405 return "device error"; 1405 return "device error";
1406 return "unknown error"; 1406 return "unknown error";
1407 } 1407 }
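
A typical consumer just folds the string into a log line, as in this
trivial usage sketch:

static void example_report(struct ata_device *dev, unsigned int err_mask)
{
	ata_dev_printk(dev, KERN_ERR, "command failed (%s)\n",
		       ata_err_string(err_mask));
}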
1408 1408
1409 /** 1409 /**
1410 * ata_read_log_page - read a specific log page 1410 * ata_read_log_page - read a specific log page
1411 * @dev: target device 1411 * @dev: target device
1412 * @page: page to read 1412 * @page: page to read
1413 * @buf: buffer to store read page 1413 * @buf: buffer to store read page
1414 * @sectors: number of sectors to read 1414 * @sectors: number of sectors to read
1415 * 1415 *
1416 * Read log page using READ_LOG_EXT command. 1416 * Read log page using READ_LOG_EXT command.
1417 * 1417 *
1418 * LOCKING: 1418 * LOCKING:
1419 * Kernel thread context (may sleep). 1419 * Kernel thread context (may sleep).
1420 * 1420 *
1421 * RETURNS: 1421 * RETURNS:
1422 * 0 on success, AC_ERR_* mask otherwise. 1422 * 0 on success, AC_ERR_* mask otherwise.
1423 */ 1423 */
1424 static unsigned int ata_read_log_page(struct ata_device *dev, 1424 static unsigned int ata_read_log_page(struct ata_device *dev,
1425 u8 page, void *buf, unsigned int sectors) 1425 u8 page, void *buf, unsigned int sectors)
1426 { 1426 {
1427 struct ata_taskfile tf; 1427 struct ata_taskfile tf;
1428 unsigned int err_mask; 1428 unsigned int err_mask;
1429 1429
1430 DPRINTK("read log page - page %d\n", page); 1430 DPRINTK("read log page - page %d\n", page);
1431 1431
1432 ata_tf_init(dev, &tf); 1432 ata_tf_init(dev, &tf);
1433 tf.command = ATA_CMD_READ_LOG_EXT; 1433 tf.command = ATA_CMD_READ_LOG_EXT;
1434 tf.lbal = page; 1434 tf.lbal = page;
1435 tf.nsect = sectors; 1435 tf.nsect = sectors;
1436 tf.hob_nsect = sectors >> 8; 1436 tf.hob_nsect = sectors >> 8;
1437 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE; 1437 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
1438 tf.protocol = ATA_PROT_PIO; 1438 tf.protocol = ATA_PROT_PIO;
1439 1439
1440 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 1440 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1441 buf, sectors * ATA_SECT_SIZE, 0); 1441 buf, sectors * ATA_SECT_SIZE, 0);
1442 1442
1443 DPRINTK("EXIT, err_mask=%x\n", err_mask); 1443 DPRINTK("EXIT, err_mask=%x\n", err_mask);
1444 return err_mask; 1444 return err_mask;
1445 } 1445 }
1446 1446
1447 /** 1447 /**
1448 * ata_eh_read_log_10h - Read log page 10h for NCQ error details 1448 * ata_eh_read_log_10h - Read log page 10h for NCQ error details
1449 * @dev: Device to read log page 10h from 1449 * @dev: Device to read log page 10h from
1450 * @tag: Resulting tag of the failed command 1450 * @tag: Resulting tag of the failed command
1451 * @tf: Resulting taskfile registers of the failed command 1451 * @tf: Resulting taskfile registers of the failed command
1452 * 1452 *
1453 * Read log page 10h to obtain NCQ error details and clear error 1453 * Read log page 10h to obtain NCQ error details and clear error
1454 * condition. 1454 * condition.
1455 * 1455 *
1456 * LOCKING: 1456 * LOCKING:
1457 * Kernel thread context (may sleep). 1457 * Kernel thread context (may sleep).
1458 * 1458 *
1459 * RETURNS: 1459 * RETURNS:
1460 * 0 on success, -errno otherwise. 1460 * 0 on success, -errno otherwise.
1461 */ 1461 */
1462 static int ata_eh_read_log_10h(struct ata_device *dev, 1462 static int ata_eh_read_log_10h(struct ata_device *dev,
1463 int *tag, struct ata_taskfile *tf) 1463 int *tag, struct ata_taskfile *tf)
1464 { 1464 {
1465 u8 *buf = dev->link->ap->sector_buf; 1465 u8 *buf = dev->link->ap->sector_buf;
1466 unsigned int err_mask; 1466 unsigned int err_mask;
1467 u8 csum; 1467 u8 csum;
1468 int i; 1468 int i;
1469 1469
1470 err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1); 1470 err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
1471 if (err_mask) 1471 if (err_mask)
1472 return -EIO; 1472 return -EIO;
1473 1473
1474 csum = 0; 1474 csum = 0;
1475 for (i = 0; i < ATA_SECT_SIZE; i++) 1475 for (i = 0; i < ATA_SECT_SIZE; i++)
1476 csum += buf[i]; 1476 csum += buf[i];
1477 if (csum) 1477 if (csum)
1478 ata_dev_printk(dev, KERN_WARNING, 1478 ata_dev_printk(dev, KERN_WARNING,
1479 "invalid checksum 0x%x on log page 10h\n", csum); 1479 "invalid checksum 0x%x on log page 10h\n", csum);
1480 1480
1481 if (buf[0] & 0x80) 1481 if (buf[0] & 0x80)
1482 return -ENOENT; 1482 return -ENOENT;
1483 1483
1484 *tag = buf[0] & 0x1f; 1484 *tag = buf[0] & 0x1f;
1485 1485
1486 tf->command = buf[2]; 1486 tf->command = buf[2];
1487 tf->feature = buf[3]; 1487 tf->feature = buf[3];
1488 tf->lbal = buf[4]; 1488 tf->lbal = buf[4];
1489 tf->lbam = buf[5]; 1489 tf->lbam = buf[5];
1490 tf->lbah = buf[6]; 1490 tf->lbah = buf[6];
1491 tf->device = buf[7]; 1491 tf->device = buf[7];
1492 tf->hob_lbal = buf[8]; 1492 tf->hob_lbal = buf[8];
1493 tf->hob_lbam = buf[9]; 1493 tf->hob_lbam = buf[9];
1494 tf->hob_lbah = buf[10]; 1494 tf->hob_lbah = buf[10];
1495 tf->nsect = buf[12]; 1495 tf->nsect = buf[12];
1496 tf->hob_nsect = buf[13]; 1496 tf->hob_nsect = buf[13];
1497 1497
1498 return 0; 1498 return 0;
1499 } 1499 }
1500 1500
1501 /** 1501 /**
1502 * atapi_eh_tur - perform ATAPI TEST_UNIT_READY 1502 * atapi_eh_tur - perform ATAPI TEST_UNIT_READY
1503 * @dev: target ATAPI device 1503 * @dev: target ATAPI device
1504 * @r_sense_key: out parameter for sense_key 1504 * @r_sense_key: out parameter for sense_key
1505 * 1505 *
1506 * Perform ATAPI TEST_UNIT_READY. 1506 * Perform ATAPI TEST_UNIT_READY.
1507 * 1507 *
1508 * LOCKING: 1508 * LOCKING:
1509 * EH context (may sleep). 1509 * EH context (may sleep).
1510 * 1510 *
1511 * RETURNS: 1511 * RETURNS:
1512 * 0 on success, AC_ERR_* mask on failure. 1512 * 0 on success, AC_ERR_* mask on failure.
1513 */ 1513 */
1514 static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key) 1514 static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
1515 { 1515 {
1516 u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 }; 1516 u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
1517 struct ata_taskfile tf; 1517 struct ata_taskfile tf;
1518 unsigned int err_mask; 1518 unsigned int err_mask;
1519 1519
1520 ata_tf_init(dev, &tf); 1520 ata_tf_init(dev, &tf);
1521 1521
1522 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1522 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1523 tf.command = ATA_CMD_PACKET; 1523 tf.command = ATA_CMD_PACKET;
1524 tf.protocol = ATAPI_PROT_NODATA; 1524 tf.protocol = ATAPI_PROT_NODATA;
1525 1525
1526 err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0); 1526 err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
1527 if (err_mask == AC_ERR_DEV) 1527 if (err_mask == AC_ERR_DEV)
1528 *r_sense_key = tf.feature >> 4; 1528 *r_sense_key = tf.feature >> 4;
1529 return err_mask; 1529 return err_mask;
1530 } 1530 }
1531 1531
1532 /** 1532 /**
1533 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE 1533 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
1534 * @dev: device to perform REQUEST_SENSE to 1534 * @dev: device to perform REQUEST_SENSE to
1535 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) 1535 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
1536 * @dfl_sense_key: default sense key to use 1536 * @dfl_sense_key: default sense key to use
1537 * 1537 *
1538 * Perform ATAPI REQUEST_SENSE after the device reported CHECK 1538 * Perform ATAPI REQUEST_SENSE after the device reported CHECK
1539 * SENSE. This function is an EH helper. 1539 * SENSE. This function is an EH helper.
1540 * 1540 *
1541 * LOCKING: 1541 * LOCKING:
1542 * Kernel thread context (may sleep). 1542 * Kernel thread context (may sleep).
1543 * 1543 *
1544 * RETURNS: 1544 * RETURNS:
1545 * 0 on success, AC_ERR_* mask on failure 1545 * 0 on success, AC_ERR_* mask on failure
1546 */ 1546 */
1547 static unsigned int atapi_eh_request_sense(struct ata_device *dev, 1547 static unsigned int atapi_eh_request_sense(struct ata_device *dev,
1548 u8 *sense_buf, u8 dfl_sense_key) 1548 u8 *sense_buf, u8 dfl_sense_key)
1549 { 1549 {
1550 u8 cdb[ATAPI_CDB_LEN] = 1550 u8 cdb[ATAPI_CDB_LEN] =
1551 { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 }; 1551 { REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
1552 struct ata_port *ap = dev->link->ap; 1552 struct ata_port *ap = dev->link->ap;
1553 struct ata_taskfile tf; 1553 struct ata_taskfile tf;
1554 1554
1555 DPRINTK("ATAPI request sense\n"); 1555 DPRINTK("ATAPI request sense\n");
1556 1556
1557 /* FIXME: is this needed? */ 1557 /* FIXME: is this needed? */
1558 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); 1558 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
1559 1559
1560 /* initialize sense_buf with the error register, 1560 /* initialize sense_buf with the error register,
1561 * for the case where they are -not- overwritten 1561 * for the case where they are -not- overwritten
1562 */ 1562 */
1563 sense_buf[0] = 0x70; 1563 sense_buf[0] = 0x70;
1564 sense_buf[2] = dfl_sense_key; 1564 sense_buf[2] = dfl_sense_key;
1565 1565
1566 /* some devices time out if garbage left in tf */ 1566 /* some devices time out if garbage left in tf */
1567 ata_tf_init(dev, &tf); 1567 ata_tf_init(dev, &tf);
1568 1568
1569 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1569 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1570 tf.command = ATA_CMD_PACKET; 1570 tf.command = ATA_CMD_PACKET;
1571 1571
1572 /* is it pointless to prefer PIO for "safety reasons"? */ 1572 /* is it pointless to prefer PIO for "safety reasons"? */
1573 if (ap->flags & ATA_FLAG_PIO_DMA) { 1573 if (ap->flags & ATA_FLAG_PIO_DMA) {
1574 tf.protocol = ATAPI_PROT_DMA; 1574 tf.protocol = ATAPI_PROT_DMA;
1575 tf.feature |= ATAPI_PKT_DMA; 1575 tf.feature |= ATAPI_PKT_DMA;
1576 } else { 1576 } else {
1577 tf.protocol = ATAPI_PROT_PIO; 1577 tf.protocol = ATAPI_PROT_PIO;
1578 tf.lbam = SCSI_SENSE_BUFFERSIZE; 1578 tf.lbam = SCSI_SENSE_BUFFERSIZE;
1579 tf.lbah = 0; 1579 tf.lbah = 0;
1580 } 1580 }
1581 1581
1582 return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, 1582 return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
1583 sense_buf, SCSI_SENSE_BUFFERSIZE, 0); 1583 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
1584 } 1584 }
1585 1585
1586 /** 1586 /**
1587 * ata_eh_analyze_serror - analyze SError for a failed port 1587 * ata_eh_analyze_serror - analyze SError for a failed port
1588 * @link: ATA link to analyze SError for 1588 * @link: ATA link to analyze SError for
1589 * 1589 *
1590 * Analyze SError if available and further determine cause of 1590 * Analyze SError if available and further determine cause of
1591 * failure. 1591 * failure.
1592 * 1592 *
1593 * LOCKING: 1593 * LOCKING:
1594 * None. 1594 * None.
1595 */ 1595 */
1596 static void ata_eh_analyze_serror(struct ata_link *link) 1596 static void ata_eh_analyze_serror(struct ata_link *link)
1597 { 1597 {
1598 struct ata_eh_context *ehc = &link->eh_context; 1598 struct ata_eh_context *ehc = &link->eh_context;
1599 u32 serror = ehc->i.serror; 1599 u32 serror = ehc->i.serror;
1600 unsigned int err_mask = 0, action = 0; 1600 unsigned int err_mask = 0, action = 0;
1601 u32 hotplug_mask; 1601 u32 hotplug_mask;
1602 1602
1603 if (serror & (SERR_PERSISTENT | SERR_DATA)) { 1603 if (serror & (SERR_PERSISTENT | SERR_DATA)) {
1604 err_mask |= AC_ERR_ATA_BUS; 1604 err_mask |= AC_ERR_ATA_BUS;
1605 action |= ATA_EH_RESET; 1605 action |= ATA_EH_RESET;
1606 } 1606 }
1607 if (serror & SERR_PROTOCOL) { 1607 if (serror & SERR_PROTOCOL) {
1608 err_mask |= AC_ERR_HSM; 1608 err_mask |= AC_ERR_HSM;
1609 action |= ATA_EH_RESET; 1609 action |= ATA_EH_RESET;
1610 } 1610 }
1611 if (serror & SERR_INTERNAL) { 1611 if (serror & SERR_INTERNAL) {
1612 err_mask |= AC_ERR_SYSTEM; 1612 err_mask |= AC_ERR_SYSTEM;
1613 action |= ATA_EH_RESET; 1613 action |= ATA_EH_RESET;
1614 } 1614 }
1615 1615
1616 /* Determine whether a hotplug event has occurred. Both 1616 /* Determine whether a hotplug event has occurred. Both
1617 * SError.N/X are considered hotplug events for enabled or 1617 * SError.N/X are considered hotplug events for enabled or
1618 * host links. For disabled PMP links, only N bit is 1618 * host links. For disabled PMP links, only N bit is
1619 * considered as X bit is left at 1 for link plugging. 1619 * considered as X bit is left at 1 for link plugging.
1620 */ 1620 */
1621 if (link->lpm_policy != ATA_LPM_MAX_POWER) 1621 if (link->lpm_policy != ATA_LPM_MAX_POWER)
1622 hotplug_mask = 0; /* hotplug doesn't work w/ LPM */ 1622 hotplug_mask = 0; /* hotplug doesn't work w/ LPM */
1623 else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) 1623 else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
1624 hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG; 1624 hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
1625 else 1625 else
1626 hotplug_mask = SERR_PHYRDY_CHG; 1626 hotplug_mask = SERR_PHYRDY_CHG;
1627 1627
1628 if (serror & hotplug_mask) 1628 if (serror & hotplug_mask)
1629 ata_ehi_hotplugged(&ehc->i); 1629 ata_ehi_hotplugged(&ehc->i);
1630 1630
1631 ehc->i.err_mask |= err_mask; 1631 ehc->i.err_mask |= err_mask;
1632 ehc->i.action |= action; 1632 ehc->i.action |= action;
1633 } 1633 }
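
The SError value analyzed here is whatever was latched into eh_info
before EH ran. A sketch of that producer side, assuming a driver that
records and then clears SError from its interrupt handler:

static void example_latch_serror(struct ata_port *ap)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 serror;

	if (sata_scr_read(&ap->link, SCR_ERROR, &serror) == 0) {
		ehi->serror |= serror;
		sata_scr_write(&ap->link, SCR_ERROR, serror);
	}
}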
1634 1634
1635 /** 1635 /**
1636 * ata_eh_analyze_ncq_error - analyze NCQ error 1636 * ata_eh_analyze_ncq_error - analyze NCQ error
1637 * @link: ATA link to analyze NCQ error for 1637 * @link: ATA link to analyze NCQ error for
1638 * 1638 *
1639 * Read log page 10h, determine the offending qc and acquire 1639 * Read log page 10h, determine the offending qc and acquire
1640 * error status TF. For NCQ device errors, all LLDDs have to do 1640 * error status TF. For NCQ device errors, all LLDDs have to do
1641 * is setting AC_ERR_DEV in ehi->err_mask. This function takes 1641 * is setting AC_ERR_DEV in ehi->err_mask. This function takes
1642 * care of the rest. 1642 * care of the rest.
1643 * 1643 *
1644 * LOCKING: 1644 * LOCKING:
1645 * Kernel thread context (may sleep). 1645 * Kernel thread context (may sleep).
1646 */ 1646 */
1647 void ata_eh_analyze_ncq_error(struct ata_link *link) 1647 void ata_eh_analyze_ncq_error(struct ata_link *link)
1648 { 1648 {
1649 struct ata_port *ap = link->ap; 1649 struct ata_port *ap = link->ap;
1650 struct ata_eh_context *ehc = &link->eh_context; 1650 struct ata_eh_context *ehc = &link->eh_context;
1651 struct ata_device *dev = link->device; 1651 struct ata_device *dev = link->device;
1652 struct ata_queued_cmd *qc; 1652 struct ata_queued_cmd *qc;
1653 struct ata_taskfile tf; 1653 struct ata_taskfile tf;
1654 int tag, rc; 1654 int tag, rc;
1655 1655
1656 /* if frozen, we can't do much */ 1656 /* if frozen, we can't do much */
1657 if (ap->pflags & ATA_PFLAG_FROZEN) 1657 if (ap->pflags & ATA_PFLAG_FROZEN)
1658 return; 1658 return;
1659 1659
1660 /* is it NCQ device error? */ 1660 /* is it NCQ device error? */
1661 if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV)) 1661 if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
1662 return; 1662 return;
1663 1663
1664 /* has LLDD analyzed already? */ 1664 /* has LLDD analyzed already? */
1665 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1665 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
1666 qc = __ata_qc_from_tag(ap, tag); 1666 qc = __ata_qc_from_tag(ap, tag);
1667 1667
1668 if (!(qc->flags & ATA_QCFLAG_FAILED)) 1668 if (!(qc->flags & ATA_QCFLAG_FAILED))
1669 continue; 1669 continue;
1670 1670
1671 if (qc->err_mask) 1671 if (qc->err_mask)
1672 return; 1672 return;
1673 } 1673 }
1674 1674
1675 /* okay, this error is ours */ 1675 /* okay, this error is ours */
1676 memset(&tf, 0, sizeof(tf)); 1676 memset(&tf, 0, sizeof(tf));
1677 rc = ata_eh_read_log_10h(dev, &tag, &tf); 1677 rc = ata_eh_read_log_10h(dev, &tag, &tf);
1678 if (rc) { 1678 if (rc) {
1679 ata_link_printk(link, KERN_ERR, "failed to read log page 10h " 1679 ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
1680 "(errno=%d)\n", rc); 1680 "(errno=%d)\n", rc);
1681 return; 1681 return;
1682 } 1682 }
1683 1683
1684 if (!(link->sactive & (1 << tag))) { 1684 if (!(link->sactive & (1 << tag))) {
1685 ata_link_printk(link, KERN_ERR, "log page 10h reported " 1685 ata_link_printk(link, KERN_ERR, "log page 10h reported "
1686 "inactive tag %d\n", tag); 1686 "inactive tag %d\n", tag);
1687 return; 1687 return;
1688 } 1688 }
1689 1689
1690 /* we've got the perpetrator, condemn it */ 1690 /* we've got the perpetrator, condemn it */
1691 qc = __ata_qc_from_tag(ap, tag); 1691 qc = __ata_qc_from_tag(ap, tag);
1692 memcpy(&qc->result_tf, &tf, sizeof(tf)); 1692 memcpy(&qc->result_tf, &tf, sizeof(tf));
1693 qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48; 1693 qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1694 qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ; 1694 qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
1695 ehc->i.err_mask &= ~AC_ERR_DEV; 1695 ehc->i.err_mask &= ~AC_ERR_DEV;
1696 } 1696 }
1697 1697
1698 /** 1698 /**
1699 * ata_eh_analyze_tf - analyze taskfile of a failed qc 1699 * ata_eh_analyze_tf - analyze taskfile of a failed qc
1700 * @qc: qc to analyze 1700 * @qc: qc to analyze
1701 * @tf: Taskfile registers to analyze 1701 * @tf: Taskfile registers to analyze
1702 * 1702 *
1703 * Analyze taskfile of @qc and further determine cause of 1703 * Analyze taskfile of @qc and further determine cause of
1704 * failure. This function also requests ATAPI sense data if 1704 * failure. This function also requests ATAPI sense data if
1705 * available. 1705 * available.
1706 * 1706 *
1707 * LOCKING: 1707 * LOCKING:
1708 * Kernel thread context (may sleep). 1708 * Kernel thread context (may sleep).
1709 * 1709 *
1710 * RETURNS: 1710 * RETURNS:
1711 * Determined recovery action 1711 * Determined recovery action
1712 */ 1712 */
1713 static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, 1713 static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1714 const struct ata_taskfile *tf) 1714 const struct ata_taskfile *tf)
1715 { 1715 {
1716 unsigned int tmp, action = 0; 1716 unsigned int tmp, action = 0;
1717 u8 stat = tf->command, err = tf->feature; 1717 u8 stat = tf->command, err = tf->feature;
1718 1718
1719 if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) { 1719 if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
1720 qc->err_mask |= AC_ERR_HSM; 1720 qc->err_mask |= AC_ERR_HSM;
1721 return ATA_EH_RESET; 1721 return ATA_EH_RESET;
1722 } 1722 }
1723 1723
1724 if (stat & (ATA_ERR | ATA_DF)) 1724 if (stat & (ATA_ERR | ATA_DF))
1725 qc->err_mask |= AC_ERR_DEV; 1725 qc->err_mask |= AC_ERR_DEV;
1726 else 1726 else
1727 return 0; 1727 return 0;
1728 1728
1729 switch (qc->dev->class) { 1729 switch (qc->dev->class) {
1730 case ATA_DEV_ATA: 1730 case ATA_DEV_ATA:
1731 if (err & ATA_ICRC) 1731 if (err & ATA_ICRC)
1732 qc->err_mask |= AC_ERR_ATA_BUS; 1732 qc->err_mask |= AC_ERR_ATA_BUS;
1733 if (err & ATA_UNC) 1733 if (err & ATA_UNC)
1734 qc->err_mask |= AC_ERR_MEDIA; 1734 qc->err_mask |= AC_ERR_MEDIA;
1735 if (err & ATA_IDNF) 1735 if (err & ATA_IDNF)
1736 qc->err_mask |= AC_ERR_INVALID; 1736 qc->err_mask |= AC_ERR_INVALID;
1737 break; 1737 break;
1738 1738
1739 case ATA_DEV_ATAPI: 1739 case ATA_DEV_ATAPI:
1740 if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) { 1740 if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
1741 tmp = atapi_eh_request_sense(qc->dev, 1741 tmp = atapi_eh_request_sense(qc->dev,
1742 qc->scsicmd->sense_buffer, 1742 qc->scsicmd->sense_buffer,
1743 qc->result_tf.feature >> 4); 1743 qc->result_tf.feature >> 4);
1744 if (!tmp) { 1744 if (!tmp) {
1745 /* ATA_QCFLAG_SENSE_VALID is used to 1745 /* ATA_QCFLAG_SENSE_VALID is used to
1746 * tell atapi_qc_complete() that sense 1746 * tell atapi_qc_complete() that sense
1747 * data is already valid. 1747 * data is already valid.
1748 * 1748 *
1749 * TODO: interpret sense data and set 1749 * TODO: interpret sense data and set
1750 * appropriate err_mask. 1750 * appropriate err_mask.
1751 */ 1751 */
1752 qc->flags |= ATA_QCFLAG_SENSE_VALID; 1752 qc->flags |= ATA_QCFLAG_SENSE_VALID;
1753 } else 1753 } else
1754 qc->err_mask |= tmp; 1754 qc->err_mask |= tmp;
1755 } 1755 }
1756 } 1756 }
1757 1757
1758 if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS)) 1758 if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
1759 action |= ATA_EH_RESET; 1759 action |= ATA_EH_RESET;
1760 1760
1761 return action; 1761 return action;
1762 } 1762 }
1763 1763
1764 static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask, 1764 static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
1765 int *xfer_ok) 1765 int *xfer_ok)
1766 { 1766 {
1767 int base = 0; 1767 int base = 0;
1768 1768
1769 if (!(eflags & ATA_EFLAG_DUBIOUS_XFER)) 1769 if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
1770 *xfer_ok = 1; 1770 *xfer_ok = 1;
1771 1771
1772 if (!*xfer_ok) 1772 if (!*xfer_ok)
1773 base = ATA_ECAT_DUBIOUS_NONE; 1773 base = ATA_ECAT_DUBIOUS_NONE;
1774 1774
1775 if (err_mask & AC_ERR_ATA_BUS) 1775 if (err_mask & AC_ERR_ATA_BUS)
1776 return base + ATA_ECAT_ATA_BUS; 1776 return base + ATA_ECAT_ATA_BUS;
1777 1777
1778 if (err_mask & AC_ERR_TIMEOUT) 1778 if (err_mask & AC_ERR_TIMEOUT)
1779 return base + ATA_ECAT_TOUT_HSM; 1779 return base + ATA_ECAT_TOUT_HSM;
1780 1780
1781 if (eflags & ATA_EFLAG_IS_IO) { 1781 if (eflags & ATA_EFLAG_IS_IO) {
1782 if (err_mask & AC_ERR_HSM) 1782 if (err_mask & AC_ERR_HSM)
1783 return base + ATA_ECAT_TOUT_HSM; 1783 return base + ATA_ECAT_TOUT_HSM;
1784 if ((err_mask & 1784 if ((err_mask &
1785 (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV) 1785 (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
1786 return base + ATA_ECAT_UNK_DEV; 1786 return base + ATA_ECAT_UNK_DEV;
1787 } 1787 }
1788 1788
1789 return 0; 1789 return 0;
1790 } 1790 }
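
A worked example of the mapping; the arithmetic relies on the DUBIOUS_
categories sitting at ATA_ECAT_DUBIOUS_NONE plus the base category,
exactly as the helper computes:

static int example_categorize(void)
{
	int xfer_ok = 0;

	/* timeout on an IO command whose transfer was never verified:
	 * base becomes ATA_ECAT_DUBIOUS_NONE, so this returns
	 * ATA_ECAT_DUBIOUS_TOUT_HSM
	 */
	return ata_eh_categorize_error(ATA_EFLAG_DUBIOUS_XFER |
				       ATA_EFLAG_IS_IO,
				       AC_ERR_TIMEOUT, &xfer_ok);
}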
1791 1791
1792 struct speed_down_verdict_arg { 1792 struct speed_down_verdict_arg {
1793 u64 since; 1793 u64 since;
1794 int xfer_ok; 1794 int xfer_ok;
1795 int nr_errors[ATA_ECAT_NR]; 1795 int nr_errors[ATA_ECAT_NR];
1796 }; 1796 };
1797 1797
1798 static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg) 1798 static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
1799 { 1799 {
1800 struct speed_down_verdict_arg *arg = void_arg; 1800 struct speed_down_verdict_arg *arg = void_arg;
1801 int cat; 1801 int cat;
1802 1802
1803 if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since)) 1803 if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
1804 return -1; 1804 return -1;
1805 1805
1806 cat = ata_eh_categorize_error(ent->eflags, ent->err_mask, 1806 cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
1807 &arg->xfer_ok); 1807 &arg->xfer_ok);
1808 arg->nr_errors[cat]++; 1808 arg->nr_errors[cat]++;
1809 1809
1810 return 0; 1810 return 0;
1811 } 1811 }
1812 1812
1813 /** 1813 /**
1814 * ata_eh_speed_down_verdict - Determine speed down verdict 1814 * ata_eh_speed_down_verdict - Determine speed down verdict
1815 * @dev: Device of interest 1815 * @dev: Device of interest
1816 * 1816 *
1817 * This function examines error ring of @dev and determines 1817 * This function examines error ring of @dev and determines
1818 * whether NCQ needs to be turned off, transfer speed should be 1818 * whether NCQ needs to be turned off, transfer speed should be
1819 * stepped down, or falling back to PIO is necessary. 1819 * stepped down, or falling back to PIO is necessary.
1820 * 1820 *
1821 * ECAT_ATA_BUS : ATA_BUS error for any command 1821 * ECAT_ATA_BUS : ATA_BUS error for any command
1822 * 1822 *
1823 * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for 1823 * ECAT_TOUT_HSM : TIMEOUT for any command or HSM violation for
1824 * IO commands 1824 * IO commands
1825 * 1825 *
1826 * ECAT_UNK_DEV : Unknown DEV error for IO commands 1826 * ECAT_UNK_DEV : Unknown DEV error for IO commands
1827 * 1827 *
1828 * ECAT_DUBIOUS_* : Identical to above three but occurred while 1828 * ECAT_DUBIOUS_* : Identical to above three but occurred while
1829 * data transfer hasn't been verified. 1829 * data transfer hasn't been verified.
1830 * 1830 *
1831 * Verdicts are 1831 * Verdicts are
1832 * 1832 *
1833 * NCQ_OFF : Turn off NCQ. 1833 * NCQ_OFF : Turn off NCQ.
1834 * 1834 *
1835 * SPEED_DOWN : Speed down transfer speed but don't fall back 1835 * SPEED_DOWN : Speed down transfer speed but don't fall back
1836 * to PIO. 1836 * to PIO.
1837 * 1837 *
1838 * FALLBACK_TO_PIO : Fall back to PIO. 1838 * FALLBACK_TO_PIO : Fall back to PIO.
1839 * 1839 *
1840 * Even if multiple verdicts are returned, only one action is 1840 * Even if multiple verdicts are returned, only one action is
1841 * taken per error. An action triggered by non-DUBIOUS errors 1841 * taken per error. An action triggered by non-DUBIOUS errors
1842 * clears ering, while one triggered by DUBIOUS_* errors doesn't. 1842 * clears ering, while one triggered by DUBIOUS_* errors doesn't.
1843 * This is to expedite speed down decisions right after device is 1843 * This is to expedite speed down decisions right after device is
1844 * initially configured. 1844 * initially configured.
1845 * 1845 *
1846 * The following are the speed down rules. #1 and #2 deal with 1846 * The following are the speed down rules. #1 and #2 deal with
1847 * DUBIOUS errors. 1847 * DUBIOUS errors.
1848 * 1848 *
1849 * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors 1849 * 1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
1850 * occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO. 1850 * occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
1851 * 1851 *
1852 * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors 1852 * 2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
1853 * occurred during last 5 mins, NCQ_OFF. 1853 * occurred during last 5 mins, NCQ_OFF.
1854 * 1854 *
1855 * 3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors 1855 * 3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
1856 * occurred during last 5 mins, FALLBACK_TO_PIO. 1856 * occurred during last 5 mins, FALLBACK_TO_PIO.
1857 * 1857 *
1858 * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred 1858 * 4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
1859 * during last 10 mins, NCQ_OFF. 1859 * during last 10 mins, NCQ_OFF.
1860 * 1860 *
1861 * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6 1861 * 5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
1862 * UNK_DEV errors occurred during last 10 mins, SPEED_DOWN. 1862 * UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
1863 * 1863 *
1864 * LOCKING: 1864 * LOCKING:
1865 * Inherited from caller. 1865 * Inherited from caller.
1866 * 1866 *
1867 * RETURNS: 1867 * RETURNS:
1868 * OR of ATA_EH_SPDN_* flags. 1868 * OR of ATA_EH_SPDN_* flags.
1869 */ 1869 */
1870 static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev) 1870 static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
1871 { 1871 {
1872 const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ; 1872 const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
1873 u64 j64 = get_jiffies_64(); 1873 u64 j64 = get_jiffies_64();
1874 struct speed_down_verdict_arg arg; 1874 struct speed_down_verdict_arg arg;
1875 unsigned int verdict = 0; 1875 unsigned int verdict = 0;
1876 1876
1877 /* scan past 5 mins of error history */ 1877 /* scan past 5 mins of error history */
1878 memset(&arg, 0, sizeof(arg)); 1878 memset(&arg, 0, sizeof(arg));
1879 arg.since = j64 - min(j64, j5mins); 1879 arg.since = j64 - min(j64, j5mins);
1880 ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 1880 ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
1881 1881
1882 if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] + 1882 if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
1883 arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1) 1883 arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
1884 verdict |= ATA_EH_SPDN_SPEED_DOWN | 1884 verdict |= ATA_EH_SPDN_SPEED_DOWN |
1885 ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS; 1885 ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;
1886 1886
1887 if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] + 1887 if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
1888 arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1) 1888 arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
1889 verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS; 1889 verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;
1890 1890
1891 if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 1891 if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
1892 arg.nr_errors[ATA_ECAT_TOUT_HSM] + 1892 arg.nr_errors[ATA_ECAT_TOUT_HSM] +
1893 arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 1893 arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
1894 verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO; 1894 verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;
1895 1895
1896 /* scan past 10 mins of error history */ 1896 /* scan past 10 mins of error history */
1897 memset(&arg, 0, sizeof(arg)); 1897 memset(&arg, 0, sizeof(arg));
1898 arg.since = j64 - min(j64, j10mins); 1898 arg.since = j64 - min(j64, j10mins);
1899 ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg); 1899 ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);
1900 1900
1901 if (arg.nr_errors[ATA_ECAT_TOUT_HSM] + 1901 if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
1902 arg.nr_errors[ATA_ECAT_UNK_DEV] > 3) 1902 arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
1903 verdict |= ATA_EH_SPDN_NCQ_OFF; 1903 verdict |= ATA_EH_SPDN_NCQ_OFF;
1904 1904
1905 if (arg.nr_errors[ATA_ECAT_ATA_BUS] + 1905 if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
1906 arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 || 1906 arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
1907 arg.nr_errors[ATA_ECAT_UNK_DEV] > 6) 1907 arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
1908 verdict |= ATA_EH_SPDN_SPEED_DOWN; 1908 verdict |= ATA_EH_SPDN_SPEED_DOWN;
1909 1909
1910 return verdict; 1910 return verdict;
1911 } 1911 }
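
A contrived self-check of rule #1, not driver code: two dubious ATA bus
errors categorize as ATA_ECAT_DUBIOUS_ATA_BUS, trip the ">1" test in the
5-minute scan, and the verdict therefore carries the fallback flags:

static void example_rule1_check(struct ata_device *dev)
{
	unsigned int verdict;

	ata_ering_record(&dev->ering, ATA_EFLAG_DUBIOUS_XFER,
			 AC_ERR_ATA_BUS);
	ata_ering_record(&dev->ering, ATA_EFLAG_DUBIOUS_XFER,
			 AC_ERR_ATA_BUS);

	verdict = ata_eh_speed_down_verdict(dev);
	WARN_ON(!(verdict & ATA_EH_SPDN_FALLBACK_TO_PIO));
}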
1912 1912
1913 /** 1913 /**
1914 * ata_eh_speed_down - record error and speed down if necessary 1914 * ata_eh_speed_down - record error and speed down if necessary
 * @dev: Failed device
 * @eflags: mask of ATA_EFLAG_* flags
 * @err_mask: err_mask of the error
 *
 * Record error and examine error history to determine whether
 * adjusting transmission speed is necessary.  It also sets
 * transmission limits appropriately if such adjustment is
 * necessary.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Determined recovery action.
 */
static unsigned int ata_eh_speed_down(struct ata_device *dev,
				unsigned int eflags, unsigned int err_mask)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	int xfer_ok = 0;
	unsigned int verdict;
	unsigned int action = 0;

	/* don't bother if Cat-0 error */
	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
		return 0;

	/* record error and determine whether speed down is necessary */
	ata_ering_record(&dev->ering, eflags, err_mask);
	verdict = ata_eh_speed_down_verdict(dev);

	/* turn off NCQ? */
	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
		dev->flags |= ATA_DFLAG_NCQ_OFF;
		ata_dev_printk(dev, KERN_WARNING,
			       "NCQ disabled due to excessive errors\n");
		goto done;
	}

	/* speed down? */
	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
		/* speed down SATA link speed if possible */
		if (sata_down_spd_limit(link, 0) == 0) {
			action |= ATA_EH_RESET;
			goto done;
		}

		/* lower transfer mode */
		if (dev->spdn_cnt < 2) {
			static const int dma_dnxfer_sel[] =
				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
			static const int pio_dnxfer_sel[] =
				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
			int sel;

			if (dev->xfer_shift != ATA_SHIFT_PIO)
				sel = dma_dnxfer_sel[dev->spdn_cnt];
			else
				sel = pio_dnxfer_sel[dev->spdn_cnt];

			dev->spdn_cnt++;

			if (ata_down_xfermask_limit(dev, sel) == 0) {
				action |= ATA_EH_RESET;
				goto done;
			}
		}
	}

	/* Fall back to PIO?  Slowing down to PIO is meaningless for
	 * SATA ATA devices.  Consider it only for PATA and SATAPI.
	 */
	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
			dev->spdn_cnt = 0;
			action |= ATA_EH_RESET;
			goto done;
		}
	}

	return 0;
 done:
	/* device has been slowed down, blow error history */
	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
		ata_ering_clear(&dev->ering);
	return action;
}
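
/*
 * Note (summary of the ladder above; no new behaviour): remedies are
 * tried from least to most disruptive -- disable NCQ, then lower the
 * SATA link speed, then step down the DMA/PIO transfer mode (at most
 * twice, tracked by dev->spdn_cnt), and only then force PIO for
 * PATA/ATAPI devices.  Unless the verdict carries
 * ATA_EH_SPDN_KEEP_ERRORS, a successful step also clears the error
 * ring so the next verdict starts from fresh history.
 */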
2006 2006
2007 /** 2007 /**
2008 * ata_eh_link_autopsy - analyze error and determine recovery action 2008 * ata_eh_link_autopsy - analyze error and determine recovery action
2009 * @link: host link to perform autopsy on 2009 * @link: host link to perform autopsy on
2010 * 2010 *
2011 * Analyze why @link failed and determine which recovery actions 2011 * Analyze why @link failed and determine which recovery actions
2012 * are needed. This function also sets more detailed AC_ERR_* 2012 * are needed. This function also sets more detailed AC_ERR_*
2013 * values and fills sense data for ATAPI CHECK SENSE. 2013 * values and fills sense data for ATAPI CHECK SENSE.
2014 * 2014 *
2015 * LOCKING: 2015 * LOCKING:
2016 * Kernel thread context (may sleep). 2016 * Kernel thread context (may sleep).
2017 */ 2017 */
2018 static void ata_eh_link_autopsy(struct ata_link *link) 2018 static void ata_eh_link_autopsy(struct ata_link *link)
2019 { 2019 {
2020 struct ata_port *ap = link->ap; 2020 struct ata_port *ap = link->ap;
2021 struct ata_eh_context *ehc = &link->eh_context; 2021 struct ata_eh_context *ehc = &link->eh_context;
2022 struct ata_device *dev; 2022 struct ata_device *dev;
2023 unsigned int all_err_mask = 0, eflags = 0; 2023 unsigned int all_err_mask = 0, eflags = 0;
2024 int tag; 2024 int tag;
2025 u32 serror; 2025 u32 serror;
2026 int rc; 2026 int rc;
2027 2027
2028 DPRINTK("ENTER\n"); 2028 DPRINTK("ENTER\n");
2029 2029
2030 if (ehc->i.flags & ATA_EHI_NO_AUTOPSY) 2030 if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
2031 return; 2031 return;
2032 2032
2033 /* obtain and analyze SError */ 2033 /* obtain and analyze SError */
2034 rc = sata_scr_read(link, SCR_ERROR, &serror); 2034 rc = sata_scr_read(link, SCR_ERROR, &serror);
2035 if (rc == 0) { 2035 if (rc == 0) {
2036 ehc->i.serror |= serror; 2036 ehc->i.serror |= serror;
2037 ata_eh_analyze_serror(link); 2037 ata_eh_analyze_serror(link);
2038 } else if (rc != -EOPNOTSUPP) { 2038 } else if (rc != -EOPNOTSUPP) {
2039 /* SError read failed, force reset and probing */ 2039 /* SError read failed, force reset and probing */
2040 ehc->i.probe_mask |= ATA_ALL_DEVICES; 2040 ehc->i.probe_mask |= ATA_ALL_DEVICES;
2041 ehc->i.action |= ATA_EH_RESET; 2041 ehc->i.action |= ATA_EH_RESET;
2042 ehc->i.err_mask |= AC_ERR_OTHER; 2042 ehc->i.err_mask |= AC_ERR_OTHER;
2043 } 2043 }
2044 2044
2045 /* analyze NCQ failure */ 2045 /* analyze NCQ failure */
2046 ata_eh_analyze_ncq_error(link); 2046 ata_eh_analyze_ncq_error(link);
2047 2047
2048 /* any real error trumps AC_ERR_OTHER */ 2048 /* any real error trumps AC_ERR_OTHER */
2049 if (ehc->i.err_mask & ~AC_ERR_OTHER) 2049 if (ehc->i.err_mask & ~AC_ERR_OTHER)
2050 ehc->i.err_mask &= ~AC_ERR_OTHER; 2050 ehc->i.err_mask &= ~AC_ERR_OTHER;
2051 2051
2052 all_err_mask |= ehc->i.err_mask; 2052 all_err_mask |= ehc->i.err_mask;
2053 2053
2054 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2054 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2055 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2055 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2056 2056
2057 if (!(qc->flags & ATA_QCFLAG_FAILED) || 2057 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2058 ata_dev_phys_link(qc->dev) != link) 2058 ata_dev_phys_link(qc->dev) != link)
2059 continue; 2059 continue;
2060 2060
2061 /* inherit upper level err_mask */ 2061 /* inherit upper level err_mask */
2062 qc->err_mask |= ehc->i.err_mask; 2062 qc->err_mask |= ehc->i.err_mask;
2063 2063
2064 /* analyze TF */ 2064 /* analyze TF */
2065 ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf); 2065 ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
2066 2066
2067 /* DEV errors are probably spurious in case of ATA_BUS error */ 2067 /* DEV errors are probably spurious in case of ATA_BUS error */
2068 if (qc->err_mask & AC_ERR_ATA_BUS) 2068 if (qc->err_mask & AC_ERR_ATA_BUS)
2069 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA | 2069 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
2070 AC_ERR_INVALID); 2070 AC_ERR_INVALID);
2071 2071
2072 /* any real error trumps unknown error */ 2072 /* any real error trumps unknown error */
2073 if (qc->err_mask & ~AC_ERR_OTHER) 2073 if (qc->err_mask & ~AC_ERR_OTHER)
2074 qc->err_mask &= ~AC_ERR_OTHER; 2074 qc->err_mask &= ~AC_ERR_OTHER;
2075 2075
2076 /* SENSE_VALID trumps dev/unknown error and revalidation */ 2076 /* SENSE_VALID trumps dev/unknown error and revalidation */
2077 if (qc->flags & ATA_QCFLAG_SENSE_VALID) 2077 if (qc->flags & ATA_QCFLAG_SENSE_VALID)
2078 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); 2078 qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
2079 2079
2080 /* determine whether the command is worth retrying */ 2080 /* determine whether the command is worth retrying */
2081 if (qc->flags & ATA_QCFLAG_IO || 2081 if (qc->flags & ATA_QCFLAG_IO ||
2082 (!(qc->err_mask & AC_ERR_INVALID) && 2082 (!(qc->err_mask & AC_ERR_INVALID) &&
2083 qc->err_mask != AC_ERR_DEV)) 2083 qc->err_mask != AC_ERR_DEV))
2084 qc->flags |= ATA_QCFLAG_RETRY; 2084 qc->flags |= ATA_QCFLAG_RETRY;
2085 2085
2086 /* accumulate error info */ 2086 /* accumulate error info */
2087 ehc->i.dev = qc->dev; 2087 ehc->i.dev = qc->dev;
2088 all_err_mask |= qc->err_mask; 2088 all_err_mask |= qc->err_mask;
2089 if (qc->flags & ATA_QCFLAG_IO) 2089 if (qc->flags & ATA_QCFLAG_IO)
2090 eflags |= ATA_EFLAG_IS_IO; 2090 eflags |= ATA_EFLAG_IS_IO;
2091 } 2091 }
2092 2092
2093 /* enforce default EH actions */ 2093 /* enforce default EH actions */
2094 if (ap->pflags & ATA_PFLAG_FROZEN || 2094 if (ap->pflags & ATA_PFLAG_FROZEN ||
2095 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) 2095 all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
2096 ehc->i.action |= ATA_EH_RESET; 2096 ehc->i.action |= ATA_EH_RESET;
2097 else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) || 2097 else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
2098 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV))) 2098 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
2099 ehc->i.action |= ATA_EH_REVALIDATE; 2099 ehc->i.action |= ATA_EH_REVALIDATE;
2100 2100
2101 /* If we have offending qcs and the associated failed device, 2101 /* If we have offending qcs and the associated failed device,
2102 * perform per-dev EH action only on the offending device. 2102 * perform per-dev EH action only on the offending device.
2103 */ 2103 */
2104 if (ehc->i.dev) { 2104 if (ehc->i.dev) {
2105 ehc->i.dev_action[ehc->i.dev->devno] |= 2105 ehc->i.dev_action[ehc->i.dev->devno] |=
2106 ehc->i.action & ATA_EH_PERDEV_MASK; 2106 ehc->i.action & ATA_EH_PERDEV_MASK;
2107 ehc->i.action &= ~ATA_EH_PERDEV_MASK; 2107 ehc->i.action &= ~ATA_EH_PERDEV_MASK;
2108 } 2108 }
2109 2109
2110 /* propagate timeout to host link */ 2110 /* propagate timeout to host link */
2111 if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link)) 2111 if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
2112 ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT; 2112 ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;
2113 2113
2114 /* record error and consider speeding down */ 2114 /* record error and consider speeding down */
2115 dev = ehc->i.dev; 2115 dev = ehc->i.dev;
2116 if (!dev && ((ata_link_max_devices(link) == 1 && 2116 if (!dev && ((ata_link_max_devices(link) == 1 &&
2117 ata_dev_enabled(link->device)))) 2117 ata_dev_enabled(link->device))))
2118 dev = link->device; 2118 dev = link->device;
2119 2119
2120 if (dev) { 2120 if (dev) {
2121 if (dev->flags & ATA_DFLAG_DUBIOUS_XFER) 2121 if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
2122 eflags |= ATA_EFLAG_DUBIOUS_XFER; 2122 eflags |= ATA_EFLAG_DUBIOUS_XFER;
2123 ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask); 2123 ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
2124 } 2124 }
2125 2125
2126 DPRINTK("EXIT\n"); 2126 DPRINTK("EXIT\n");
2127 } 2127 }
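
/*
 * Note: the per-qc err_mask sanitization above applies a fixed
 * precedence -- an ATA_BUS error clears the DEV/MEDIA/INVALID bits
 * (those are likely spurious), any real error clears the catch-all
 * AC_ERR_OTHER, and valid sense data clears AC_ERR_DEV and
 * AC_ERR_OTHER, since the sense data already describes the failure.
 */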

/**
 * ata_eh_autopsy - analyze error and determine recovery action
 * @ap: host port to perform autopsy on
 *
 * Analyze all links of @ap and determine why they failed and
 * which recovery actions are needed.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_eh_autopsy(struct ata_port *ap)
{
	struct ata_link *link;

	ata_for_each_link(link, ap, EDGE)
		ata_eh_link_autopsy(link);

	/* Handle the frigging slave link.  Autopsy is done similarly
	 * but actions and flags are transferred over to the master
	 * link and handled from there.
	 */
	if (ap->slave_link) {
		struct ata_eh_context *mehc = &ap->link.eh_context;
		struct ata_eh_context *sehc = &ap->slave_link->eh_context;

		/* transfer control flags from master to slave */
		sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;

		/* perform autopsy on the slave link */
		ata_eh_link_autopsy(ap->slave_link);

		/* transfer actions from slave to master and clear slave */
		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
		mehc->i.action |= sehc->i.action;
		mehc->i.dev_action[1] |= sehc->i.dev_action[1];
		mehc->i.flags |= sehc->i.flags;
		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
	}

	/* Autopsy of fanout ports can affect host link autopsy.
	 * Perform host link autopsy last.
	 */
	if (sata_pmp_attached(ap))
		ata_eh_link_autopsy(&ap->link);
}
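
/*
 * Note on ordering: fan-out (PMP) links are autopsied before the host
 * link because their results can influence the host link autopsy, and
 * the slave link never recovers on its own -- its actions and flags
 * are folded into the master link's EH context above.
 */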

/**
 * ata_get_cmd_descript - get description for ATA command
 * @command: ATA command code to get description for
 *
 * Return a textual description of the given command, or NULL if the
 * command is not known.
 *
 * LOCKING:
 * None
 */
const char *ata_get_cmd_descript(u8 command)
{
#ifdef CONFIG_ATA_VERBOSE_ERROR
	static const struct
	{
		u8 command;
		const char *text;
	} cmd_descr[] = {
		{ ATA_CMD_DEV_RESET, "DEVICE RESET" },
		{ ATA_CMD_CHK_POWER, "CHECK POWER MODE" },
		{ ATA_CMD_STANDBY, "STANDBY" },
		{ ATA_CMD_IDLE, "IDLE" },
		{ ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" },
		{ ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" },
		{ ATA_CMD_NOP, "NOP" },
		{ ATA_CMD_FLUSH, "FLUSH CACHE" },
		{ ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" },
		{ ATA_CMD_ID_ATA, "IDENTIFY DEVICE" },
		{ ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" },
		{ ATA_CMD_SERVICE, "SERVICE" },
		{ ATA_CMD_READ, "READ DMA" },
		{ ATA_CMD_READ_EXT, "READ DMA EXT" },
		{ ATA_CMD_READ_QUEUED, "READ DMA QUEUED" },
		{ ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" },
		{ ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" },
		{ ATA_CMD_WRITE, "WRITE DMA" },
		{ ATA_CMD_WRITE_EXT, "WRITE DMA EXT" },
		{ ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" },
		{ ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" },
		{ ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
		{ ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" },
		{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
		{ ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
		{ ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
		{ ATA_CMD_PIO_READ, "READ SECTOR(S)" },
		{ ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" },
		{ ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" },
		{ ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" },
		{ ATA_CMD_READ_MULTI, "READ MULTIPLE" },
		{ ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" },
		{ ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" },
		{ ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" },
		{ ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" },
		{ ATA_CMD_SET_FEATURES, "SET FEATURES" },
		{ ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" },
		{ ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" },
		{ ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" },
		{ ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" },
		{ ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" },
		{ ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" },
		{ ATA_CMD_SLEEP, "SLEEP" },
		{ ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" },
		{ ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" },
		{ ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" },
		{ ATA_CMD_SET_MAX, "SET MAX ADDRESS" },
		{ ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" },
		{ ATA_CMD_READ_LOG_EXT, "READ LOG EXT" },
		{ ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" },
		{ ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" },
		{ ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" },
		{ ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" },
		{ ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" },
		{ ATA_CMD_TRUSTED_SND, "TRUSTED SEND" },
		{ ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" },
		{ ATA_CMD_PMP_READ, "READ BUFFER" },
		{ ATA_CMD_PMP_WRITE, "WRITE BUFFER" },
		{ ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" },
		{ ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" },
		{ ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" },
		{ ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" },
		{ ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" },
		{ ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" },
		{ ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" },
		{ ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" },
		{ ATA_CMD_SMART, "SMART" },
		{ ATA_CMD_MEDIA_LOCK, "DOOR LOCK" },
		{ ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" },
		{ ATA_CMD_DSM, "DATA SET MANAGEMENT" },
		{ ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" },
		{ ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" },
		{ ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" },
		{ ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" },
		{ ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" },
		{ ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" },
		{ ATA_CMD_READ_LONG, "READ LONG (with retries)" },
		{ ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" },
		{ ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" },
		{ ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" },
		{ ATA_CMD_RESTORE, "RECALIBRATE" },
		{ 0, NULL } /* terminate list */
	};

	unsigned int i;
	for (i = 0; cmd_descr[i].text; i++)
		if (cmd_descr[i].command == command)
			return cmd_descr[i].text;
#endif

	return NULL;
}
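
/*
 * Usage sketch (hypothetical helper, not part of this change; it
 * mirrors the lookup done in ata_eh_link_report() below): log a
 * human-readable name for a failed taskfile command, staying silent
 * when the opcode is unknown or CONFIG_ATA_VERBOSE_ERROR is not set.
 */
static inline void ata_eh_log_cmd_name(struct ata_device *dev, u8 command)
{
	const char *descr = ata_get_cmd_descript(command);

	if (descr)
		ata_dev_printk(dev, KERN_ERR, "failed command: %s\n", descr);
}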

/**
 * ata_eh_link_report - report error handling to user
 * @link: ATA link to report EH about
 *
 * Report EH to user.
 *
 * LOCKING:
 * None.
 */
static void ata_eh_link_report(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const char *frozen, *desc;
	char tries_buf[6];
	int tag, nr_failed = 0;

	if (ehc->i.flags & ATA_EHI_QUIET)
		return;

	desc = NULL;
	if (ehc->i.desc[0] != '\0')
		desc = ehc->i.desc;

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link ||
		    ((qc->flags & ATA_QCFLAG_QUIET) &&
		     qc->err_mask == AC_ERR_DEV))
			continue;
		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
			continue;

		nr_failed++;
	}

	if (!nr_failed && !ehc->i.err_mask)
		return;

	frozen = "";
	if (ap->pflags & ATA_PFLAG_FROZEN)
		frozen = " frozen";

	memset(tries_buf, 0, sizeof(tries_buf));
	if (ap->eh_tries < ATA_EH_MAX_TRIES)
		snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d",
			 ap->eh_tries);

	if (ehc->i.dev) {
		ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
			       "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
			       ehc->i.err_mask, link->sactive, ehc->i.serror,
			       ehc->i.action, frozen, tries_buf);
		if (desc)
			ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc);
	} else {
		ata_link_printk(link, KERN_ERR, "exception Emask 0x%x "
				"SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
				ehc->i.err_mask, link->sactive, ehc->i.serror,
				ehc->i.action, frozen, tries_buf);
		if (desc)
			ata_link_printk(link, KERN_ERR, "%s\n", desc);
	}

#ifdef CONFIG_ATA_VERBOSE_ERROR
	if (ehc->i.serror)
		ata_link_printk(link, KERN_ERR,
		  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
		  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
		  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
		  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
		  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
		  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
		  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
		  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
		  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
		  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
		  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
		  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
		  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
		  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
		  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
		  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
		  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
#endif

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
		const u8 *cdb = qc->cdb;
		char data_buf[20] = "";
		char cdb_buf[70] = "";

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
			continue;

		if (qc->dma_dir != DMA_NONE) {
			static const char *dma_str[] = {
				[DMA_BIDIRECTIONAL] = "bidi",
				[DMA_TO_DEVICE] = "out",
				[DMA_FROM_DEVICE] = "in",
			};
			static const char *prot_str[] = {
				[ATA_PROT_PIO] = "pio",
				[ATA_PROT_DMA] = "dma",
				[ATA_PROT_NCQ] = "ncq",
				[ATAPI_PROT_PIO] = "pio",
				[ATAPI_PROT_DMA] = "dma",
			};

			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
				 prot_str[qc->tf.protocol], qc->nbytes,
				 dma_str[qc->dma_dir]);
		}

		if (ata_is_atapi(qc->tf.protocol)) {
			if (qc->scsicmd)
				scsi_print_command(qc->scsicmd);
			else
				snprintf(cdb_buf, sizeof(cdb_buf),
				 "cdb %02x %02x %02x %02x %02x %02x %02x %02x "
				 "%02x %02x %02x %02x %02x %02x %02x %02x\n         ",
				 cdb[0], cdb[1], cdb[2], cdb[3],
				 cdb[4], cdb[5], cdb[6], cdb[7],
				 cdb[8], cdb[9], cdb[10], cdb[11],
				 cdb[12], cdb[13], cdb[14], cdb[15]);
		} else {
			const char *descr = ata_get_cmd_descript(cmd->command);
			if (descr)
				ata_dev_printk(qc->dev, KERN_ERR,
					       "failed command: %s\n", descr);
		}

		ata_dev_printk(qc->dev, KERN_ERR,
			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
			"tag %d%s\n         %s"
			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
			"Emask 0x%x (%s)%s\n",
			cmd->command, cmd->feature, cmd->nsect,
			cmd->lbal, cmd->lbam, cmd->lbah,
			cmd->hob_feature, cmd->hob_nsect,
			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
			cmd->device, qc->tag, data_buf, cdb_buf,
			res->command, res->feature, res->nsect,
			res->lbal, res->lbam, res->lbah,
			res->hob_feature, res->hob_nsect,
			res->hob_lbal, res->hob_lbam, res->hob_lbah,
			res->device, qc->err_mask, ata_err_string(qc->err_mask),
			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");

#ifdef CONFIG_ATA_VERBOSE_ERROR
		if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
				    ATA_ERR)) {
			if (res->command & ATA_BUSY)
				ata_dev_printk(qc->dev, KERN_ERR,
					       "status: { Busy }\n");
			else
				ata_dev_printk(qc->dev, KERN_ERR,
					       "status: { %s%s%s%s}\n",
					       res->command & ATA_DRDY ? "DRDY " : "",
					       res->command & ATA_DF ? "DF " : "",
					       res->command & ATA_DRQ ? "DRQ " : "",
					       res->command & ATA_ERR ? "ERR " : "");
		}

		if (cmd->command != ATA_CMD_PACKET &&
		    (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
				     ATA_ABORTED)))
			ata_dev_printk(qc->dev, KERN_ERR,
				       "error: { %s%s%s%s}\n",
				       res->feature & ATA_ICRC ? "ICRC " : "",
				       res->feature & ATA_UNC ? "UNC " : "",
				       res->feature & ATA_IDNF ? "IDNF " : "",
				       res->feature & ATA_ABORTED ? "ABRT " : "");
#endif
	}
}
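
/*
 * Note: the function above makes two passes over the qc table -- the
 * first only counts failures worth reporting (quiet device errors and
 * commands with valid sense data are skipped) so that a port with
 * nothing to say stays silent, and the second prints the taskfile,
 * CDB and status/error decode for each failed command.
 */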

/**
 * ata_eh_report - report error handling to user
 * @ap: ATA port to report EH about
 *
 * Report EH to user.
 *
 * LOCKING:
 * None.
 */
void ata_eh_report(struct ata_port *ap)
{
	struct ata_link *link;

	ata_for_each_link(link, ap, HOST_FIRST)
		ata_eh_link_report(link);
}

static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
			unsigned int *classes, unsigned long deadline,
			bool clear_classes)
{
	struct ata_device *dev;

	if (clear_classes)
		ata_for_each_dev(dev, link, ALL)
			classes[dev->devno] = ATA_DEV_UNKNOWN;

	return reset(link, classes, deadline);
}
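
/*
 * Usage sketch (mirrors the calls made from ata_eh_reset() below):
 * the first reset of a link clears the classification array, while
 * the follow-up reset of a slave link preserves the classes already
 * gathered for the master:
 *
 *	rc = ata_do_reset(link, reset, classes, deadline, true);
 *	tmp = ata_do_reset(slave, reset, classes, deadline, false);
 */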

static int ata_eh_followup_srst_needed(struct ata_link *link,
				       int rc, const unsigned int *classes)
{
	if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
		return 0;
	if (rc == -EAGAIN)
		return 1;
	if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
		return 1;
	return 0;
}
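
/*
 * Note (summary of the checks above): a follow-up softreset is wanted
 * either when the hardreset itself asked for one (rc == -EAGAIN) or
 * on a PMP-capable host link, where hardreset alone may not yield
 * usable device signatures; offline links and links flagged
 * ATA_LFLAG_NO_SRST never get one.
 */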

int ata_eh_reset(struct ata_link *link, int classify,
		 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
		 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
{
	struct ata_port *ap = link->ap;
	struct ata_link *slave = ap->slave_link;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
	unsigned int *classes = ehc->classes;
	unsigned int lflags = link->flags;
	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
	int max_tries = 0, try = 0;
	struct ata_link *failed_link;
	struct ata_device *dev;
	unsigned long deadline, now;
	ata_reset_fn_t reset;
	unsigned long flags;
	u32 sstatus;
	int nr_unknown, rc;

	/*
	 * Prepare to reset
	 */
	while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
		max_tries++;
	if (link->flags & ATA_LFLAG_NO_HRST)
		hardreset = NULL;
	if (link->flags & ATA_LFLAG_NO_SRST)
		softreset = NULL;

	/* make sure each reset attempt is at least COOL_DOWN apart */
	if (ehc->i.flags & ATA_EHI_DID_RESET) {
		now = jiffies;
		WARN_ON(time_after(ehc->last_reset, now));
		deadline = ata_deadline(ehc->last_reset,
					ATA_EH_RESET_COOL_DOWN);
		if (time_before(now, deadline))
			schedule_timeout_uninterruptible(deadline - now);
	}

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_RESETTING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);

	ata_for_each_dev(dev, link, ALL) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing.  If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI.  Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights.  Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* prefer hardreset */
	reset = NULL;
	ehc->i.action &= ~ATA_EH_RESET;
	if (hardreset) {
		reset = hardreset;
		ehc->i.action |= ATA_EH_HARDRESET;
	} else if (softreset) {
		reset = softreset;
		ehc->i.action |= ATA_EH_SOFTRESET;
	}

	if (prereset) {
		unsigned long deadline = ata_deadline(jiffies,
						      ATA_EH_PRERESET_TIMEOUT);

		if (slave) {
			sehc->i.action &= ~ATA_EH_RESET;
			sehc->i.action |= ehc->i.action;
		}

		rc = prereset(link, deadline);

		/* If present, do prereset on slave link too.  Reset
		 * is skipped iff both master and slave links report
		 * -ENOENT or clear ATA_EH_RESET.
		 */
		if (slave && (rc == 0 || rc == -ENOENT)) {
			int tmp;

			tmp = prereset(slave, deadline);
			if (tmp != -ENOENT)
				rc = tmp;

			ehc->i.action |= sehc->i.action;
		}

		if (rc) {
			if (rc == -ENOENT) {
				ata_link_printk(link, KERN_DEBUG,
						"port disabled. ignoring.\n");
				ehc->i.action &= ~ATA_EH_RESET;

				ata_for_each_dev(dev, link, ALL)
					classes[dev->devno] = ATA_DEV_NONE;

				rc = 0;
			} else
				ata_link_printk(link, KERN_ERR,
					"prereset failed (errno=%d)\n", rc);
			goto out;
		}

		/* prereset() might have cleared ATA_EH_RESET.  If so,
		 * bang classes, thaw and return.
		 */
		if (reset && !(ehc->i.action & ATA_EH_RESET)) {
			ata_for_each_dev(dev, link, ALL)
				classes[dev->devno] = ATA_DEV_NONE;
			if ((ap->pflags & ATA_PFLAG_FROZEN) &&
			    ata_is_host_link(link))
				ata_eh_thaw_port(ap);
			rc = 0;
			goto out;
		}
	}

 retry:
	/*
	 * Perform reset
	 */
	if (ata_is_host_link(link))
		ata_eh_freeze_port(ap);

	deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);

	if (reset) {
		if (verbose)
			ata_link_printk(link, KERN_INFO, "%s resetting link\n",
					reset == softreset ? "soft" : "hard");

		/* mark that this EH session started with reset */
		ehc->last_reset = jiffies;
		if (reset == hardreset)
			ehc->i.flags |= ATA_EHI_DID_HARDRESET;
		else
			ehc->i.flags |= ATA_EHI_DID_SOFTRESET;

		rc = ata_do_reset(link, reset, classes, deadline, true);
		if (rc && rc != -EAGAIN) {
			failed_link = link;
			goto fail;
		}

		/* hardreset slave link if existent */
		if (slave && reset == hardreset) {
			int tmp;

			if (verbose)
				ata_link_printk(slave, KERN_INFO,
						"hard resetting link\n");

			ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
			tmp = ata_do_reset(slave, reset, classes, deadline,
					   false);
			switch (tmp) {
			case -EAGAIN:
				rc = -EAGAIN;
				/* fall through */
			case 0:
				break;
			default:
				failed_link = slave;
				rc = tmp;
				goto fail;
			}
		}

		/* perform follow-up SRST if necessary */
		if (reset == hardreset &&
		    ata_eh_followup_srst_needed(link, rc, classes)) {
			reset = softreset;

			if (!reset) {
				ata_link_printk(link, KERN_ERR,
						"follow-up softreset required "
						"but no softreset available\n");
				failed_link = link;
				rc = -EINVAL;
				goto fail;
			}

			ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
			rc = ata_do_reset(link, reset, classes, deadline, true);
			if (rc) {
				failed_link = link;
				goto fail;
			}
		}
	} else {
		if (verbose)
			ata_link_printk(link, KERN_INFO, "no reset method "
					"available, skipping reset\n");
		if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
			lflags |= ATA_LFLAG_ASSUME_ATA;
	}

	/*
	 * Post-reset processing
	 */
	ata_for_each_dev(dev, link, ALL) {
		/* After the reset, the device state is PIO 0 and the
		 * controller state is undefined.  Reset also wakes up
		 * drives from sleeping mode.
		 */
		dev->pio_mode = XFER_PIO_0;
		dev->flags &= ~ATA_DFLAG_SLEEPING;

		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
			continue;

		/* apply class override */
		if (lflags & ATA_LFLAG_ASSUME_ATA)
			classes[dev->devno] = ATA_DEV_ATA;
		else if (lflags & ATA_LFLAG_ASSUME_SEMB)
			classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
	}

	/* record current link speed */
	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
		link->sata_spd = (sstatus >> 4) & 0xf;
	if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
		slave->sata_spd = (sstatus >> 4) & 0xf;

	/* thaw the port */
	if (ata_is_host_link(link))
		ata_eh_thaw_port(ap);

	/* postreset() should clear hardware SError.  Although SError
	 * is cleared during link resume, clearing SError here is
	 * necessary as some PHYs raise hotplug events after SRST.
	 * This introduces a race condition where hotplug can occur
	 * between reset and here.  This race is mitigated by cross
	 * checking link onlineness and classification result later.
	 */
	if (postreset) {
		postreset(link, classes);
		if (slave)
			postreset(slave, classes);
	}

	/*
	 * Some controllers can't be frozen very well and may set
	 * spurious error conditions during reset.  Clear accumulated
	 * error information.  As reset is the final recovery action,
	 * nothing is lost by doing this.
	 */
	spin_lock_irqsave(link->ap->lock, flags);
	memset(&link->eh_info, 0, sizeof(link->eh_info));
	if (slave)
		memset(&slave->eh_info, 0, sizeof(slave->eh_info));
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;
	spin_unlock_irqrestore(link->ap->lock, flags);

	/*
	 * Make sure onlineness and classification result correspond.
	 * Hotplug could have happened during reset and some
	 * controllers fail to wait while a drive is spinning up after
	 * being hotplugged, causing misdetection.  By cross checking
	 * link on/offlineness and classification result, those
	 * conditions can be reliably detected and retried.
	 */
	nr_unknown = 0;
	ata_for_each_dev(dev, link, ALL) {
		if (ata_phys_link_online(ata_dev_phys_link(dev))) {
			if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
				ata_dev_printk(dev, KERN_DEBUG, "link online "
					       "but device misclassified\n");
				classes[dev->devno] = ATA_DEV_NONE;
				nr_unknown++;
			}
		} else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
			if (ata_class_enabled(classes[dev->devno]))
				ata_dev_printk(dev, KERN_DEBUG, "link offline, "
					       "clearing class %d to NONE\n",
					       classes[dev->devno]);
			classes[dev->devno] = ATA_DEV_NONE;
		} else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
			ata_dev_printk(dev, KERN_DEBUG, "link status unknown, "
				       "clearing UNKNOWN to NONE\n");
			classes[dev->devno] = ATA_DEV_NONE;
		}
	}
2806 2806
2807 if (classify && nr_unknown) { 2807 if (classify && nr_unknown) {
2808 if (try < max_tries) { 2808 if (try < max_tries) {
2809 ata_link_printk(link, KERN_WARNING, "link online but " 2809 ata_link_printk(link, KERN_WARNING, "link online but "
2810 "%d devices misclassified, retrying\n", 2810 "%d devices misclassified, retrying\n",
2811 nr_unknown); 2811 nr_unknown);
2812 failed_link = link; 2812 failed_link = link;
2813 rc = -EAGAIN; 2813 rc = -EAGAIN;
2814 goto fail; 2814 goto fail;
2815 } 2815 }
2816 ata_link_printk(link, KERN_WARNING, 2816 ata_link_printk(link, KERN_WARNING,
2817 "link online but %d devices misclassified, " 2817 "link online but %d devices misclassified, "
2818 "device detection might fail\n", nr_unknown); 2818 "device detection might fail\n", nr_unknown);
2819 } 2819 }
2820 2820
2821 /* reset successful, schedule revalidation */ 2821 /* reset successful, schedule revalidation */
2822 ata_eh_done(link, NULL, ATA_EH_RESET); 2822 ata_eh_done(link, NULL, ATA_EH_RESET);
2823 if (slave) 2823 if (slave)
2824 ata_eh_done(slave, NULL, ATA_EH_RESET); 2824 ata_eh_done(slave, NULL, ATA_EH_RESET);
2825 ehc->last_reset = jiffies; /* update to completion time */ 2825 ehc->last_reset = jiffies; /* update to completion time */
2826 ehc->i.action |= ATA_EH_REVALIDATE; 2826 ehc->i.action |= ATA_EH_REVALIDATE;
2827 link->lpm_policy = ATA_LPM_UNKNOWN; /* reset LPM state */ 2827 link->lpm_policy = ATA_LPM_UNKNOWN; /* reset LPM state */
2828 2828
2829 rc = 0; 2829 rc = 0;
2830 out: 2830 out:
2831 /* clear hotplug flag */ 2831 /* clear hotplug flag */
2832 ehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2832 ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2833 if (slave) 2833 if (slave)
2834 sehc->i.flags &= ~ATA_EHI_HOTPLUGGED; 2834 sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2835 2835
2836 spin_lock_irqsave(ap->lock, flags); 2836 spin_lock_irqsave(ap->lock, flags);
2837 ap->pflags &= ~ATA_PFLAG_RESETTING; 2837 ap->pflags &= ~ATA_PFLAG_RESETTING;
2838 spin_unlock_irqrestore(ap->lock, flags); 2838 spin_unlock_irqrestore(ap->lock, flags);
2839 2839
2840 return rc; 2840 return rc;
2841 2841
2842 fail: 2842 fail:
2843 /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */ 2843 /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
2844 if (!ata_is_host_link(link) && 2844 if (!ata_is_host_link(link) &&
2845 sata_scr_read(link, SCR_STATUS, &sstatus)) 2845 sata_scr_read(link, SCR_STATUS, &sstatus))
2846 rc = -ERESTART; 2846 rc = -ERESTART;
2847 2847
2848 if (rc == -ERESTART || try >= max_tries) 2848 if (rc == -ERESTART || try >= max_tries)
2849 goto out; 2849 goto out;
2850 2850
2851 now = jiffies; 2851 now = jiffies;
2852 if (time_before(now, deadline)) { 2852 if (time_before(now, deadline)) {
2853 unsigned long delta = deadline - now; 2853 unsigned long delta = deadline - now;
2854 2854
2855 ata_link_printk(failed_link, KERN_WARNING, 2855 ata_link_printk(failed_link, KERN_WARNING,
2856 "reset failed (errno=%d), retrying in %u secs\n", 2856 "reset failed (errno=%d), retrying in %u secs\n",
2857 rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000)); 2857 rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
2858 2858
2859 ata_eh_release(ap); 2859 ata_eh_release(ap);
2860 while (delta) 2860 while (delta)
2861 delta = schedule_timeout_uninterruptible(delta); 2861 delta = schedule_timeout_uninterruptible(delta);
2862 ata_eh_acquire(ap); 2862 ata_eh_acquire(ap);
2863 } 2863 }
2864 2864
2865 if (try == max_tries - 1) { 2865 if (try == max_tries - 1) {
2866 sata_down_spd_limit(link, 0); 2866 sata_down_spd_limit(link, 0);
2867 if (slave) 2867 if (slave)
2868 sata_down_spd_limit(slave, 0); 2868 sata_down_spd_limit(slave, 0);
2869 } else if (rc == -EPIPE) 2869 } else if (rc == -EPIPE)
2870 sata_down_spd_limit(failed_link, 0); 2870 sata_down_spd_limit(failed_link, 0);
2871 2871
2872 if (hardreset) 2872 if (hardreset)
2873 reset = hardreset; 2873 reset = hardreset;
2874 goto retry; 2874 goto retry;
2875 } 2875 }
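
When a reset attempt fails before the deadline, the retry path above waits out the remaining interval, feeding the leftover tick count back into schedule_timeout_uninterruptible() until it reaches zero, so an early wakeup cannot shorten the delay. A minimal user-space sketch of the same pattern, assuming POSIX clock_nanosleep() rather than any libata interface:

    #include <errno.h>
    #include <stdio.h>
    #include <time.h>

    /* Sleep until an absolute deadline, restarting when interrupted --
     * the same "loop until the remaining delay is zero" idea as the
     * schedule_timeout_uninterruptible() loop in the retry path. */
    static void sleep_until(const struct timespec *deadline)
    {
            while (clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
                                   deadline, NULL) == EINTR)
                    ;       /* a signal cut the sleep short: go again */
    }

    int main(void)
    {
            struct timespec deadline;

            clock_gettime(CLOCK_MONOTONIC, &deadline);
            deadline.tv_sec += 2;   /* e.g. retry the reset in 2 seconds */
            sleep_until(&deadline);
            puts("deadline reached, retrying reset");
            return 0;
    }
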
2876 2876
2877 static inline void ata_eh_pull_park_action(struct ata_port *ap) 2877 static inline void ata_eh_pull_park_action(struct ata_port *ap)
2878 { 2878 {
2879 struct ata_link *link; 2879 struct ata_link *link;
2880 struct ata_device *dev; 2880 struct ata_device *dev;
2881 unsigned long flags; 2881 unsigned long flags;
2882 2882
2883 /* 2883 /*
2884 * This function can be thought of as an extended version of 2884 * This function can be thought of as an extended version of
2885 * ata_eh_about_to_do() specially crafted to accommodate the 2885 * ata_eh_about_to_do() specially crafted to accommodate the
2886 * requirements of ATA_EH_PARK handling. Since the EH thread 2886 * requirements of ATA_EH_PARK handling. Since the EH thread
2887 * does not leave the do {} while () loop in ata_eh_recover as 2887 * does not leave the do {} while () loop in ata_eh_recover as
2888 * long as the timeout for a park request to *one* device on 2888 * long as the timeout for a park request to *one* device on
2889 * the port has not expired, and since we still want to pick 2889 * the port has not expired, and since we still want to pick
2890 * up park requests to other devices on the same port or 2890 * up park requests to other devices on the same port or
2891 * timeout updates for the same device, we have to pull 2891 * timeout updates for the same device, we have to pull
2892 * ATA_EH_PARK actions from eh_info into eh_context.i 2892 * ATA_EH_PARK actions from eh_info into eh_context.i
2893 * ourselves at the beginning of each pass over the loop. 2893 * ourselves at the beginning of each pass over the loop.
2894 * 2894 *
2895 * Additionally, all write accesses to &ap->park_req_pending 2895 * Additionally, all write accesses to &ap->park_req_pending
2896 * through INIT_COMPLETION() (see below) or complete_all() 2896 * through INIT_COMPLETION() (see below) or complete_all()
2897 * (see ata_scsi_park_store()) are protected by the host lock. 2897 * (see ata_scsi_park_store()) are protected by the host lock.
2898 * As a result we have that park_req_pending.done is zero on 2898 * As a result we have that park_req_pending.done is zero on
2899 * exit from this function, i.e. when ATA_EH_PARK actions for 2899 * exit from this function, i.e. when ATA_EH_PARK actions for
2900 * *all* devices on port ap have been pulled into the 2900 * *all* devices on port ap have been pulled into the
2901 * respective eh_context structs. If, and only if, 2901 * respective eh_context structs. If, and only if,
2902 * park_req_pending.done is non-zero by the time we reach 2902 * park_req_pending.done is non-zero by the time we reach
2903 * wait_for_completion_timeout(), another ATA_EH_PARK action 2903 * wait_for_completion_timeout(), another ATA_EH_PARK action
2904 * has been scheduled for at least one of the devices on port 2904 * has been scheduled for at least one of the devices on port
2905 * ap and we have to cycle over the do {} while () loop in 2905 * ap and we have to cycle over the do {} while () loop in
2906 * ata_eh_recover() again. 2906 * ata_eh_recover() again.
2907 */ 2907 */
2908 2908
2909 spin_lock_irqsave(ap->lock, flags); 2909 spin_lock_irqsave(ap->lock, flags);
2910 INIT_COMPLETION(ap->park_req_pending); 2910 INIT_COMPLETION(ap->park_req_pending);
2911 ata_for_each_link(link, ap, EDGE) { 2911 ata_for_each_link(link, ap, EDGE) {
2912 ata_for_each_dev(dev, link, ALL) { 2912 ata_for_each_dev(dev, link, ALL) {
2913 struct ata_eh_info *ehi = &link->eh_info; 2913 struct ata_eh_info *ehi = &link->eh_info;
2914 2914
2915 link->eh_context.i.dev_action[dev->devno] |= 2915 link->eh_context.i.dev_action[dev->devno] |=
2916 ehi->dev_action[dev->devno] & ATA_EH_PARK; 2916 ehi->dev_action[dev->devno] & ATA_EH_PARK;
2917 ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK); 2917 ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
2918 } 2918 }
2919 } 2919 }
2920 spin_unlock_irqrestore(ap->lock, flags); 2920 spin_unlock_irqrestore(ap->lock, flags);
2921 } 2921 }
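
The long comment above hinges on one invariant: park_req_pending.done is zero when this function returns, so a non-zero counter observed later can only mean a new ATA_EH_PARK request raced in and the recovery loop must run once more. A toy single-threaded model of that handshake (hypothetical helpers; the real code uses a struct completion with all writes under the host lock):

    #include <stdio.h>

    static unsigned int done;   /* the completion's "done" counter */

    static void init_completion(void) { done = 0; }
    static void complete_all(void)    { done = 1; } /* new park request */

    int main(void)
    {
            init_completion();  /* park actions pulled, counter reset */

            /* ... EH issues park commands; if done is still zero here,
             * nothing new arrived while parking.  Non-zero means another
             * ATA_EH_PARK was scheduled meanwhile, so the do {} while ()
             * loop in ata_eh_recover() has to cycle again. */
            complete_all();     /* simulate a request racing in */

            puts(done ? "cycle the EH loop again"
                      : "all park requests handled");
            return 0;
    }
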
2922 2922
2923 static void ata_eh_park_issue_cmd(struct ata_device *dev, int park) 2923 static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
2924 { 2924 {
2925 struct ata_eh_context *ehc = &dev->link->eh_context; 2925 struct ata_eh_context *ehc = &dev->link->eh_context;
2926 struct ata_taskfile tf; 2926 struct ata_taskfile tf;
2927 unsigned int err_mask; 2927 unsigned int err_mask;
2928 2928
2929 ata_tf_init(dev, &tf); 2929 ata_tf_init(dev, &tf);
2930 if (park) { 2930 if (park) {
2931 ehc->unloaded_mask |= 1 << dev->devno; 2931 ehc->unloaded_mask |= 1 << dev->devno;
2932 tf.command = ATA_CMD_IDLEIMMEDIATE; 2932 tf.command = ATA_CMD_IDLEIMMEDIATE;
2933 tf.feature = 0x44; 2933 tf.feature = 0x44;
2934 tf.lbal = 0x4c; 2934 tf.lbal = 0x4c;
2935 tf.lbam = 0x4e; 2935 tf.lbam = 0x4e;
2936 tf.lbah = 0x55; 2936 tf.lbah = 0x55;
2937 } else { 2937 } else {
2938 ehc->unloaded_mask &= ~(1 << dev->devno); 2938 ehc->unloaded_mask &= ~(1 << dev->devno);
2939 tf.command = ATA_CMD_CHK_POWER; 2939 tf.command = ATA_CMD_CHK_POWER;
2940 } 2940 }
2941 2941
2942 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 2942 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
2943 tf.protocol |= ATA_PROT_NODATA; 2943 tf.protocol |= ATA_PROT_NODATA;
2944 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 2944 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
2945 if (park && (err_mask || tf.lbal != 0xc4)) { 2945 if (park && (err_mask || tf.lbal != 0xc4)) {
2946 ata_dev_printk(dev, KERN_ERR, "head unload failed!\n"); 2946 ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
2947 ehc->unloaded_mask &= ~(1 << dev->devno); 2947 ehc->unloaded_mask &= ~(1 << dev->devno);
2948 } 2948 }
2949 } 2949 }
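
The park request above is IDLE IMMEDIATE (command 0xe1) carrying the unload signature in the FEATURE/LBA registers, and the only success indication is LBA low reading back 0xc4. A compact sketch of that acceptance test, with a hypothetical taskfile struct and issue() stub standing in for ata_taskfile and ata_exec_internal():

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for struct ata_taskfile / ata_exec_internal. */
    struct taskfile {
            uint8_t command, feature, lbal, lbam, lbah;
    };

    static unsigned int issue(struct taskfile *tf)
    {
            /* A real implementation would send tf to the device and copy
             * the result registers back; here we pretend the drive parked. */
            tf->lbal = 0xc4;
            return 0;
    }

    int main(void)
    {
            struct taskfile tf = {
                    .command = 0xe1,        /* IDLE IMMEDIATE */
                    .feature = 0x44,        /* unload feature */
                    .lbal = 0x4c, .lbam = 0x4e, .lbah = 0x55,
            };
            unsigned int err_mask = issue(&tf);

            /* same acceptance test as above: any error, or a result LBA
             * low other than 0xc4, means the heads did not unload */
            bool unloaded = !err_mask && tf.lbal == 0xc4;
            printf("head unload %s\n", unloaded ? "succeeded" : "failed");
            return 0;
    }
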
2950 2950
2951 static int ata_eh_revalidate_and_attach(struct ata_link *link, 2951 static int ata_eh_revalidate_and_attach(struct ata_link *link,
2952 struct ata_device **r_failed_dev) 2952 struct ata_device **r_failed_dev)
2953 { 2953 {
2954 struct ata_port *ap = link->ap; 2954 struct ata_port *ap = link->ap;
2955 struct ata_eh_context *ehc = &link->eh_context; 2955 struct ata_eh_context *ehc = &link->eh_context;
2956 struct ata_device *dev; 2956 struct ata_device *dev;
2957 unsigned int new_mask = 0; 2957 unsigned int new_mask = 0;
2958 unsigned long flags; 2958 unsigned long flags;
2959 int rc = 0; 2959 int rc = 0;
2960 2960
2961 DPRINTK("ENTER\n"); 2961 DPRINTK("ENTER\n");
2962 2962
2963 /* For PATA drive side cable detection to work, IDENTIFY must 2963 /* For PATA drive side cable detection to work, IDENTIFY must
2964 * be done backwards such that PDIAG- is released by the slave 2964 * be done backwards such that PDIAG- is released by the slave
2965 * device before the master device is identified. 2965 * device before the master device is identified.
2966 */ 2966 */
2967 ata_for_each_dev(dev, link, ALL_REVERSE) { 2967 ata_for_each_dev(dev, link, ALL_REVERSE) {
2968 unsigned int action = ata_eh_dev_action(dev); 2968 unsigned int action = ata_eh_dev_action(dev);
2969 unsigned int readid_flags = 0; 2969 unsigned int readid_flags = 0;
2970 2970
2971 if (ehc->i.flags & ATA_EHI_DID_RESET) 2971 if (ehc->i.flags & ATA_EHI_DID_RESET)
2972 readid_flags |= ATA_READID_POSTRESET; 2972 readid_flags |= ATA_READID_POSTRESET;
2973 2973
2974 if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) { 2974 if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
2975 WARN_ON(dev->class == ATA_DEV_PMP); 2975 WARN_ON(dev->class == ATA_DEV_PMP);
2976 2976
2977 if (ata_phys_link_offline(ata_dev_phys_link(dev))) { 2977 if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2978 rc = -EIO; 2978 rc = -EIO;
2979 goto err; 2979 goto err;
2980 } 2980 }
2981 2981
2982 ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE); 2982 ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
2983 rc = ata_dev_revalidate(dev, ehc->classes[dev->devno], 2983 rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
2984 readid_flags); 2984 readid_flags);
2985 if (rc) 2985 if (rc)
2986 goto err; 2986 goto err;
2987 2987
2988 ata_eh_done(link, dev, ATA_EH_REVALIDATE); 2988 ata_eh_done(link, dev, ATA_EH_REVALIDATE);
2989 2989
2990 /* Configuration may have changed, reconfigure 2990 /* Configuration may have changed, reconfigure
2991 * transfer mode. 2991 * transfer mode.
2992 */ 2992 */
2993 ehc->i.flags |= ATA_EHI_SETMODE; 2993 ehc->i.flags |= ATA_EHI_SETMODE;
2994 2994
2995 /* schedule the scsi_rescan_device() here */ 2995 /* schedule the scsi_rescan_device() here */
2996 schedule_work(&(ap->scsi_rescan_task)); 2996 schedule_work(&(ap->scsi_rescan_task));
2997 } else if (dev->class == ATA_DEV_UNKNOWN && 2997 } else if (dev->class == ATA_DEV_UNKNOWN &&
2998 ehc->tries[dev->devno] && 2998 ehc->tries[dev->devno] &&
2999 ata_class_enabled(ehc->classes[dev->devno])) { 2999 ata_class_enabled(ehc->classes[dev->devno])) {
3000 /* Temporarily set dev->class, it will be 3000 /* Temporarily set dev->class, it will be
3001 * permanently set once all configurations are 3001 * permanently set once all configurations are
3002 * complete. This is necessary because new 3002 * complete. This is necessary because new
3003 * device configuration is done in two 3003 * device configuration is done in two
3004 * separate loops. 3004 * separate loops.
3005 */ 3005 */
3006 dev->class = ehc->classes[dev->devno]; 3006 dev->class = ehc->classes[dev->devno];
3007 3007
3008 if (dev->class == ATA_DEV_PMP) 3008 if (dev->class == ATA_DEV_PMP)
3009 rc = sata_pmp_attach(dev); 3009 rc = sata_pmp_attach(dev);
3010 else 3010 else
3011 rc = ata_dev_read_id(dev, &dev->class, 3011 rc = ata_dev_read_id(dev, &dev->class,
3012 readid_flags, dev->id); 3012 readid_flags, dev->id);
3013 3013
3014 /* read_id might have changed class, store and reset */ 3014 /* read_id might have changed class, store and reset */
3015 ehc->classes[dev->devno] = dev->class; 3015 ehc->classes[dev->devno] = dev->class;
3016 dev->class = ATA_DEV_UNKNOWN; 3016 dev->class = ATA_DEV_UNKNOWN;
3017 3017
3018 switch (rc) { 3018 switch (rc) {
3019 case 0: 3019 case 0:
3020 /* clear error info accumulated during probe */ 3020 /* clear error info accumulated during probe */
3021 ata_ering_clear(&dev->ering); 3021 ata_ering_clear(&dev->ering);
3022 new_mask |= 1 << dev->devno; 3022 new_mask |= 1 << dev->devno;
3023 break; 3023 break;
3024 case -ENOENT: 3024 case -ENOENT:
3025 /* IDENTIFY was issued to non-existent 3025 /* IDENTIFY was issued to non-existent
3026 * device. No need to reset. Just 3026 * device. No need to reset. Just
3027 * thaw and ignore the device. 3027 * thaw and ignore the device.
3028 */ 3028 */
3029 ata_eh_thaw_port(ap); 3029 ata_eh_thaw_port(ap);
3030 break; 3030 break;
3031 default: 3031 default:
3032 goto err; 3032 goto err;
3033 } 3033 }
3034 } 3034 }
3035 } 3035 }
3036 3036
3037 /* PDIAG- should have been released, ask cable type if post-reset */ 3037 /* PDIAG- should have been released, ask cable type if post-reset */
3038 if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) { 3038 if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
3039 if (ap->ops->cable_detect) 3039 if (ap->ops->cable_detect)
3040 ap->cbl = ap->ops->cable_detect(ap); 3040 ap->cbl = ap->ops->cable_detect(ap);
3041 ata_force_cbl(ap); 3041 ata_force_cbl(ap);
3042 } 3042 }
3043 3043
3044 /* Configure new devices forward such that the user doesn't see 3044 /* Configure new devices forward such that the user doesn't see
3045 * device detection messages backwards. 3045 * device detection messages backwards.
3046 */ 3046 */
3047 ata_for_each_dev(dev, link, ALL) { 3047 ata_for_each_dev(dev, link, ALL) {
3048 if (!(new_mask & (1 << dev->devno))) 3048 if (!(new_mask & (1 << dev->devno)))
3049 continue; 3049 continue;
3050 3050
3051 dev->class = ehc->classes[dev->devno]; 3051 dev->class = ehc->classes[dev->devno];
3052 3052
3053 if (dev->class == ATA_DEV_PMP) 3053 if (dev->class == ATA_DEV_PMP)
3054 continue; 3054 continue;
3055 3055
3056 ehc->i.flags |= ATA_EHI_PRINTINFO; 3056 ehc->i.flags |= ATA_EHI_PRINTINFO;
3057 rc = ata_dev_configure(dev); 3057 rc = ata_dev_configure(dev);
3058 ehc->i.flags &= ~ATA_EHI_PRINTINFO; 3058 ehc->i.flags &= ~ATA_EHI_PRINTINFO;
3059 if (rc) { 3059 if (rc) {
3060 dev->class = ATA_DEV_UNKNOWN; 3060 dev->class = ATA_DEV_UNKNOWN;
3061 goto err; 3061 goto err;
3062 } 3062 }
3063 3063
3064 spin_lock_irqsave(ap->lock, flags); 3064 spin_lock_irqsave(ap->lock, flags);
3065 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG; 3065 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
3066 spin_unlock_irqrestore(ap->lock, flags); 3066 spin_unlock_irqrestore(ap->lock, flags);
3067 3067
3068 /* new device discovered, configure xfermode */ 3068 /* new device discovered, configure xfermode */
3069 ehc->i.flags |= ATA_EHI_SETMODE; 3069 ehc->i.flags |= ATA_EHI_SETMODE;
3070 } 3070 }
3071 3071
3072 return 0; 3072 return 0;
3073 3073
3074 err: 3074 err:
3075 *r_failed_dev = dev; 3075 *r_failed_dev = dev;
3076 DPRINTK("EXIT rc=%d\n", rc); 3076 DPRINTK("EXIT rc=%d\n", rc);
3077 return rc; 3077 return rc;
3078 } 3078 }
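
ata_eh_revalidate_and_attach() is deliberately two-pass: IDENTIFY runs in reverse device order so the slave releases PDIAG- before the master is probed, while new_mask records the newcomers; configuration then runs forward so detection messages appear in natural order. A compact model of the pattern, assuming nothing beyond standard C:

    #include <stdio.h>

    #define NR_DEVS 2

    int main(void)
    {
            const char *name[NR_DEVS] = { "master", "slave" };
            unsigned int new_mask = 0;
            int i;

            /* pass 1: IDENTIFY backwards so "slave" goes first,
             * remembering who answered in a device bitmask */
            for (i = NR_DEVS - 1; i >= 0; i--) {
                    printf("identify %s\n", name[i]);
                    new_mask |= 1u << i;    /* pretend IDENTIFY succeeded */
            }

            /* pass 2: configure forwards, only newly found devices */
            for (i = 0; i < NR_DEVS; i++) {
                    if (!(new_mask & (1u << i)))
                            continue;
                    printf("configure %s\n", name[i]);
            }
            return 0;
    }
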
3079 3079
3080 /** 3080 /**
3081 * ata_set_mode - Program timings and issue SET FEATURES - XFER 3081 * ata_set_mode - Program timings and issue SET FEATURES - XFER
3082 * @link: link on which timings will be programmed 3082 * @link: link on which timings will be programmed
3083 * @r_failed_dev: out parameter for failed device 3083 * @r_failed_dev: out parameter for failed device
3084 * 3084 *
3085 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If 3085 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3086 * ata_set_mode() fails, pointer to the failing device is 3086 * ata_set_mode() fails, pointer to the failing device is
3087 * returned in @r_failed_dev. 3087 * returned in @r_failed_dev.
3088 * 3088 *
3089 * LOCKING: 3089 * LOCKING:
3090 * PCI/etc. bus probe sem. 3090 * PCI/etc. bus probe sem.
3091 * 3091 *
3092 * RETURNS: 3092 * RETURNS:
3093 * 0 on success, negative errno otherwise 3093 * 0 on success, negative errno otherwise
3094 */ 3094 */
3095 int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 3095 int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3096 { 3096 {
3097 struct ata_port *ap = link->ap; 3097 struct ata_port *ap = link->ap;
3098 struct ata_device *dev; 3098 struct ata_device *dev;
3099 int rc; 3099 int rc;
3100 3100
3101 /* if data transfer is verified, clear DUBIOUS_XFER on ering top */ 3101 /* if data transfer is verified, clear DUBIOUS_XFER on ering top */
3102 ata_for_each_dev(dev, link, ENABLED) { 3102 ata_for_each_dev(dev, link, ENABLED) {
3103 if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) { 3103 if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
3104 struct ata_ering_entry *ent; 3104 struct ata_ering_entry *ent;
3105 3105
3106 ent = ata_ering_top(&dev->ering); 3106 ent = ata_ering_top(&dev->ering);
3107 if (ent) 3107 if (ent)
3108 ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER; 3108 ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
3109 } 3109 }
3110 } 3110 }
3111 3111
3112 /* has private set_mode? */ 3112 /* has private set_mode? */
3113 if (ap->ops->set_mode) 3113 if (ap->ops->set_mode)
3114 rc = ap->ops->set_mode(link, r_failed_dev); 3114 rc = ap->ops->set_mode(link, r_failed_dev);
3115 else 3115 else
3116 rc = ata_do_set_mode(link, r_failed_dev); 3116 rc = ata_do_set_mode(link, r_failed_dev);
3117 3117
3118 /* if transfer mode has changed, set DUBIOUS_XFER on device */ 3118 /* if transfer mode has changed, set DUBIOUS_XFER on device */
3119 ata_for_each_dev(dev, link, ENABLED) { 3119 ata_for_each_dev(dev, link, ENABLED) {
3120 struct ata_eh_context *ehc = &link->eh_context; 3120 struct ata_eh_context *ehc = &link->eh_context;
3121 u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno]; 3121 u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
3122 u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno)); 3122 u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
3123 3123
3124 if (dev->xfer_mode != saved_xfer_mode || 3124 if (dev->xfer_mode != saved_xfer_mode ||
3125 ata_ncq_enabled(dev) != saved_ncq) 3125 ata_ncq_enabled(dev) != saved_ncq)
3126 dev->flags |= ATA_DFLAG_DUBIOUS_XFER; 3126 dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
3127 } 3127 }
3128 3128
3129 return rc; 3129 return rc;
3130 } 3130 }
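
The bookkeeping above boils down to one rule: a transfer is "dubious" from the moment the speed or NCQ state changes until a data transfer is verified, at which point the flag is dropped from the newest error-ring entry. A minimal sketch of the set side of that rule (hypothetical flag bit, plain C):

    #include <stdio.h>

    #define DFLAG_DUBIOUS_XFER (1u << 0)    /* hypothetical flag bit */

    int main(void)
    {
            unsigned int dev_flags = 0;
            unsigned char xfer_mode = 0x45, saved_xfer_mode = 0x44;
            int ncq_enabled = 1, saved_ncq = 1;

            /* same test as above: any change to transfer mode or NCQ
             * state makes the next transfer dubious until verified */
            if (xfer_mode != saved_xfer_mode || ncq_enabled != saved_ncq)
                    dev_flags |= DFLAG_DUBIOUS_XFER;

            printf("dubious transfer: %s\n",
                   (dev_flags & DFLAG_DUBIOUS_XFER) ? "yes" : "no");
            return 0;
    }
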
3131 3131
3132 /** 3132 /**
3133 * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset 3133 * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
3134 * @dev: ATAPI device to clear UA for 3134 * @dev: ATAPI device to clear UA for
3135 * 3135 *
3136 * Resets and other operations can make an ATAPI device raise 3136 * Resets and other operations can make an ATAPI device raise
3137 * UNIT ATTENTION which causes the next operation to fail. This 3137 * UNIT ATTENTION which causes the next operation to fail. This
3138 * function clears UA. 3138 * function clears UA.
3139 * 3139 *
3140 * LOCKING: 3140 * LOCKING:
3141 * EH context (may sleep). 3141 * EH context (may sleep).
3142 * 3142 *
3143 * RETURNS: 3143 * RETURNS:
3144 * 0 on success, -errno on failure. 3144 * 0 on success, -errno on failure.
3145 */ 3145 */
3146 static int atapi_eh_clear_ua(struct ata_device *dev) 3146 static int atapi_eh_clear_ua(struct ata_device *dev)
3147 { 3147 {
3148 int i; 3148 int i;
3149 3149
3150 for (i = 0; i < ATA_EH_UA_TRIES; i++) { 3150 for (i = 0; i < ATA_EH_UA_TRIES; i++) {
3151 u8 *sense_buffer = dev->link->ap->sector_buf; 3151 u8 *sense_buffer = dev->link->ap->sector_buf;
3152 u8 sense_key = 0; 3152 u8 sense_key = 0;
3153 unsigned int err_mask; 3153 unsigned int err_mask;
3154 3154
3155 err_mask = atapi_eh_tur(dev, &sense_key); 3155 err_mask = atapi_eh_tur(dev, &sense_key);
3156 if (err_mask != 0 && err_mask != AC_ERR_DEV) { 3156 if (err_mask != 0 && err_mask != AC_ERR_DEV) {
3157 ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY " 3157 ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY "
3158 "failed (err_mask=0x%x)\n", err_mask); 3158 "failed (err_mask=0x%x)\n", err_mask);
3159 return -EIO; 3159 return -EIO;
3160 } 3160 }
3161 3161
3162 if (!err_mask || sense_key != UNIT_ATTENTION) 3162 if (!err_mask || sense_key != UNIT_ATTENTION)
3163 return 0; 3163 return 0;
3164 3164
3165 err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key); 3165 err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
3166 if (err_mask) { 3166 if (err_mask) {
3167 ata_dev_printk(dev, KERN_WARNING, "failed to clear " 3167 ata_dev_printk(dev, KERN_WARNING, "failed to clear "
3168 "UNIT ATTENTION (err_mask=0x%x)\n", err_mask); 3168 "UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
3169 return -EIO; 3169 return -EIO;
3170 } 3170 }
3171 } 3171 }
3172 3172
3173 ata_dev_printk(dev, KERN_WARNING, 3173 ata_dev_printk(dev, KERN_WARNING,
3174 "UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES); 3174 "UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES);
3175 3175
3176 return 0; 3176 return 0;
3177 } 3177 }
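
The loop above alternates TEST UNIT READY with REQUEST SENSE: fetching sense data is what clears a pending UNIT ATTENTION condition, so each pass can retire at most one UA and the loop gives up after ATA_EH_UA_TRIES. A self-contained sketch with stub functions standing in for atapi_eh_tur() and atapi_eh_request_sense():

    #include <stdio.h>

    #define UA_TRIES       5
    #define UNIT_ATTENTION 0x06     /* SCSI sense key */

    /* Hypothetical stubs; a real device raises UA until sense is read. */
    static unsigned int tur(unsigned char *sense_key)
    {
            static int pending = 2; /* device raises UA twice */

            if (pending) {
                    pending--;
                    *sense_key = UNIT_ATTENTION;
                    return 1;       /* CHECK CONDITION reported */
            }
            return 0;
    }

    static unsigned int request_sense(void) { return 0; }

    int main(void)
    {
            int i;

            for (i = 0; i < UA_TRIES; i++) {
                    unsigned char sense_key = 0;

                    if (!tur(&sense_key) || sense_key != UNIT_ATTENTION) {
                            printf("ready after %d tries\n", i + 1);
                            return 0;
                    }
                    request_sense();    /* reading sense clears this UA */
            }
            printf("UNIT ATTENTION persists after %d tries\n", UA_TRIES);
            return 0;
    }
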
3178 3178
3179 /** 3179 /**
3180 * ata_eh_maybe_retry_flush - Retry FLUSH if necessary 3180 * ata_eh_maybe_retry_flush - Retry FLUSH if necessary
3181 * @dev: ATA device which may need FLUSH retry 3181 * @dev: ATA device which may need FLUSH retry
3182 * 3182 *
3183 * If @dev failed FLUSH, it needs to be reported to the upper layer 3183 * If @dev failed FLUSH, it needs to be reported to the upper layer
3184 * immediately as it means that @dev failed to remap and already 3184 * immediately as it means that @dev failed to remap and already
3185 * lost at least a sector and further FLUSH retries won't make 3185 * lost at least a sector and further FLUSH retries won't make
3186 * any difference to the lost sector. However, if FLUSH failed 3186 * any difference to the lost sector. However, if FLUSH failed
3187 * for other reasons, for example transmission error, FLUSH needs 3187 * for other reasons, for example transmission error, FLUSH needs
3188 * to be retried. 3188 * to be retried.
3189 * 3189 *
3190 * This function determines whether FLUSH failure retry is 3190 * This function determines whether FLUSH failure retry is
3191 * necessary and performs it if so. 3191 * necessary and performs it if so.
3192 * 3192 *
3193 * RETURNS: 3193 * RETURNS:
3194 * 0 if EH can continue, -errno if EH needs to be repeated. 3194 * 0 if EH can continue, -errno if EH needs to be repeated.
3195 */ 3195 */
3196 static int ata_eh_maybe_retry_flush(struct ata_device *dev) 3196 static int ata_eh_maybe_retry_flush(struct ata_device *dev)
3197 { 3197 {
3198 struct ata_link *link = dev->link; 3198 struct ata_link *link = dev->link;
3199 struct ata_port *ap = link->ap; 3199 struct ata_port *ap = link->ap;
3200 struct ata_queued_cmd *qc; 3200 struct ata_queued_cmd *qc;
3201 struct ata_taskfile tf; 3201 struct ata_taskfile tf;
3202 unsigned int err_mask; 3202 unsigned int err_mask;
3203 int rc = 0; 3203 int rc = 0;
3204 3204
3205 /* did flush fail for this device? */ 3205 /* did flush fail for this device? */
3206 if (!ata_tag_valid(link->active_tag)) 3206 if (!ata_tag_valid(link->active_tag))
3207 return 0; 3207 return 0;
3208 3208
3209 qc = __ata_qc_from_tag(ap, link->active_tag); 3209 qc = __ata_qc_from_tag(ap, link->active_tag);
3210 if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT && 3210 if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
3211 qc->tf.command != ATA_CMD_FLUSH)) 3211 qc->tf.command != ATA_CMD_FLUSH))
3212 return 0; 3212 return 0;
3213 3213
3214 /* if the device failed it, it should be reported to upper layers */ 3214 /* if the device failed it, it should be reported to upper layers */
3215 if (qc->err_mask & AC_ERR_DEV) 3215 if (qc->err_mask & AC_ERR_DEV)
3216 return 0; 3216 return 0;
3217 3217
3218 /* flush failed for some other reason, give it another shot */ 3218 /* flush failed for some other reason, give it another shot */
3219 ata_tf_init(dev, &tf); 3219 ata_tf_init(dev, &tf);
3220 3220
3221 tf.command = qc->tf.command; 3221 tf.command = qc->tf.command;
3222 tf.flags |= ATA_TFLAG_DEVICE; 3222 tf.flags |= ATA_TFLAG_DEVICE;
3223 tf.protocol = ATA_PROT_NODATA; 3223 tf.protocol = ATA_PROT_NODATA;
3224 3224
3225 ata_dev_printk(dev, KERN_WARNING, "retrying FLUSH 0x%x Emask 0x%x\n", 3225 ata_dev_printk(dev, KERN_WARNING, "retrying FLUSH 0x%x Emask 0x%x\n",
3226 tf.command, qc->err_mask); 3226 tf.command, qc->err_mask);
3227 3227
3228 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 3228 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
3229 if (!err_mask) { 3229 if (!err_mask) {
3230 /* 3230 /*
3231 * FLUSH is complete but there's no way to 3231 * FLUSH is complete but there's no way to
3232 * successfully complete a failed command from EH. 3232 * successfully complete a failed command from EH.
3233 * Making sure retry is allowed at least once and 3233 * Making sure retry is allowed at least once and
3234 * retrying it should do the trick - whatever was in 3234 * retrying it should do the trick - whatever was in
3235 * the cache is already on the platter and this won't 3235 * the cache is already on the platter and this won't
3236 * cause an infinite loop. 3236 * cause an infinite loop.
3237 */ 3237 */
3238 qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1); 3238 qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
3239 } else { 3239 } else {
3240 ata_dev_printk(dev, KERN_WARNING, "FLUSH failed Emask 0x%x\n", 3240 ata_dev_printk(dev, KERN_WARNING, "FLUSH failed Emask 0x%x\n",
3241 err_mask); 3241 err_mask);
3242 rc = -EIO; 3242 rc = -EIO;
3243 3243
3244 /* if device failed it, report it to upper layers */ 3244 /* if device failed it, report it to upper layers */
3245 if (err_mask & AC_ERR_DEV) { 3245 if (err_mask & AC_ERR_DEV) {
3246 qc->err_mask |= AC_ERR_DEV; 3246 qc->err_mask |= AC_ERR_DEV;
3247 qc->result_tf = tf; 3247 qc->result_tf = tf;
3248 if (!(ap->pflags & ATA_PFLAG_FROZEN)) 3248 if (!(ap->pflags & ATA_PFLAG_FROZEN))
3249 rc = 0; 3249 rc = 0;
3250 } 3250 }
3251 } 3251 }
3252 return rc; 3252 return rc;
3253 } 3253 }
3254 3254
3255 /** 3255 /**
3256 * ata_eh_set_lpm - configure SATA interface power management 3256 * ata_eh_set_lpm - configure SATA interface power management
3257 * @link: link to configure power management 3257 * @link: link to configure power management
3258 * @policy: the link power management policy 3258 * @policy: the link power management policy
3259 * @r_failed_dev: out parameter for failed device 3259 * @r_failed_dev: out parameter for failed device
3260 * 3260 *
3261 * Enable SATA Interface power management. This will enable 3261 * Enable SATA Interface power management. This will enable
3262 * Device Interface Power Management (DIPM) for min_power 3262 * Device Interface Power Management (DIPM) for min_power
3263 * policy, and then call driver specific callbacks for 3263 * policy, and then call driver specific callbacks for
3264 * enabling Host Initiated Power management. 3264 * enabling Host Initiated Power management.
3265 * 3265 *
3266 * LOCKING: 3266 * LOCKING:
3267 * EH context. 3267 * EH context.
3268 * 3268 *
3269 * RETURNS: 3269 * RETURNS:
3270 * 0 on success, -errno on failure. 3270 * 0 on success, -errno on failure.
3271 */ 3271 */
3272 static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, 3272 static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3273 struct ata_device **r_failed_dev) 3273 struct ata_device **r_failed_dev)
3274 { 3274 {
3275 struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL; 3275 struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
3276 struct ata_eh_context *ehc = &link->eh_context; 3276 struct ata_eh_context *ehc = &link->eh_context;
3277 struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; 3277 struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
3278 enum ata_lpm_policy old_policy = link->lpm_policy;
3278 unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; 3279 unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
3279 unsigned int err_mask; 3280 unsigned int err_mask;
3280 int rc; 3281 int rc;
3281 3282
3282 /* if the link or host doesn't do LPM, noop */ 3283 /* if the link or host doesn't do LPM, noop */
3283 if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) 3284 if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
3284 return 0; 3285 return 0;
3285 3286
3286 /* 3287 /*
3287 * DIPM is enabled only for MIN_POWER as some devices 3288 * DIPM is enabled only for MIN_POWER as some devices
3288 * misbehave when the host NACKs transition to SLUMBER. Order 3289 * misbehave when the host NACKs transition to SLUMBER. Order
3289 * device and link configurations such that the host always 3290 * device and link configurations such that the host always
3290 * allows DIPM requests. 3291 * allows DIPM requests.
3291 */ 3292 */
3292 ata_for_each_dev(dev, link, ENABLED) { 3293 ata_for_each_dev(dev, link, ENABLED) {
3293 bool hipm = ata_id_has_hipm(dev->id); 3294 bool hipm = ata_id_has_hipm(dev->id);
3294 bool dipm = ata_id_has_dipm(dev->id); 3295 bool dipm = ata_id_has_dipm(dev->id);
3295 3296
3296 /* find the first enabled and the first LPM-enabled device */ 3297 /* find the first enabled and the first LPM-enabled device */
3297 if (!link_dev) 3298 if (!link_dev)
3298 link_dev = dev; 3299 link_dev = dev;
3299 3300
3300 if (!lpm_dev && (hipm || dipm)) 3301 if (!lpm_dev && (hipm || dipm))
3301 lpm_dev = dev; 3302 lpm_dev = dev;
3302 3303
3303 hints &= ~ATA_LPM_EMPTY; 3304 hints &= ~ATA_LPM_EMPTY;
3304 if (!hipm) 3305 if (!hipm)
3305 hints &= ~ATA_LPM_HIPM; 3306 hints &= ~ATA_LPM_HIPM;
3306 3307
3307 /* disable DIPM before changing link config */ 3308 /* disable DIPM before changing link config */
3308 if (policy != ATA_LPM_MIN_POWER && dipm) { 3309 if (policy != ATA_LPM_MIN_POWER && dipm) {
3309 err_mask = ata_dev_set_feature(dev, 3310 err_mask = ata_dev_set_feature(dev,
3310 SETFEATURES_SATA_DISABLE, SATA_DIPM); 3311 SETFEATURES_SATA_DISABLE, SATA_DIPM);
3311 if (err_mask && err_mask != AC_ERR_DEV) { 3312 if (err_mask && err_mask != AC_ERR_DEV) {
3312 ata_dev_printk(dev, KERN_WARNING, 3313 ata_dev_printk(dev, KERN_WARNING,
3313 "failed to disable DIPM, Emask 0x%x\n", 3314 "failed to disable DIPM, Emask 0x%x\n",
3314 err_mask); 3315 err_mask);
3315 rc = -EIO; 3316 rc = -EIO;
3316 goto fail; 3317 goto fail;
3317 } 3318 }
3318 } 3319 }
3319 } 3320 }
3320 3321
3321 if (ap) { 3322 if (ap) {
3322 rc = ap->ops->set_lpm(link, policy, hints); 3323 rc = ap->ops->set_lpm(link, policy, hints);
3323 if (!rc && ap->slave_link) 3324 if (!rc && ap->slave_link)
3324 rc = ap->ops->set_lpm(ap->slave_link, policy, hints); 3325 rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
3325 } else 3326 } else
3326 rc = sata_pmp_set_lpm(link, policy, hints); 3327 rc = sata_pmp_set_lpm(link, policy, hints);
3327 3328
3328 /* 3329 /*
3329 * Attribute link config failure to the first (LPM) enabled 3330 * Attribute link config failure to the first (LPM) enabled
3330 * device on the link. 3331 * device on the link.
3331 */ 3332 */
3332 if (rc) { 3333 if (rc) {
3333 if (rc == -EOPNOTSUPP) { 3334 if (rc == -EOPNOTSUPP) {
3334 link->flags |= ATA_LFLAG_NO_LPM; 3335 link->flags |= ATA_LFLAG_NO_LPM;
3335 return 0; 3336 return 0;
3336 } 3337 }
3337 dev = lpm_dev ? lpm_dev : link_dev; 3338 dev = lpm_dev ? lpm_dev : link_dev;
3338 goto fail; 3339 goto fail;
3339 } 3340 }
3340 3341
3342 /*
3343 * Low level driver acked the transition. Issue DIPM command
3344 * with the new policy set.
3345 */
3346 link->lpm_policy = policy;
3347 if (ap && ap->slave_link)
3348 ap->slave_link->lpm_policy = policy;
3349
3341 /* host config updated, enable DIPM if transitioning to MIN_POWER */ 3350 /* host config updated, enable DIPM if transitioning to MIN_POWER */
3342 ata_for_each_dev(dev, link, ENABLED) { 3351 ata_for_each_dev(dev, link, ENABLED) {
3343 if (policy == ATA_LPM_MIN_POWER && ata_id_has_dipm(dev->id)) { 3352 if (policy == ATA_LPM_MIN_POWER && ata_id_has_dipm(dev->id)) {
3344 err_mask = ata_dev_set_feature(dev, 3353 err_mask = ata_dev_set_feature(dev,
3345 SETFEATURES_SATA_ENABLE, SATA_DIPM); 3354 SETFEATURES_SATA_ENABLE, SATA_DIPM);
3346 if (err_mask && err_mask != AC_ERR_DEV) { 3355 if (err_mask && err_mask != AC_ERR_DEV) {
3347 ata_dev_printk(dev, KERN_WARNING, 3356 ata_dev_printk(dev, KERN_WARNING,
3348 "failed to enable DIPM, Emask 0x%x\n", 3357 "failed to enable DIPM, Emask 0x%x\n",
3349 err_mask); 3358 err_mask);
3350 rc = -EIO; 3359 rc = -EIO;
3351 goto fail; 3360 goto fail;
3352 } 3361 }
3353 } 3362 }
3354 } 3363 }
3355 3364
3356 link->lpm_policy = policy;
3357 if (ap && ap->slave_link)
3358 ap->slave_link->lpm_policy = policy;
3359 return 0; 3365 return 0;
3360 3366
3361 fail: 3367 fail:
3368 /* restore the old policy */
3369 link->lpm_policy = old_policy;
3370 if (ap && ap->slave_link)
3371 ap->slave_link->lpm_policy = old_policy;
3372
3362 /* if no device or only one more chance is left, disable LPM */ 3373 /* if no device or only one more chance is left, disable LPM */
3363 if (!dev || ehc->tries[dev->devno] <= 2) { 3374 if (!dev || ehc->tries[dev->devno] <= 2) {
3364 ata_link_printk(link, KERN_WARNING, 3375 ata_link_printk(link, KERN_WARNING,
3365 "disabling LPM on the link\n"); 3376 "disabling LPM on the link\n");
3366 link->flags |= ATA_LFLAG_NO_LPM; 3377 link->flags |= ATA_LFLAG_NO_LPM;
3367 } 3378 }
3368 if (r_failed_dev) 3379 if (r_failed_dev)
3369 *r_failed_dev = dev; 3380 *r_failed_dev = dev;
3370 return rc; 3381 return rc;
3371 } 3382 }
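
The ordering above is the point of this change: link->lpm_policy (and the slave link's) is updated once the low level driver has acked the transition, but before the DIPM enable commands go out, so those commands execute with the new policy already recorded; the fail path restores the old policy. A toy model of why issuing DIPM against a stale value goes wrong, assuming a hypothetical controller-side check that rejects commands whenever the recorded policy disagrees with its actual state (not the libata API):

    #include <stdbool.h>
    #include <stdio.h>

    enum lpm_policy { LPM_UNKNOWN, LPM_MAX_POWER, LPM_MIN_POWER };

    struct toy_link {
            enum lpm_policy lpm_policy;     /* what the driver believes */
            enum lpm_policy hw_state;       /* what the controller is in */
    };

    static void set_lpm(struct toy_link *l, enum lpm_policy p)
    {
            l->hw_state = p;                /* controller transitions */
    }

    static bool enable_dipm(struct toy_link *l)
    {
            /* assumed behavior: commands issued while the recorded
             * policy is stale get aborted by the controller logic */
            return l->lpm_policy == l->hw_state;
    }

    int main(void)
    {
            struct toy_link link = { LPM_MAX_POWER, LPM_MAX_POWER };

            set_lpm(&link, LPM_MIN_POWER);
            link.lpm_policy = LPM_MIN_POWER;    /* update *before* DIPM */
            printf("DIPM enable %s\n",
                   enable_dipm(&link) ? "ok" : "aborted");
            return 0;
    }
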
3372 3383
3373 static int ata_link_nr_enabled(struct ata_link *link) 3384 static int ata_link_nr_enabled(struct ata_link *link)
3374 { 3385 {
3375 struct ata_device *dev; 3386 struct ata_device *dev;
3376 int cnt = 0; 3387 int cnt = 0;
3377 3388
3378 ata_for_each_dev(dev, link, ENABLED) 3389 ata_for_each_dev(dev, link, ENABLED)
3379 cnt++; 3390 cnt++;
3380 return cnt; 3391 return cnt;
3381 } 3392 }
3382 3393
3383 static int ata_link_nr_vacant(struct ata_link *link) 3394 static int ata_link_nr_vacant(struct ata_link *link)
3384 { 3395 {
3385 struct ata_device *dev; 3396 struct ata_device *dev;
3386 int cnt = 0; 3397 int cnt = 0;
3387 3398
3388 ata_for_each_dev(dev, link, ALL) 3399 ata_for_each_dev(dev, link, ALL)
3389 if (dev->class == ATA_DEV_UNKNOWN) 3400 if (dev->class == ATA_DEV_UNKNOWN)
3390 cnt++; 3401 cnt++;
3391 return cnt; 3402 return cnt;
3392 } 3403 }
3393 3404
3394 static int ata_eh_skip_recovery(struct ata_link *link) 3405 static int ata_eh_skip_recovery(struct ata_link *link)
3395 { 3406 {
3396 struct ata_port *ap = link->ap; 3407 struct ata_port *ap = link->ap;
3397 struct ata_eh_context *ehc = &link->eh_context; 3408 struct ata_eh_context *ehc = &link->eh_context;
3398 struct ata_device *dev; 3409 struct ata_device *dev;
3399 3410
3400 /* skip disabled links */ 3411 /* skip disabled links */
3401 if (link->flags & ATA_LFLAG_DISABLED) 3412 if (link->flags & ATA_LFLAG_DISABLED)
3402 return 1; 3413 return 1;
3403 3414
3404 /* skip if explicitly requested */ 3415 /* skip if explicitly requested */
3405 if (ehc->i.flags & ATA_EHI_NO_RECOVERY) 3416 if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3406 return 1; 3417 return 1;
3407 3418
3408 /* thaw frozen port and recover failed devices */ 3419 /* thaw frozen port and recover failed devices */
3409 if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link)) 3420 if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3410 return 0; 3421 return 0;
3411 3422
3412 /* reset at least once if reset is requested */ 3423 /* reset at least once if reset is requested */
3413 if ((ehc->i.action & ATA_EH_RESET) && 3424 if ((ehc->i.action & ATA_EH_RESET) &&
3414 !(ehc->i.flags & ATA_EHI_DID_RESET)) 3425 !(ehc->i.flags & ATA_EHI_DID_RESET))
3415 return 0; 3426 return 0;
3416 3427
3417 /* skip if class codes for all vacant slots are ATA_DEV_NONE */ 3428 /* skip if class codes for all vacant slots are ATA_DEV_NONE */
3418 ata_for_each_dev(dev, link, ALL) { 3429 ata_for_each_dev(dev, link, ALL) {
3419 if (dev->class == ATA_DEV_UNKNOWN && 3430 if (dev->class == ATA_DEV_UNKNOWN &&
3420 ehc->classes[dev->devno] != ATA_DEV_NONE) 3431 ehc->classes[dev->devno] != ATA_DEV_NONE)
3421 return 0; 3432 return 0;
3422 } 3433 }
3423 3434
3424 return 1; 3435 return 1;
3425 } 3436 }
3426 3437
3427 static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg) 3438 static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3428 { 3439 {
3429 u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL); 3440 u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3430 u64 now = get_jiffies_64(); 3441 u64 now = get_jiffies_64();
3431 int *trials = void_arg; 3442 int *trials = void_arg;
3432 3443
3433 if (ent->timestamp < now - min(now, interval)) 3444 if (ent->timestamp < now - min(now, interval))
3434 return -1; 3445 return -1;
3435 3446
3436 (*trials)++; 3447 (*trials)++;
3437 return 0; 3448 return 0;
3438 } 3449 }
3439 3450
3440 static int ata_eh_schedule_probe(struct ata_device *dev) 3451 static int ata_eh_schedule_probe(struct ata_device *dev)
3441 { 3452 {
3442 struct ata_eh_context *ehc = &dev->link->eh_context; 3453 struct ata_eh_context *ehc = &dev->link->eh_context;
3443 struct ata_link *link = ata_dev_phys_link(dev); 3454 struct ata_link *link = ata_dev_phys_link(dev);
3444 int trials = 0; 3455 int trials = 0;
3445 3456
3446 if (!(ehc->i.probe_mask & (1 << dev->devno)) || 3457 if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
3447 (ehc->did_probe_mask & (1 << dev->devno))) 3458 (ehc->did_probe_mask & (1 << dev->devno)))
3448 return 0; 3459 return 0;
3449 3460
3450 ata_eh_detach_dev(dev); 3461 ata_eh_detach_dev(dev);
3451 ata_dev_init(dev); 3462 ata_dev_init(dev);
3452 ehc->did_probe_mask |= (1 << dev->devno); 3463 ehc->did_probe_mask |= (1 << dev->devno);
3453 ehc->i.action |= ATA_EH_RESET; 3464 ehc->i.action |= ATA_EH_RESET;
3454 ehc->saved_xfer_mode[dev->devno] = 0; 3465 ehc->saved_xfer_mode[dev->devno] = 0;
3455 ehc->saved_ncq_enabled &= ~(1 << dev->devno); 3466 ehc->saved_ncq_enabled &= ~(1 << dev->devno);
3456 3467
3457 /* the link may be in a deep sleep, wake it up */ 3468 /* the link may be in a deep sleep, wake it up */
3458 if (link->lpm_policy > ATA_LPM_MAX_POWER) { 3469 if (link->lpm_policy > ATA_LPM_MAX_POWER) {
3459 if (ata_is_host_link(link)) 3470 if (ata_is_host_link(link))
3460 link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER, 3471 link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
3461 ATA_LPM_EMPTY); 3472 ATA_LPM_EMPTY);
3462 else 3473 else
3463 sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER, 3474 sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
3464 ATA_LPM_EMPTY); 3475 ATA_LPM_EMPTY);
3465 } 3476 }
3466 3477
3467 /* Record and count probe trials on the ering. The specific 3478 /* Record and count probe trials on the ering. The specific
3468 * error mask used is irrelevant. Because a successful device 3479 * error mask used is irrelevant. Because a successful device
3469 * detection clears the ering, this count accumulates only if 3480 * detection clears the ering, this count accumulates only if
3470 * there are consecutive failed probes. 3481 * there are consecutive failed probes.
3471 * 3482 *
3472 * If the count is equal to or higher than ATA_EH_PROBE_TRIALS 3483 * If the count is equal to or higher than ATA_EH_PROBE_TRIALS
3473 * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is 3484 * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
3474 * forced to 1.5Gbps. 3485 * forced to 1.5Gbps.
3475 * 3486 *
3476 * This is to work around cases where failed link speed 3487 * This is to work around cases where failed link speed
3477 * negotiation results in device misdetection leading to 3488 * negotiation results in device misdetection leading to
3478 * infinite DEVXCHG or PHRDY CHG events. 3489 * infinite DEVXCHG or PHRDY CHG events.
3479 */ 3490 */
3480 ata_ering_record(&dev->ering, 0, AC_ERR_OTHER); 3491 ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
3481 ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials); 3492 ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
3482 3493
3483 if (trials > ATA_EH_PROBE_TRIALS) 3494 if (trials > ATA_EH_PROBE_TRIALS)
3484 sata_down_spd_limit(link, 1); 3495 sata_down_spd_limit(link, 1);
3485 3496
3486 return 1; 3497 return 1;
3487 } 3498 }
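
Probe trials are counted by recording a dummy entry on the error ring per failed probe and walking the ring, ignoring entries older than ATA_EH_PROBE_TRIAL_INTERVAL; crossing ATA_EH_PROBE_TRIALS forces the link down to 1.5Gbps. A simplified stand-alone version using wall-clock seconds in place of jiffies:

    #include <stdio.h>
    #include <time.h>

    #define PROBE_TRIALS   2
    #define TRIAL_INTERVAL 60   /* seconds; stands in for the jiffies math */
    #define RING_SIZE      8

    int main(void)
    {
            time_t stamp[RING_SIZE] = { 0 };
            int nr = 0, trials = 0, i;
            time_t now = time(NULL);

            /* record one entry per failed probe, as ata_ering_record()
             * does; a successful probe would clear the ring entirely */
            for (i = 0; i < 3; i++)
                    stamp[nr++ % RING_SIZE] = now;

            /* count only entries inside the interval, like the callback */
            for (i = 0; i < nr && i < RING_SIZE; i++)
                    if (stamp[i] >= now - TRIAL_INTERVAL)
                            trials++;

            if (trials > PROBE_TRIALS)
                    printf("%d recent failed probes: limit link to 1.5Gbps\n",
                           trials);
            return 0;
    }
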
3488 3499
3489 static int ata_eh_handle_dev_fail(struct ata_device *dev, int err) 3500 static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
3490 { 3501 {
3491 struct ata_eh_context *ehc = &dev->link->eh_context; 3502 struct ata_eh_context *ehc = &dev->link->eh_context;
3492 3503
3493 /* -EAGAIN from EH routine indicates retry without prejudice. 3504 /* -EAGAIN from EH routine indicates retry without prejudice.
3494 * The requester is responsible for ensuring forward progress. 3505 * The requester is responsible for ensuring forward progress.
3495 */ 3506 */
3496 if (err != -EAGAIN) 3507 if (err != -EAGAIN)
3497 ehc->tries[dev->devno]--; 3508 ehc->tries[dev->devno]--;
3498 3509
3499 switch (err) { 3510 switch (err) {
3500 case -ENODEV: 3511 case -ENODEV:
3501 /* device missing or wrong IDENTIFY data, schedule probing */ 3512 /* device missing or wrong IDENTIFY data, schedule probing */
3502 ehc->i.probe_mask |= (1 << dev->devno); 3513 ehc->i.probe_mask |= (1 << dev->devno);
3503 case -EINVAL: 3514 case -EINVAL:
3504 /* give it just one more chance */ 3515 /* give it just one more chance */
3505 ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1); 3516 ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
3506 case -EIO: 3517 case -EIO:
3507 if (ehc->tries[dev->devno] == 1) { 3518 if (ehc->tries[dev->devno] == 1) {
3508 /* This is the last chance, better to slow 3519 /* This is the last chance, better to slow
3509 * down than lose it. 3520 * down than lose it.
3510 */ 3521 */
3511 sata_down_spd_limit(ata_dev_phys_link(dev), 0); 3522 sata_down_spd_limit(ata_dev_phys_link(dev), 0);
3512 if (dev->pio_mode > XFER_PIO_0) 3523 if (dev->pio_mode > XFER_PIO_0)
3513 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 3524 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
3514 } 3525 }
3515 } 3526 }
3516 3527
3517 if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) { 3528 if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
3518 /* disable device if it has used up all its chances */ 3529 /* disable device if it has used up all its chances */
3519 ata_dev_disable(dev); 3530 ata_dev_disable(dev);
3520 3531
3521 /* detach if offline */ 3532 /* detach if offline */
3522 if (ata_phys_link_offline(ata_dev_phys_link(dev))) 3533 if (ata_phys_link_offline(ata_dev_phys_link(dev)))
3523 ata_eh_detach_dev(dev); 3534 ata_eh_detach_dev(dev);
3524 3535
3525 /* schedule probe if necessary */ 3536 /* schedule probe if necessary */
3526 if (ata_eh_schedule_probe(dev)) { 3537 if (ata_eh_schedule_probe(dev)) {
3527 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 3538 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3528 memset(ehc->cmd_timeout_idx[dev->devno], 0, 3539 memset(ehc->cmd_timeout_idx[dev->devno], 0,
3529 sizeof(ehc->cmd_timeout_idx[dev->devno])); 3540 sizeof(ehc->cmd_timeout_idx[dev->devno]));
3530 } 3541 }
3531 3542
3532 return 1; 3543 return 1;
3533 } else { 3544 } else {
3534 ehc->i.action |= ATA_EH_RESET; 3545 ehc->i.action |= ATA_EH_RESET;
3535 return 0; 3546 return 0;
3536 } 3547 }
3537 } 3548 }
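
The switch above has no break statements on purpose: -ENODEV falls through to the -EINVAL clamp, and both fall through to the -EIO last-chance slowdown, so a more severe error also takes every milder error's measure. A small illustration of the same cascading fallthrough:

    #include <errno.h>
    #include <stdio.h>

    int main(void)
    {
            int err = -ENODEV;      /* device missing or bad IDENTIFY data */
            int tries = 3;

            /* deliberate fallthrough, mirroring the switch above */
            switch (err) {
            case -ENODEV:
                    printf("schedule probe\n");
                    /* fall through */
            case -EINVAL:
                    if (tries > 1)
                            tries = 1;      /* just one more chance */
                    /* fall through */
            case -EIO:
                    if (tries == 1)
                            printf("last chance: lower link speed\n");
            }
            printf("tries left: %d\n", tries);
            return 0;
    }
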
3538 3549
3539 /** 3550 /**
3540 * ata_eh_recover - recover host port after error 3551 * ata_eh_recover - recover host port after error
3541 * @ap: host port to recover 3552 * @ap: host port to recover
3542 * @prereset: prereset method (can be NULL) 3553 * @prereset: prereset method (can be NULL)
3543 * @softreset: softreset method (can be NULL) 3554 * @softreset: softreset method (can be NULL)
3544 * @hardreset: hardreset method (can be NULL) 3555 * @hardreset: hardreset method (can be NULL)
3545 * @postreset: postreset method (can be NULL) 3556 * @postreset: postreset method (can be NULL)
3546 * @r_failed_link: out parameter for failed link 3557 * @r_failed_link: out parameter for failed link
3547 * 3558 *
3548 * This is the alpha and omega, eum and yang, heart and soul of 3559 * This is the alpha and omega, eum and yang, heart and soul of
3549 * libata exception handling. On entry, actions required to 3560 * libata exception handling. On entry, actions required to
3550 * recover each link and hotplug requests are recorded in the 3561 * recover each link and hotplug requests are recorded in the
3551 * link's eh_context. This function executes all the operations 3562 * link's eh_context. This function executes all the operations
3552 * with appropriate retries and fallbacks to resurrect failed 3563 * with appropriate retries and fallbacks to resurrect failed
3553 * devices, detach goners and greet newcomers. 3564 * devices, detach goners and greet newcomers.
3554 * 3565 *
3555 * LOCKING: 3566 * LOCKING:
3556 * Kernel thread context (may sleep). 3567 * Kernel thread context (may sleep).
3557 * 3568 *
3558 * RETURNS: 3569 * RETURNS:
3559 * 0 on success, -errno on failure. 3570 * 0 on success, -errno on failure.
3560 */ 3571 */
3561 int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, 3572 int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3562 ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 3573 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3563 ata_postreset_fn_t postreset, 3574 ata_postreset_fn_t postreset,
3564 struct ata_link **r_failed_link) 3575 struct ata_link **r_failed_link)
3565 { 3576 {
3566 struct ata_link *link; 3577 struct ata_link *link;
3567 struct ata_device *dev; 3578 struct ata_device *dev;
3568 int rc, nr_fails; 3579 int rc, nr_fails;
3569 unsigned long flags, deadline; 3580 unsigned long flags, deadline;
3570 3581
3571 DPRINTK("ENTER\n"); 3582 DPRINTK("ENTER\n");
3572 3583
3573 /* prep for recovery */ 3584 /* prep for recovery */
3574 ata_for_each_link(link, ap, EDGE) { 3585 ata_for_each_link(link, ap, EDGE) {
3575 struct ata_eh_context *ehc = &link->eh_context; 3586 struct ata_eh_context *ehc = &link->eh_context;
3576 3587
3577 /* re-enable link? */ 3588 /* re-enable link? */
3578 if (ehc->i.action & ATA_EH_ENABLE_LINK) { 3589 if (ehc->i.action & ATA_EH_ENABLE_LINK) {
3579 ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK); 3590 ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
3580 spin_lock_irqsave(ap->lock, flags); 3591 spin_lock_irqsave(ap->lock, flags);
3581 link->flags &= ~ATA_LFLAG_DISABLED; 3592 link->flags &= ~ATA_LFLAG_DISABLED;
3582 spin_unlock_irqrestore(ap->lock, flags); 3593 spin_unlock_irqrestore(ap->lock, flags);
3583 ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK); 3594 ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
3584 } 3595 }
3585 3596
3586 ata_for_each_dev(dev, link, ALL) { 3597 ata_for_each_dev(dev, link, ALL) {
3587 if (link->flags & ATA_LFLAG_NO_RETRY) 3598 if (link->flags & ATA_LFLAG_NO_RETRY)
3588 ehc->tries[dev->devno] = 1; 3599 ehc->tries[dev->devno] = 1;
3589 else 3600 else
3590 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES; 3601 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3591 3602
3592 /* collect port action mask recorded in dev actions */ 3603 /* collect port action mask recorded in dev actions */
3593 ehc->i.action |= ehc->i.dev_action[dev->devno] & 3604 ehc->i.action |= ehc->i.dev_action[dev->devno] &
3594 ~ATA_EH_PERDEV_MASK; 3605 ~ATA_EH_PERDEV_MASK;
3595 ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK; 3606 ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;
3596 3607
3597 /* process hotplug request */ 3608 /* process hotplug request */
3598 if (dev->flags & ATA_DFLAG_DETACH) 3609 if (dev->flags & ATA_DFLAG_DETACH)
3599 ata_eh_detach_dev(dev); 3610 ata_eh_detach_dev(dev);
3600 3611
3601 /* schedule probe if necessary */ 3612 /* schedule probe if necessary */
3602 if (!ata_dev_enabled(dev)) 3613 if (!ata_dev_enabled(dev))
3603 ata_eh_schedule_probe(dev); 3614 ata_eh_schedule_probe(dev);
3604 } 3615 }
3605 } 3616 }
3606 3617
3607 retry: 3618 retry:
3608 rc = 0; 3619 rc = 0;
3609 3620
3610 /* if UNLOADING, finish immediately */ 3621 /* if UNLOADING, finish immediately */
3611 if (ap->pflags & ATA_PFLAG_UNLOADING) 3622 if (ap->pflags & ATA_PFLAG_UNLOADING)
3612 goto out; 3623 goto out;
3613 3624
3614 /* prep for EH */ 3625 /* prep for EH */
3615 ata_for_each_link(link, ap, EDGE) { 3626 ata_for_each_link(link, ap, EDGE) {
3616 struct ata_eh_context *ehc = &link->eh_context; 3627 struct ata_eh_context *ehc = &link->eh_context;
3617 3628
3618 /* skip EH if possible. */ 3629 /* skip EH if possible. */
3619 if (ata_eh_skip_recovery(link)) 3630 if (ata_eh_skip_recovery(link))
3620 ehc->i.action = 0; 3631 ehc->i.action = 0;
3621 3632
3622 ata_for_each_dev(dev, link, ALL) 3633 ata_for_each_dev(dev, link, ALL)
3623 ehc->classes[dev->devno] = ATA_DEV_UNKNOWN; 3634 ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
3624 } 3635 }
3625 3636
3626 /* reset */ 3637 /* reset */
3627 ata_for_each_link(link, ap, EDGE) { 3638 ata_for_each_link(link, ap, EDGE) {
3628 struct ata_eh_context *ehc = &link->eh_context; 3639 struct ata_eh_context *ehc = &link->eh_context;
3629 3640
3630 if (!(ehc->i.action & ATA_EH_RESET)) 3641 if (!(ehc->i.action & ATA_EH_RESET))
3631 continue; 3642 continue;
3632 3643
3633 rc = ata_eh_reset(link, ata_link_nr_vacant(link), 3644 rc = ata_eh_reset(link, ata_link_nr_vacant(link),
3634 prereset, softreset, hardreset, postreset); 3645 prereset, softreset, hardreset, postreset);
3635 if (rc) { 3646 if (rc) {
3636 ata_link_printk(link, KERN_ERR, 3647 ata_link_printk(link, KERN_ERR,
3637 "reset failed, giving up\n"); 3648 "reset failed, giving up\n");
3638 goto out; 3649 goto out;
3639 } 3650 }
3640 } 3651 }
3641 3652
3642 do { 3653 do {
3643 unsigned long now; 3654 unsigned long now;
3644 3655
3645 /* 3656 /*
3646 * clears ATA_EH_PARK in eh_info and resets 3657 * clears ATA_EH_PARK in eh_info and resets
3647 * ap->park_req_pending 3658 * ap->park_req_pending
3648 */ 3659 */
3649 ata_eh_pull_park_action(ap); 3660 ata_eh_pull_park_action(ap);
3650 3661
3651 deadline = jiffies; 3662 deadline = jiffies;
3652 ata_for_each_link(link, ap, EDGE) { 3663 ata_for_each_link(link, ap, EDGE) {
3653 ata_for_each_dev(dev, link, ALL) { 3664 ata_for_each_dev(dev, link, ALL) {
3654 struct ata_eh_context *ehc = &link->eh_context; 3665 struct ata_eh_context *ehc = &link->eh_context;
3655 unsigned long tmp; 3666 unsigned long tmp;
3656 3667
3657 if (dev->class != ATA_DEV_ATA) 3668 if (dev->class != ATA_DEV_ATA)
3658 continue; 3669 continue;
3659 if (!(ehc->i.dev_action[dev->devno] & 3670 if (!(ehc->i.dev_action[dev->devno] &
3660 ATA_EH_PARK)) 3671 ATA_EH_PARK))
3661 continue; 3672 continue;
3662 tmp = dev->unpark_deadline; 3673 tmp = dev->unpark_deadline;
3663 if (time_before(deadline, tmp)) 3674 if (time_before(deadline, tmp))
3664 deadline = tmp; 3675 deadline = tmp;
3665 else if (time_before_eq(tmp, jiffies)) 3676 else if (time_before_eq(tmp, jiffies))
3666 continue; 3677 continue;
3667 if (ehc->unloaded_mask & (1 << dev->devno)) 3678 if (ehc->unloaded_mask & (1 << dev->devno))
3668 continue; 3679 continue;
3669 3680
3670 ata_eh_park_issue_cmd(dev, 1); 3681 ata_eh_park_issue_cmd(dev, 1);
3671 } 3682 }
3672 } 3683 }
3673 3684
3674 now = jiffies; 3685 now = jiffies;
3675 if (time_before_eq(deadline, now)) 3686 if (time_before_eq(deadline, now))
3676 break; 3687 break;
3677 3688
3678 ata_eh_release(ap); 3689 ata_eh_release(ap);
3679 deadline = wait_for_completion_timeout(&ap->park_req_pending, 3690 deadline = wait_for_completion_timeout(&ap->park_req_pending,
3680 deadline - now); 3691 deadline - now);
3681 ata_eh_acquire(ap); 3692 ata_eh_acquire(ap);
3682 } while (deadline); 3693 } while (deadline);
3683 ata_for_each_link(link, ap, EDGE) { 3694 ata_for_each_link(link, ap, EDGE) {
3684 ata_for_each_dev(dev, link, ALL) { 3695 ata_for_each_dev(dev, link, ALL) {
3685 if (!(link->eh_context.unloaded_mask & 3696 if (!(link->eh_context.unloaded_mask &
3686 (1 << dev->devno))) 3697 (1 << dev->devno)))
3687 continue; 3698 continue;
3688 3699
3689 ata_eh_park_issue_cmd(dev, 0); 3700 ata_eh_park_issue_cmd(dev, 0);
3690 ata_eh_done(link, dev, ATA_EH_PARK); 3701 ata_eh_done(link, dev, ATA_EH_PARK);
3691 } 3702 }
3692 } 3703 }
3693 3704
3694 /* the rest */ 3705 /* the rest */
3695 nr_fails = 0; 3706 nr_fails = 0;
3696 ata_for_each_link(link, ap, PMP_FIRST) { 3707 ata_for_each_link(link, ap, PMP_FIRST) {
3697 struct ata_eh_context *ehc = &link->eh_context; 3708 struct ata_eh_context *ehc = &link->eh_context;
3698 3709
3699 if (sata_pmp_attached(ap) && ata_is_host_link(link)) 3710 if (sata_pmp_attached(ap) && ata_is_host_link(link))
3700 goto config_lpm; 3711 goto config_lpm;
3701 3712
3702 /* revalidate existing devices and attach new ones */ 3713 /* revalidate existing devices and attach new ones */
3703 rc = ata_eh_revalidate_and_attach(link, &dev); 3714 rc = ata_eh_revalidate_and_attach(link, &dev);
3704 if (rc) 3715 if (rc)
3705 goto rest_fail; 3716 goto rest_fail;
3706 3717
3707 /* if PMP got attached, return, pmp EH will take care of it */ 3718 /* if PMP got attached, return, pmp EH will take care of it */
3708 if (link->device->class == ATA_DEV_PMP) { 3719 if (link->device->class == ATA_DEV_PMP) {
3709 ehc->i.action = 0; 3720 ehc->i.action = 0;
3710 return 0; 3721 return 0;
3711 } 3722 }
3712 3723
3713 /* configure transfer mode if necessary */ 3724 /* configure transfer mode if necessary */
3714 if (ehc->i.flags & ATA_EHI_SETMODE) { 3725 if (ehc->i.flags & ATA_EHI_SETMODE) {
3715 rc = ata_set_mode(link, &dev); 3726 rc = ata_set_mode(link, &dev);
3716 if (rc) 3727 if (rc)
3717 goto rest_fail; 3728 goto rest_fail;
3718 ehc->i.flags &= ~ATA_EHI_SETMODE; 3729 ehc->i.flags &= ~ATA_EHI_SETMODE;
3719 } 3730 }
3720 3731
3721 /* If reset has been issued, clear UA to avoid 3732 /* If reset has been issued, clear UA to avoid
3722 * disrupting the current users of the device. 3733 * disrupting the current users of the device.
3723 */ 3734 */
3724 if (ehc->i.flags & ATA_EHI_DID_RESET) { 3735 if (ehc->i.flags & ATA_EHI_DID_RESET) {
3725 ata_for_each_dev(dev, link, ALL) { 3736 ata_for_each_dev(dev, link, ALL) {
3726 if (dev->class != ATA_DEV_ATAPI) 3737 if (dev->class != ATA_DEV_ATAPI)
3727 continue; 3738 continue;
3728 rc = atapi_eh_clear_ua(dev); 3739 rc = atapi_eh_clear_ua(dev);
3729 if (rc) 3740 if (rc)
3730 goto rest_fail; 3741 goto rest_fail;
3731 } 3742 }
3732 } 3743 }
3733 3744
3734 /* retry flush if necessary */ 3745 /* retry flush if necessary */
3735 ata_for_each_dev(dev, link, ALL) { 3746 ata_for_each_dev(dev, link, ALL) {
3736 if (dev->class != ATA_DEV_ATA) 3747 if (dev->class != ATA_DEV_ATA)
3737 continue; 3748 continue;
3738 rc = ata_eh_maybe_retry_flush(dev); 3749 rc = ata_eh_maybe_retry_flush(dev);
3739 if (rc) 3750 if (rc)
3740 goto rest_fail; 3751 goto rest_fail;
3741 } 3752 }
3742 3753
3743 config_lpm: 3754 config_lpm:
3744 /* configure link power saving */ 3755 /* configure link power saving */
3745 if (link->lpm_policy != ap->target_lpm_policy) { 3756 if (link->lpm_policy != ap->target_lpm_policy) {
3746 rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev); 3757 rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
3747 if (rc) 3758 if (rc)
3748 goto rest_fail; 3759 goto rest_fail;
3749 } 3760 }
3750 3761
3751 /* this link is okay now */ 3762 /* this link is okay now */
3752 ehc->i.flags = 0; 3763 ehc->i.flags = 0;
3753 continue; 3764 continue;
3754 3765
3755 rest_fail: 3766 rest_fail:
3756 nr_fails++; 3767 nr_fails++;
3757 if (dev) 3768 if (dev)
3758 ata_eh_handle_dev_fail(dev, rc); 3769 ata_eh_handle_dev_fail(dev, rc);
3759 3770
3760 if (ap->pflags & ATA_PFLAG_FROZEN) { 3771 if (ap->pflags & ATA_PFLAG_FROZEN) {
3761 /* PMP reset requires working host port. 3772 /* PMP reset requires working host port.
3762 * Can't retry if it's frozen. 3773 * Can't retry if it's frozen.
3763 */ 3774 */
3764 if (sata_pmp_attached(ap)) 3775 if (sata_pmp_attached(ap))
3765 goto out; 3776 goto out;
3766 break; 3777 break;
3767 } 3778 }
3768 } 3779 }
3769 3780
3770 if (nr_fails) 3781 if (nr_fails)
3771 goto retry; 3782 goto retry;
3772 3783
3773 out: 3784 out:
3774 if (rc && r_failed_link) 3785 if (rc && r_failed_link)
3775 *r_failed_link = link; 3786 *r_failed_link = link;
3776 3787
3777 DPRINTK("EXIT, rc=%d\n", rc); 3788 DPRINTK("EXIT, rc=%d\n", rc);
3778 return rc; 3789 return rc;
3779 } 3790 }

/**
 *	ata_eh_finish - finish up EH
 *	@ap: host port to finish EH for
 *
 *	Recovery is complete.  Clean up EH states and retry or finish
 *	failed qcs.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_finish(struct ata_port *ap)
{
	int tag;

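	/*
	 * ata_eh_qc_complete() finishes a failed command as-is, while
	 * ata_eh_qc_retry() hands it back to the upper layer for
	 * another attempt; both are defined earlier in this file.
	 */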
	/* retry or finish qcs */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask) {
			/* FIXME: Once EH migration is complete,
			 * generate sense data in this function,
			 * considering both err_mask and tf.
			 */
			if (qc->flags & ATA_QCFLAG_RETRY)
				ata_eh_qc_retry(qc);
			else
				ata_eh_qc_complete(qc);
		} else {
			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
				ata_eh_qc_complete(qc);
			} else {
				/* feed zero TF to sense generation */
				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
				ata_eh_qc_retry(qc);
			}
		}
	}

	/* make sure nr_active_links is zero after EH */
	WARN_ON(ap->nr_active_links);
	ap->nr_active_links = 0;
}

/**
 *	ata_do_eh - do standard error handling
 *	@ap: host port to handle error for
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *
 *	Perform standard error handling sequence.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
	       ata_postreset_fn_t postreset)
{
	struct ata_device *dev;
	int rc;

	ata_eh_autopsy(ap);
	ata_eh_report(ap);

	rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
			    NULL);
	if (rc) {
		ata_for_each_dev(dev, &ap->link, ALL)
			ata_dev_disable(dev);
	}

	ata_eh_finish(ap);
}

/**
 *	ata_std_error_handler - standard error handler
 *	@ap: host port to handle error for
 *
 *	Standard error handler.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_std_error_handler(struct ata_port *ap)
{
	struct ata_port_operations *ops = ap->ops;
	ata_reset_fn_t hardreset = ops->hardreset;

	/* ignore built-in hardreset if SCR access is not available */
	if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
		hardreset = NULL;

	ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
}
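
/*
 * A minimal sketch of how this handler reaches a low-level driver:
 * drivers typically pick it up via .inherits from ata_base_port_ops,
 * or set the field directly.  The structure below is illustrative,
 * not taken from this file:
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &ata_base_port_ops,
 *		.error_handler	= ata_std_error_handler,
 *	};
 */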

#ifdef CONFIG_PM
/**
 *	ata_eh_handle_port_suspend - perform port suspend operation
 *	@ap: port to suspend
 *
 *	Suspend @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{
	unsigned long flags;
	int rc = 0;

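	/*
	 * ap->pm_mesg and ATA_PFLAG_PM_PENDING are set up by the PM
	 * request path before EH runs; a PM_EVENT_ON message at this
	 * point indicates a resume request, which is not handled here.
	 */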
	/* are we suspending? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event == PM_EVENT_ON) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);

	/* tell ACPI we're suspending */
	rc = ata_acpi_on_suspend(ap);
	if (rc)
		goto out;

	/* suspend */
	ata_eh_freeze_port(ap);

	if (ap->ops->port_suspend)
		rc = ap->ops->port_suspend(ap, ap->pm_mesg);

	ata_acpi_set_state(ap, PMSG_SUSPEND);
 out:
	/* report result */
	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
	if (rc == 0)
		ap->pflags |= ATA_PFLAG_SUSPENDED;
	else if (ap->pflags & ATA_PFLAG_FROZEN)
		ata_port_schedule_eh(ap);

	if (ap->pm_result) {
		*ap->pm_result = rc;
		ap->pm_result = NULL;
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return;
}

/**
 *	ata_eh_handle_port_resume - perform port resume operation
 *	@ap: port to resume
 *
 *	Resume @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_resume(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;
	int rc = 0;

	/* are we resuming? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event != PM_EVENT_ON) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));

	/*
	 * Error timestamps are in jiffies, which doesn't run while
	 * suspended, and PHY events during resume aren't too uncommon.
	 * When the two are combined, it can lead to unnecessary speed
	 * downs if the machine is suspended and resumed repeatedly.
	 * Clear error history.
	 */
	ata_for_each_link(link, ap, HOST_FIRST)
		ata_for_each_dev(dev, link, ALL)
			ata_ering_clear(&dev->ering);

	ata_acpi_set_state(ap, PMSG_ON);

	if (ap->ops->port_resume)
		rc = ap->ops->port_resume(ap);

	/* tell ACPI that we're resuming */
	ata_acpi_on_resume(ap);

	/* report result */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
	if (ap->pm_result) {
		*ap->pm_result = rc;
		ap->pm_result = NULL;
	}
	spin_unlock_irqrestore(ap->lock, flags);