Commit 87805162b6af20d2ad386a49aec13b753cca523a
Committed by: Dan Williams
Parent: 1f05388933
Exists in: smarc-l5.0.0_1.0.0-ga and 5 other branches
isci: Restore the ATAPI device RNC management code.
The ATAPI-specific and STP general RNC suspension code had been
incorrectly removed from the remote device code.

Signed-off-by: Jeff Skirvin <jeffrey.d.skirvin@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Showing 3 changed files with 34 additions and 20 deletions (inline diff).
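For orientation before the diff: the change makes sci_remote_device_suspend() non-static and restores a device-state decode at the end of sci_remote_device_event_handler(), so that a TX or TX/RX suspension event resumes the RNC when the device is in the ATAPI-error or STP-idle substate. A condensed sketch of that restored flow, with identifiers taken from the hunk below (the surrounding event dispatch and error handling are omitted):

	/* Condensed from the restored hunk in sci_remote_device_event_handler().
	 * 'state' is the device state sampled at entry; 'event_code' is the
	 * SCU event being decoded.
	 */
	u32 type = scu_get_event_type(event_code);

	if (type == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
	    type == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX) {
		if (state == SCI_STP_DEV_ATAPI_ERROR)
			/* Resume immediately; the callback completes the
			 * working ATAPI request.
			 */
			return sci_remote_node_context_resume(&idev->rnc,
					atapi_remote_device_resume_done, idev);
		if (state == SCI_STP_DEV_IDLE)
			/* Idle STP devices also resume right away, with no
			 * completion callback.
			 */
			status = sci_remote_node_context_resume(&idev->rnc,
					NULL, NULL);
	}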
drivers/scsi/isci/remote_device.c
/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <scsi/sas.h>
#include <linux/bitops.h>
#include "isci.h"
#include "port.h"
#include "remote_device.h"
#include "request.h"
#include "remote_node_context.h"
#include "scu_event_codes.h"
#include "task.h"

#undef C
#define C(a) (#a)
const char *dev_state_name(enum sci_remote_device_states state)
{
	static const char * const strings[] = REMOTE_DEV_STATES;

	return strings[state];
}
#undef C

-static enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
+enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
					  enum sci_remote_node_suspension_reasons reason)
{
	return sci_remote_node_context_suspend(&idev->rnc, reason,
					       SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);
}

/**
 * isci_remote_device_ready() - This function is called by the ihost when the
 *    remote device is ready. We mark the isci device as ready and signal the
 *    waiting process.
 * @ihost: our valid isci_host
 * @idev: remote device
 *
 */
static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev)
{
	dev_dbg(&ihost->pdev->dev,
		"%s: idev = %p\n", __func__, idev);

	clear_bit(IDEV_IO_NCQERROR, &idev->flags);
	set_bit(IDEV_IO_READY, &idev->flags);
	if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags))
		wake_up(&ihost->eventq);
}

static enum sci_status sci_remote_device_terminate_req(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	int check_abort,
	struct isci_request *ireq)
{
	if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
	    (ireq->target_device != idev) ||
	    (check_abort && !test_bit(IREQ_PENDING_ABORT, &ireq->flags)))
		return SCI_SUCCESS;

	dev_dbg(&ihost->pdev->dev,
		"%s: idev=%p; flags=%lx; req=%p; req target=%p\n",
		__func__, idev, idev->flags, ireq, ireq->target_device);

	set_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);

	return sci_controller_terminate_request(ihost, idev, ireq);
}

static enum sci_status sci_remote_device_terminate_reqs_checkabort(
	struct isci_remote_device *idev,
	int chk)
{
	struct isci_host *ihost = idev->owning_port->owning_controller;
	enum sci_status status = SCI_SUCCESS;
	u32 i;

	for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
		struct isci_request *ireq = ihost->reqs[i];
		enum sci_status s;

		s = sci_remote_device_terminate_req(ihost, idev, chk, ireq);
		if (s != SCI_SUCCESS)
			status = s;
	}
	return status;
}

static bool isci_compare_suspendcount(
	struct isci_remote_device *idev,
	u32 localcount)
{
	smp_rmb();

	/* Check for a change in the suspend count, or the RNC
	 * being destroyed.
	 */
	return (localcount != idev->rnc.suspend_count)
	    || sci_remote_node_context_is_being_destroyed(&idev->rnc);
}

static bool isci_check_reqterm(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	struct isci_request *ireq,
	u32 localcount)
{
	unsigned long flags;
	bool res;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	res = isci_compare_suspendcount(idev, localcount)
		&& !test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	return res;
}

static bool isci_check_devempty(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	u32 localcount)
{
	unsigned long flags;
	bool res;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	res = isci_compare_suspendcount(idev, localcount)
		&& idev->started_request_count == 0;
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	return res;
}

enum sci_status isci_remote_device_terminate_requests(
	struct isci_host *ihost,
	struct isci_remote_device *idev,
	struct isci_request *ireq)
{
	enum sci_status status = SCI_SUCCESS;
	unsigned long flags;
	u32 rnc_suspend_count;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (isci_get_device(idev) == NULL) {
		dev_dbg(&ihost->pdev->dev, "%s: failed isci_get_device(idev=%p)\n",
			__func__, idev);
		spin_unlock_irqrestore(&ihost->scic_lock, flags);
		status = SCI_FAILURE;
	} else {
		/* If already suspended, don't wait for another suspension. */
		smp_rmb();
		rnc_suspend_count
			= sci_remote_node_context_is_suspended(&idev->rnc)
				? 0 : idev->rnc.suspend_count;

		dev_dbg(&ihost->pdev->dev,
			"%s: idev=%p, ireq=%p; started_request_count=%d, "
			"rnc_suspend_count=%d, rnc.suspend_count=%d; "
			"about to wait\n",
			__func__, idev, ireq, idev->started_request_count,
			rnc_suspend_count, idev->rnc.suspend_count);

#define MAX_SUSPEND_MSECS 10000
		if (ireq) {
			/* Terminate a specific TC. */
			set_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
			sci_remote_device_terminate_req(ihost, idev, 0, ireq);
			spin_unlock_irqrestore(&ihost->scic_lock, flags);
			if (!wait_event_timeout(ihost->eventq,
						isci_check_reqterm(ihost, idev, ireq,
								   rnc_suspend_count),
						msecs_to_jiffies(MAX_SUSPEND_MSECS))) {

				dev_warn(&ihost->pdev->dev, "%s host%d timeout single\n",
					 __func__, ihost->id);
				dev_dbg(&ihost->pdev->dev,
					"%s: ******* Timeout waiting for "
					"suspend; idev=%p, current state %s; "
					"started_request_count=%d, flags=%lx\n\t"
					"rnc_suspend_count=%d, rnc.suspend_count=%d "
					"RNC: current state %s, current "
					"suspend_type %x dest state %d;\n"
					"ireq=%p, ireq->flags = %lx\n",
					__func__, idev,
					dev_state_name(idev->sm.current_state_id),
					idev->started_request_count, idev->flags,
					rnc_suspend_count, idev->rnc.suspend_count,
					rnc_state_name(idev->rnc.sm.current_state_id),
					idev->rnc.suspend_type,
					idev->rnc.destination_state,
					ireq, ireq->flags);
			}
			clear_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
			isci_free_tag(ihost, ireq->io_tag);
		} else {
			/* Terminate all TCs. */
			sci_remote_device_terminate_requests(idev);
			spin_unlock_irqrestore(&ihost->scic_lock, flags);
			if (!wait_event_timeout(ihost->eventq,
						isci_check_devempty(ihost, idev,
								    rnc_suspend_count),
						msecs_to_jiffies(MAX_SUSPEND_MSECS))) {

				dev_warn(&ihost->pdev->dev, "%s host%d timeout all\n",
					 __func__, ihost->id);
				dev_dbg(&ihost->pdev->dev,
					"%s: ******* Timeout waiting for "
					"suspend; idev=%p, current state %s; "
					"started_request_count=%d, flags=%lx\n\t"
					"rnc_suspend_count=%d, "
					"RNC: current state %s, "
					"rnc.suspend_count=%d, current "
					"suspend_type %x dest state %d\n",
					__func__, idev,
					dev_state_name(idev->sm.current_state_id),
					idev->started_request_count, idev->flags,
					rnc_suspend_count,
					rnc_state_name(idev->rnc.sm.current_state_id),
					idev->rnc.suspend_count,
					idev->rnc.suspend_type,
					idev->rnc.destination_state);
			}
		}
		dev_dbg(&ihost->pdev->dev, "%s: idev=%p, wait done\n",
			__func__, idev);
		isci_put_device(idev);
	}
	return status;
}

/**
 * isci_remote_device_not_ready() - This function is called by the ihost when
 *    the remote device is not ready. We mark the isci device as ready (not
 *    "ready_for_io") and signal the waiting process.
 * @isci_host: This parameter specifies the isci host object.
 * @isci_device: This parameter specifies the remote device.
 *
 * sci_lock is held on entrance to this function.
 */
static void isci_remote_device_not_ready(struct isci_host *ihost,
					 struct isci_remote_device *idev,
					 u32 reason)
{
	dev_dbg(&ihost->pdev->dev,
		"%s: isci_device = %p; reason = %d\n", __func__, idev, reason);

	switch (reason) {
	case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
		set_bit(IDEV_IO_NCQERROR, &idev->flags);

		/* Suspend the remote device so the I/O can be terminated. */
		sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);

		/* Kill all outstanding requests for the device. */
		sci_remote_device_terminate_requests(idev);

		/* Fall through into the default case... */
	default:
		clear_bit(IDEV_IO_READY, &idev->flags);
		break;
	}
}

/* Called once the remote node context is ready to be freed.
 * The remote device can now report that its stop operation is complete.
 */
static void rnc_destruct_done(void *_dev)
{
	struct isci_remote_device *idev = _dev;

	BUG_ON(idev->started_request_count != 0);
	sci_change_state(&idev->sm, SCI_DEV_STOPPED);
}

enum sci_status sci_remote_device_terminate_requests(
	struct isci_remote_device *idev)
{
	return sci_remote_device_terminate_reqs_checkabort(idev, 0);
}

enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
				       u32 timeout)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_FAILED:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_STOPPED:
		return SCI_SUCCESS;
	case SCI_DEV_STARTING:
		/* device not started so there had better be no requests */
		BUG_ON(idev->started_request_count != 0);
		sci_remote_node_context_destruct(&idev->rnc,
						 rnc_destruct_done, idev);
		/* Transition to the stopping state and wait for the
		 * remote node to complete being posted and invalidated.
		 */
		sci_change_state(sm, SCI_DEV_STOPPING);
		return SCI_SUCCESS;
	case SCI_DEV_READY:
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
		sci_change_state(sm, SCI_DEV_STOPPING);
		if (idev->started_request_count == 0)
			sci_remote_node_context_destruct(&idev->rnc,
							 rnc_destruct_done,
							 idev);
		else {
			sci_remote_device_suspend(
				idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
			sci_remote_device_terminate_requests(idev);
		}
		return SCI_SUCCESS;
	case SCI_DEV_STOPPING:
		/* All requests should have been terminated, but if there is an
		 * attempt to stop a device already in the stopping state, then
		 * try again to terminate.
		 */
		return sci_remote_device_terminate_requests(idev);
	case SCI_DEV_RESETTING:
		sci_change_state(sm, SCI_DEV_STOPPING);
		return SCI_SUCCESS;
	}
}

enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
		sci_change_state(sm, SCI_DEV_RESETTING);
		return SCI_SUCCESS;
	}
}

enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	if (state != SCI_DEV_RESETTING) {
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_change_state(sm, SCI_DEV_READY);
	return SCI_SUCCESS;
}

enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
						u32 frame_index)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_host *ihost = idev->owning_port->owning_controller;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_STP_DEV_IDLE:
	case SCI_SMP_DEV_IDLE:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		/* Return the frame back to the controller */
		sci_controller_release_frame(ihost, frame_index);
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING: {
		struct isci_request *ireq;
		struct ssp_frame_hdr hdr;
		void *frame_header;
		ssize_t word_cnt;

		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
								  frame_index,
								  &frame_header);
		if (status != SCI_SUCCESS)
			return status;

		word_cnt = sizeof(hdr) / sizeof(u32);
		sci_swab32_cpy(&hdr, frame_header, word_cnt);

		ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag));
		if (ireq && ireq->target_device == idev) {
			/* The IO request is now in charge of releasing the frame */
			status = sci_io_request_frame_handler(ireq, frame_index);
		} else {
			/* We could not map this tag to a valid IO
			 * request. Just toss the frame and continue.
			 */
			sci_controller_release_frame(ihost, frame_index);
		}
		break;
	}
	case SCI_STP_DEV_NCQ: {
		struct dev_to_host_fis *hdr;

		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
								  frame_index,
								  (void **)&hdr);
		if (status != SCI_SUCCESS)
			return status;

		if (hdr->fis_type == FIS_SETDEVBITS &&
		    (hdr->status & ATA_ERR)) {
			idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;

			/* TODO Check sactive and complete associated IO if any. */
			sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR);
		} else if (hdr->fis_type == FIS_REGD2H &&
			   (hdr->status & ATA_ERR)) {
			/*
			 * Some devices return a D2H FIS when an NCQ error is detected.
			 * Treat this like an SDB error FIS ready reason.
			 */
			idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
			sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR);
		} else
			status = SCI_FAILURE;

		sci_controller_release_frame(ihost, frame_index);
		break;
	}
	case SCI_STP_DEV_CMD:
	case SCI_SMP_DEV_CMD:
		/* The device does not process any UF received from the hardware while
		 * in this state. All unsolicited frames are forwarded to the io request
		 * object.
		 */
		status = sci_io_request_frame_handler(idev->working_request, frame_index);
		break;
	}

	return status;
}

static bool is_remote_device_ready(struct isci_remote_device *idev)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;

	switch (state) {
	case SCI_DEV_READY:
	case SCI_STP_DEV_IDLE:
	case SCI_STP_DEV_CMD:
	case SCI_STP_DEV_NCQ:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_STP_DEV_AWAIT_RESET:
	case SCI_SMP_DEV_IDLE:
	case SCI_SMP_DEV_CMD:
		return true;
	default:
		return false;
	}
}

/*
 * Called once the remote node context has transitioned to a ready
 * state (after suspending RX and/or TX due to an early D2H FIS).
 */
static void atapi_remote_device_resume_done(void *_dev)
{
	struct isci_remote_device *idev = _dev;
	struct isci_request *ireq = idev->working_request;

	sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
}

enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
						u32 event_code)
{
	enum sci_status status;
+	struct sci_base_state_machine *sm = &idev->sm;
+	enum sci_remote_device_states state = sm->current_state_id;

	switch (scu_get_event_type(event_code)) {
	case SCU_EVENT_TYPE_RNC_OPS_MISC:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
	case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
		status = sci_remote_node_context_event_handler(&idev->rnc, event_code);
		break;
	case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
		if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) {
			status = SCI_SUCCESS;

			/* Suspend the associated RNC */
			sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);

			dev_dbg(scirdev_to_dev(idev),
				"%s: device: %p event code: %x: %s\n",
				__func__, idev, event_code,
				is_remote_device_ready(idev)
				? "I_T_Nexus_Timeout event"
				: "I_T_Nexus_Timeout event in wrong state");

			break;
		}
		/* Else, fall through and treat as unhandled... */
	default:
		dev_dbg(scirdev_to_dev(idev),
			"%s: device: %p event code: %x: %s\n",
			__func__, idev, event_code,
			is_remote_device_ready(idev)
			? "unexpected event"
			: "unexpected event in wrong state");
		status = SCI_FAILURE_INVALID_STATE;
		break;
	}

	if (status != SCI_SUCCESS)
		return status;

+	/* Decode device-specific states that may require an RNC resume during
+	 * normal operation. When the abort path is active, these resumes are
+	 * managed when the abort path exits.
+	 */
+	if (state == SCI_STP_DEV_ATAPI_ERROR) {
+		/* For ATAPI error state resume the RNC right away. */
+		if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
+		    scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX) {
+			return sci_remote_node_context_resume(&idev->rnc,
+							      atapi_remote_device_resume_done,
+							      idev);
+		}
+	}
+
+	if (state == SCI_STP_DEV_IDLE) {
+
+		/* We pick up suspension events to handle specifically to this
+		 * state. We resume the RNC right away.
+		 */
+		if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
+		    scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX)
+			status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
+	}
+
	return status;
}

static void sci_remote_device_start_request(struct isci_remote_device *idev,
					    struct isci_request *ireq,
					    enum sci_status status)
{
	struct isci_port *iport = idev->owning_port;

	/* cleanup requests that failed after starting on the port */
	if (status != SCI_SUCCESS)
		sci_port_complete_io(iport, idev, ireq);
	else {
		kref_get(&idev->kref);
		idev->started_request_count++;
	}
}

enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
					   struct isci_remote_device *idev,
					   struct isci_request *ireq)
{
	struct sci_base_state_machine *sm = &idev->sm;
	enum sci_remote_device_states state = sm->current_state_id;
	struct isci_port *iport = idev->owning_port;
	enum sci_status status;

	switch (state) {
	case SCI_DEV_INITIAL:
	case SCI_DEV_STOPPED:
	case SCI_DEV_STARTING:
	case SCI_STP_DEV_NCQ_ERROR:
	case SCI_DEV_STOPPING:
	case SCI_DEV_FAILED:
	case SCI_DEV_RESETTING:
	case SCI_DEV_FINAL:
	default:
		dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
			 __func__, dev_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_DEV_READY:
		/* attempt to start an io request for this device object. The remote
		 * device object will issue the start request for the io and if
		 * successful it will start the request for the port object then
		 * increment its own request count.
		 */
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		break;
	case SCI_STP_DEV_IDLE: {
		/* handle the start io operation for a sata device that is in
		 * the command idle state. - Evaluate the type of IO request to
		 * be started. - If it is an NCQ request, change to the NCQ
		 * substate. - If it is any other command, change to the CMD
		 * substate.
		 *
		 * If this is a softreset we may want to have a different
		 * substate.
		 */
		enum sci_remote_device_states new_state;
		struct sas_task *task = isci_request_access_task(ireq);

		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		if (status != SCI_SUCCESS)
			break;

		if (task->ata_task.use_ncq)
			new_state = SCI_STP_DEV_NCQ;
		else {
			idev->working_request = ireq;
			new_state = SCI_STP_DEV_CMD;
		}
		sci_change_state(sm, new_state);
		break;
	}
	case SCI_STP_DEV_NCQ: {
		struct sas_task *task = isci_request_access_task(ireq);

		if (task->ata_task.use_ncq) {
			status = sci_port_start_io(iport, idev, ireq);
			if (status != SCI_SUCCESS)
				return status;

			status = sci_remote_node_context_start_io(&idev->rnc, ireq);
			if (status != SCI_SUCCESS)
				break;

			status = sci_request_start(ireq);
		} else
			return SCI_FAILURE_INVALID_STATE;
		break;
	}
	case SCI_STP_DEV_AWAIT_RESET:
		return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
	case SCI_SMP_DEV_IDLE:
		status = sci_port_start_io(iport, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		status = sci_remote_node_context_start_io(&idev->rnc, ireq);
		if (status != SCI_SUCCESS)
			break;

		status = sci_request_start(ireq);
		if (status != SCI_SUCCESS)
			break;

		idev->working_request = ireq;
		sci_change_state(&idev->sm, SCI_SMP_DEV_CMD);
		break;
	case SCI_STP_DEV_CMD:
	case SCI_SMP_DEV_CMD:
		/* device is already handling a command; it cannot accept new
		 * commands until this one is complete.
		 */
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_remote_device_start_request(idev, ireq, status);
	return status;
}

static enum sci_status common_complete_io(struct isci_port *iport,
					  struct isci_remote_device *idev,
					  struct isci_request *ireq)
{
	enum sci_status status;

	status = sci_request_complete(ireq);
	if (status != SCI_SUCCESS)
		return status;

	status = sci_port_complete_io(iport, idev, ireq);
	if (status != SCI_SUCCESS)
		return status;

	sci_remote_device_decrement_request_count(idev);
	return status;
}

760 | enum sci_status sci_remote_device_complete_io(struct isci_host *ihost, | 786 | enum sci_status sci_remote_device_complete_io(struct isci_host *ihost, |
761 | struct isci_remote_device *idev, | 787 | struct isci_remote_device *idev, |
762 | struct isci_request *ireq) | 788 | struct isci_request *ireq) |
763 | { | 789 | { |
764 | struct sci_base_state_machine *sm = &idev->sm; | 790 | struct sci_base_state_machine *sm = &idev->sm; |
765 | enum sci_remote_device_states state = sm->current_state_id; | 791 | enum sci_remote_device_states state = sm->current_state_id; |
766 | struct isci_port *iport = idev->owning_port; | 792 | struct isci_port *iport = idev->owning_port; |
767 | enum sci_status status; | 793 | enum sci_status status; |
768 | 794 | ||
769 | switch (state) { | 795 | switch (state) { |
770 | case SCI_DEV_INITIAL: | 796 | case SCI_DEV_INITIAL: |
771 | case SCI_DEV_STOPPED: | 797 | case SCI_DEV_STOPPED: |
772 | case SCI_DEV_STARTING: | 798 | case SCI_DEV_STARTING: |
773 | case SCI_STP_DEV_IDLE: | 799 | case SCI_STP_DEV_IDLE: |
774 | case SCI_SMP_DEV_IDLE: | 800 | case SCI_SMP_DEV_IDLE: |
775 | case SCI_DEV_FAILED: | 801 | case SCI_DEV_FAILED: |
776 | case SCI_DEV_FINAL: | 802 | case SCI_DEV_FINAL: |
777 | default: | 803 | default: |
778 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", | 804 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", |
779 | __func__, dev_state_name(state)); | 805 | __func__, dev_state_name(state)); |
780 | return SCI_FAILURE_INVALID_STATE; | 806 | return SCI_FAILURE_INVALID_STATE; |
781 | case SCI_DEV_READY: | 807 | case SCI_DEV_READY: |
782 | case SCI_STP_DEV_AWAIT_RESET: | 808 | case SCI_STP_DEV_AWAIT_RESET: |
783 | case SCI_DEV_RESETTING: | 809 | case SCI_DEV_RESETTING: |
784 | status = common_complete_io(iport, idev, ireq); | 810 | status = common_complete_io(iport, idev, ireq); |
785 | break; | 811 | break; |
786 | case SCI_STP_DEV_CMD: | 812 | case SCI_STP_DEV_CMD: |
787 | case SCI_STP_DEV_NCQ: | 813 | case SCI_STP_DEV_NCQ: |
788 | case SCI_STP_DEV_NCQ_ERROR: | 814 | case SCI_STP_DEV_NCQ_ERROR: |
789 | case SCI_STP_DEV_ATAPI_ERROR: | 815 | case SCI_STP_DEV_ATAPI_ERROR: |
790 | status = common_complete_io(iport, idev, ireq); | 816 | status = common_complete_io(iport, idev, ireq); |
791 | if (status != SCI_SUCCESS) | 817 | if (status != SCI_SUCCESS) |
792 | break; | 818 | break; |
793 | 819 | ||
794 | if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { | 820 | if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { |
795 | /* This request caused a hardware error; the device needs a LUN reset. | 821 | /* This request caused a hardware error; the device needs a LUN reset. |
796 | * Force the state machine into the AWAIT_RESET state so the remaining | 822 | * Force the state machine into the AWAIT_RESET state so the remaining |
797 | * I/Os reach the RNC state handler and are completed by the RNC with a | 823 | * I/Os reach the RNC state handler and are completed by the RNC with a |
798 | * status of "DEVICE_RESET_REQUIRED" instead of "INVALID STATE". | 824 | * status of "DEVICE_RESET_REQUIRED" instead of "INVALID STATE". |
799 | */ | 825 | */ |
800 | sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET); | 826 | sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET); |
801 | } else if (idev->started_request_count == 0) | 827 | } else if (idev->started_request_count == 0) |
802 | sci_change_state(sm, SCI_STP_DEV_IDLE); | 828 | sci_change_state(sm, SCI_STP_DEV_IDLE); |
803 | break; | 829 | break; |
804 | case SCI_SMP_DEV_CMD: | 830 | case SCI_SMP_DEV_CMD: |
805 | status = common_complete_io(iport, idev, ireq); | 831 | status = common_complete_io(iport, idev, ireq); |
806 | if (status != SCI_SUCCESS) | 832 | if (status != SCI_SUCCESS) |
807 | break; | 833 | break; |
808 | sci_change_state(sm, SCI_SMP_DEV_IDLE); | 834 | sci_change_state(sm, SCI_SMP_DEV_IDLE); |
809 | break; | 835 | break; |
810 | case SCI_DEV_STOPPING: | 836 | case SCI_DEV_STOPPING: |
811 | status = common_complete_io(iport, idev, ireq); | 837 | status = common_complete_io(iport, idev, ireq); |
812 | if (status != SCI_SUCCESS) | 838 | if (status != SCI_SUCCESS) |
813 | break; | 839 | break; |
814 | 840 | ||
815 | if (idev->started_request_count == 0) | 841 | if (idev->started_request_count == 0) |
816 | sci_remote_node_context_destruct(&idev->rnc, | 842 | sci_remote_node_context_destruct(&idev->rnc, |
817 | rnc_destruct_done, | 843 | rnc_destruct_done, |
818 | idev); | 844 | idev); |
819 | break; | 845 | break; |
820 | } | 846 | } |
821 | 847 | ||
822 | if (status != SCI_SUCCESS) | 848 | if (status != SCI_SUCCESS) |
823 | dev_err(scirdev_to_dev(idev), | 849 | dev_err(scirdev_to_dev(idev), |
824 | "%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x " | 850 | "%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x " |
825 | "could not complete\n", __func__, iport, | 851 | "could not complete\n", __func__, iport, |
826 | idev, ireq, status); | 852 | idev, ireq, status); |
827 | else | 853 | else |
828 | isci_put_device(idev); | 854 | isci_put_device(idev); |
829 | 855 | ||
830 | return status; | 856 | return status; |
831 | } | 857 | } |
832 | 858 | ||
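The AWAIT_RESET handling above is the heart of this function: once one STP request completes with a reset-required status, every later completion is funneled into a state whose handlers report DEVICE_RESET_REQUIRED rather than INVALID_STATE. A minimal standalone sketch of that funneling, with invented enum and status names rather than the driver's own:

#include <stdio.h>

enum dev_state { DEV_IDLE, DEV_CMD, DEV_AWAIT_RESET };
enum status { ST_OK, ST_RESET_REQUIRED, ST_INVALID_STATE };

/* Model of the STP completion path: a reset-required completion
 * parks the device in AWAIT_RESET; otherwise go idle once drained. */
static enum status complete_io(enum dev_state *state, enum status io_status,
                               int remaining)
{
        if (io_status == ST_RESET_REQUIRED)
                *state = DEV_AWAIT_RESET;   /* funnel the remaining I/Os */
        else if (remaining == 0)
                *state = DEV_IDLE;
        return io_status;
}

int main(void)
{
        enum dev_state state = DEV_CMD;

        complete_io(&state, ST_RESET_REQUIRED, 3);
        /* Later completions now find AWAIT_RESET and report
         * ST_RESET_REQUIRED instead of ST_INVALID_STATE. */
        printf("await reset: %d\n", state == DEV_AWAIT_RESET);
        return 0;
}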
833 | static void sci_remote_device_continue_request(void *dev) | 859 | static void sci_remote_device_continue_request(void *dev) |
834 | { | 860 | { |
835 | struct isci_remote_device *idev = dev; | 861 | struct isci_remote_device *idev = dev; |
836 | 862 | ||
837 | /* we need to check if this request is still valid to continue. */ | 863 | /* we need to check if this request is still valid to continue. */ |
838 | if (idev->working_request) | 864 | if (idev->working_request) |
839 | sci_controller_continue_io(idev->working_request); | 865 | sci_controller_continue_io(idev->working_request); |
840 | } | 866 | } |
841 | 867 | ||
842 | enum sci_status sci_remote_device_start_task(struct isci_host *ihost, | 868 | enum sci_status sci_remote_device_start_task(struct isci_host *ihost, |
843 | struct isci_remote_device *idev, | 869 | struct isci_remote_device *idev, |
844 | struct isci_request *ireq) | 870 | struct isci_request *ireq) |
845 | { | 871 | { |
846 | struct sci_base_state_machine *sm = &idev->sm; | 872 | struct sci_base_state_machine *sm = &idev->sm; |
847 | enum sci_remote_device_states state = sm->current_state_id; | 873 | enum sci_remote_device_states state = sm->current_state_id; |
848 | struct isci_port *iport = idev->owning_port; | 874 | struct isci_port *iport = idev->owning_port; |
849 | enum sci_status status; | 875 | enum sci_status status; |
850 | 876 | ||
851 | switch (state) { | 877 | switch (state) { |
852 | case SCI_DEV_INITIAL: | 878 | case SCI_DEV_INITIAL: |
853 | case SCI_DEV_STOPPED: | 879 | case SCI_DEV_STOPPED: |
854 | case SCI_DEV_STARTING: | 880 | case SCI_DEV_STARTING: |
855 | case SCI_SMP_DEV_IDLE: | 881 | case SCI_SMP_DEV_IDLE: |
856 | case SCI_SMP_DEV_CMD: | 882 | case SCI_SMP_DEV_CMD: |
857 | case SCI_DEV_STOPPING: | 883 | case SCI_DEV_STOPPING: |
858 | case SCI_DEV_FAILED: | 884 | case SCI_DEV_FAILED: |
859 | case SCI_DEV_RESETTING: | 885 | case SCI_DEV_RESETTING: |
860 | case SCI_DEV_FINAL: | 886 | case SCI_DEV_FINAL: |
861 | default: | 887 | default: |
862 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", | 888 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", |
863 | __func__, dev_state_name(state)); | 889 | __func__, dev_state_name(state)); |
864 | return SCI_FAILURE_INVALID_STATE; | 890 | return SCI_FAILURE_INVALID_STATE; |
865 | case SCI_STP_DEV_IDLE: | 891 | case SCI_STP_DEV_IDLE: |
866 | case SCI_STP_DEV_CMD: | 892 | case SCI_STP_DEV_CMD: |
867 | case SCI_STP_DEV_NCQ: | 893 | case SCI_STP_DEV_NCQ: |
868 | case SCI_STP_DEV_NCQ_ERROR: | 894 | case SCI_STP_DEV_NCQ_ERROR: |
869 | case SCI_STP_DEV_AWAIT_RESET: | 895 | case SCI_STP_DEV_AWAIT_RESET: |
870 | status = sci_port_start_io(iport, idev, ireq); | 896 | status = sci_port_start_io(iport, idev, ireq); |
871 | if (status != SCI_SUCCESS) | 897 | if (status != SCI_SUCCESS) |
872 | return status; | 898 | return status; |
873 | 899 | ||
874 | status = sci_request_start(ireq); | 900 | status = sci_request_start(ireq); |
875 | if (status != SCI_SUCCESS) | 901 | if (status != SCI_SUCCESS) |
876 | goto out; | 902 | goto out; |
877 | 903 | ||
878 | /* Note: If the remote device state is not IDLE, this will | 904 | /* Note: If the remote device state is not IDLE, this will |
879 | * replace the request that probably resulted in the task | 905 | * replace the request that probably resulted in the task |
880 | * management request. | 906 | * management request. |
881 | */ | 907 | */ |
882 | idev->working_request = ireq; | 908 | idev->working_request = ireq; |
883 | sci_change_state(sm, SCI_STP_DEV_CMD); | 909 | sci_change_state(sm, SCI_STP_DEV_CMD); |
884 | 910 | ||
885 | /* The remote node context must clean up the TCi to NCQ mapping | 911 | /* The remote node context must clean up the TCi to NCQ mapping |
886 | * table. The only way to do this correctly is to either write | 912 | * table. The only way to do this correctly is to either write |
887 | * to the TLCR register or to invalidate and repost the RNC. In | 913 | * to the TLCR register or to invalidate and repost the RNC. In |
888 | * either case the remote node context state machine will take | 914 | * either case the remote node context state machine will take |
889 | * the correct action when the remote node context is suspended | 915 | * the correct action when the remote node context is suspended |
890 | * and later resumed. | 916 | * and later resumed. |
891 | */ | 917 | */ |
892 | sci_remote_device_suspend(idev, | 918 | sci_remote_device_suspend(idev, |
893 | SCI_SW_SUSPEND_LINKHANG_DETECT); | 919 | SCI_SW_SUSPEND_LINKHANG_DETECT); |
894 | 920 | ||
895 | status = sci_remote_node_context_start_task(&idev->rnc, ireq, | 921 | status = sci_remote_node_context_start_task(&idev->rnc, ireq, |
896 | sci_remote_device_continue_request, idev); | 922 | sci_remote_device_continue_request, idev); |
897 | 923 | ||
898 | out: | 924 | out: |
899 | sci_remote_device_start_request(idev, ireq, status); | 925 | sci_remote_device_start_request(idev, ireq, status); |
900 | /* We need to let the controller's start-request handler know that | 926 | /* We need to let the controller's start-request handler know that |
901 | * it can't post the TC yet. We provide a callback function to | 927 | * it can't post the TC yet. We provide a callback function to |
902 | * post the TC when the RNC is resumed. | 928 | * post the TC when the RNC is resumed. |
903 | */ | 929 | */ |
904 | return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS; | 930 | return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS; |
905 | case SCI_DEV_READY: | 931 | case SCI_DEV_READY: |
906 | status = sci_port_start_io(iport, idev, ireq); | 932 | status = sci_port_start_io(iport, idev, ireq); |
907 | if (status != SCI_SUCCESS) | 933 | if (status != SCI_SUCCESS) |
908 | return status; | 934 | return status; |
909 | 935 | ||
910 | /* Resume the RNC as needed: */ | 936 | /* Resume the RNC as needed: */ |
911 | status = sci_remote_node_context_start_task(&idev->rnc, ireq, | 937 | status = sci_remote_node_context_start_task(&idev->rnc, ireq, |
912 | NULL, NULL); | 938 | NULL, NULL); |
913 | if (status != SCI_SUCCESS) | 939 | if (status != SCI_SUCCESS) |
914 | break; | 940 | break; |
915 | 941 | ||
916 | status = sci_request_start(ireq); | 942 | status = sci_request_start(ireq); |
917 | break; | 943 | break; |
918 | } | 944 | } |
919 | sci_remote_device_start_request(idev, ireq, status); | 945 | sci_remote_device_start_request(idev, ireq, status); |
920 | 946 | ||
921 | return status; | 947 | return status; |
922 | } | 948 | } |
923 | 949 | ||
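Note how the STP task-management branch above suspends the device, arms sci_remote_device_continue_request() as a resume callback, and then returns a partial-success code: the task context (TC) is only posted after the RNC resumes. A hedged sketch of that deferral pattern, with generic names standing in for the RNC machinery:

#include <stdio.h>

typedef void (*resume_cb)(void *arg);

struct rnc_model {
        int suspended;
        resume_cb cb;           /* invoked when the context resumes */
        void *cb_arg;
};

/* Start the task while suspended: remember how to post the TC later
 * and report "deferred" to the caller. */
static int rnc_start_task(struct rnc_model *rnc, resume_cb cb, void *arg)
{
        rnc->cb = cb;
        rnc->cb_arg = arg;
        return rnc->suspended;  /* nonzero: TC deferred until resume */
}

static void rnc_resume(struct rnc_model *rnc)
{
        rnc->suspended = 0;
        if (rnc->cb)
                rnc->cb(rnc->cb_arg);   /* now it is safe to post the TC */
}

static void post_tc(void *arg)
{
        printf("posting task context for %s\n", (const char *)arg);
}

int main(void)
{
        struct rnc_model rnc = { .suspended = 1 };

        rnc_start_task(&rnc, post_tc, "tmf");   /* caller sees partial success */
        rnc_resume(&rnc);                       /* TC is posted only here */
        return 0;
}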
924 | void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request) | 950 | void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request) |
925 | { | 951 | { |
926 | struct isci_port *iport = idev->owning_port; | 952 | struct isci_port *iport = idev->owning_port; |
927 | u32 context; | 953 | u32 context; |
928 | 954 | ||
929 | context = request | | 955 | context = request | |
930 | (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | | 956 | (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | |
931 | (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | | 957 | (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | |
932 | idev->rnc.remote_node_index; | 958 | idev->rnc.remote_node_index; |
933 | 959 | ||
934 | sci_controller_post_request(iport->owning_controller, context); | 960 | sci_controller_post_request(iport->owning_controller, context); |
935 | } | 961 | } |
936 | 962 | ||
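sci_remote_device_post_request() composes a single 32-bit post context from the request bits, the protocol engine group, the logical port index, and the remote node index. A worked example with illustrative shift values (the real SCU_CONTEXT_COMMAND_* shifts are defined in the hardware headers):

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout only -- the real shifts come from the SCU registers. */
#define PEG_SHIFT   28
#define PORT_SHIFT  24

static uint32_t make_post_context(uint32_t request, uint32_t peg,
                                  uint32_t port, uint32_t rni)
{
        return request | (peg << PEG_SHIFT) | (port << PORT_SHIFT) | rni;
}

int main(void)
{
        /* request-type bits | PEG 0 | logical port 2 | remote node 5 */
        uint32_t ctx = make_post_context(0x00010000u, 0, 2, 5);

        printf("context = 0x%08x\n", ctx);      /* prints 0x02010005 */
        return 0;
}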
937 | /* called once the remote node context has transitioned to a | 963 | /* called once the remote node context has transitioned to a |
938 | * ready state. This is the indication that the remote device object can also | 964 | * ready state. This is the indication that the remote device object can also |
939 | * transition to ready. | 965 | * transition to ready. |
940 | */ | 966 | */ |
941 | static void remote_device_resume_done(void *_dev) | 967 | static void remote_device_resume_done(void *_dev) |
942 | { | 968 | { |
943 | struct isci_remote_device *idev = _dev; | 969 | struct isci_remote_device *idev = _dev; |
944 | 970 | ||
945 | if (is_remote_device_ready(idev)) | 971 | if (is_remote_device_ready(idev)) |
946 | return; | 972 | return; |
947 | 973 | ||
948 | /* go 'ready' if we are not already in a ready state */ | 974 | /* go 'ready' if we are not already in a ready state */ |
949 | sci_change_state(&idev->sm, SCI_DEV_READY); | 975 | sci_change_state(&idev->sm, SCI_DEV_READY); |
950 | } | 976 | } |
951 | 977 | ||
952 | static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev) | 978 | static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev) |
953 | { | 979 | { |
954 | struct isci_remote_device *idev = _dev; | 980 | struct isci_remote_device *idev = _dev; |
955 | struct isci_host *ihost = idev->owning_port->owning_controller; | 981 | struct isci_host *ihost = idev->owning_port->owning_controller; |
956 | 982 | ||
957 | /* For NCQ operation we do not issue an isci_remote_device_not_ready(). | 983 | /* For NCQ operation we do not issue an isci_remote_device_not_ready(). |
958 | * As a result, avoid sending the ready notification. | 984 | * As a result, avoid sending the ready notification. |
959 | */ | 985 | */ |
960 | if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ) | 986 | if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ) |
961 | isci_remote_device_ready(ihost, idev); | 987 | isci_remote_device_ready(ihost, idev); |
962 | } | 988 | } |
963 | 989 | ||
964 | static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm) | 990 | static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm) |
965 | { | 991 | { |
966 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 992 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
967 | 993 | ||
968 | /* Initial state is a transitional state to the stopped state */ | 994 | /* Initial state is a transitional state to the stopped state */ |
969 | sci_change_state(&idev->sm, SCI_DEV_STOPPED); | 995 | sci_change_state(&idev->sm, SCI_DEV_STOPPED); |
970 | } | 996 | } |
971 | 997 | ||
972 | /** | 998 | /** |
973 | * sci_remote_device_destruct() - free remote node context and destruct | 999 | * sci_remote_device_destruct() - free remote node context and destruct |
974 | * @remote_device: This parameter specifies the remote device to be destructed. | 1000 | * @remote_device: This parameter specifies the remote device to be destructed. |
975 | * | 1001 | * |
976 | * Remote device objects are a limited resource. As such, they must be | 1002 | * Remote device objects are a limited resource. As such, they must be |
977 | * protected. Thus calls to construct and destruct are mutually exclusive and | 1003 | * protected. Thus calls to construct and destruct are mutually exclusive and |
978 | * non-reentrant. The return value indicates whether the device was | 1004 | * non-reentrant. The return value indicates whether the device was |
979 | * successfully destructed or whether some failure occurred. | 1005 | * successfully destructed or whether some failure occurred. |
980 | * SCI_SUCCESS is returned if the device is successfully destructed. | 1006 | * SCI_SUCCESS is returned if the device is successfully destructed. |
981 | * SCI_FAILURE_INVALID_REMOTE_DEVICE is returned if the supplied | 1007 | * SCI_FAILURE_INVALID_REMOTE_DEVICE is returned if the supplied |
982 | * device isn't valid (e.g. it has already been destroyed, the handle | 1008 | * device isn't valid (e.g. it has already been destroyed, the handle |
983 | * isn't valid, etc.). | 1009 | * isn't valid, etc.). |
984 | */ | 1010 | */ |
985 | static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev) | 1011 | static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev) |
986 | { | 1012 | { |
987 | struct sci_base_state_machine *sm = &idev->sm; | 1013 | struct sci_base_state_machine *sm = &idev->sm; |
988 | enum sci_remote_device_states state = sm->current_state_id; | 1014 | enum sci_remote_device_states state = sm->current_state_id; |
989 | struct isci_host *ihost; | 1015 | struct isci_host *ihost; |
990 | 1016 | ||
991 | if (state != SCI_DEV_STOPPED) { | 1017 | if (state != SCI_DEV_STOPPED) { |
992 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", | 1018 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", |
993 | __func__, dev_state_name(state)); | 1019 | __func__, dev_state_name(state)); |
994 | return SCI_FAILURE_INVALID_STATE; | 1020 | return SCI_FAILURE_INVALID_STATE; |
995 | } | 1021 | } |
996 | 1022 | ||
997 | ihost = idev->owning_port->owning_controller; | 1023 | ihost = idev->owning_port->owning_controller; |
998 | sci_controller_free_remote_node_context(ihost, idev, | 1024 | sci_controller_free_remote_node_context(ihost, idev, |
999 | idev->rnc.remote_node_index); | 1025 | idev->rnc.remote_node_index); |
1000 | idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; | 1026 | idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; |
1001 | sci_change_state(sm, SCI_DEV_FINAL); | 1027 | sci_change_state(sm, SCI_DEV_FINAL); |
1002 | 1028 | ||
1003 | return SCI_SUCCESS; | 1029 | return SCI_SUCCESS; |
1004 | } | 1030 | } |
1005 | 1031 | ||
1006 | /** | 1032 | /** |
1007 | * isci_remote_device_deconstruct() - This function frees an isci_remote_device. | 1033 | * isci_remote_device_deconstruct() - This function frees an isci_remote_device. |
1008 | * @ihost: This parameter specifies the isci host object. | 1034 | * @ihost: This parameter specifies the isci host object. |
1009 | * @idev: This parameter specifies the remote device to be freed. | 1035 | * @idev: This parameter specifies the remote device to be freed. |
1010 | * | 1036 | * |
1011 | */ | 1037 | */ |
1012 | static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev) | 1038 | static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev) |
1013 | { | 1039 | { |
1014 | dev_dbg(&ihost->pdev->dev, | 1040 | dev_dbg(&ihost->pdev->dev, |
1015 | "%s: isci_device = %p\n", __func__, idev); | 1041 | "%s: isci_device = %p\n", __func__, idev); |
1016 | 1042 | ||
1017 | /* There should not be any outstanding I/Os. All paths to | 1043 | /* There should not be any outstanding I/Os. All paths to |
1018 | * here should go through isci_remote_device_nuke_requests. | 1044 | * here should go through isci_remote_device_nuke_requests. |
1019 | * If we hit this condition, we will need a way to complete | 1045 | * If we hit this condition, we will need a way to complete |
1020 | * I/O requests still in flight. */ | 1046 | * I/O requests still in flight. */ |
1021 | BUG_ON(idev->started_request_count > 0); | 1047 | BUG_ON(idev->started_request_count > 0); |
1022 | 1048 | ||
1023 | sci_remote_device_destruct(idev); | 1049 | sci_remote_device_destruct(idev); |
1024 | list_del_init(&idev->node); | 1050 | list_del_init(&idev->node); |
1025 | isci_put_device(idev); | 1051 | isci_put_device(idev); |
1026 | } | 1052 | } |
1027 | 1053 | ||
1028 | static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm) | 1054 | static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm) |
1029 | { | 1055 | { |
1030 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 1056 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
1031 | struct isci_host *ihost = idev->owning_port->owning_controller; | 1057 | struct isci_host *ihost = idev->owning_port->owning_controller; |
1032 | u32 prev_state; | 1058 | u32 prev_state; |
1033 | 1059 | ||
1034 | /* If we are entering from the stopping state, let the SCI User know that | 1060 | /* If we are entering from the stopping state, let the SCI User know that |
1035 | * the stop operation has completed. | 1061 | * the stop operation has completed. |
1036 | */ | 1062 | */ |
1037 | prev_state = idev->sm.previous_state_id; | 1063 | prev_state = idev->sm.previous_state_id; |
1038 | if (prev_state == SCI_DEV_STOPPING) | 1064 | if (prev_state == SCI_DEV_STOPPING) |
1039 | isci_remote_device_deconstruct(ihost, idev); | 1065 | isci_remote_device_deconstruct(ihost, idev); |
1040 | 1066 | ||
1041 | sci_controller_remote_device_stopped(ihost, idev); | 1067 | sci_controller_remote_device_stopped(ihost, idev); |
1042 | } | 1068 | } |
1043 | 1069 | ||
1044 | static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm) | 1070 | static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm) |
1045 | { | 1071 | { |
1046 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 1072 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
1047 | struct isci_host *ihost = idev->owning_port->owning_controller; | 1073 | struct isci_host *ihost = idev->owning_port->owning_controller; |
1048 | 1074 | ||
1049 | isci_remote_device_not_ready(ihost, idev, | 1075 | isci_remote_device_not_ready(ihost, idev, |
1050 | SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED); | 1076 | SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED); |
1051 | } | 1077 | } |
1052 | 1078 | ||
1053 | static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm) | 1079 | static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm) |
1054 | { | 1080 | { |
1055 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 1081 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
1056 | struct isci_host *ihost = idev->owning_port->owning_controller; | 1082 | struct isci_host *ihost = idev->owning_port->owning_controller; |
1057 | struct domain_device *dev = idev->domain_dev; | 1083 | struct domain_device *dev = idev->domain_dev; |
1058 | 1084 | ||
1059 | if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) { | 1085 | if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) { |
1060 | sci_change_state(&idev->sm, SCI_STP_DEV_IDLE); | 1086 | sci_change_state(&idev->sm, SCI_STP_DEV_IDLE); |
1061 | } else if (dev_is_expander(dev)) { | 1087 | } else if (dev_is_expander(dev)) { |
1062 | sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE); | 1088 | sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE); |
1063 | } else | 1089 | } else |
1064 | isci_remote_device_ready(ihost, idev); | 1090 | isci_remote_device_ready(ihost, idev); |
1065 | } | 1091 | } |
1066 | 1092 | ||
1067 | static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm) | 1093 | static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm) |
1068 | { | 1094 | { |
1069 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 1095 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
1070 | struct domain_device *dev = idev->domain_dev; | 1096 | struct domain_device *dev = idev->domain_dev; |
1071 | 1097 | ||
1072 | if (dev->dev_type == SAS_END_DEV) { | 1098 | if (dev->dev_type == SAS_END_DEV) { |
1073 | struct isci_host *ihost = idev->owning_port->owning_controller; | 1099 | struct isci_host *ihost = idev->owning_port->owning_controller; |
1074 | 1100 | ||
1075 | isci_remote_device_not_ready(ihost, idev, | 1101 | isci_remote_device_not_ready(ihost, idev, |
1076 | SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED); | 1102 | SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED); |
1077 | } | 1103 | } |
1078 | } | 1104 | } |
1079 | 1105 | ||
1080 | static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm) | 1106 | static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm) |
1081 | { | 1107 | { |
1082 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 1108 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
1083 | struct isci_host *ihost = idev->owning_port->owning_controller; | 1109 | struct isci_host *ihost = idev->owning_port->owning_controller; |
1084 | 1110 | ||
1085 | dev_dbg(&ihost->pdev->dev, | 1111 | dev_dbg(&ihost->pdev->dev, |
1086 | "%s: isci_device = %p\n", __func__, idev); | 1112 | "%s: isci_device = %p\n", __func__, idev); |
1087 | 1113 | ||
1088 | sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT); | 1114 | sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT); |
1089 | } | 1115 | } |
1090 | 1116 | ||
1091 | static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm) | 1117 | static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm) |
1092 | { | 1118 | { |
1093 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 1119 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
1094 | struct isci_host *ihost = idev->owning_port->owning_controller; | 1120 | struct isci_host *ihost = idev->owning_port->owning_controller; |
1095 | 1121 | ||
1096 | dev_dbg(&ihost->pdev->dev, | 1122 | dev_dbg(&ihost->pdev->dev, |
1097 | "%s: isci_device = %p\n", __func__, idev); | 1123 | "%s: isci_device = %p\n", __func__, idev); |
1098 | 1124 | ||
1099 | sci_remote_node_context_resume(&idev->rnc, NULL, NULL); | 1125 | sci_remote_node_context_resume(&idev->rnc, NULL, NULL); |
1100 | } | 1126 | } |
1101 | 1127 | ||
1102 | static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) | 1128 | static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) |
1103 | { | 1129 | { |
1104 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 1130 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
1105 | 1131 | ||
1106 | idev->working_request = NULL; | 1132 | idev->working_request = NULL; |
1107 | if (sci_remote_node_context_is_ready(&idev->rnc)) { | 1133 | if (sci_remote_node_context_is_ready(&idev->rnc)) { |
1108 | /* | 1134 | /* |
1109 | * Since the RNC is ready, it is safe to finish completion | 1135 | * Since the RNC is ready, it is safe to finish completion |
1110 | * processing (e.g. signal the remote device is ready). */ | 1136 | * processing (e.g. signal the remote device is ready). */ |
1111 | sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev); | 1137 | sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev); |
1112 | } else { | 1138 | } else { |
1113 | sci_remote_node_context_resume(&idev->rnc, | 1139 | sci_remote_node_context_resume(&idev->rnc, |
1114 | sci_stp_remote_device_ready_idle_substate_resume_complete_handler, | 1140 | sci_stp_remote_device_ready_idle_substate_resume_complete_handler, |
1115 | idev); | 1141 | idev); |
1116 | } | 1142 | } |
1117 | } | 1143 | } |
1118 | 1144 | ||
1119 | static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) | 1145 | static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) |
1120 | { | 1146 | { |
1121 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 1147 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
1122 | struct isci_host *ihost = idev->owning_port->owning_controller; | 1148 | struct isci_host *ihost = idev->owning_port->owning_controller; |
1123 | 1149 | ||
1124 | BUG_ON(idev->working_request == NULL); | 1150 | BUG_ON(idev->working_request == NULL); |
1125 | 1151 | ||
1126 | isci_remote_device_not_ready(ihost, idev, | 1152 | isci_remote_device_not_ready(ihost, idev, |
1127 | SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED); | 1153 | SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED); |
1128 | } | 1154 | } |
1129 | 1155 | ||
1130 | static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm) | 1156 | static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm) |
1131 | { | 1157 | { |
1132 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 1158 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
1133 | struct isci_host *ihost = idev->owning_port->owning_controller; | 1159 | struct isci_host *ihost = idev->owning_port->owning_controller; |
1134 | 1160 | ||
1135 | if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED) | 1161 | if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED) |
1136 | isci_remote_device_not_ready(ihost, idev, | 1162 | isci_remote_device_not_ready(ihost, idev, |
1137 | idev->not_ready_reason); | 1163 | idev->not_ready_reason); |
1138 | } | 1164 | } |
1139 | 1165 | ||
1140 | static void sci_stp_remote_device_atapi_error_substate_enter( | ||
1141 | struct sci_base_state_machine *sm) | ||
1142 | { | ||
1143 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | ||
1144 | |||
1145 | /* This state is entered when an I/O is decoded with an error | ||
1146 | * condition. By this point the RNC expected suspension state is set. | ||
1147 | * The error conditions suspend the device, so unsuspend here if | ||
1148 | * possible. | ||
1149 | */ | ||
1150 | sci_remote_node_context_resume(&idev->rnc, | ||
1151 | atapi_remote_device_resume_done, | ||
1152 | idev); | ||
1153 | } | ||
1154 | |||
1155 | static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) | 1166 | static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) |
1156 | { | 1167 | { |
1157 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 1168 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
1158 | struct isci_host *ihost = idev->owning_port->owning_controller; | 1169 | struct isci_host *ihost = idev->owning_port->owning_controller; |
1159 | 1170 | ||
1160 | isci_remote_device_ready(ihost, idev); | 1171 | isci_remote_device_ready(ihost, idev); |
1161 | } | 1172 | } |
1162 | 1173 | ||
1163 | static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) | 1174 | static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) |
1164 | { | 1175 | { |
1165 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 1176 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
1166 | struct isci_host *ihost = idev->owning_port->owning_controller; | 1177 | struct isci_host *ihost = idev->owning_port->owning_controller; |
1167 | 1178 | ||
1168 | BUG_ON(idev->working_request == NULL); | 1179 | BUG_ON(idev->working_request == NULL); |
1169 | 1180 | ||
1170 | isci_remote_device_not_ready(ihost, idev, | 1181 | isci_remote_device_not_ready(ihost, idev, |
1171 | SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED); | 1182 | SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED); |
1172 | } | 1183 | } |
1173 | 1184 | ||
1174 | static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm) | 1185 | static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm) |
1175 | { | 1186 | { |
1176 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); | 1187 | struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); |
1177 | 1188 | ||
1178 | idev->working_request = NULL; | 1189 | idev->working_request = NULL; |
1179 | } | 1190 | } |
1180 | 1191 | ||
1181 | static const struct sci_base_state sci_remote_device_state_table[] = { | 1192 | static const struct sci_base_state sci_remote_device_state_table[] = { |
1182 | [SCI_DEV_INITIAL] = { | 1193 | [SCI_DEV_INITIAL] = { |
1183 | .enter_state = sci_remote_device_initial_state_enter, | 1194 | .enter_state = sci_remote_device_initial_state_enter, |
1184 | }, | 1195 | }, |
1185 | [SCI_DEV_STOPPED] = { | 1196 | [SCI_DEV_STOPPED] = { |
1186 | .enter_state = sci_remote_device_stopped_state_enter, | 1197 | .enter_state = sci_remote_device_stopped_state_enter, |
1187 | }, | 1198 | }, |
1188 | [SCI_DEV_STARTING] = { | 1199 | [SCI_DEV_STARTING] = { |
1189 | .enter_state = sci_remote_device_starting_state_enter, | 1200 | .enter_state = sci_remote_device_starting_state_enter, |
1190 | }, | 1201 | }, |
1191 | [SCI_DEV_READY] = { | 1202 | [SCI_DEV_READY] = { |
1192 | .enter_state = sci_remote_device_ready_state_enter, | 1203 | .enter_state = sci_remote_device_ready_state_enter, |
1193 | .exit_state = sci_remote_device_ready_state_exit | 1204 | .exit_state = sci_remote_device_ready_state_exit |
1194 | }, | 1205 | }, |
1195 | [SCI_STP_DEV_IDLE] = { | 1206 | [SCI_STP_DEV_IDLE] = { |
1196 | .enter_state = sci_stp_remote_device_ready_idle_substate_enter, | 1207 | .enter_state = sci_stp_remote_device_ready_idle_substate_enter, |
1197 | }, | 1208 | }, |
1198 | [SCI_STP_DEV_CMD] = { | 1209 | [SCI_STP_DEV_CMD] = { |
1199 | .enter_state = sci_stp_remote_device_ready_cmd_substate_enter, | 1210 | .enter_state = sci_stp_remote_device_ready_cmd_substate_enter, |
1200 | }, | 1211 | }, |
1201 | [SCI_STP_DEV_NCQ] = { }, | 1212 | [SCI_STP_DEV_NCQ] = { }, |
1202 | [SCI_STP_DEV_NCQ_ERROR] = { | 1213 | [SCI_STP_DEV_NCQ_ERROR] = { |
1203 | .enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter, | 1214 | .enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter, |
1204 | }, | 1215 | }, |
1205 | [SCI_STP_DEV_ATAPI_ERROR] = { | 1216 | [SCI_STP_DEV_ATAPI_ERROR] = { }, |
1206 | .enter_state = sci_stp_remote_device_atapi_error_substate_enter, | ||
1207 | }, | ||
1208 | [SCI_STP_DEV_AWAIT_RESET] = { }, | 1217 | [SCI_STP_DEV_AWAIT_RESET] = { }, |
1209 | [SCI_SMP_DEV_IDLE] = { | 1218 | [SCI_SMP_DEV_IDLE] = { |
1210 | .enter_state = sci_smp_remote_device_ready_idle_substate_enter, | 1219 | .enter_state = sci_smp_remote_device_ready_idle_substate_enter, |
1211 | }, | 1220 | }, |
1212 | [SCI_SMP_DEV_CMD] = { | 1221 | [SCI_SMP_DEV_CMD] = { |
1213 | .enter_state = sci_smp_remote_device_ready_cmd_substate_enter, | 1222 | .enter_state = sci_smp_remote_device_ready_cmd_substate_enter, |
1214 | .exit_state = sci_smp_remote_device_ready_cmd_substate_exit, | 1223 | .exit_state = sci_smp_remote_device_ready_cmd_substate_exit, |
1215 | }, | 1224 | }, |
1216 | [SCI_DEV_STOPPING] = { }, | 1225 | [SCI_DEV_STOPPING] = { }, |
1217 | [SCI_DEV_FAILED] = { }, | 1226 | [SCI_DEV_FAILED] = { }, |
1218 | [SCI_DEV_RESETTING] = { | 1227 | [SCI_DEV_RESETTING] = { |
1219 | .enter_state = sci_remote_device_resetting_state_enter, | 1228 | .enter_state = sci_remote_device_resetting_state_enter, |
1220 | .exit_state = sci_remote_device_resetting_state_exit | 1229 | .exit_state = sci_remote_device_resetting_state_exit |
1221 | }, | 1230 | }, |
1222 | [SCI_DEV_FINAL] = { }, | 1231 | [SCI_DEV_FINAL] = { }, |
1223 | }; | 1232 | }; |
1224 | 1233 | ||
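The state table above carries the whole behavior of the device state machine: each entry optionally supplies enter/exit hooks, and a state change runs the old state's exit hook followed by the new state's enter hook. A self-contained sketch of that table-driven dispatch (generic names, not the sci_base_state_machine API itself):

#include <stdio.h>

enum state { ST_STOPPED, ST_READY, ST_MAX };

struct state_desc {
        void (*enter_state)(void);      /* optional, may be NULL */
        void (*exit_state)(void);       /* optional, may be NULL */
};

static void ready_enter(void) { printf("ready: enter\n"); }
static void ready_exit(void)  { printf("ready: exit\n"); }

static const struct state_desc state_table[ST_MAX] = {
        [ST_STOPPED] = { 0 },   /* no hooks, like SCI_DEV_FAILED above */
        [ST_READY]   = { .enter_state = ready_enter, .exit_state = ready_exit },
};

static enum state current = ST_STOPPED;

static void change_state(enum state next)
{
        if (state_table[current].exit_state)
                state_table[current].exit_state();
        current = next;
        if (state_table[next].enter_state)
                state_table[next].enter_state();
}

int main(void)
{
        change_state(ST_READY);         /* runs ready_enter() */
        change_state(ST_STOPPED);       /* runs ready_exit() */
        return 0;
}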
1225 | /** | 1234 | /** |
1226 | * sci_remote_device_construct() - common construction | 1235 | * sci_remote_device_construct() - common construction |
1227 | * @sci_port: SAS/SATA port through which this device is accessed. | 1236 | * @sci_port: SAS/SATA port through which this device is accessed. |
1228 | * @sci_dev: remote device to construct | 1237 | * @sci_dev: remote device to construct |
1229 | * | 1238 | * |
1230 | * This routine just performs benign initialization and does not | 1239 | * This routine just performs benign initialization and does not |
1231 | * allocate the remote_node_context, which is left to | 1240 | * allocate the remote_node_context, which is left to |
1232 | * sci_remote_device_[de]a_construct(). sci_remote_device_destruct() | 1241 | * sci_remote_device_[de]a_construct(). sci_remote_device_destruct() |
1233 | * frees the remote_node_context(s) for the device. | 1242 | * frees the remote_node_context(s) for the device. |
1234 | */ | 1243 | */ |
1235 | static void sci_remote_device_construct(struct isci_port *iport, | 1244 | static void sci_remote_device_construct(struct isci_port *iport, |
1236 | struct isci_remote_device *idev) | 1245 | struct isci_remote_device *idev) |
1237 | { | 1246 | { |
1238 | idev->owning_port = iport; | 1247 | idev->owning_port = iport; |
1239 | idev->started_request_count = 0; | 1248 | idev->started_request_count = 0; |
1240 | 1249 | ||
1241 | sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL); | 1250 | sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL); |
1242 | 1251 | ||
1243 | sci_remote_node_context_construct(&idev->rnc, | 1252 | sci_remote_node_context_construct(&idev->rnc, |
1244 | SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX); | 1253 | SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX); |
1245 | } | 1254 | } |
1246 | 1255 | ||
1247 | /** | 1256 | /** |
1248 | * sci_remote_device_da_construct() - construct direct attached device. | 1257 | * sci_remote_device_da_construct() - construct direct attached device. |
1249 | * | 1258 | * |
1250 | * The information (e.g. IAF, Signature FIS, etc.) necessary to build | 1259 | * The information (e.g. IAF, Signature FIS, etc.) necessary to build |
1251 | * the device is known to the SCI Core since it is contained in the | 1260 | * the device is known to the SCI Core since it is contained in the |
1252 | * sci_phy object. Remote node context(s) is/are a global resource | 1261 | * sci_phy object. Remote node context(s) is/are a global resource |
1253 | * allocated by this routine, freed by sci_remote_device_destruct(). | 1262 | * allocated by this routine, freed by sci_remote_device_destruct(). |
1254 | * | 1263 | * |
1255 | * Returns: | 1264 | * Returns: |
1256 | * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed. | 1265 | * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed. |
1257 | * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to | 1266 | * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to |
1258 | * sata-only controller instance. | 1267 | * sata-only controller instance. |
1259 | * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted. | 1268 | * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted. |
1260 | */ | 1269 | */ |
1261 | static enum sci_status sci_remote_device_da_construct(struct isci_port *iport, | 1270 | static enum sci_status sci_remote_device_da_construct(struct isci_port *iport, |
1262 | struct isci_remote_device *idev) | 1271 | struct isci_remote_device *idev) |
1263 | { | 1272 | { |
1264 | enum sci_status status; | 1273 | enum sci_status status; |
1265 | struct sci_port_properties properties; | 1274 | struct sci_port_properties properties; |
1266 | 1275 | ||
1267 | sci_remote_device_construct(iport, idev); | 1276 | sci_remote_device_construct(iport, idev); |
1268 | 1277 | ||
1269 | sci_port_get_properties(iport, &properties); | 1278 | sci_port_get_properties(iport, &properties); |
1270 | /* Get accurate port width from port's phy mask for a DA device. */ | 1279 | /* Get accurate port width from port's phy mask for a DA device. */ |
1271 | idev->device_port_width = hweight32(properties.phy_mask); | 1280 | idev->device_port_width = hweight32(properties.phy_mask); |
1272 | 1281 | ||
1273 | status = sci_controller_allocate_remote_node_context(iport->owning_controller, | 1282 | status = sci_controller_allocate_remote_node_context(iport->owning_controller, |
1274 | idev, | 1283 | idev, |
1275 | &idev->rnc.remote_node_index); | 1284 | &idev->rnc.remote_node_index); |
1276 | 1285 | ||
1277 | if (status != SCI_SUCCESS) | 1286 | if (status != SCI_SUCCESS) |
1278 | return status; | 1287 | return status; |
1279 | 1288 | ||
1280 | idev->connection_rate = sci_port_get_max_allowed_speed(iport); | 1289 | idev->connection_rate = sci_port_get_max_allowed_speed(iport); |
1281 | 1290 | ||
1282 | return SCI_SUCCESS; | 1291 | return SCI_SUCCESS; |
1283 | } | 1292 | } |
1284 | 1293 | ||
1285 | /** | 1294 | /** |
1286 | * sci_remote_device_ea_construct() - construct expander attached device | 1295 | * sci_remote_device_ea_construct() - construct expander attached device |
1287 | * | 1296 | * |
1288 | * Remote node context(s) is/are a global resource allocated by this | 1297 | * Remote node context(s) is/are a global resource allocated by this |
1289 | * routine, freed by sci_remote_device_destruct(). | 1298 | * routine, freed by sci_remote_device_destruct(). |
1290 | * | 1299 | * |
1291 | * Returns: | 1300 | * Returns: |
1292 | * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed. | 1301 | * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed. |
1293 | * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to | 1302 | * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to |
1294 | * sata-only controller instance. | 1303 | * sata-only controller instance. |
1295 | * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted. | 1304 | * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted. |
1296 | */ | 1305 | */ |
1297 | static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport, | 1306 | static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport, |
1298 | struct isci_remote_device *idev) | 1307 | struct isci_remote_device *idev) |
1299 | { | 1308 | { |
1300 | struct domain_device *dev = idev->domain_dev; | 1309 | struct domain_device *dev = idev->domain_dev; |
1301 | enum sci_status status; | 1310 | enum sci_status status; |
1302 | 1311 | ||
1303 | sci_remote_device_construct(iport, idev); | 1312 | sci_remote_device_construct(iport, idev); |
1304 | 1313 | ||
1305 | status = sci_controller_allocate_remote_node_context(iport->owning_controller, | 1314 | status = sci_controller_allocate_remote_node_context(iport->owning_controller, |
1306 | idev, | 1315 | idev, |
1307 | &idev->rnc.remote_node_index); | 1316 | &idev->rnc.remote_node_index); |
1308 | if (status != SCI_SUCCESS) | 1317 | if (status != SCI_SUCCESS) |
1309 | return status; | 1318 | return status; |
1310 | 1319 | ||
1311 | /* For SAS-2 the physical link rate is actually a logical link | 1320 | /* For SAS-2 the physical link rate is actually a logical link |
1312 | * rate that incorporates multiplexing. The SCU doesn't | 1321 | * rate that incorporates multiplexing. The SCU doesn't |
1313 | * incorporate multiplexing and for the purposes of the | 1322 | * incorporate multiplexing and for the purposes of the |
1314 | * connection the logical link rate is the same as the | 1323 | * connection the logical link rate is the same as the |
1315 | * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay | 1324 | * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay |
1316 | * one another, so this code works for both situations. | 1325 | * one another, so this code works for both situations. |
1317 | */ | 1326 | */ |
1318 | idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport), | 1327 | idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport), |
1319 | dev->linkrate); | 1328 | dev->linkrate); |
1320 | 1329 | ||
1321 | /* @todo Should I assign the port width by reading all of the phys on the port? */ | 1330 | /* @todo Should I assign the port width by reading all of the phys on the port? */ |
1322 | idev->device_port_width = 1; | 1331 | idev->device_port_width = 1; |
1323 | 1332 | ||
1324 | return SCI_SUCCESS; | 1333 | return SCI_SUCCESS; |
1325 | } | 1334 | } |
1326 | 1335 | ||
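The min_t() above simply clamps the expander-reported link rate to the fastest speed the local port allows. The same clamp, written out as a plain standalone helper:

#include <stdint.h>
#include <stdio.h>

/* Clamp the negotiated rate to the port's maximum, as min_t(u16, ...) does. */
static uint16_t clamp_connection_rate(uint16_t port_max, uint16_t dev_linkrate)
{
        return port_max < dev_linkrate ? port_max : dev_linkrate;
}

int main(void)
{
        /* e.g. a faster device behind a slower, rate-limited port */
        printf("%u\n", clamp_connection_rate(3, 6));    /* prints 3 */
        return 0;
}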
1327 | enum sci_status sci_remote_device_resume( | 1336 | enum sci_status sci_remote_device_resume( |
1328 | struct isci_remote_device *idev, | 1337 | struct isci_remote_device *idev, |
1329 | scics_sds_remote_node_context_callback cb_fn, | 1338 | scics_sds_remote_node_context_callback cb_fn, |
1330 | void *cb_p) | 1339 | void *cb_p) |
1331 | { | 1340 | { |
1332 | enum sci_status status; | 1341 | enum sci_status status; |
1333 | 1342 | ||
1334 | status = sci_remote_node_context_resume(&idev->rnc, cb_fn, cb_p); | 1343 | status = sci_remote_node_context_resume(&idev->rnc, cb_fn, cb_p); |
1335 | if (status != SCI_SUCCESS) | 1344 | if (status != SCI_SUCCESS) |
1336 | dev_dbg(scirdev_to_dev(idev), "%s: failed to resume: %d\n", | 1345 | dev_dbg(scirdev_to_dev(idev), "%s: failed to resume: %d\n", |
1337 | __func__, status); | 1346 | __func__, status); |
1338 | return status; | 1347 | return status; |
1339 | } | 1348 | } |
1340 | 1349 | ||
1341 | static void isci_remote_device_resume_from_abort_complete(void *cbparam) | 1350 | static void isci_remote_device_resume_from_abort_complete(void *cbparam) |
1342 | { | 1351 | { |
1343 | struct isci_remote_device *idev = cbparam; | 1352 | struct isci_remote_device *idev = cbparam; |
1344 | struct isci_host *ihost = idev->owning_port->owning_controller; | 1353 | struct isci_host *ihost = idev->owning_port->owning_controller; |
1345 | scics_sds_remote_node_context_callback abort_resume_cb = | 1354 | scics_sds_remote_node_context_callback abort_resume_cb = |
1346 | idev->abort_resume_cb; | 1355 | idev->abort_resume_cb; |
1347 | 1356 | ||
1348 | dev_dbg(scirdev_to_dev(idev), "%s: passing-along resume: %p\n", | 1357 | dev_dbg(scirdev_to_dev(idev), "%s: passing-along resume: %p\n", |
1349 | __func__, abort_resume_cb); | 1358 | __func__, abort_resume_cb); |
1350 | 1359 | ||
1351 | if (abort_resume_cb != NULL) { | 1360 | if (abort_resume_cb != NULL) { |
1352 | idev->abort_resume_cb = NULL; | 1361 | idev->abort_resume_cb = NULL; |
1353 | abort_resume_cb(idev->abort_resume_cbparam); | 1362 | abort_resume_cb(idev->abort_resume_cbparam); |
1354 | } | 1363 | } |
1355 | clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags); | 1364 | clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags); |
1356 | wake_up(&ihost->eventq); | 1365 | wake_up(&ihost->eventq); |
1357 | } | 1366 | } |
1358 | 1367 | ||
1359 | 1368 | ||
1360 | void isci_remote_device_wait_for_resume_from_abort( | 1369 | void isci_remote_device_wait_for_resume_from_abort( |
1361 | struct isci_host *ihost, | 1370 | struct isci_host *ihost, |
1362 | struct isci_remote_device *idev) | 1371 | struct isci_remote_device *idev) |
1363 | { | 1372 | { |
1364 | dev_dbg(scirdev_to_dev(idev), "%s: starting resume wait: %p\n", | 1373 | dev_dbg(scirdev_to_dev(idev), "%s: starting resume wait: %p\n", |
1365 | __func__, idev); | 1374 | __func__, idev); |
1366 | 1375 | ||
1367 | #define MAX_RESUME_MSECS 10000 | 1376 | #define MAX_RESUME_MSECS 10000 |
1368 | if (!wait_event_timeout(ihost->eventq, | 1377 | if (!wait_event_timeout(ihost->eventq, |
1369 | (!test_bit(IDEV_ABORT_PATH_RESUME_PENDING, | 1378 | (!test_bit(IDEV_ABORT_PATH_RESUME_PENDING, |
1370 | &idev->flags) | 1379 | &idev->flags) |
1371 | || test_bit(IDEV_STOP_PENDING, &idev->flags)), | 1380 | || test_bit(IDEV_STOP_PENDING, &idev->flags)), |
1372 | msecs_to_jiffies(MAX_RESUME_MSECS))) { | 1381 | msecs_to_jiffies(MAX_RESUME_MSECS))) { |
1373 | 1382 | ||
1374 | dev_warn(scirdev_to_dev(idev), "%s: #### Timeout waiting for " | 1383 | dev_warn(scirdev_to_dev(idev), "%s: #### Timeout waiting for " |
1375 | "resume: %p\n", __func__, idev); | 1384 | "resume: %p\n", __func__, idev); |
1376 | } | 1385 | } |
1377 | clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags); | 1386 | clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags); |
1378 | 1387 | ||
1379 | dev_dbg(scirdev_to_dev(idev), "%s: resume wait done: %p\n", | 1388 | dev_dbg(scirdev_to_dev(idev), "%s: resume wait done: %p\n", |
1380 | __func__, idev); | 1389 | __func__, idev); |
1381 | } | 1390 | } |
1382 | 1391 | ||
1383 | enum sci_status isci_remote_device_resume_from_abort( | 1392 | enum sci_status isci_remote_device_resume_from_abort( |
1384 | struct isci_host *ihost, | 1393 | struct isci_host *ihost, |
1385 | struct isci_remote_device *idev) | 1394 | struct isci_remote_device *idev) |
1386 | { | 1395 | { |
1387 | unsigned long flags; | 1396 | unsigned long flags; |
1388 | enum sci_status status = SCI_SUCCESS; | 1397 | enum sci_status status = SCI_SUCCESS; |
1389 | int destroyed; | 1398 | int destroyed; |
1390 | 1399 | ||
1391 | spin_lock_irqsave(&ihost->scic_lock, flags); | 1400 | spin_lock_irqsave(&ihost->scic_lock, flags); |
1392 | /* Preserve any current resume callbacks, for instance from other | 1401 | /* Preserve any current resume callbacks, for instance from other |
1393 | * resumptions. | 1402 | * resumptions. |
1394 | */ | 1403 | */ |
1395 | idev->abort_resume_cb = idev->rnc.user_callback; | 1404 | idev->abort_resume_cb = idev->rnc.user_callback; |
1396 | idev->abort_resume_cbparam = idev->rnc.user_cookie; | 1405 | idev->abort_resume_cbparam = idev->rnc.user_cookie; |
1397 | set_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags); | 1406 | set_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags); |
1398 | clear_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags); | 1407 | clear_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags); |
1399 | destroyed = sci_remote_node_context_is_being_destroyed(&idev->rnc); | 1408 | destroyed = sci_remote_node_context_is_being_destroyed(&idev->rnc); |
1400 | if (!destroyed) | 1409 | if (!destroyed) |
1401 | status = sci_remote_device_resume( | 1410 | status = sci_remote_device_resume( |
1402 | idev, isci_remote_device_resume_from_abort_complete, | 1411 | idev, isci_remote_device_resume_from_abort_complete, |
1403 | idev); | 1412 | idev); |
1404 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 1413 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
1405 | if (!destroyed) | 1414 | if (!destroyed) |
1406 | isci_remote_device_wait_for_resume_from_abort(ihost, idev); | 1415 | isci_remote_device_wait_for_resume_from_abort(ihost, idev); |
1407 | else | 1416 | else |
1408 | clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags); | 1417 | clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags); |
1409 | 1418 | ||
1410 | return status; | 1419 | return status; |
1411 | } | 1420 | } |
1412 | 1421 | ||
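isci_remote_device_resume_from_abort() above deliberately preserves whatever resume callback the RNC already carried, and the completion handler forwards it exactly once before waking the waiters. A hedged sketch of that save-and-forward pattern:

#include <stdio.h>

typedef void (*cb_fn)(void *arg);

struct dev_model {
        cb_fn saved_cb;         /* previous waiter's callback, if any */
        void *saved_arg;
};

static void abort_resume_done(struct dev_model *dev)
{
        cb_fn cb = dev->saved_cb;

        if (cb) {
                dev->saved_cb = NULL;   /* consume exactly once */
                cb(dev->saved_arg);     /* pass the resume along */
        }
}

static void earlier_waiter(void *arg)
{
        printf("forwarded resume to %s\n", (const char *)arg);
}

int main(void)
{
        struct dev_model dev = { .saved_cb = earlier_waiter,
                                 .saved_arg = "earlier waiter" };

        abort_resume_done(&dev);        /* forwards to the saved callback */
        abort_resume_done(&dev);        /* second call: nothing to forward */
        return 0;
}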
1413 | /** | 1422 | /** |
1414 | * sci_remote_device_start() - This method starts the supplied remote | 1423 | * sci_remote_device_start() - This method starts the supplied remote |
1415 | * device, enabling normal IO requests to flow through to the | 1424 | * device, enabling normal IO requests to flow through to the |
1416 | * remote device. | 1425 | * remote device. |
1417 | * @remote_device: This parameter specifies the device to be started. | 1426 | * @remote_device: This parameter specifies the device to be started. |
1418 | * @timeout: This parameter specifies the number of milliseconds in which the | 1427 | * @timeout: This parameter specifies the number of milliseconds in which the |
1419 | * start operation should complete. | 1428 | * start operation should complete. |
1420 | * | 1429 | * |
1421 | * An indication of whether the device was successfully started. SCI_SUCCESS | 1430 | * An indication of whether the device was successfully started. SCI_SUCCESS |
1422 | * This value is returned if the device was successfully started. | 1431 | * This value is returned if the device was successfully started. |
1423 | * SCI_FAILURE_INVALID_PHY This value is returned if the user attempts to start | 1432 | * SCI_FAILURE_INVALID_PHY This value is returned if the user attempts to start |
1424 | * the device when there have been no phys added to it. | 1433 | * the device when there have been no phys added to it. |
1425 | */ | 1434 | */ |
1426 | static enum sci_status sci_remote_device_start(struct isci_remote_device *idev, | 1435 | static enum sci_status sci_remote_device_start(struct isci_remote_device *idev, |
1427 | u32 timeout) | 1436 | u32 timeout) |
1428 | { | 1437 | { |
1429 | struct sci_base_state_machine *sm = &idev->sm; | 1438 | struct sci_base_state_machine *sm = &idev->sm; |
1430 | enum sci_remote_device_states state = sm->current_state_id; | 1439 | enum sci_remote_device_states state = sm->current_state_id; |
1431 | enum sci_status status; | 1440 | enum sci_status status; |
1432 | 1441 | ||
1433 | if (state != SCI_DEV_STOPPED) { | 1442 | if (state != SCI_DEV_STOPPED) { |
1434 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", | 1443 | dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", |
1435 | __func__, dev_state_name(state)); | 1444 | __func__, dev_state_name(state)); |
1436 | return SCI_FAILURE_INVALID_STATE; | 1445 | return SCI_FAILURE_INVALID_STATE; |
1437 | } | 1446 | } |
1438 | 1447 | ||
1439 | status = sci_remote_device_resume(idev, remote_device_resume_done, | 1448 | status = sci_remote_device_resume(idev, remote_device_resume_done, |
1440 | idev); | 1449 | idev); |
1441 | if (status != SCI_SUCCESS) | 1450 | if (status != SCI_SUCCESS) |
1442 | return status; | 1451 | return status; |
1443 | 1452 | ||
1444 | sci_change_state(sm, SCI_DEV_STARTING); | 1453 | sci_change_state(sm, SCI_DEV_STARTING); |
1445 | 1454 | ||
1446 | return SCI_SUCCESS; | 1455 | return SCI_SUCCESS; |
1447 | } | 1456 | } |
1448 | 1457 | ||
1449 | static enum sci_status isci_remote_device_construct(struct isci_port *iport, | 1458 | static enum sci_status isci_remote_device_construct(struct isci_port *iport, |
1450 | struct isci_remote_device *idev) | 1459 | struct isci_remote_device *idev) |
1451 | { | 1460 | { |
1452 | struct isci_host *ihost = iport->isci_host; | 1461 | struct isci_host *ihost = iport->isci_host; |
1453 | struct domain_device *dev = idev->domain_dev; | 1462 | struct domain_device *dev = idev->domain_dev; |
1454 | enum sci_status status; | 1463 | enum sci_status status; |
1455 | 1464 | ||
1456 | if (dev->parent && dev_is_expander(dev->parent)) | 1465 | if (dev->parent && dev_is_expander(dev->parent)) |
1457 | status = sci_remote_device_ea_construct(iport, idev); | 1466 | status = sci_remote_device_ea_construct(iport, idev); |
1458 | else | 1467 | else |
1459 | status = sci_remote_device_da_construct(iport, idev); | 1468 | status = sci_remote_device_da_construct(iport, idev); |
1460 | 1469 | ||
1461 | if (status != SCI_SUCCESS) { | 1470 | if (status != SCI_SUCCESS) { |
1462 | dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n", | 1471 | dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n", |
1463 | __func__, status); | 1472 | __func__, status); |
1464 | 1473 | ||
1465 | return status; | 1474 | return status; |
1466 | } | 1475 | } |
1467 | 1476 | ||
1468 | /* start the device. */ | 1477 | /* start the device. */ |
1469 | status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT); | 1478 | status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT); |
1470 | 1479 | ||
1471 | if (status != SCI_SUCCESS) | 1480 | if (status != SCI_SUCCESS) |
1472 | dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n", | 1481 | dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n", |
1473 | status); | 1482 | status); |
1474 | 1483 | ||
1475 | return status; | 1484 | return status; |
1476 | } | 1485 | } |
1477 | 1486 | ||
1478 | /** | 1487 | /** |
1479 | * This function builds the isci_remote_device when a libsas dev_found message | 1488 | * This function builds the isci_remote_device when a libsas dev_found message |
1480 | * is received. | 1489 | * is received. |
1481 | * @isci_host: This parameter specifies the isci host object. | 1490 | * @isci_host: This parameter specifies the isci host object. |
1482 | * @port: This parameter specifies the isci_port connected to this device. | 1491 | * @port: This parameter specifies the isci_port connected to this device. |
1483 | * | 1492 | * |
1484 | * Returns: pointer to the new isci_remote_device. | 1493 | * Returns: pointer to the new isci_remote_device. |
1485 | */ | 1494 | */ |
1486 | static struct isci_remote_device * | 1495 | static struct isci_remote_device * |
1487 | isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport) | 1496 | isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport) |
1488 | { | 1497 | { |
1489 | struct isci_remote_device *idev; | 1498 | struct isci_remote_device *idev; |
1490 | int i; | 1499 | int i; |
1491 | 1500 | ||
1492 | for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) { | 1501 | for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) { |
1493 | idev = &ihost->devices[i]; | 1502 | idev = &ihost->devices[i]; |
1494 | if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags)) | 1503 | if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags)) |
1495 | break; | 1504 | break; |
1496 | } | 1505 | } |
1497 | 1506 | ||
1498 | if (i >= SCI_MAX_REMOTE_DEVICES) { | 1507 | if (i >= SCI_MAX_REMOTE_DEVICES) { |
1499 | dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__); | 1508 | dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__); |
1500 | return NULL; | 1509 | return NULL; |
1501 | } | 1510 | } |
1502 | if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n")) | 1511 | if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n")) |
1503 | return NULL; | 1512 | return NULL; |
1504 | 1513 | ||
1505 | return idev; | 1514 | return idev; |
1506 | } | 1515 | } |
1507 | 1516 | ||
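Note how isci_remote_device_alloc() claims a slot from the fixed ihost->devices[] pool with an atomic test_and_set_bit(), so no lock is needed for the claim itself; isci_remote_device_release() below hands the slot back by clearing IDEV_ALLOCATED. A minimal standalone sketch of the same claim/release idiom (slot_flags, MAX_SLOTS, and the function names are hypothetical stand-ins, not driver symbols):

#include <linux/bitops.h>

#define MAX_SLOTS 64				/* hypothetical pool size */
static unsigned long slot_flags[BITS_TO_LONGS(MAX_SLOTS)];

static int claim_slot(void)
{
	int i;

	for (i = 0; i < MAX_SLOTS; i++)
		if (!test_and_set_bit(i, slot_flags))
			return i;	/* bit was clear: the slot is now ours */
	return -1;			/* pool exhausted */
}

static void release_slot(int i)
{
	clear_bit(i, slot_flags);	/* slot becomes claimable again */
}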
1508 | void isci_remote_device_release(struct kref *kref) | 1517 | void isci_remote_device_release(struct kref *kref) |
1509 | { | 1518 | { |
1510 | struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref); | 1519 | struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref); |
1511 | struct isci_host *ihost = idev->isci_port->isci_host; | 1520 | struct isci_host *ihost = idev->isci_port->isci_host; |
1512 | 1521 | ||
1513 | idev->domain_dev = NULL; | 1522 | idev->domain_dev = NULL; |
1514 | idev->isci_port = NULL; | 1523 | idev->isci_port = NULL; |
1515 | clear_bit(IDEV_START_PENDING, &idev->flags); | 1524 | clear_bit(IDEV_START_PENDING, &idev->flags); |
1516 | clear_bit(IDEV_STOP_PENDING, &idev->flags); | 1525 | clear_bit(IDEV_STOP_PENDING, &idev->flags); |
1517 | clear_bit(IDEV_IO_READY, &idev->flags); | 1526 | clear_bit(IDEV_IO_READY, &idev->flags); |
1518 | clear_bit(IDEV_GONE, &idev->flags); | 1527 | clear_bit(IDEV_GONE, &idev->flags); |
1519 | smp_mb__before_clear_bit(); | 1528 | smp_mb__before_clear_bit(); |
1520 | clear_bit(IDEV_ALLOCATED, &idev->flags); | 1529 | clear_bit(IDEV_ALLOCATED, &idev->flags); |
1521 | wake_up(&ihost->eventq); | 1530 | wake_up(&ihost->eventq); |
1522 | } | 1531 | } |
1523 | 1532 | ||
1524 | /** | 1533 | /** |
1525 | * isci_remote_device_stop() - This function is called internally to stop the | 1534 | * isci_remote_device_stop() - This function is called internally to stop the |
1526 | * remote device. | 1535 | * remote device. |
1527 | * @isci_host: This parameter specifies the isci host object. | 1536 | * @isci_host: This parameter specifies the isci host object. |
1528 | * @isci_device: This parameter specifies the remote device. | 1537 | * @isci_device: This parameter specifies the remote device. |
1529 | * | 1538 | * |
1530 | * Returns the status of the ihost request to stop. | 1539 | * Returns the status of the ihost request to stop. |
1531 | */ | 1540 | */ |
1532 | enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev) | 1541 | enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev) |
1533 | { | 1542 | { |
1534 | enum sci_status status; | 1543 | enum sci_status status; |
1535 | unsigned long flags; | 1544 | unsigned long flags; |
1536 | 1545 | ||
1537 | dev_dbg(&ihost->pdev->dev, | 1546 | dev_dbg(&ihost->pdev->dev, |
1538 | "%s: isci_device = %p\n", __func__, idev); | 1547 | "%s: isci_device = %p\n", __func__, idev); |
1539 | 1548 | ||
1540 | spin_lock_irqsave(&ihost->scic_lock, flags); | 1549 | spin_lock_irqsave(&ihost->scic_lock, flags); |
1541 | idev->domain_dev->lldd_dev = NULL; /* disable new lookups */ | 1550 | idev->domain_dev->lldd_dev = NULL; /* disable new lookups */ |
1542 | set_bit(IDEV_GONE, &idev->flags); | 1551 | set_bit(IDEV_GONE, &idev->flags); |
1543 | 1552 | ||
1544 | set_bit(IDEV_STOP_PENDING, &idev->flags); | 1553 | set_bit(IDEV_STOP_PENDING, &idev->flags); |
1545 | status = sci_remote_device_stop(idev, 50); | 1554 | status = sci_remote_device_stop(idev, 50); |
1546 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 1555 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
1547 | 1556 | ||
1548 | /* Wait for the stop complete callback. */ | 1557 | /* Wait for the stop complete callback. */ |
1549 | if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n")) | 1558 | if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n")) |
1550 | /* nothing to wait for */; | 1559 | /* nothing to wait for */; |
1551 | else | 1560 | else |
1552 | wait_for_device_stop(ihost, idev); | 1561 | wait_for_device_stop(ihost, idev); |
1553 | 1562 | ||
1554 | dev_dbg(&ihost->pdev->dev, | 1563 | dev_dbg(&ihost->pdev->dev, |
1555 | "%s: isci_device = %p, waiting done.\n", __func__, idev); | 1564 | "%s: isci_device = %p, waiting done.\n", __func__, idev); |
1556 | 1565 | ||
1557 | return status; | 1566 | return status; |
1558 | } | 1567 | } |
1559 | 1568 | ||
1560 | /** | 1569 | /** |
1561 | * isci_remote_device_gone() - This function is called by libsas when a domain | 1570 | * isci_remote_device_gone() - This function is called by libsas when a domain |
1562 | * device is removed. | 1571 | * device is removed. |
1563 | * @domain_device: This parameter specifies the libsas domain device. | 1572 | * @domain_device: This parameter specifies the libsas domain device. |
1564 | * | 1573 | * |
1565 | */ | 1574 | */ |
1566 | void isci_remote_device_gone(struct domain_device *dev) | 1575 | void isci_remote_device_gone(struct domain_device *dev) |
1567 | { | 1576 | { |
1568 | struct isci_host *ihost = dev_to_ihost(dev); | 1577 | struct isci_host *ihost = dev_to_ihost(dev); |
1569 | struct isci_remote_device *idev = dev->lldd_dev; | 1578 | struct isci_remote_device *idev = dev->lldd_dev; |
1570 | 1579 | ||
1571 | dev_dbg(&ihost->pdev->dev, | 1580 | dev_dbg(&ihost->pdev->dev, |
1572 | "%s: domain_device = %p, isci_device = %p, isci_port = %p\n", | 1581 | "%s: domain_device = %p, isci_device = %p, isci_port = %p\n", |
1573 | __func__, dev, idev, idev->isci_port); | 1582 | __func__, dev, idev, idev->isci_port); |
1574 | 1583 | ||
1575 | isci_remote_device_stop(ihost, idev); | 1584 | isci_remote_device_stop(ihost, idev); |
1576 | } | 1585 | } |
1577 | 1586 | ||
1578 | 1587 | ||
1579 | /** | 1588 | /** |
1580 | * isci_remote_device_found() - This function is called by libsas when a remote | 1589 | * isci_remote_device_found() - This function is called by libsas when a remote |
1581 | * device is discovered. A remote device object is created and started. The | 1590 | * device is discovered. A remote device object is created and started. The |
1582 | * function then sleeps until the sci core device started message is | 1591 | * function then sleeps until the sci core device started message is |
1583 | * received. | 1592 | * received. |
1584 | * @domain_device: This parameter specifies the libsas domain device. | 1593 | * @domain_device: This parameter specifies the libsas domain device. |
1585 | * | 1594 | * |
1586 | * Returns status; zero indicates success. | 1595 | * Returns status; zero indicates success. |
1587 | */ | 1596 | */ |
1588 | int isci_remote_device_found(struct domain_device *dev) | 1597 | int isci_remote_device_found(struct domain_device *dev) |
1589 | { | 1598 | { |
1590 | struct isci_host *isci_host = dev_to_ihost(dev); | 1599 | struct isci_host *isci_host = dev_to_ihost(dev); |
1591 | struct isci_port *isci_port = dev->port->lldd_port; | 1600 | struct isci_port *isci_port = dev->port->lldd_port; |
1592 | struct isci_remote_device *isci_device; | 1601 | struct isci_remote_device *isci_device; |
1593 | enum sci_status status; | 1602 | enum sci_status status; |
1594 | 1603 | ||
1595 | dev_dbg(&isci_host->pdev->dev, | 1604 | dev_dbg(&isci_host->pdev->dev, |
1596 | "%s: domain_device = %p\n", __func__, dev); | 1605 | "%s: domain_device = %p\n", __func__, dev); |
1597 | 1606 | ||
1598 | if (!isci_port) | 1607 | if (!isci_port) |
1599 | return -ENODEV; | 1608 | return -ENODEV; |
1600 | 1609 | ||
1601 | isci_device = isci_remote_device_alloc(isci_host, isci_port); | 1610 | isci_device = isci_remote_device_alloc(isci_host, isci_port); |
1602 | if (!isci_device) | 1611 | if (!isci_device) |
1603 | return -ENODEV; | 1612 | return -ENODEV; |
1604 | 1613 | ||
1605 | kref_init(&isci_device->kref); | 1614 | kref_init(&isci_device->kref); |
1606 | INIT_LIST_HEAD(&isci_device->node); | 1615 | INIT_LIST_HEAD(&isci_device->node); |
1607 | 1616 | ||
1608 | spin_lock_irq(&isci_host->scic_lock); | 1617 | spin_lock_irq(&isci_host->scic_lock); |
1609 | isci_device->domain_dev = dev; | 1618 | isci_device->domain_dev = dev; |
1610 | isci_device->isci_port = isci_port; | 1619 | isci_device->isci_port = isci_port; |
1611 | list_add_tail(&isci_device->node, &isci_port->remote_dev_list); | 1620 | list_add_tail(&isci_device->node, &isci_port->remote_dev_list); |
1612 | 1621 | ||
1613 | set_bit(IDEV_START_PENDING, &isci_device->flags); | 1622 | set_bit(IDEV_START_PENDING, &isci_device->flags); |
1614 | status = isci_remote_device_construct(isci_port, isci_device); | 1623 | status = isci_remote_device_construct(isci_port, isci_device); |
1615 | 1624 | ||
1616 | dev_dbg(&isci_host->pdev->dev, | 1625 | dev_dbg(&isci_host->pdev->dev, |
1617 | "%s: isci_device = %p\n", | 1626 | "%s: isci_device = %p\n", |
1618 | __func__, isci_device); | 1627 | __func__, isci_device); |
1619 | 1628 | ||
1620 | if (status == SCI_SUCCESS) { | 1629 | if (status == SCI_SUCCESS) { |
1621 | /* device came up, advertise it to the world */ | 1630 | /* device came up, advertise it to the world */ |
1622 | dev->lldd_dev = isci_device; | 1631 | dev->lldd_dev = isci_device; |
1623 | } else | 1632 | } else |
1624 | isci_put_device(isci_device); | 1633 | isci_put_device(isci_device); |
1625 | spin_unlock_irq(&isci_host->scic_lock); | 1634 | spin_unlock_irq(&isci_host->scic_lock); |
1626 | 1635 | ||
1627 | /* wait for the device ready callback. */ | 1636 | /* wait for the device ready callback. */ |
1628 | wait_for_device_start(isci_host, isci_device); | 1637 | wait_for_device_start(isci_host, isci_device); |
1629 | 1638 | ||
1630 | return status == SCI_SUCCESS ? 0 : -ENODEV; | 1639 | return status == SCI_SUCCESS ? 0 : -ENODEV; |
1631 | } | 1640 | } |
1632 | 1641 | ||
1633 | enum sci_status isci_remote_device_suspend_terminate( | 1642 | enum sci_status isci_remote_device_suspend_terminate( |
1634 | struct isci_host *ihost, | 1643 | struct isci_host *ihost, |
1635 | struct isci_remote_device *idev, | 1644 | struct isci_remote_device *idev, |
1636 | struct isci_request *ireq) | 1645 | struct isci_request *ireq) |
1637 | { | 1646 | { |
1638 | unsigned long flags; | 1647 | unsigned long flags; |
1639 | enum sci_status status; | 1648 | enum sci_status status; |
1640 | 1649 | ||
1641 | /* Put the device into suspension. */ | 1650 | /* Put the device into suspension. */ |
1642 | spin_lock_irqsave(&ihost->scic_lock, flags); | 1651 | spin_lock_irqsave(&ihost->scic_lock, flags); |
1643 | set_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags); | 1652 | set_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags); |
1644 | sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT); | 1653 | sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT); |
1645 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 1654 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
1646 | 1655 | ||
1647 | /* Terminate and wait for the completions. */ | 1656 | /* Terminate and wait for the completions. */ |
1648 | status = isci_remote_device_terminate_requests(ihost, idev, ireq); | 1657 | status = isci_remote_device_terminate_requests(ihost, idev, ireq); |
1649 | if (status != SCI_SUCCESS) | 1658 | if (status != SCI_SUCCESS) |
1650 | dev_dbg(&ihost->pdev->dev, | 1659 | dev_dbg(&ihost->pdev->dev, |
1651 | "%s: isci_remote_device_terminate_requests(%p) " | 1660 | "%s: isci_remote_device_terminate_requests(%p) " |
1652 | "returned %d!\n", | 1661 | "returned %d!\n", |
1653 | __func__, idev, status); | 1662 | __func__, idev, status); |
1654 | 1663 | ||
1655 | /* NOTE: RNC resumption is left to the caller! */ | 1664 | /* NOTE: RNC resumption is left to the caller! */ |
1656 | return status; | 1665 | return status; |
1657 | } | 1666 | } |
1658 | 1667 | ||
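Per the NOTE above, isci_remote_device_suspend_terminate() deliberately leaves the RNC suspended. A hedged sketch of how a caller might pair it with the resume path declared in remote_device.h below (demo_abort_device is hypothetical, and the driver's real abort paths do additional bookkeeping beyond this):

/* Hypothetical caller: suspend + terminate, then resume the RNC.
 * Passing ireq == NULL asks for all of the device's requests.
 */
static enum sci_status demo_abort_device(struct isci_host *ihost,
					 struct isci_remote_device *idev)
{
	enum sci_status status;

	status = isci_remote_device_suspend_terminate(ihost, idev, NULL);

	/* The RNC was left suspended on purpose; resume it so that
	 * new I/O can flow to the device again.
	 */
	isci_remote_device_resume_from_abort(ihost, idev);

	return status;
}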
1659 | int isci_remote_device_is_safe_to_abort( | 1668 | int isci_remote_device_is_safe_to_abort( |
1660 | struct isci_remote_device *idev) | 1669 | struct isci_remote_device *idev) |
1661 | { | 1670 | { |
1662 | return sci_remote_node_context_is_safe_to_abort(&idev->rnc); | 1671 | return sci_remote_node_context_is_safe_to_abort(&idev->rnc); |
1663 | } | 1672 | } |
1664 | 1673 | ||
1665 | enum sci_status sci_remote_device_abort_requests_pending_abort( | 1674 | enum sci_status sci_remote_device_abort_requests_pending_abort( |
1666 | struct isci_remote_device *idev) | 1675 | struct isci_remote_device *idev) |
1667 | { | 1676 | { |
1668 | return sci_remote_device_terminate_reqs_checkabort(idev, 1); | 1677 | return sci_remote_device_terminate_reqs_checkabort(idev, 1); |
1669 | } | 1678 | } |
1670 | 1679 | ||
1671 | enum sci_status isci_remote_device_reset_complete( | 1680 | enum sci_status isci_remote_device_reset_complete( |
1672 | struct isci_host *ihost, | 1681 | struct isci_host *ihost, |
1673 | struct isci_remote_device *idev) | 1682 | struct isci_remote_device *idev) |
1674 | { | 1683 | { |
1675 | unsigned long flags; | 1684 | unsigned long flags; |
1676 | enum sci_status status; | 1685 | enum sci_status status; |
1677 | 1686 | ||
1678 | spin_lock_irqsave(&ihost->scic_lock, flags); | 1687 | spin_lock_irqsave(&ihost->scic_lock, flags); |
1679 | status = sci_remote_device_reset_complete(idev); | 1688 | status = sci_remote_device_reset_complete(idev); |
1680 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 1689 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
1681 | 1690 | ||
1682 | return status; | 1691 | return status; |
1683 | } | 1692 | } |
1684 | 1693 | ||
1685 | void isci_dev_set_hang_detection_timeout( | 1694 | void isci_dev_set_hang_detection_timeout( |
drivers/scsi/isci/remote_device.h
1 | /* | 1 | /* |
2 | * This file is provided under a dual BSD/GPLv2 license. When using or | 2 | * This file is provided under a dual BSD/GPLv2 license. When using or |
3 | * redistributing this file, you may do so under either license. | 3 | * redistributing this file, you may do so under either license. |
4 | * | 4 | * |
5 | * GPL LICENSE SUMMARY | 5 | * GPL LICENSE SUMMARY |
6 | * | 6 | * |
7 | * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. | 7 | * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of version 2 of the GNU General Public License as | 10 | * it under the terms of version 2 of the GNU General Public License as |
11 | * published by the Free Software Foundation. | 11 | * published by the Free Software Foundation. |
12 | * | 12 | * |
13 | * This program is distributed in the hope that it will be useful, but | 13 | * This program is distributed in the hope that it will be useful, but |
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | 14 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
16 | * General Public License for more details. | 16 | * General Public License for more details. |
17 | * | 17 | * |
18 | * You should have received a copy of the GNU General Public License | 18 | * You should have received a copy of the GNU General Public License |
19 | * along with this program; if not, write to the Free Software | 19 | * along with this program; if not, write to the Free Software |
20 | * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | 20 | * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. |
21 | * The full GNU General Public License is included in this distribution | 21 | * The full GNU General Public License is included in this distribution |
22 | * in the file called LICENSE.GPL. | 22 | * in the file called LICENSE.GPL. |
23 | * | 23 | * |
24 | * BSD LICENSE | 24 | * BSD LICENSE |
25 | * | 25 | * |
26 | * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. | 26 | * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. |
27 | * All rights reserved. | 27 | * All rights reserved. |
28 | * | 28 | * |
29 | * Redistribution and use in source and binary forms, with or without | 29 | * Redistribution and use in source and binary forms, with or without |
30 | * modification, are permitted provided that the following conditions | 30 | * modification, are permitted provided that the following conditions |
31 | * are met: | 31 | * are met: |
32 | * | 32 | * |
33 | * * Redistributions of source code must retain the above copyright | 33 | * * Redistributions of source code must retain the above copyright |
34 | * notice, this list of conditions and the following disclaimer. | 34 | * notice, this list of conditions and the following disclaimer. |
35 | * * Redistributions in binary form must reproduce the above copyright | 35 | * * Redistributions in binary form must reproduce the above copyright |
36 | * notice, this list of conditions and the following disclaimer in | 36 | * notice, this list of conditions and the following disclaimer in |
37 | * the documentation and/or other materials provided with the | 37 | * the documentation and/or other materials provided with the |
38 | * distribution. | 38 | * distribution. |
39 | * * Neither the name of Intel Corporation nor the names of its | 39 | * * Neither the name of Intel Corporation nor the names of its |
40 | * contributors may be used to endorse or promote products derived | 40 | * contributors may be used to endorse or promote products derived |
41 | * from this software without specific prior written permission. | 41 | * from this software without specific prior written permission. |
42 | * | 42 | * |
43 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | 43 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
44 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | 44 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
45 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | 45 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
46 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | 46 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
47 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | 47 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
48 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 48 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
49 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 49 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
50 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 50 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
51 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 51 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
52 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 52 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
53 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 53 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
54 | */ | 54 | */ |
55 | 55 | ||
56 | #ifndef _ISCI_REMOTE_DEVICE_H_ | 56 | #ifndef _ISCI_REMOTE_DEVICE_H_ |
57 | #define _ISCI_REMOTE_DEVICE_H_ | 57 | #define _ISCI_REMOTE_DEVICE_H_ |
58 | #include <scsi/libsas.h> | 58 | #include <scsi/libsas.h> |
59 | #include <linux/kref.h> | 59 | #include <linux/kref.h> |
60 | #include "scu_remote_node_context.h" | 60 | #include "scu_remote_node_context.h" |
61 | #include "remote_node_context.h" | 61 | #include "remote_node_context.h" |
62 | #include "port.h" | 62 | #include "port.h" |
63 | 63 | ||
64 | enum sci_remote_device_not_ready_reason_code { | 64 | enum sci_remote_device_not_ready_reason_code { |
65 | SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED, | 65 | SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED, |
66 | SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED, | 66 | SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED, |
67 | SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED, | 67 | SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED, |
68 | SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED, | 68 | SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED, |
69 | SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED, | 69 | SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED, |
70 | SCIC_REMOTE_DEVICE_NOT_READY_REASON_CODE_MAX | 70 | SCIC_REMOTE_DEVICE_NOT_READY_REASON_CODE_MAX |
71 | }; | 71 | }; |
72 | 72 | ||
73 | /** | 73 | /** |
74 | * isci_remote_device - isci representation of a sas expander / end point | 74 | * isci_remote_device - isci representation of a sas expander / end point |
75 | * @device_port_width: hw setting for number of simultaneous connections | 75 | * @device_port_width: hw setting for number of simultaneous connections |
76 | * @connection_rate: per-taskcontext connection rate for this device | 76 | * @connection_rate: per-taskcontext connection rate for this device |
77 | * @working_request: SATA requests have no tag, so for unaccelerated | 77 | * @working_request: SATA requests have no tag, so for unaccelerated |
78 | * protocols we need a method to associate unsolicited | 78 | * protocols we need a method to associate unsolicited |
79 | * frames with a pending request | 79 | * frames with a pending request |
80 | */ | 80 | */ |
81 | struct isci_remote_device { | 81 | struct isci_remote_device { |
82 | #define IDEV_START_PENDING 0 | 82 | #define IDEV_START_PENDING 0 |
83 | #define IDEV_STOP_PENDING 1 | 83 | #define IDEV_STOP_PENDING 1 |
84 | #define IDEV_ALLOCATED 2 | 84 | #define IDEV_ALLOCATED 2 |
85 | #define IDEV_GONE 3 | 85 | #define IDEV_GONE 3 |
86 | #define IDEV_IO_READY 4 | 86 | #define IDEV_IO_READY 4 |
87 | #define IDEV_IO_NCQERROR 5 | 87 | #define IDEV_IO_NCQERROR 5 |
88 | #define IDEV_RNC_LLHANG_ENABLED 6 | 88 | #define IDEV_RNC_LLHANG_ENABLED 6 |
89 | #define IDEV_ABORT_PATH_ACTIVE 7 | 89 | #define IDEV_ABORT_PATH_ACTIVE 7 |
90 | #define IDEV_ABORT_PATH_RESUME_PENDING 8 | 90 | #define IDEV_ABORT_PATH_RESUME_PENDING 8 |
91 | unsigned long flags; | 91 | unsigned long flags; |
92 | struct kref kref; | 92 | struct kref kref; |
93 | struct isci_port *isci_port; | 93 | struct isci_port *isci_port; |
94 | struct domain_device *domain_dev; | 94 | struct domain_device *domain_dev; |
95 | struct list_head node; | 95 | struct list_head node; |
96 | struct sci_base_state_machine sm; | 96 | struct sci_base_state_machine sm; |
97 | u32 device_port_width; | 97 | u32 device_port_width; |
98 | enum sas_linkrate connection_rate; | 98 | enum sas_linkrate connection_rate; |
99 | struct isci_port *owning_port; | 99 | struct isci_port *owning_port; |
100 | struct sci_remote_node_context rnc; | 100 | struct sci_remote_node_context rnc; |
101 | /* XXX unify with device reference counting and delete */ | 101 | /* XXX unify with device reference counting and delete */ |
102 | u32 started_request_count; | 102 | u32 started_request_count; |
103 | struct isci_request *working_request; | 103 | struct isci_request *working_request; |
104 | u32 not_ready_reason; | 104 | u32 not_ready_reason; |
105 | scics_sds_remote_node_context_callback abort_resume_cb; | 105 | scics_sds_remote_node_context_callback abort_resume_cb; |
106 | void *abort_resume_cbparam; | 106 | void *abort_resume_cbparam; |
107 | }; | 107 | }; |
108 | 108 | ||
109 | #define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000 | 109 | #define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000 |
110 | 110 | ||
111 | /* device reference routines must be called under sci_lock */ | 111 | /* device reference routines must be called under sci_lock */ |
112 | static inline struct isci_remote_device *isci_get_device( | 112 | static inline struct isci_remote_device *isci_get_device( |
113 | struct isci_remote_device *idev) | 113 | struct isci_remote_device *idev) |
114 | { | 114 | { |
115 | if (idev) | 115 | if (idev) |
116 | kref_get(&idev->kref); | 116 | kref_get(&idev->kref); |
117 | return idev; | 117 | return idev; |
118 | } | 118 | } |
119 | 119 | ||
120 | static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev) | 120 | static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev) |
121 | { | 121 | { |
122 | struct isci_remote_device *idev = dev->lldd_dev; | 122 | struct isci_remote_device *idev = dev->lldd_dev; |
123 | 123 | ||
124 | if (idev && !test_bit(IDEV_GONE, &idev->flags)) { | 124 | if (idev && !test_bit(IDEV_GONE, &idev->flags)) { |
125 | kref_get(&idev->kref); | 125 | kref_get(&idev->kref); |
126 | return idev; | 126 | return idev; |
127 | } | 127 | } |
128 | 128 | ||
129 | return NULL; | 129 | return NULL; |
130 | } | 130 | } |
131 | 131 | ||
132 | void isci_remote_device_release(struct kref *kref); | 132 | void isci_remote_device_release(struct kref *kref); |
133 | static inline void isci_put_device(struct isci_remote_device *idev) | 133 | static inline void isci_put_device(struct isci_remote_device *idev) |
134 | { | 134 | { |
135 | if (idev) | 135 | if (idev) |
136 | kref_put(&idev->kref, isci_remote_device_release); | 136 | kref_put(&idev->kref, isci_remote_device_release); |
137 | } | 137 | } |
138 | 138 | ||
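As the comment above notes, these reference routines must be called under sci_lock. A minimal sketch of the lookup/use/put discipline that follows that rule (demo_use_device and the work placeholder are illustrative only):

static void demo_use_device(struct isci_host *ihost,
			    struct domain_device *dev)
{
	struct isci_remote_device *idev;
	unsigned long flags;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	idev = isci_lookup_device(dev);	/* NULL if the device is gone */
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (!idev)
		return;

	/* ... use idev; the kref taken above pins it ... */

	spin_lock_irqsave(&ihost->scic_lock, flags);
	isci_put_device(idev);	/* may invoke isci_remote_device_release() */
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}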
139 | enum sci_status isci_remote_device_stop(struct isci_host *ihost, | 139 | enum sci_status isci_remote_device_stop(struct isci_host *ihost, |
140 | struct isci_remote_device *idev); | 140 | struct isci_remote_device *idev); |
141 | void isci_remote_device_nuke_requests(struct isci_host *ihost, | 141 | void isci_remote_device_nuke_requests(struct isci_host *ihost, |
142 | struct isci_remote_device *idev); | 142 | struct isci_remote_device *idev); |
143 | void isci_remote_device_gone(struct domain_device *domain_dev); | 143 | void isci_remote_device_gone(struct domain_device *domain_dev); |
144 | int isci_remote_device_found(struct domain_device *domain_dev); | 144 | int isci_remote_device_found(struct domain_device *domain_dev); |
145 | 145 | ||
146 | /** | 146 | /** |
147 | * sci_remote_device_stop() - This method will stop both transmission and | 147 | * sci_remote_device_stop() - This method will stop both transmission and |
148 | * reception of link activity for the supplied remote device. This method | 148 | * reception of link activity for the supplied remote device. This method |
149 | * disables normal IO requests from flowing through to the remote device. | 149 | * disables normal IO requests from flowing through to the remote device. |
150 | * @remote_device: This parameter specifies the device to be stopped. | 150 | * @remote_device: This parameter specifies the device to be stopped. |
151 | * @timeout: This parameter specifies the number of milliseconds in which the | 151 | * @timeout: This parameter specifies the number of milliseconds in which the |
152 | * stop operation should complete. | 152 | * stop operation should complete. |
153 | * | 153 | * |
154 | * An indication of whether the device was successfully stopped. SCI_SUCCESS | 154 | * An indication of whether the device was successfully stopped. SCI_SUCCESS |
155 | * This value is returned if the transmission and reception for the device was | 155 | * This value is returned if the transmission and reception for the device was |
156 | * successfully stopped. | 156 | * successfully stopped. |
157 | */ | 157 | */ |
158 | enum sci_status sci_remote_device_stop( | 158 | enum sci_status sci_remote_device_stop( |
159 | struct isci_remote_device *idev, | 159 | struct isci_remote_device *idev, |
160 | u32 timeout); | 160 | u32 timeout); |
161 | 161 | ||
162 | /** | 162 | /** |
163 | * sci_remote_device_reset() - This method will reset the device making it | 163 | * sci_remote_device_reset() - This method will reset the device making it |
164 | * ready for operation. This method must be called anytime the device is | 164 | * ready for operation. This method must be called anytime the device is |
165 | * reset either through a SMP phy control or a port hard reset request. | 165 | * reset either through a SMP phy control or a port hard reset request. |
166 | * @remote_device: This parameter specifies the device to be reset. | 166 | * @remote_device: This parameter specifies the device to be reset. |
167 | * | 167 | * |
168 | * This method does not actually cause the device hardware to be reset. This | 168 | * This method does not actually cause the device hardware to be reset. This |
169 | * method resets the software object so that it will be operational after a | 169 | * method resets the software object so that it will be operational after a |
170 | * device hardware reset completes. An indication of whether the device reset | 170 | * device hardware reset completes. An indication of whether the device reset |
171 | * was accepted. SCI_SUCCESS This value is returned if the device reset is | 171 | * was accepted. SCI_SUCCESS This value is returned if the device reset is |
172 | * started. | 172 | * started. |
173 | */ | 173 | */ |
174 | enum sci_status sci_remote_device_reset( | 174 | enum sci_status sci_remote_device_reset( |
175 | struct isci_remote_device *idev); | 175 | struct isci_remote_device *idev); |
176 | 176 | ||
177 | /** | 177 | /** |
178 | * sci_remote_device_reset_complete() - This method informs the device object | 178 | * sci_remote_device_reset_complete() - This method informs the device object |
179 | * that the reset operation is complete and the device can resume operation | 179 | * that the reset operation is complete and the device can resume operation |
180 | * again. | 180 | * again. |
181 | * @remote_device: This parameter specifies the device which is to be informed | 181 | * @remote_device: This parameter specifies the device which is to be informed |
182 | * of the reset complete operation. | 182 | * of the reset complete operation. |
183 | * | 183 | * |
184 | * An indication that the device is resuming operation. SCI_SUCCESS is | 184 | * An indication that the device is resuming operation. SCI_SUCCESS is |
185 | * returned if the device is resuming operation. | 185 | * returned if the device is resuming operation. |
186 | */ | 186 | */ |
187 | enum sci_status sci_remote_device_reset_complete( | 187 | enum sci_status sci_remote_device_reset_complete( |
188 | struct isci_remote_device *idev); | 188 | struct isci_remote_device *idev); |
189 | 189 | ||
190 | /** | 190 | /** |
191 | * enum sci_remote_device_states - This enumeration depicts all the states | 191 | * enum sci_remote_device_states - This enumeration depicts all the states |
192 | * for the common remote device state machine. | 192 | * for the common remote device state machine. |
193 | * @SCI_DEV_INITIAL: Simply the initial state for the base remote device | 193 | * @SCI_DEV_INITIAL: Simply the initial state for the base remote device |
194 | * state machine. | 194 | * state machine. |
195 | * | 195 | * |
196 | * @SCI_DEV_STOPPED: This state indicates that the remote device has | 196 | * @SCI_DEV_STOPPED: This state indicates that the remote device has |
197 | * successfully been stopped. In this state no new IO operations are | 197 | * successfully been stopped. In this state no new IO operations are |
198 | * permitted. This state is entered from the INITIAL state. This state | 198 | * permitted. This state is entered from the INITIAL state. This state |
199 | * is entered from the STOPPING state. | 199 | * is entered from the STOPPING state. |
200 | * | 200 | * |
201 | * @SCI_DEV_STARTING: This state indicates that the remote device is in | 201 | * @SCI_DEV_STARTING: This state indicates that the remote device is in |
202 | * the process of becoming ready (i.e. starting). In this state no new | 202 | * the process of becoming ready (i.e. starting). In this state no new |
203 | * IO operations are permitted. This state is entered from the STOPPED | 203 | * IO operations are permitted. This state is entered from the STOPPED |
204 | * state. | 204 | * state. |
205 | * | 205 | * |
206 | * @SCI_DEV_READY: This state indicates the remote device is now ready. | 206 | * @SCI_DEV_READY: This state indicates the remote device is now ready. |
207 | * Thus, the user is able to perform IO operations on the remote device. | 207 | * Thus, the user is able to perform IO operations on the remote device. |
208 | * This state is entered from the STARTING state. | 208 | * This state is entered from the STARTING state. |
209 | * | 209 | * |
210 | * @SCI_STP_DEV_IDLE: This is the idle substate for the stp remote | 210 | * @SCI_STP_DEV_IDLE: This is the idle substate for the stp remote |
211 | * device. When there are no active IOs for the device it is in this | 211 | * device. When there are no active IOs for the device it is in this |
212 | * state. | 212 | * state. |
213 | * | 213 | * |
214 | * @SCI_STP_DEV_CMD: This is the command state for the STP remote | 214 | * @SCI_STP_DEV_CMD: This is the command state for the STP remote |
215 | * device. This state is entered when the device is processing a | 215 | * device. This state is entered when the device is processing a |
216 | * non-NCQ command. The device object will fail any new start IO | 216 | * non-NCQ command. The device object will fail any new start IO |
217 | * requests until this command is complete. | 217 | * requests until this command is complete. |
218 | * | 218 | * |
219 | * @SCI_STP_DEV_NCQ: This is the NCQ state for the STP remote device. | 219 | * @SCI_STP_DEV_NCQ: This is the NCQ state for the STP remote device. |
220 | * This state is entered when the device is processing an NCQ request. | 220 | * This state is entered when the device is processing an NCQ request. |
221 | * It will remain in this state so long as there is one or more NCQ | 221 | * It will remain in this state so long as there is one or more NCQ |
222 | * requests being processed. | 222 | * requests being processed. |
223 | * | 223 | * |
224 | * @SCI_STP_DEV_NCQ_ERROR: This is the NCQ error state for the STP | 224 | * @SCI_STP_DEV_NCQ_ERROR: This is the NCQ error state for the STP |
225 | * remote device. This state is entered when an SDB error FIS is | 225 | * remote device. This state is entered when an SDB error FIS is |
226 | * received by the device object while in the NCQ state. The device | 226 | * received by the device object while in the NCQ state. The device |
227 | * object will only accept a READ LOG command while in this state. | 227 | * object will only accept a READ LOG command while in this state. |
228 | * | 228 | * |
229 | * @SCI_STP_DEV_ATAPI_ERROR: This is the ATAPI error state for the STP | 229 | * @SCI_STP_DEV_ATAPI_ERROR: This is the ATAPI error state for the STP |
230 | * ATAPI remote device. This state is entered when the ATAPI device sends | 230 | * ATAPI remote device. This state is entered when the ATAPI device sends |
231 | * an error status FIS without data while the device object is in the CMD | 231 | * an error status FIS without data while the device object is in the CMD |
232 | * state. A suspension event is expected in this state. The device | 232 | * state. A suspension event is expected in this state. The device |
233 | * object will resume right away. | 233 | * object will resume right away. |
234 | * | 234 | * |
235 | * @SCI_STP_DEV_AWAIT_RESET: This READY substate indicates that the | 235 | * @SCI_STP_DEV_AWAIT_RESET: This READY substate indicates that the |
236 | * device is waiting for a RESET task in order to recover from a | 236 | * device is waiting for a RESET task in order to recover from a |
237 | * hardware-specific error. | 237 | * hardware-specific error. |
238 | * | 238 | * |
239 | * @SCI_SMP_DEV_IDLE: This is the ready operational substate for the | 239 | * @SCI_SMP_DEV_IDLE: This is the ready operational substate for the |
240 | * remote device. This is the normal operational state for a remote | 240 | * remote device. This is the normal operational state for a remote |
241 | * device. | 241 | * device. |
242 | * | 242 | * |
243 | * @SCI_SMP_DEV_CMD: This is the suspended state for the remote device. | 243 | * @SCI_SMP_DEV_CMD: This is the suspended state for the remote device. |
244 | * This is the state that the device is placed in when a RNC suspend is | 244 | * This is the state that the device is placed in when a RNC suspend is |
245 | * received by the SCU hardware. | 245 | * received by the SCU hardware. |
246 | * | 246 | * |
247 | * @SCI_DEV_STOPPING: This state indicates that the remote device is in | 247 | * @SCI_DEV_STOPPING: This state indicates that the remote device is in |
248 | * the process of stopping. In this state no new IO operations are | 248 | * the process of stopping. In this state no new IO operations are |
249 | * permitted, but existing IO operations are allowed to complete. This | 249 | * permitted, but existing IO operations are allowed to complete. This |
250 | * state is entered from the READY state. This state is entered from | 250 | * state is entered from the READY state. This state is entered from |
251 | * the FAILED state. | 251 | * the FAILED state. |
252 | * | 252 | * |
253 | * @SCI_DEV_FAILED: This state indicates that the remote device has | 253 | * @SCI_DEV_FAILED: This state indicates that the remote device has |
254 | * failed. In this state no new IO operations are permitted. This | 254 | * failed. In this state no new IO operations are permitted. This |
255 | * state is entered from the INITIALIZING state. This state is entered | 255 | * state is entered from the INITIALIZING state. This state is entered |
256 | * from the READY state. | 256 | * from the READY state. |
257 | * | 257 | * |
258 | * @SCI_DEV_RESETTING: This state indicates the device is being reset. | 258 | * @SCI_DEV_RESETTING: This state indicates the device is being reset. |
259 | * In this state no new IO operations are permitted. This state is | 259 | * In this state no new IO operations are permitted. This state is |
260 | * entered from the READY state. | 260 | * entered from the READY state. |
261 | * | 261 | * |
262 | * @SCI_DEV_FINAL: Simply the final state for the base remote device | 262 | * @SCI_DEV_FINAL: Simply the final state for the base remote device |
263 | * state machine. | 263 | * state machine. |
264 | */ | 264 | */ |
265 | #define REMOTE_DEV_STATES {\ | 265 | #define REMOTE_DEV_STATES {\ |
266 | C(DEV_INITIAL),\ | 266 | C(DEV_INITIAL),\ |
267 | C(DEV_STOPPED),\ | 267 | C(DEV_STOPPED),\ |
268 | C(DEV_STARTING),\ | 268 | C(DEV_STARTING),\ |
269 | C(DEV_READY),\ | 269 | C(DEV_READY),\ |
270 | C(STP_DEV_IDLE),\ | 270 | C(STP_DEV_IDLE),\ |
271 | C(STP_DEV_CMD),\ | 271 | C(STP_DEV_CMD),\ |
272 | C(STP_DEV_NCQ),\ | 272 | C(STP_DEV_NCQ),\ |
273 | C(STP_DEV_NCQ_ERROR),\ | 273 | C(STP_DEV_NCQ_ERROR),\ |
274 | C(STP_DEV_ATAPI_ERROR),\ | 274 | C(STP_DEV_ATAPI_ERROR),\ |
275 | C(STP_DEV_AWAIT_RESET),\ | 275 | C(STP_DEV_AWAIT_RESET),\ |
276 | C(SMP_DEV_IDLE),\ | 276 | C(SMP_DEV_IDLE),\ |
277 | C(SMP_DEV_CMD),\ | 277 | C(SMP_DEV_CMD),\ |
278 | C(DEV_STOPPING),\ | 278 | C(DEV_STOPPING),\ |
279 | C(DEV_FAILED),\ | 279 | C(DEV_FAILED),\ |
280 | C(DEV_RESETTING),\ | 280 | C(DEV_RESETTING),\ |
281 | C(DEV_FINAL),\ | 281 | C(DEV_FINAL),\ |
282 | } | 282 | } |
283 | #undef C | 283 | #undef C |
284 | #define C(a) SCI_##a | 284 | #define C(a) SCI_##a |
285 | enum sci_remote_device_states REMOTE_DEV_STATES; | 285 | enum sci_remote_device_states REMOTE_DEV_STATES; |
286 | #undef C | 286 | #undef C |
287 | const char *dev_state_name(enum sci_remote_device_states state); | 287 | const char *dev_state_name(enum sci_remote_device_states state); |
288 | 288 | ||
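The C() wrapper above is an X-macro: REMOTE_DEV_STATES is a single list expanded twice with different definitions of C(), here into SCI_-prefixed enumerators, and elsewhere with #define C(a) (#a) into the matching name strings behind dev_state_name() (request.c below uses the identical trick for req_state_name()). A standalone sketch of the idiom with a hypothetical COLORS list:

#define COLORS {\
	C(RED),\
	C(GREEN),\
	C(BLUE),\
}

#define C(a) COLOR_##a
enum color COLORS;	/* COLOR_RED, COLOR_GREEN, COLOR_BLUE */
#undef C

#define C(a) #a
static const char * const color_names[] = COLORS;	/* "RED", "GREEN", "BLUE" */
#undef C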
289 | static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc) | 289 | static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc) |
290 | { | 290 | { |
291 | struct isci_remote_device *idev; | 291 | struct isci_remote_device *idev; |
292 | 292 | ||
293 | idev = container_of(rnc, typeof(*idev), rnc); | 293 | idev = container_of(rnc, typeof(*idev), rnc); |
294 | 294 | ||
295 | return idev; | 295 | return idev; |
296 | } | 296 | } |
297 | 297 | ||
298 | static inline bool dev_is_expander(struct domain_device *dev) | 298 | static inline bool dev_is_expander(struct domain_device *dev) |
299 | { | 299 | { |
300 | return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV; | 300 | return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV; |
301 | } | 301 | } |
302 | 302 | ||
303 | static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev) | 303 | static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev) |
304 | { | 304 | { |
305 | /* XXX delete this voodoo when converting to the top-level device | 305 | /* XXX delete this voodoo when converting to the top-level device |
306 | * reference count | 306 | * reference count |
307 | */ | 307 | */ |
308 | if (WARN_ONCE(idev->started_request_count == 0, | 308 | if (WARN_ONCE(idev->started_request_count == 0, |
309 | "%s: tried to decrement started_request_count past 0!?", | 309 | "%s: tried to decrement started_request_count past 0!?", |
310 | __func__)) | 310 | __func__)) |
311 | /* pass */; | 311 | /* pass */; |
312 | else | 312 | else |
313 | idev->started_request_count--; | 313 | idev->started_request_count--; |
314 | } | 314 | } |
315 | 315 | ||
316 | void isci_dev_set_hang_detection_timeout(struct isci_remote_device *idev, u32 timeout); | 316 | void isci_dev_set_hang_detection_timeout(struct isci_remote_device *idev, u32 timeout); |
317 | 317 | ||
318 | enum sci_status sci_remote_device_frame_handler( | 318 | enum sci_status sci_remote_device_frame_handler( |
319 | struct isci_remote_device *idev, | 319 | struct isci_remote_device *idev, |
320 | u32 frame_index); | 320 | u32 frame_index); |
321 | 321 | ||
322 | enum sci_status sci_remote_device_event_handler( | 322 | enum sci_status sci_remote_device_event_handler( |
323 | struct isci_remote_device *idev, | 323 | struct isci_remote_device *idev, |
324 | u32 event_code); | 324 | u32 event_code); |
325 | 325 | ||
326 | enum sci_status sci_remote_device_start_io( | 326 | enum sci_status sci_remote_device_start_io( |
327 | struct isci_host *ihost, | 327 | struct isci_host *ihost, |
328 | struct isci_remote_device *idev, | 328 | struct isci_remote_device *idev, |
329 | struct isci_request *ireq); | 329 | struct isci_request *ireq); |
330 | 330 | ||
331 | enum sci_status sci_remote_device_start_task( | 331 | enum sci_status sci_remote_device_start_task( |
332 | struct isci_host *ihost, | 332 | struct isci_host *ihost, |
333 | struct isci_remote_device *idev, | 333 | struct isci_remote_device *idev, |
334 | struct isci_request *ireq); | 334 | struct isci_request *ireq); |
335 | 335 | ||
336 | enum sci_status sci_remote_device_complete_io( | 336 | enum sci_status sci_remote_device_complete_io( |
337 | struct isci_host *ihost, | 337 | struct isci_host *ihost, |
338 | struct isci_remote_device *idev, | 338 | struct isci_remote_device *idev, |
339 | struct isci_request *ireq); | 339 | struct isci_request *ireq); |
340 | 340 | ||
341 | void sci_remote_device_post_request( | 341 | void sci_remote_device_post_request( |
342 | struct isci_remote_device *idev, | 342 | struct isci_remote_device *idev, |
343 | u32 request); | 343 | u32 request); |
344 | 344 | ||
345 | enum sci_status sci_remote_device_terminate_requests( | 345 | enum sci_status sci_remote_device_terminate_requests( |
346 | struct isci_remote_device *idev); | 346 | struct isci_remote_device *idev); |
347 | 347 | ||
348 | int isci_remote_device_is_safe_to_abort( | 348 | int isci_remote_device_is_safe_to_abort( |
349 | struct isci_remote_device *idev); | 349 | struct isci_remote_device *idev); |
350 | 350 | ||
351 | enum sci_status | 351 | enum sci_status |
352 | sci_remote_device_abort_requests_pending_abort( | 352 | sci_remote_device_abort_requests_pending_abort( |
353 | struct isci_remote_device *idev); | 353 | struct isci_remote_device *idev); |
354 | 354 | ||
355 | enum sci_status isci_remote_device_suspend( | 355 | enum sci_status isci_remote_device_suspend( |
356 | struct isci_host *ihost, | 356 | struct isci_host *ihost, |
357 | struct isci_remote_device *idev); | 357 | struct isci_remote_device *idev); |
358 | 358 | ||
359 | enum sci_status sci_remote_device_resume( | 359 | enum sci_status sci_remote_device_resume( |
360 | struct isci_remote_device *idev, | 360 | struct isci_remote_device *idev, |
361 | scics_sds_remote_node_context_callback cb_fn, | 361 | scics_sds_remote_node_context_callback cb_fn, |
362 | void *cb_p); | 362 | void *cb_p); |
363 | 363 | ||
364 | enum sci_status isci_remote_device_resume_from_abort( | 364 | enum sci_status isci_remote_device_resume_from_abort( |
365 | struct isci_host *ihost, | 365 | struct isci_host *ihost, |
366 | struct isci_remote_device *idev); | 366 | struct isci_remote_device *idev); |
367 | 367 | ||
368 | enum sci_status isci_remote_device_reset( | 368 | enum sci_status isci_remote_device_reset( |
369 | struct isci_host *ihost, | 369 | struct isci_host *ihost, |
370 | struct isci_remote_device *idev); | 370 | struct isci_remote_device *idev); |
371 | 371 | ||
372 | enum sci_status isci_remote_device_reset_complete( | 372 | enum sci_status isci_remote_device_reset_complete( |
373 | struct isci_host *ihost, | 373 | struct isci_host *ihost, |
374 | struct isci_remote_device *idev); | 374 | struct isci_remote_device *idev); |
375 | 375 | ||
376 | enum sci_status isci_remote_device_suspend_terminate( | 376 | enum sci_status isci_remote_device_suspend_terminate( |
377 | struct isci_host *ihost, | 377 | struct isci_host *ihost, |
378 | struct isci_remote_device *idev, | 378 | struct isci_remote_device *idev, |
379 | struct isci_request *ireq); | 379 | struct isci_request *ireq); |
380 | 380 | ||
381 | enum sci_status isci_remote_device_terminate_requests( | 381 | enum sci_status isci_remote_device_terminate_requests( |
382 | struct isci_host *ihost, | 382 | struct isci_host *ihost, |
383 | struct isci_remote_device *idev, | 383 | struct isci_remote_device *idev, |
384 | struct isci_request *ireq); | 384 | struct isci_request *ireq); |
385 | enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev, | ||
386 | enum sci_remote_node_suspension_reasons reason); | ||
385 | #endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */ | 387 | #endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */ |
386 | 388 |
drivers/scsi/isci/request.c
1 | /* | 1 | /* |
2 | * This file is provided under a dual BSD/GPLv2 license. When using or | 2 | * This file is provided under a dual BSD/GPLv2 license. When using or |
3 | * redistributing this file, you may do so under either license. | 3 | * redistributing this file, you may do so under either license. |
4 | * | 4 | * |
5 | * GPL LICENSE SUMMARY | 5 | * GPL LICENSE SUMMARY |
6 | * | 6 | * |
7 | * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. | 7 | * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of version 2 of the GNU General Public License as | 10 | * it under the terms of version 2 of the GNU General Public License as |
11 | * published by the Free Software Foundation. | 11 | * published by the Free Software Foundation. |
12 | * | 12 | * |
13 | * This program is distributed in the hope that it will be useful, but | 13 | * This program is distributed in the hope that it will be useful, but |
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | 14 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
16 | * General Public License for more details. | 16 | * General Public License for more details. |
17 | * | 17 | * |
18 | * You should have received a copy of the GNU General Public License | 18 | * You should have received a copy of the GNU General Public License |
19 | * along with this program; if not, write to the Free Software | 19 | * along with this program; if not, write to the Free Software |
20 | * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | 20 | * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. |
21 | * The full GNU General Public License is included in this distribution | 21 | * The full GNU General Public License is included in this distribution |
22 | * in the file called LICENSE.GPL. | 22 | * in the file called LICENSE.GPL. |
23 | * | 23 | * |
24 | * BSD LICENSE | 24 | * BSD LICENSE |
25 | * | 25 | * |
26 | * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. | 26 | * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. |
27 | * All rights reserved. | 27 | * All rights reserved. |
28 | * | 28 | * |
29 | * Redistribution and use in source and binary forms, with or without | 29 | * Redistribution and use in source and binary forms, with or without |
30 | * modification, are permitted provided that the following conditions | 30 | * modification, are permitted provided that the following conditions |
31 | * are met: | 31 | * are met: |
32 | * | 32 | * |
33 | * * Redistributions of source code must retain the above copyright | 33 | * * Redistributions of source code must retain the above copyright |
34 | * notice, this list of conditions and the following disclaimer. | 34 | * notice, this list of conditions and the following disclaimer. |
35 | * * Redistributions in binary form must reproduce the above copyright | 35 | * * Redistributions in binary form must reproduce the above copyright |
36 | * notice, this list of conditions and the following disclaimer in | 36 | * notice, this list of conditions and the following disclaimer in |
37 | * the documentation and/or other materials provided with the | 37 | * the documentation and/or other materials provided with the |
38 | * distribution. | 38 | * distribution. |
39 | * * Neither the name of Intel Corporation nor the names of its | 39 | * * Neither the name of Intel Corporation nor the names of its |
40 | * contributors may be used to endorse or promote products derived | 40 | * contributors may be used to endorse or promote products derived |
41 | * from this software without specific prior written permission. | 41 | * from this software without specific prior written permission. |
42 | * | 42 | * |
43 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | 43 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
44 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | 44 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
45 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | 45 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
46 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | 46 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
47 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | 47 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
48 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 48 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
49 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 49 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
50 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 50 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
51 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 51 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
52 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 52 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
53 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 53 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
54 | */ | 54 | */ |
55 | 55 | ||
56 | #include <scsi/scsi_cmnd.h> | 56 | #include <scsi/scsi_cmnd.h> |
57 | #include "isci.h" | 57 | #include "isci.h" |
58 | #include "task.h" | 58 | #include "task.h" |
59 | #include "request.h" | 59 | #include "request.h" |
60 | #include "scu_completion_codes.h" | 60 | #include "scu_completion_codes.h" |
61 | #include "scu_event_codes.h" | 61 | #include "scu_event_codes.h" |
62 | #include "sas.h" | 62 | #include "sas.h" |
63 | 63 | ||
64 | #undef C | 64 | #undef C |
65 | #define C(a) (#a) | 65 | #define C(a) (#a) |
66 | const char *req_state_name(enum sci_base_request_states state) | 66 | const char *req_state_name(enum sci_base_request_states state) |
67 | { | 67 | { |
68 | static const char * const strings[] = REQUEST_STATES; | 68 | static const char * const strings[] = REQUEST_STATES; |
69 | 69 | ||
70 | return strings[state]; | 70 | return strings[state]; |
71 | } | 71 | } |
72 | #undef C | 72 | #undef C |
73 | 73 | ||
74 | static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq, | 74 | static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq, |
75 | int idx) | 75 | int idx) |
76 | { | 76 | { |
77 | if (idx == 0) | 77 | if (idx == 0) |
78 | return &ireq->tc->sgl_pair_ab; | 78 | return &ireq->tc->sgl_pair_ab; |
79 | else if (idx == 1) | 79 | else if (idx == 1) |
80 | return &ireq->tc->sgl_pair_cd; | 80 | return &ireq->tc->sgl_pair_cd; |
81 | else if (idx < 0) | 81 | else if (idx < 0) |
82 | return NULL; | 82 | return NULL; |
83 | else | 83 | else |
84 | return &ireq->sg_table[idx - 2]; | 84 | return &ireq->sg_table[idx - 2]; |
85 | } | 85 | } |
86 | 86 | ||
87 | static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost, | 87 | static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost, |
88 | struct isci_request *ireq, u32 idx) | 88 | struct isci_request *ireq, u32 idx) |
89 | { | 89 | { |
90 | u32 offset; | 90 | u32 offset; |
91 | 91 | ||
92 | if (idx == 0) { | 92 | if (idx == 0) { |
93 | offset = (void *) &ireq->tc->sgl_pair_ab - | 93 | offset = (void *) &ireq->tc->sgl_pair_ab - |
94 | (void *) &ihost->task_context_table[0]; | 94 | (void *) &ihost->task_context_table[0]; |
95 | return ihost->tc_dma + offset; | 95 | return ihost->tc_dma + offset; |
96 | } else if (idx == 1) { | 96 | } else if (idx == 1) { |
97 | offset = (void *) &ireq->tc->sgl_pair_cd - | 97 | offset = (void *) &ireq->tc->sgl_pair_cd - |
98 | (void *) &ihost->task_context_table[0]; | 98 | (void *) &ihost->task_context_table[0]; |
99 | return ihost->tc_dma + offset; | 99 | return ihost->tc_dma + offset; |
100 | } | 100 | } |
101 | 101 | ||
102 | return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]); | 102 | return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]); |
103 | } | 103 | } |
104 | 104 | ||
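Taken together, the two helpers above encode the SGL layout: pair indexes 0 and 1 live inside the task context itself (sgl_pair_ab and sgl_pair_cd, addressed relative to tc_dma), while every pair from index 2 onward comes from the request's external sg_table, hence the idx - 2 offset. As a worked example, a task with seven scatterlist entries needs four pairs (two elements per pair): pairs 0 and 1 come from the task context, and pairs 2 and 3 map to sg_table[0] and sg_table[1].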
105 | static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg) | 105 | static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg) |
106 | { | 106 | { |
107 | e->length = sg_dma_len(sg); | 107 | e->length = sg_dma_len(sg); |
108 | e->address_upper = upper_32_bits(sg_dma_address(sg)); | 108 | e->address_upper = upper_32_bits(sg_dma_address(sg)); |
109 | e->address_lower = lower_32_bits(sg_dma_address(sg)); | 109 | e->address_lower = lower_32_bits(sg_dma_address(sg)); |
110 | e->address_modifier = 0; | 110 | e->address_modifier = 0; |
111 | } | 111 | } |
112 | 112 | ||
113 | static void sci_request_build_sgl(struct isci_request *ireq) | 113 | static void sci_request_build_sgl(struct isci_request *ireq) |
114 | { | 114 | { |
115 | struct isci_host *ihost = ireq->isci_host; | 115 | struct isci_host *ihost = ireq->isci_host; |
116 | struct sas_task *task = isci_request_access_task(ireq); | 116 | struct sas_task *task = isci_request_access_task(ireq); |
117 | struct scatterlist *sg = NULL; | 117 | struct scatterlist *sg = NULL; |
118 | dma_addr_t dma_addr; | 118 | dma_addr_t dma_addr; |
119 | u32 sg_idx = 0; | 119 | u32 sg_idx = 0; |
120 | struct scu_sgl_element_pair *scu_sg = NULL; | 120 | struct scu_sgl_element_pair *scu_sg = NULL; |
121 | struct scu_sgl_element_pair *prev_sg = NULL; | 121 | struct scu_sgl_element_pair *prev_sg = NULL; |
122 | 122 | ||
123 | if (task->num_scatter > 0) { | 123 | if (task->num_scatter > 0) { |
124 | sg = task->scatter; | 124 | sg = task->scatter; |
125 | 125 | ||
126 | while (sg) { | 126 | while (sg) { |
127 | scu_sg = to_sgl_element_pair(ireq, sg_idx); | 127 | scu_sg = to_sgl_element_pair(ireq, sg_idx); |
128 | init_sgl_element(&scu_sg->A, sg); | 128 | init_sgl_element(&scu_sg->A, sg); |
129 | sg = sg_next(sg); | 129 | sg = sg_next(sg); |
130 | if (sg) { | 130 | if (sg) { |
131 | init_sgl_element(&scu_sg->B, sg); | 131 | init_sgl_element(&scu_sg->B, sg); |
132 | sg = sg_next(sg); | 132 | sg = sg_next(sg); |
133 | } else | 133 | } else |
134 | memset(&scu_sg->B, 0, sizeof(scu_sg->B)); | 134 | memset(&scu_sg->B, 0, sizeof(scu_sg->B)); |
135 | 135 | ||
136 | if (prev_sg) { | 136 | if (prev_sg) { |
137 | dma_addr = to_sgl_element_pair_dma(ihost, | 137 | dma_addr = to_sgl_element_pair_dma(ihost, |
138 | ireq, | 138 | ireq, |
139 | sg_idx); | 139 | sg_idx); |
140 | 140 | ||
141 | prev_sg->next_pair_upper = | 141 | prev_sg->next_pair_upper = |
142 | upper_32_bits(dma_addr); | 142 | upper_32_bits(dma_addr); |
143 | prev_sg->next_pair_lower = | 143 | prev_sg->next_pair_lower = |
144 | lower_32_bits(dma_addr); | 144 | lower_32_bits(dma_addr); |
145 | } | 145 | } |
146 | 146 | ||
147 | prev_sg = scu_sg; | 147 | prev_sg = scu_sg; |
148 | sg_idx++; | 148 | sg_idx++; |
149 | } | 149 | } |
150 | } else { /* handle when no sg */ | 150 | } else { /* handle when no sg */ |
151 | scu_sg = to_sgl_element_pair(ireq, sg_idx); | 151 | scu_sg = to_sgl_element_pair(ireq, sg_idx); |
152 | 152 | ||
153 | dma_addr = dma_map_single(&ihost->pdev->dev, | 153 | dma_addr = dma_map_single(&ihost->pdev->dev, |
154 | task->scatter, | 154 | task->scatter, |
155 | task->total_xfer_len, | 155 | task->total_xfer_len, |
156 | task->data_dir); | 156 | task->data_dir); |
157 | 157 | ||
158 | ireq->zero_scatter_daddr = dma_addr; | 158 | ireq->zero_scatter_daddr = dma_addr; |
159 | 159 | ||
160 | scu_sg->A.length = task->total_xfer_len; | 160 | scu_sg->A.length = task->total_xfer_len; |
161 | scu_sg->A.address_upper = upper_32_bits(dma_addr); | 161 | scu_sg->A.address_upper = upper_32_bits(dma_addr); |
162 | scu_sg->A.address_lower = lower_32_bits(dma_addr); | 162 | scu_sg->A.address_lower = lower_32_bits(dma_addr); |
163 | } | 163 | } |
164 | 164 | ||
165 | if (scu_sg) { | 165 | if (scu_sg) { |
166 | scu_sg->next_pair_upper = 0; | 166 | scu_sg->next_pair_upper = 0; |
167 | scu_sg->next_pair_lower = 0; | 167 | scu_sg->next_pair_lower = 0; |
168 | } | 168 | } |
169 | } | 169 | } |
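
The loop above consumes the scatterlist two entries at a time, filling slots A and B of each pair, zeroing B on an odd-length list, and back-patching each pair's next-pointer with the DMA address of its successor before terminating the chain. A condensed standalone sketch of that pairing walk (lengths only, no DMA):

	#include <stdio.h>

	struct seg { unsigned len; struct seg *next; };
	struct pair { unsigned a_len, b_len; int next_valid; };

	static int build_pairs(struct seg *sg, struct pair *out, int max)
	{
		int n = 0;

		while (sg && n < max) {
			struct pair *p = &out[n];

			p->a_len = sg->len;		/* element A */
			sg = sg->next;
			if (sg) {			/* element B, if any */
				p->b_len = sg->len;
				sg = sg->next;
			} else {
				p->b_len = 0;
			}
			p->next_valid = 1;		/* chained to out[n + 1] */
			n++;
		}
		if (n)
			out[n - 1].next_valid = 0;	/* terminate the chain */
		return n;
	}

	int main(void)
	{
		struct seg s2 = { 512, NULL }, s1 = { 4096, &s2 }, s0 = { 8192, &s1 };
		struct pair pairs[4];
		int n = build_pairs(&s0, pairs, 4);

		for (int i = 0; i < n; i++)
			printf("pair %d: A=%u B=%u next=%d\n",
			       i, pairs[i].a_len, pairs[i].b_len, pairs[i].next_valid);
		return 0;
	}
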
170 | 170 | ||
171 | static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq) | 171 | static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq) |
172 | { | 172 | { |
173 | struct ssp_cmd_iu *cmd_iu; | 173 | struct ssp_cmd_iu *cmd_iu; |
174 | struct sas_task *task = isci_request_access_task(ireq); | 174 | struct sas_task *task = isci_request_access_task(ireq); |
175 | 175 | ||
176 | cmd_iu = &ireq->ssp.cmd; | 176 | cmd_iu = &ireq->ssp.cmd; |
177 | 177 | ||
178 | memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8); | 178 | memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8); |
179 | cmd_iu->add_cdb_len = 0; | 179 | cmd_iu->add_cdb_len = 0; |
180 | cmd_iu->_r_a = 0; | 180 | cmd_iu->_r_a = 0; |
181 | cmd_iu->_r_b = 0; | 181 | cmd_iu->_r_b = 0; |
182 | cmd_iu->en_fburst = 0; /* unsupported */ | 182 | cmd_iu->en_fburst = 0; /* unsupported */ |
183 | cmd_iu->task_prio = task->ssp_task.task_prio; | 183 | cmd_iu->task_prio = task->ssp_task.task_prio; |
184 | cmd_iu->task_attr = task->ssp_task.task_attr; | 184 | cmd_iu->task_attr = task->ssp_task.task_attr; |
185 | cmd_iu->_r_c = 0; | 185 | cmd_iu->_r_c = 0; |
186 | 186 | ||
187 | sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb, | 187 | sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb, |
188 | sizeof(task->ssp_task.cdb) / sizeof(u32)); | 188 | sizeof(task->ssp_task.cdb) / sizeof(u32)); |
189 | } | 189 | } |
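
Assuming sci_swab32_cpy byte-swaps each 32-bit word on the copy, as its name suggests, the CDB is handed to the hardware with the bytes of every dword reversed. A small sketch of that per-word swab, with an invented CDB payload:

	#include <stdio.h>
	#include <stdint.h>

	/* swab one 32-bit word: 0xAABBCCDD -> 0xDDCCBBAA */
	static uint32_t swab32(uint32_t v)
	{
		return (v >> 24) | ((v >> 8) & 0xff00) |
		       ((v << 8) & 0xff0000) | (v << 24);
	}

	int main(void)
	{
		/* a 16-byte CDB viewed as four 32-bit words */
		uint32_t cdb[4] = { 0x03020128, 0x07060504, 0, 0 };

		for (int i = 0; i < 4; i++)
			printf("word %d: %#010x -> %#010x\n",
			       i, cdb[i], swab32(cdb[i]));
		return 0;
	}
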
190 | 190 | ||
191 | static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq) | 191 | static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq) |
192 | { | 192 | { |
193 | struct ssp_task_iu *task_iu; | 193 | struct ssp_task_iu *task_iu; |
194 | struct sas_task *task = isci_request_access_task(ireq); | 194 | struct sas_task *task = isci_request_access_task(ireq); |
195 | struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); | 195 | struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); |
196 | 196 | ||
197 | task_iu = &ireq->ssp.tmf; | 197 | task_iu = &ireq->ssp.tmf; |
198 | 198 | ||
199 | memset(task_iu, 0, sizeof(struct ssp_task_iu)); | 199 | memset(task_iu, 0, sizeof(struct ssp_task_iu)); |
200 | 200 | ||
201 | memcpy(task_iu->LUN, task->ssp_task.LUN, 8); | 201 | memcpy(task_iu->LUN, task->ssp_task.LUN, 8); |
202 | 202 | ||
203 | task_iu->task_func = isci_tmf->tmf_code; | 203 | task_iu->task_func = isci_tmf->tmf_code; |
204 | task_iu->task_tag = | 204 | task_iu->task_tag = |
205 | (test_bit(IREQ_TMF, &ireq->flags)) ? | 205 | (test_bit(IREQ_TMF, &ireq->flags)) ? |
206 | isci_tmf->io_tag : | 206 | isci_tmf->io_tag : |
207 | SCI_CONTROLLER_INVALID_IO_TAG; | 207 | SCI_CONTROLLER_INVALID_IO_TAG; |
208 | } | 208 | } |
209 | 209 | ||
210 | /** | 210 | /** |
211 | * This method will fill in the SCU Task Context for any type of SSP request. | 211 | * This method will fill in the SCU Task Context for any type of SSP request. |
212 | * @sci_req: | 212 | * @sci_req: |
213 | * @task_context: | 213 | * @task_context: |
214 | * | 214 | * |
215 | */ | 215 | */ |
216 | static void scu_ssp_reqeust_construct_task_context( | 216 | static void scu_ssp_reqeust_construct_task_context( |
217 | struct isci_request *ireq, | 217 | struct isci_request *ireq, |
218 | struct scu_task_context *task_context) | 218 | struct scu_task_context *task_context) |
219 | { | 219 | { |
220 | dma_addr_t dma_addr; | 220 | dma_addr_t dma_addr; |
221 | struct isci_remote_device *idev; | 221 | struct isci_remote_device *idev; |
222 | struct isci_port *iport; | 222 | struct isci_port *iport; |
223 | 223 | ||
224 | idev = ireq->target_device; | 224 | idev = ireq->target_device; |
225 | iport = idev->owning_port; | 225 | iport = idev->owning_port; |
226 | 226 | ||
227 | /* Fill in the TC with its required data */ | 227 | /* Fill in the TC with its required data */ |
228 | task_context->abort = 0; | 228 | task_context->abort = 0; |
229 | task_context->priority = 0; | 229 | task_context->priority = 0; |
230 | task_context->initiator_request = 1; | 230 | task_context->initiator_request = 1; |
231 | task_context->connection_rate = idev->connection_rate; | 231 | task_context->connection_rate = idev->connection_rate; |
232 | task_context->protocol_engine_index = ISCI_PEG; | 232 | task_context->protocol_engine_index = ISCI_PEG; |
233 | task_context->logical_port_index = iport->physical_port_index; | 233 | task_context->logical_port_index = iport->physical_port_index; |
234 | task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP; | 234 | task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP; |
235 | task_context->valid = SCU_TASK_CONTEXT_VALID; | 235 | task_context->valid = SCU_TASK_CONTEXT_VALID; |
236 | task_context->context_type = SCU_TASK_CONTEXT_TYPE; | 236 | task_context->context_type = SCU_TASK_CONTEXT_TYPE; |
237 | 237 | ||
238 | task_context->remote_node_index = idev->rnc.remote_node_index; | 238 | task_context->remote_node_index = idev->rnc.remote_node_index; |
239 | task_context->command_code = 0; | 239 | task_context->command_code = 0; |
240 | 240 | ||
241 | task_context->link_layer_control = 0; | 241 | task_context->link_layer_control = 0; |
242 | task_context->do_not_dma_ssp_good_response = 1; | 242 | task_context->do_not_dma_ssp_good_response = 1; |
243 | task_context->strict_ordering = 0; | 243 | task_context->strict_ordering = 0; |
244 | task_context->control_frame = 0; | 244 | task_context->control_frame = 0; |
245 | task_context->timeout_enable = 0; | 245 | task_context->timeout_enable = 0; |
246 | task_context->block_guard_enable = 0; | 246 | task_context->block_guard_enable = 0; |
247 | 247 | ||
248 | task_context->address_modifier = 0; | 248 | task_context->address_modifier = 0; |
249 | 249 | ||
250 | /* task_context->type.ssp.tag = ireq->io_tag; */ | 250 | /* task_context->type.ssp.tag = ireq->io_tag; */ |
251 | task_context->task_phase = 0x01; | 251 | task_context->task_phase = 0x01; |
252 | 252 | ||
253 | ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | | 253 | ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | |
254 | (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | | 254 | (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | |
255 | (iport->physical_port_index << | 255 | (iport->physical_port_index << |
256 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | | 256 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | |
257 | ISCI_TAG_TCI(ireq->io_tag)); | 257 | ISCI_TAG_TCI(ireq->io_tag)); |
258 | 258 | ||
259 | /* | 259 | /* |
260 | * Copy the physical address for the command buffer to the | 260 | * Copy the physical address for the command buffer to the |
261 | * SCU Task Context | 261 | * SCU Task Context |
262 | */ | 262 | */ |
263 | dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd); | 263 | dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd); |
264 | 264 | ||
265 | task_context->command_iu_upper = upper_32_bits(dma_addr); | 265 | task_context->command_iu_upper = upper_32_bits(dma_addr); |
266 | task_context->command_iu_lower = lower_32_bits(dma_addr); | 266 | task_context->command_iu_lower = lower_32_bits(dma_addr); |
267 | 267 | ||
268 | /* | 268 | /* |
269 | * Copy the physical address for the response buffer to the | 269 | * Copy the physical address for the response buffer to the |
270 | * SCU Task Context | 270 | * SCU Task Context |
271 | */ | 271 | */ |
272 | dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp); | 272 | dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp); |
273 | 273 | ||
274 | task_context->response_iu_upper = upper_32_bits(dma_addr); | 274 | task_context->response_iu_upper = upper_32_bits(dma_addr); |
275 | task_context->response_iu_lower = lower_32_bits(dma_addr); | 275 | task_context->response_iu_lower = lower_32_bits(dma_addr); |
276 | } | 276 | } |
277 | 277 | ||
278 | static u8 scu_bg_blk_size(struct scsi_device *sdp) | 278 | static u8 scu_bg_blk_size(struct scsi_device *sdp) |
279 | { | 279 | { |
280 | switch (sdp->sector_size) { | 280 | switch (sdp->sector_size) { |
281 | case 512: | 281 | case 512: |
282 | return 0; | 282 | return 0; |
283 | case 1024: | 283 | case 1024: |
284 | return 1; | 284 | return 1; |
285 | case 4096: | 285 | case 4096: |
286 | return 3; | 286 | return 3; |
287 | default: | 287 | default: |
288 | return 0xff; | 288 | return 0xff; |
289 | } | 289 | } |
290 | } | 290 | } |
291 | 291 | ||
292 | static u32 scu_dif_bytes(u32 len, u32 sector_size) | 292 | static u32 scu_dif_bytes(u32 len, u32 sector_size) |
293 | { | 293 | { |
294 | return (len >> ilog2(sector_size)) * 8; | 294 | return (len >> ilog2(sector_size)) * 8; |
295 | } | 295 | } |
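
The sizing rule above charges one 8-byte DIF tuple per logical block, with the division done as a shift since sector sizes are powers of two: a 64 KiB transfer at 512-byte sectors is 128 sectors, so 1024 extra bytes. A worked check, standalone and without driver types:

	#include <stdio.h>

	static unsigned dif_bytes(unsigned len, unsigned sector_size)
	{
		unsigned shift = 0;

		while ((1u << shift) < sector_size)	/* ilog2 for powers of two */
			shift++;
		return (len >> shift) * 8;
	}

	int main(void)
	{
		/* 64 KiB of data in 512-byte sectors: 128 sectors * 8 = 1024 bytes */
		printf("%u\n", dif_bytes(64 * 1024, 512));
		/* same payload in 4096-byte sectors: 16 sectors * 8 = 128 bytes */
		printf("%u\n", dif_bytes(64 * 1024, 4096));
		return 0;
	}
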
296 | 296 | ||
297 | static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op) | 297 | static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op) |
298 | { | 298 | { |
299 | struct scu_task_context *tc = ireq->tc; | 299 | struct scu_task_context *tc = ireq->tc; |
300 | struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task; | 300 | struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task; |
301 | u8 blk_sz = scu_bg_blk_size(scmd->device); | 301 | u8 blk_sz = scu_bg_blk_size(scmd->device); |
302 | 302 | ||
303 | tc->block_guard_enable = 1; | 303 | tc->block_guard_enable = 1; |
304 | tc->blk_prot_en = 1; | 304 | tc->blk_prot_en = 1; |
305 | tc->blk_sz = blk_sz; | 305 | tc->blk_sz = blk_sz; |
306 | /* DIF write insert */ | 306 | /* DIF write insert */ |
307 | tc->blk_prot_func = 0x2; | 307 | tc->blk_prot_func = 0x2; |
308 | 308 | ||
309 | tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes, | 309 | tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes, |
310 | scmd->device->sector_size); | 310 | scmd->device->sector_size); |
311 | 311 | ||
312 | /* always init to 0, used by hw */ | 312 | /* always init to 0, used by hw */ |
313 | tc->interm_crc_val = 0; | 313 | tc->interm_crc_val = 0; |
314 | 314 | ||
315 | tc->init_crc_seed = 0; | 315 | tc->init_crc_seed = 0; |
316 | tc->app_tag_verify = 0; | 316 | tc->app_tag_verify = 0; |
317 | tc->app_tag_gen = 0; | 317 | tc->app_tag_gen = 0; |
318 | tc->ref_tag_seed_verify = 0; | 318 | tc->ref_tag_seed_verify = 0; |
319 | 319 | ||
320 | /* always init to same as bg_blk_sz */ | 320 | /* always init to same as bg_blk_sz */ |
321 | tc->UD_bytes_immed_val = scmd->device->sector_size; | 321 | tc->UD_bytes_immed_val = scmd->device->sector_size; |
322 | 322 | ||
323 | tc->reserved_DC_0 = 0; | 323 | tc->reserved_DC_0 = 0; |
324 | 324 | ||
325 | /* always init to 8 */ | 325 | /* always init to 8 */ |
326 | tc->DIF_bytes_immed_val = 8; | 326 | tc->DIF_bytes_immed_val = 8; |
327 | 327 | ||
328 | tc->reserved_DC_1 = 0; | 328 | tc->reserved_DC_1 = 0; |
329 | tc->bgc_blk_sz = scmd->device->sector_size; | 329 | tc->bgc_blk_sz = scmd->device->sector_size; |
330 | tc->reserved_E0_0 = 0; | 330 | tc->reserved_E0_0 = 0; |
331 | tc->app_tag_gen_mask = 0; | 331 | tc->app_tag_gen_mask = 0; |
332 | 332 | ||
333 | /* set up block guard control */ | 333 | /* set up block guard control */ |
334 | tc->bgctl = 0; | 334 | tc->bgctl = 0; |
335 | 335 | ||
336 | /* DIF write insert */ | 336 | /* DIF write insert */ |
337 | tc->bgctl_f.op = 0x2; | 337 | tc->bgctl_f.op = 0x2; |
338 | 338 | ||
339 | tc->app_tag_verify_mask = 0; | 339 | tc->app_tag_verify_mask = 0; |
340 | 340 | ||
341 | /* must init to 0 for hw */ | 341 | /* must init to 0 for hw */ |
342 | tc->blk_guard_err = 0; | 342 | tc->blk_guard_err = 0; |
343 | 343 | ||
344 | tc->reserved_E8_0 = 0; | 344 | tc->reserved_E8_0 = 0; |
345 | 345 | ||
346 | if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) | 346 | if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) |
347 | tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff; | 347 | tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff; |
348 | else if (type & SCSI_PROT_DIF_TYPE3) | 348 | else if (type & SCSI_PROT_DIF_TYPE3) |
349 | tc->ref_tag_seed_gen = 0; | 349 | tc->ref_tag_seed_gen = 0; |
350 | } | 350 | } |
351 | 351 | ||
352 | static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op) | 352 | static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op) |
353 | { | 353 | { |
354 | struct scu_task_context *tc = ireq->tc; | 354 | struct scu_task_context *tc = ireq->tc; |
355 | struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task; | 355 | struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task; |
356 | u8 blk_sz = scu_bg_blk_size(scmd->device); | 356 | u8 blk_sz = scu_bg_blk_size(scmd->device); |
357 | 357 | ||
358 | tc->block_guard_enable = 1; | 358 | tc->block_guard_enable = 1; |
359 | tc->blk_prot_en = 1; | 359 | tc->blk_prot_en = 1; |
360 | tc->blk_sz = blk_sz; | 360 | tc->blk_sz = blk_sz; |
361 | /* DIF read strip */ | 361 | /* DIF read strip */ |
362 | tc->blk_prot_func = 0x1; | 362 | tc->blk_prot_func = 0x1; |
363 | 363 | ||
364 | tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes, | 364 | tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes, |
365 | scmd->device->sector_size); | 365 | scmd->device->sector_size); |
366 | 366 | ||
367 | /* always init to 0, used by hw */ | 367 | /* always init to 0, used by hw */ |
368 | tc->interm_crc_val = 0; | 368 | tc->interm_crc_val = 0; |
369 | 369 | ||
370 | tc->init_crc_seed = 0; | 370 | tc->init_crc_seed = 0; |
371 | tc->app_tag_verify = 0; | 371 | tc->app_tag_verify = 0; |
372 | tc->app_tag_gen = 0; | 372 | tc->app_tag_gen = 0; |
373 | 373 | ||
374 | if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) | 374 | if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) |
375 | tc->ref_tag_seed_verify = scsi_get_lba(scmd) & 0xffffffff; | 375 | tc->ref_tag_seed_verify = scsi_get_lba(scmd) & 0xffffffff; |
376 | else if (type & SCSI_PROT_DIF_TYPE3) | 376 | else if (type & SCSI_PROT_DIF_TYPE3) |
377 | tc->ref_tag_seed_verify = 0; | 377 | tc->ref_tag_seed_verify = 0; |
378 | 378 | ||
379 | /* always init to same as bg_blk_sz */ | 379 | /* always init to same as bg_blk_sz */ |
380 | tc->UD_bytes_immed_val = scmd->device->sector_size; | 380 | tc->UD_bytes_immed_val = scmd->device->sector_size; |
381 | 381 | ||
382 | tc->reserved_DC_0 = 0; | 382 | tc->reserved_DC_0 = 0; |
383 | 383 | ||
384 | /* always init to 8 */ | 384 | /* always init to 8 */ |
385 | tc->DIF_bytes_immed_val = 8; | 385 | tc->DIF_bytes_immed_val = 8; |
386 | 386 | ||
387 | tc->reserved_DC_1 = 0; | 387 | tc->reserved_DC_1 = 0; |
388 | tc->bgc_blk_sz = scmd->device->sector_size; | 388 | tc->bgc_blk_sz = scmd->device->sector_size; |
389 | tc->reserved_E0_0 = 0; | 389 | tc->reserved_E0_0 = 0; |
390 | tc->app_tag_gen_mask = 0; | 390 | tc->app_tag_gen_mask = 0; |
391 | 391 | ||
392 | /* set up block guard control */ | 392 | /* set up block guard control */ |
393 | tc->bgctl = 0; | 393 | tc->bgctl = 0; |
394 | 394 | ||
395 | /* DIF read strip */ | 395 | /* DIF read strip */ |
396 | tc->bgctl_f.crc_verify = 1; | 396 | tc->bgctl_f.crc_verify = 1; |
397 | tc->bgctl_f.op = 0x1; | 397 | tc->bgctl_f.op = 0x1; |
398 | if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) { | 398 | if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) { |
399 | tc->bgctl_f.ref_tag_chk = 1; | 399 | tc->bgctl_f.ref_tag_chk = 1; |
400 | tc->bgctl_f.app_f_detect = 1; | 400 | tc->bgctl_f.app_f_detect = 1; |
401 | } else if (type & SCSI_PROT_DIF_TYPE3) | 401 | } else if (type & SCSI_PROT_DIF_TYPE3) |
402 | tc->bgctl_f.app_ref_f_detect = 1; | 402 | tc->bgctl_f.app_ref_f_detect = 1; |
403 | 403 | ||
404 | tc->app_tag_verify_mask = 0; | 404 | tc->app_tag_verify_mask = 0; |
405 | 405 | ||
406 | /* must init to 0 for hw */ | 406 | /* must init to 0 for hw */ |
407 | tc->blk_guard_err = 0; | 407 | tc->blk_guard_err = 0; |
408 | 408 | ||
409 | tc->reserved_E8_0 = 0; | 409 | tc->reserved_E8_0 = 0; |
410 | tc->ref_tag_seed_gen = 0; | 410 | tc->ref_tag_seed_gen = 0; |
411 | } | 411 | } |
412 | 412 | ||
413 | /** | 413 | /** |
414 | * This method will fill in the SCU Task Context for an SSP IO request. | 414 | * This method will fill in the SCU Task Context for an SSP IO request. |
415 | * @sci_req: | 415 | * @sci_req: |
416 | * | 416 | * |
417 | */ | 417 | */ |
418 | static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq, | 418 | static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq, |
419 | enum dma_data_direction dir, | 419 | enum dma_data_direction dir, |
420 | u32 len) | 420 | u32 len) |
421 | { | 421 | { |
422 | struct scu_task_context *task_context = ireq->tc; | 422 | struct scu_task_context *task_context = ireq->tc; |
423 | struct sas_task *sas_task = ireq->ttype_ptr.io_task_ptr; | 423 | struct sas_task *sas_task = ireq->ttype_ptr.io_task_ptr; |
424 | struct scsi_cmnd *scmd = sas_task->uldd_task; | 424 | struct scsi_cmnd *scmd = sas_task->uldd_task; |
425 | u8 prot_type = scsi_get_prot_type(scmd); | 425 | u8 prot_type = scsi_get_prot_type(scmd); |
426 | u8 prot_op = scsi_get_prot_op(scmd); | 426 | u8 prot_op = scsi_get_prot_op(scmd); |
427 | 427 | ||
428 | scu_ssp_reqeust_construct_task_context(ireq, task_context); | 428 | scu_ssp_reqeust_construct_task_context(ireq, task_context); |
429 | 429 | ||
430 | task_context->ssp_command_iu_length = | 430 | task_context->ssp_command_iu_length = |
431 | sizeof(struct ssp_cmd_iu) / sizeof(u32); | 431 | sizeof(struct ssp_cmd_iu) / sizeof(u32); |
432 | task_context->type.ssp.frame_type = SSP_COMMAND; | 432 | task_context->type.ssp.frame_type = SSP_COMMAND; |
433 | 433 | ||
434 | switch (dir) { | 434 | switch (dir) { |
435 | case DMA_FROM_DEVICE: | 435 | case DMA_FROM_DEVICE: |
436 | case DMA_NONE: | 436 | case DMA_NONE: |
437 | default: | 437 | default: |
438 | task_context->task_type = SCU_TASK_TYPE_IOREAD; | 438 | task_context->task_type = SCU_TASK_TYPE_IOREAD; |
439 | break; | 439 | break; |
440 | case DMA_TO_DEVICE: | 440 | case DMA_TO_DEVICE: |
441 | task_context->task_type = SCU_TASK_TYPE_IOWRITE; | 441 | task_context->task_type = SCU_TASK_TYPE_IOWRITE; |
442 | break; | 442 | break; |
443 | } | 443 | } |
444 | 444 | ||
445 | task_context->transfer_length_bytes = len; | 445 | task_context->transfer_length_bytes = len; |
446 | 446 | ||
447 | if (task_context->transfer_length_bytes > 0) | 447 | if (task_context->transfer_length_bytes > 0) |
448 | sci_request_build_sgl(ireq); | 448 | sci_request_build_sgl(ireq); |
449 | 449 | ||
450 | if (prot_type != SCSI_PROT_DIF_TYPE0) { | 450 | if (prot_type != SCSI_PROT_DIF_TYPE0) { |
451 | if (prot_op == SCSI_PROT_READ_STRIP) | 451 | if (prot_op == SCSI_PROT_READ_STRIP) |
452 | scu_ssp_ireq_dif_strip(ireq, prot_type, prot_op); | 452 | scu_ssp_ireq_dif_strip(ireq, prot_type, prot_op); |
453 | else if (prot_op == SCSI_PROT_WRITE_INSERT) | 453 | else if (prot_op == SCSI_PROT_WRITE_INSERT) |
454 | scu_ssp_ireq_dif_insert(ireq, prot_type, prot_op); | 454 | scu_ssp_ireq_dif_insert(ireq, prot_type, prot_op); |
455 | } | 455 | } |
456 | } | 456 | } |
457 | 457 | ||
458 | /** | 458 | /** |
459 | * This method will fill in the SCU Task Context for an SSP Task request. The | 459 | * This method will fill in the SCU Task Context for an SSP Task request. The |
460 | * following important settings are utilized: -# priority == | 460 | * following important settings are utilized: -# priority == |
461 | * SCU_TASK_PRIORITY_HIGH. This ensures that the task request is issued | 461 | * SCU_TASK_PRIORITY_HIGH. This ensures that the task request is issued |
462 | * ahead of other tasks destined for the same Remote Node. -# task_type == | 462 | * ahead of other tasks destined for the same Remote Node. -# task_type == |
463 | * SCU_TASK_TYPE_IOREAD. This simply indicates that a normal request type | 463 | * SCU_TASK_TYPE_IOREAD. This simply indicates that a normal request type |
464 | * (i.e. non-raw frame) is being utilized to perform task management. -# | 464 | * (i.e. non-raw frame) is being utilized to perform task management. -# |
465 | * control_frame == 1. This ensures that the proper endianness is set so | 465 | * control_frame == 1. This ensures that the proper endianness is set so |
466 | * that the bytes are transmitted in the right order for a task frame. | 466 | * that the bytes are transmitted in the right order for a task frame. |
467 | * @sci_req: This parameter specifies the task request object being | 467 | * @sci_req: This parameter specifies the task request object being |
468 | * constructed. | 468 | * constructed. |
469 | * | 469 | * |
470 | */ | 470 | */ |
471 | static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq) | 471 | static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq) |
472 | { | 472 | { |
473 | struct scu_task_context *task_context = ireq->tc; | 473 | struct scu_task_context *task_context = ireq->tc; |
474 | 474 | ||
475 | scu_ssp_reqeust_construct_task_context(ireq, task_context); | 475 | scu_ssp_reqeust_construct_task_context(ireq, task_context); |
476 | 476 | ||
477 | task_context->control_frame = 1; | 477 | task_context->control_frame = 1; |
478 | task_context->priority = SCU_TASK_PRIORITY_HIGH; | 478 | task_context->priority = SCU_TASK_PRIORITY_HIGH; |
479 | task_context->task_type = SCU_TASK_TYPE_RAW_FRAME; | 479 | task_context->task_type = SCU_TASK_TYPE_RAW_FRAME; |
480 | task_context->transfer_length_bytes = 0; | 480 | task_context->transfer_length_bytes = 0; |
481 | task_context->type.ssp.frame_type = SSP_TASK; | 481 | task_context->type.ssp.frame_type = SSP_TASK; |
482 | task_context->ssp_command_iu_length = | 482 | task_context->ssp_command_iu_length = |
483 | sizeof(struct ssp_task_iu) / sizeof(u32); | 483 | sizeof(struct ssp_task_iu) / sizeof(u32); |
484 | } | 484 | } |
485 | 485 | ||
486 | /** | 486 | /** |
487 | * This method will fill in the SCU Task Context for any type of SATA | 487 | * This method will fill in the SCU Task Context for any type of SATA |
488 | * request. This is called from the various SATA constructors. | 488 | * request. This is called from the various SATA constructors. |
489 | * @sci_req: The general IO request object which is to be used in | 489 | * @sci_req: The general IO request object which is to be used in |
490 | * constructing the SCU task context. | 490 | * constructing the SCU task context. |
491 | * @task_context: The buffer pointer for the SCU task context which is being | 491 | * @task_context: The buffer pointer for the SCU task context which is being |
492 | * constructed. | 492 | * constructed. |
493 | * | 493 | * |
494 | * The general IO request construction is complete, as is the buffer | 494 | * The general IO request construction is complete, as is the buffer |
495 | * assignment for the command buffer. @todo Revisit task context construction | 495 | * assignment for the command buffer. @todo Revisit task context construction |
496 | * to determine what is common for SSP/SMP/STP task context structures. | 496 | * to determine what is common for SSP/SMP/STP task context structures. |
497 | */ | 497 | */ |
498 | static void scu_sata_reqeust_construct_task_context( | 498 | static void scu_sata_reqeust_construct_task_context( |
499 | struct isci_request *ireq, | 499 | struct isci_request *ireq, |
500 | struct scu_task_context *task_context) | 500 | struct scu_task_context *task_context) |
501 | { | 501 | { |
502 | dma_addr_t dma_addr; | 502 | dma_addr_t dma_addr; |
503 | struct isci_remote_device *idev; | 503 | struct isci_remote_device *idev; |
504 | struct isci_port *iport; | 504 | struct isci_port *iport; |
505 | 505 | ||
506 | idev = ireq->target_device; | 506 | idev = ireq->target_device; |
507 | iport = idev->owning_port; | 507 | iport = idev->owning_port; |
508 | 508 | ||
509 | /* Fill in the TC with its required data */ | 509 | /* Fill in the TC with its required data */ |
510 | task_context->abort = 0; | 510 | task_context->abort = 0; |
511 | task_context->priority = SCU_TASK_PRIORITY_NORMAL; | 511 | task_context->priority = SCU_TASK_PRIORITY_NORMAL; |
512 | task_context->initiator_request = 1; | 512 | task_context->initiator_request = 1; |
513 | task_context->connection_rate = idev->connection_rate; | 513 | task_context->connection_rate = idev->connection_rate; |
514 | task_context->protocol_engine_index = ISCI_PEG; | 514 | task_context->protocol_engine_index = ISCI_PEG; |
515 | task_context->logical_port_index = iport->physical_port_index; | 515 | task_context->logical_port_index = iport->physical_port_index; |
516 | task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP; | 516 | task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP; |
517 | task_context->valid = SCU_TASK_CONTEXT_VALID; | 517 | task_context->valid = SCU_TASK_CONTEXT_VALID; |
518 | task_context->context_type = SCU_TASK_CONTEXT_TYPE; | 518 | task_context->context_type = SCU_TASK_CONTEXT_TYPE; |
519 | 519 | ||
520 | task_context->remote_node_index = idev->rnc.remote_node_index; | 520 | task_context->remote_node_index = idev->rnc.remote_node_index; |
521 | task_context->command_code = 0; | 521 | task_context->command_code = 0; |
522 | 522 | ||
523 | task_context->link_layer_control = 0; | 523 | task_context->link_layer_control = 0; |
524 | task_context->do_not_dma_ssp_good_response = 1; | 524 | task_context->do_not_dma_ssp_good_response = 1; |
525 | task_context->strict_ordering = 0; | 525 | task_context->strict_ordering = 0; |
526 | task_context->control_frame = 0; | 526 | task_context->control_frame = 0; |
527 | task_context->timeout_enable = 0; | 527 | task_context->timeout_enable = 0; |
528 | task_context->block_guard_enable = 0; | 528 | task_context->block_guard_enable = 0; |
529 | 529 | ||
530 | task_context->address_modifier = 0; | 530 | task_context->address_modifier = 0; |
531 | task_context->task_phase = 0x01; | 531 | task_context->task_phase = 0x01; |
532 | 532 | ||
533 | task_context->ssp_command_iu_length = | 533 | task_context->ssp_command_iu_length = |
534 | (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32); | 534 | (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32); |
535 | 535 | ||
536 | /* Set the first word of the H2D REG FIS */ | 536 | /* Set the first word of the H2D REG FIS */ |
537 | task_context->type.words[0] = *(u32 *)&ireq->stp.cmd; | 537 | task_context->type.words[0] = *(u32 *)&ireq->stp.cmd; |
538 | 538 | ||
539 | ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | | 539 | ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | |
540 | (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | | 540 | (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | |
541 | (iport->physical_port_index << | 541 | (iport->physical_port_index << |
542 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | | 542 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | |
543 | ISCI_TAG_TCI(ireq->io_tag)); | 543 | ISCI_TAG_TCI(ireq->io_tag)); |
544 | /* | 544 | /* |
545 | * Copy the physical address for the command buffer to the SCU Task | 545 | * Copy the physical address for the command buffer to the SCU Task |
546 | * Context. We must offset the command buffer by 4 bytes because the | 546 | * Context. We must offset the command buffer by 4 bytes because the |
547 | * first 4 bytes are transferred in the body of the TC. | 547 | * first 4 bytes are transferred in the body of the TC. |
548 | */ | 548 | */ |
549 | dma_addr = sci_io_request_get_dma_addr(ireq, | 549 | dma_addr = sci_io_request_get_dma_addr(ireq, |
550 | ((char *) &ireq->stp.cmd) + | 550 | ((char *) &ireq->stp.cmd) + |
551 | sizeof(u32)); | 551 | sizeof(u32)); |
552 | 552 | ||
553 | task_context->command_iu_upper = upper_32_bits(dma_addr); | 553 | task_context->command_iu_upper = upper_32_bits(dma_addr); |
554 | task_context->command_iu_lower = lower_32_bits(dma_addr); | 554 | task_context->command_iu_lower = lower_32_bits(dma_addr); |
555 | 555 | ||
556 | /* SATA Requests do not have a response buffer */ | 556 | /* SATA Requests do not have a response buffer */ |
557 | task_context->response_iu_upper = 0; | 557 | task_context->response_iu_upper = 0; |
558 | task_context->response_iu_lower = 0; | 558 | task_context->response_iu_lower = 0; |
559 | } | 559 | } |
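
Per the comment above, word 0 of the 20-byte H2D register FIS travels inside the task context itself, so the command-buffer DMA pointer starts 4 bytes into the FIS. A sketch of that split, with made-up FIS bytes:

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	int main(void)
	{
		/* 20-byte H2D register FIS; only the leading bytes matter here */
		uint8_t fis[20] = { 0x27, 0x80, 0x25 };
		uint32_t word0;

		memcpy(&word0, fis, sizeof(word0));	/* goes into the TC body */
		printf("TC word0 = %#x\n", word0);
		printf("command buffer = fis + 4, length = %zu bytes\n",
		       sizeof(fis) - sizeof(uint32_t));	/* remaining 16 bytes */
		return 0;
	}
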
560 | 560 | ||
561 | static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq) | 561 | static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq) |
562 | { | 562 | { |
563 | struct scu_task_context *task_context = ireq->tc; | 563 | struct scu_task_context *task_context = ireq->tc; |
564 | 564 | ||
565 | scu_sata_reqeust_construct_task_context(ireq, task_context); | 565 | scu_sata_reqeust_construct_task_context(ireq, task_context); |
566 | 566 | ||
567 | task_context->control_frame = 0; | 567 | task_context->control_frame = 0; |
568 | task_context->priority = SCU_TASK_PRIORITY_NORMAL; | 568 | task_context->priority = SCU_TASK_PRIORITY_NORMAL; |
569 | task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME; | 569 | task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME; |
570 | task_context->type.stp.fis_type = FIS_REGH2D; | 570 | task_context->type.stp.fis_type = FIS_REGH2D; |
571 | task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32); | 571 | task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32); |
572 | } | 572 | } |
573 | 573 | ||
574 | static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq, | 574 | static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq, |
575 | bool copy_rx_frame) | 575 | bool copy_rx_frame) |
576 | { | 576 | { |
577 | struct isci_stp_request *stp_req = &ireq->stp.req; | 577 | struct isci_stp_request *stp_req = &ireq->stp.req; |
578 | 578 | ||
579 | scu_stp_raw_request_construct_task_context(ireq); | 579 | scu_stp_raw_request_construct_task_context(ireq); |
580 | 580 | ||
581 | stp_req->status = 0; | 581 | stp_req->status = 0; |
582 | stp_req->sgl.offset = 0; | 582 | stp_req->sgl.offset = 0; |
583 | stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A; | 583 | stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A; |
584 | 584 | ||
585 | if (copy_rx_frame) { | 585 | if (copy_rx_frame) { |
586 | sci_request_build_sgl(ireq); | 586 | sci_request_build_sgl(ireq); |
587 | stp_req->sgl.index = 0; | 587 | stp_req->sgl.index = 0; |
588 | } else { | 588 | } else { |
589 | /* The user does not want the data copied to the SGL buffer location */ | 589 | /* The user does not want the data copied to the SGL buffer location */ |
590 | stp_req->sgl.index = -1; | 590 | stp_req->sgl.index = -1; |
591 | } | 591 | } |
592 | 592 | ||
593 | return SCI_SUCCESS; | 593 | return SCI_SUCCESS; |
594 | } | 594 | } |
595 | 595 | ||
596 | /** | 596 | /** |
597 | * | 597 | * |
598 | * @sci_req: This parameter specifies the request to be constructed as an | 598 | * @sci_req: This parameter specifies the request to be constructed as an |
599 | * optimized request. | 599 | * optimized request. |
600 | * @optimized_task_type: This parameter specifies whether the request is to be | 600 | * @optimized_task_type: This parameter specifies whether the request is to be |
601 | * a UDMA request or an NCQ request. - A value of 0 indicates UDMA. - A | 601 | * a UDMA request or an NCQ request. - A value of 0 indicates UDMA. - A |
602 | * value of 1 indicates NCQ. | 602 | * value of 1 indicates NCQ. |
603 | * | 603 | * |
604 | * This method will perform request construction common to all types of STP | 604 | * This method will perform request construction common to all types of STP |
605 | * requests that are optimized by the silicon (i.e. UDMA, NCQ). | 605 | * requests that are optimized by the silicon (i.e. UDMA, NCQ). |
606 | * | 606 | * |
607 | */ | 607 | */ |
608 | static void sci_stp_optimized_request_construct(struct isci_request *ireq, | 608 | static void sci_stp_optimized_request_construct(struct isci_request *ireq, |
609 | u8 optimized_task_type, | 609 | u8 optimized_task_type, |
610 | u32 len, | 610 | u32 len, |
611 | enum dma_data_direction dir) | 611 | enum dma_data_direction dir) |
612 | { | 612 | { |
613 | struct scu_task_context *task_context = ireq->tc; | 613 | struct scu_task_context *task_context = ireq->tc; |
614 | 614 | ||
615 | /* Build the STP task context structure */ | 615 | /* Build the STP task context structure */ |
616 | scu_sata_reqeust_construct_task_context(ireq, task_context); | 616 | scu_sata_reqeust_construct_task_context(ireq, task_context); |
617 | 617 | ||
618 | /* Copy over the SGL elements */ | 618 | /* Copy over the SGL elements */ |
619 | sci_request_build_sgl(ireq); | 619 | sci_request_build_sgl(ireq); |
620 | 620 | ||
621 | /* Copy over the number of bytes to be transferred */ | 621 | /* Copy over the number of bytes to be transferred */ |
622 | task_context->transfer_length_bytes = len; | 622 | task_context->transfer_length_bytes = len; |
623 | 623 | ||
624 | if (dir == DMA_TO_DEVICE) { | 624 | if (dir == DMA_TO_DEVICE) { |
625 | /* | 625 | /* |
626 | * The difference between the DMA IN and DMA OUT request task type | 626 | * The difference between the DMA IN and DMA OUT request task type |
627 | * values are consistent with the difference between FPDMA READ | 627 | * values are consistent with the difference between FPDMA READ |
628 | * and FPDMA WRITE values. Add the supplied task type parameter | 628 | * and FPDMA WRITE values. Add the supplied task type parameter |
629 | * to this difference to set the task type properly for this | 629 | * to this difference to set the task type properly for this |
630 | * DATA OUT (WRITE) case. */ | 630 | * DATA OUT (WRITE) case. */ |
631 | task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT | 631 | task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT |
632 | - SCU_TASK_TYPE_DMA_IN); | 632 | - SCU_TASK_TYPE_DMA_IN); |
633 | } else { | 633 | } else { |
634 | /* | 634 | /* |
635 | * For the DATA IN (READ) case, simply save the supplied | 635 | * For the DATA IN (READ) case, simply save the supplied |
636 | * optimized task type. */ | 636 | * optimized task type. */ |
637 | task_context->task_type = optimized_task_type; | 637 | task_context->task_type = optimized_task_type; |
638 | } | 638 | } |
639 | } | 639 | } |
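
The write-direction task types sit at a fixed distance from their read-direction counterparts, so a single addition converts whichever read type the caller supplied. A sketch of that offset trick; the enum values here are invented for illustration, not the silicon's actual encoding:

	#include <stdio.h>

	enum task_type {
		TYPE_DMA_IN = 4,
		TYPE_FPDMAQ_READ = 5,
		TYPE_DMA_OUT = 6,	/* TYPE_DMA_IN + 2 */
		TYPE_FPDMAQ_WRITE = 7,	/* TYPE_FPDMAQ_READ + 2 */
	};

	static enum task_type for_dir(enum task_type read_type, int to_device)
	{
		return to_device ? read_type + (TYPE_DMA_OUT - TYPE_DMA_IN)
				 : read_type;
	}

	int main(void)
	{
		printf("NCQ write -> %d\n", for_dir(TYPE_FPDMAQ_READ, 1));
		printf("UDMA read -> %d\n", for_dir(TYPE_DMA_IN, 0));
		return 0;
	}
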
640 | 640 | ||
641 | static void sci_atapi_construct(struct isci_request *ireq) | 641 | static void sci_atapi_construct(struct isci_request *ireq) |
642 | { | 642 | { |
643 | struct host_to_dev_fis *h2d_fis = &ireq->stp.cmd; | 643 | struct host_to_dev_fis *h2d_fis = &ireq->stp.cmd; |
644 | struct sas_task *task; | 644 | struct sas_task *task; |
645 | 645 | ||
646 | /* To simplify the implementation we take advantage of the | 646 | /* To simplify the implementation we take advantage of the |
647 | * silicon's partial acceleration of atapi protocol (dma data | 647 | * silicon's partial acceleration of atapi protocol (dma data |
648 | * transfers), so we promote all commands to dma protocol. This | 648 | * transfers), so we promote all commands to dma protocol. This |
649 | * breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives. | 649 | * breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives. |
650 | */ | 650 | */ |
651 | h2d_fis->features |= ATAPI_PKT_DMA; | 651 | h2d_fis->features |= ATAPI_PKT_DMA; |
652 | 652 | ||
653 | scu_stp_raw_request_construct_task_context(ireq); | 653 | scu_stp_raw_request_construct_task_context(ireq); |
654 | 654 | ||
655 | task = isci_request_access_task(ireq); | 655 | task = isci_request_access_task(ireq); |
656 | if (task->data_dir == DMA_NONE) | 656 | if (task->data_dir == DMA_NONE) |
657 | task->total_xfer_len = 0; | 657 | task->total_xfer_len = 0; |
658 | 658 | ||
659 | /* clear the response so we can detect arrival of an | 659 | /* clear the response so we can detect arrival of an |
660 | * unsolicited d2h fis | 660 | * unsolicited d2h fis |
661 | */ | 661 | */ |
662 | ireq->stp.rsp.fis_type = 0; | 662 | ireq->stp.rsp.fis_type = 0; |
663 | } | 663 | } |
664 | 664 | ||
665 | static enum sci_status | 665 | static enum sci_status |
666 | sci_io_request_construct_sata(struct isci_request *ireq, | 666 | sci_io_request_construct_sata(struct isci_request *ireq, |
667 | u32 len, | 667 | u32 len, |
668 | enum dma_data_direction dir, | 668 | enum dma_data_direction dir, |
669 | bool copy) | 669 | bool copy) |
670 | { | 670 | { |
671 | enum sci_status status = SCI_SUCCESS; | 671 | enum sci_status status = SCI_SUCCESS; |
672 | struct sas_task *task = isci_request_access_task(ireq); | 672 | struct sas_task *task = isci_request_access_task(ireq); |
673 | struct domain_device *dev = ireq->target_device->domain_dev; | 673 | struct domain_device *dev = ireq->target_device->domain_dev; |
674 | 674 | ||
675 | /* check for management protocols */ | 675 | /* check for management protocols */ |
676 | if (test_bit(IREQ_TMF, &ireq->flags)) { | 676 | if (test_bit(IREQ_TMF, &ireq->flags)) { |
677 | struct isci_tmf *tmf = isci_request_access_tmf(ireq); | 677 | struct isci_tmf *tmf = isci_request_access_tmf(ireq); |
678 | 678 | ||
679 | dev_err(&ireq->owning_controller->pdev->dev, | 679 | dev_err(&ireq->owning_controller->pdev->dev, |
680 | "%s: Request 0x%p received un-handled SAT " | 680 | "%s: Request 0x%p received un-handled SAT " |
681 | "management protocol 0x%x.\n", | 681 | "management protocol 0x%x.\n", |
682 | __func__, ireq, tmf->tmf_code); | 682 | __func__, ireq, tmf->tmf_code); |
683 | 683 | ||
684 | return SCI_FAILURE; | 684 | return SCI_FAILURE; |
685 | } | 685 | } |
686 | 686 | ||
687 | if (!sas_protocol_ata(task->task_proto)) { | 687 | if (!sas_protocol_ata(task->task_proto)) { |
688 | dev_err(&ireq->owning_controller->pdev->dev, | 688 | dev_err(&ireq->owning_controller->pdev->dev, |
689 | "%s: Non-ATA protocol in SATA path: 0x%x\n", | 689 | "%s: Non-ATA protocol in SATA path: 0x%x\n", |
690 | __func__, | 690 | __func__, |
691 | task->task_proto); | 691 | task->task_proto); |
692 | return SCI_FAILURE; | 692 | return SCI_FAILURE; |
693 | 693 | ||
694 | } | 694 | } |
695 | 695 | ||
696 | /* ATAPI */ | 696 | /* ATAPI */ |
697 | if (dev->sata_dev.command_set == ATAPI_COMMAND_SET && | 697 | if (dev->sata_dev.command_set == ATAPI_COMMAND_SET && |
698 | task->ata_task.fis.command == ATA_CMD_PACKET) { | 698 | task->ata_task.fis.command == ATA_CMD_PACKET) { |
699 | sci_atapi_construct(ireq); | 699 | sci_atapi_construct(ireq); |
700 | return SCI_SUCCESS; | 700 | return SCI_SUCCESS; |
701 | } | 701 | } |
702 | 702 | ||
703 | /* non data */ | 703 | /* non data */ |
704 | if (task->data_dir == DMA_NONE) { | 704 | if (task->data_dir == DMA_NONE) { |
705 | scu_stp_raw_request_construct_task_context(ireq); | 705 | scu_stp_raw_request_construct_task_context(ireq); |
706 | return SCI_SUCCESS; | 706 | return SCI_SUCCESS; |
707 | } | 707 | } |
708 | 708 | ||
709 | /* NCQ */ | 709 | /* NCQ */ |
710 | if (task->ata_task.use_ncq) { | 710 | if (task->ata_task.use_ncq) { |
711 | sci_stp_optimized_request_construct(ireq, | 711 | sci_stp_optimized_request_construct(ireq, |
712 | SCU_TASK_TYPE_FPDMAQ_READ, | 712 | SCU_TASK_TYPE_FPDMAQ_READ, |
713 | len, dir); | 713 | len, dir); |
714 | return SCI_SUCCESS; | 714 | return SCI_SUCCESS; |
715 | } | 715 | } |
716 | 716 | ||
717 | /* DMA */ | 717 | /* DMA */ |
718 | if (task->ata_task.dma_xfer) { | 718 | if (task->ata_task.dma_xfer) { |
719 | sci_stp_optimized_request_construct(ireq, | 719 | sci_stp_optimized_request_construct(ireq, |
720 | SCU_TASK_TYPE_DMA_IN, | 720 | SCU_TASK_TYPE_DMA_IN, |
721 | len, dir); | 721 | len, dir); |
722 | return SCI_SUCCESS; | 722 | return SCI_SUCCESS; |
723 | } else /* PIO */ | 723 | } else /* PIO */ |
724 | return sci_stp_pio_request_construct(ireq, copy); | 724 | return sci_stp_pio_request_construct(ireq, copy); |
725 | 725 | ||
726 | return status; | 726 | return status; |
727 | } | 727 | } |
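
The constructor above is a fixed-order dispatch: reject management frames and non-ATA protocols, then pick ATAPI, non-data, NCQ, DMA, or PIO, in that order. A condensed decision-table sketch with stand-in flags only:

	#include <stdio.h>
	#include <stdbool.h>

	enum path { REJECT, ATAPI, NON_DATA, NCQ, DMA, PIO };

	struct req {
		bool is_tmf, is_ata, is_atapi, no_data, use_ncq, dma_xfer;
	};

	static enum path pick(const struct req *r)
	{
		if (r->is_tmf || !r->is_ata)
			return REJECT;
		if (r->is_atapi)
			return ATAPI;
		if (r->no_data)
			return NON_DATA;
		if (r->use_ncq)
			return NCQ;
		if (r->dma_xfer)
			return DMA;
		return PIO;
	}

	int main(void)
	{
		struct req ncq = { .is_ata = true, .use_ncq = true };
		struct req pio = { .is_ata = true };

		printf("%d %d\n", pick(&ncq), pick(&pio));	/* 3 5 */
		return 0;
	}
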
728 | 728 | ||
729 | static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq) | 729 | static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq) |
730 | { | 730 | { |
731 | struct sas_task *task = isci_request_access_task(ireq); | 731 | struct sas_task *task = isci_request_access_task(ireq); |
732 | 732 | ||
733 | ireq->protocol = SAS_PROTOCOL_SSP; | 733 | ireq->protocol = SAS_PROTOCOL_SSP; |
734 | 734 | ||
735 | scu_ssp_io_request_construct_task_context(ireq, | 735 | scu_ssp_io_request_construct_task_context(ireq, |
736 | task->data_dir, | 736 | task->data_dir, |
737 | task->total_xfer_len); | 737 | task->total_xfer_len); |
738 | 738 | ||
739 | sci_io_request_build_ssp_command_iu(ireq); | 739 | sci_io_request_build_ssp_command_iu(ireq); |
740 | 740 | ||
741 | sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); | 741 | sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); |
742 | 742 | ||
743 | return SCI_SUCCESS; | 743 | return SCI_SUCCESS; |
744 | } | 744 | } |
745 | 745 | ||
746 | enum sci_status sci_task_request_construct_ssp( | 746 | enum sci_status sci_task_request_construct_ssp( |
747 | struct isci_request *ireq) | 747 | struct isci_request *ireq) |
748 | { | 748 | { |
749 | /* Construct the SSP Task SCU Task Context */ | 749 | /* Construct the SSP Task SCU Task Context */ |
750 | scu_ssp_task_request_construct_task_context(ireq); | 750 | scu_ssp_task_request_construct_task_context(ireq); |
751 | 751 | ||
752 | /* Fill in the SSP Task IU */ | 752 | /* Fill in the SSP Task IU */ |
753 | sci_task_request_build_ssp_task_iu(ireq); | 753 | sci_task_request_build_ssp_task_iu(ireq); |
754 | 754 | ||
755 | sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); | 755 | sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); |
756 | 756 | ||
757 | return SCI_SUCCESS; | 757 | return SCI_SUCCESS; |
758 | } | 758 | } |
759 | 759 | ||
760 | static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq) | 760 | static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq) |
761 | { | 761 | { |
762 | enum sci_status status; | 762 | enum sci_status status; |
763 | bool copy = false; | 763 | bool copy = false; |
764 | struct sas_task *task = isci_request_access_task(ireq); | 764 | struct sas_task *task = isci_request_access_task(ireq); |
765 | 765 | ||
766 | ireq->protocol = SAS_PROTOCOL_STP; | 766 | ireq->protocol = SAS_PROTOCOL_STP; |
767 | 767 | ||
768 | copy = (task->data_dir != DMA_NONE); | 768 | copy = (task->data_dir != DMA_NONE); |
769 | 769 | ||
770 | status = sci_io_request_construct_sata(ireq, | 770 | status = sci_io_request_construct_sata(ireq, |
771 | task->total_xfer_len, | 771 | task->total_xfer_len, |
772 | task->data_dir, | 772 | task->data_dir, |
773 | copy); | 773 | copy); |
774 | 774 | ||
775 | if (status == SCI_SUCCESS) | 775 | if (status == SCI_SUCCESS) |
776 | sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); | 776 | sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); |
777 | 777 | ||
778 | return status; | 778 | return status; |
779 | } | 779 | } |
780 | 780 | ||
781 | /** | 781 | /** |
782 | * sci_req_tx_bytes - bytes transferred when reply underruns request | 782 | * sci_req_tx_bytes - bytes transferred when reply underruns request |
783 | * @ireq: request that was terminated early | 783 | * @ireq: request that was terminated early |
784 | */ | 784 | */ |
785 | #define SCU_TASK_CONTEXT_SRAM 0x200000 | 785 | #define SCU_TASK_CONTEXT_SRAM 0x200000 |
786 | static u32 sci_req_tx_bytes(struct isci_request *ireq) | 786 | static u32 sci_req_tx_bytes(struct isci_request *ireq) |
787 | { | 787 | { |
788 | struct isci_host *ihost = ireq->owning_controller; | 788 | struct isci_host *ihost = ireq->owning_controller; |
789 | u32 ret_val = 0; | 789 | u32 ret_val = 0; |
790 | 790 | ||
791 | if (readl(&ihost->smu_registers->address_modifier) == 0) { | 791 | if (readl(&ihost->smu_registers->address_modifier) == 0) { |
792 | void __iomem *scu_reg_base = ihost->scu_registers; | 792 | void __iomem *scu_reg_base = ihost->scu_registers; |
793 | 793 | ||
794 | /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where | 794 | /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where |
795 | * BAR1 is the scu_registers | 795 | * BAR1 is the scu_registers |
796 | * 0x20002C = 0x200000 + 0x2c | 796 | * 0x20002C = 0x200000 + 0x2c |
797 | * = start of task context SRAM + offset of (type.ssp.data_offset) | 797 | * = start of task context SRAM + offset of (type.ssp.data_offset) |
798 | * TCi is the io_tag of struct sci_request | 798 | * TCi is the io_tag of struct sci_request |
799 | */ | 799 | */ |
800 | ret_val = readl(scu_reg_base + | 800 | ret_val = readl(scu_reg_base + |
801 | (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) + | 801 | (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) + |
802 | ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag))); | 802 | ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag))); |
803 | } | 803 | } |
804 | 804 | ||
805 | return ret_val; | 805 | return ret_val; |
806 | } | 806 | } |
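
A worked form of the SRAM address in the comment above, assuming the 256-byte task context stride implied by "256 * TCi"; pure arithmetic, no MMIO:

	#include <stdio.h>

	int main(void)
	{
		unsigned long sram_base = 0x200000;	/* TC SRAM offset inside BAR1 */
		unsigned long data_off = 0x2c;		/* type.ssp.data_offset */
		unsigned long tc_size = 256;		/* bytes per task context */

		/* TCi 0 prints BAR1 + 0x20002c, matching the comment */
		for (unsigned tci = 0; tci < 3; tci++)
			printf("TCi %u -> BAR1 + %#lx\n",
			       tci, sram_base + data_off + tc_size * tci);
		return 0;
	}
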
807 | 807 | ||
808 | enum sci_status sci_request_start(struct isci_request *ireq) | 808 | enum sci_status sci_request_start(struct isci_request *ireq) |
809 | { | 809 | { |
810 | enum sci_base_request_states state; | 810 | enum sci_base_request_states state; |
811 | struct scu_task_context *tc = ireq->tc; | 811 | struct scu_task_context *tc = ireq->tc; |
812 | struct isci_host *ihost = ireq->owning_controller; | 812 | struct isci_host *ihost = ireq->owning_controller; |
813 | 813 | ||
814 | state = ireq->sm.current_state_id; | 814 | state = ireq->sm.current_state_id; |
815 | if (state != SCI_REQ_CONSTRUCTED) { | 815 | if (state != SCI_REQ_CONSTRUCTED) { |
816 | dev_warn(&ihost->pdev->dev, | 816 | dev_warn(&ihost->pdev->dev, |
817 | "%s: SCIC IO Request requested to start while in wrong " | 817 | "%s: SCIC IO Request requested to start while in wrong " |
818 | "state %d\n", __func__, state); | 818 | "state %d\n", __func__, state); |
819 | return SCI_FAILURE_INVALID_STATE; | 819 | return SCI_FAILURE_INVALID_STATE; |
820 | } | 820 | } |
821 | 821 | ||
822 | tc->task_index = ISCI_TAG_TCI(ireq->io_tag); | 822 | tc->task_index = ISCI_TAG_TCI(ireq->io_tag); |
823 | 823 | ||
824 | switch (tc->protocol_type) { | 824 | switch (tc->protocol_type) { |
825 | case SCU_TASK_CONTEXT_PROTOCOL_SMP: | 825 | case SCU_TASK_CONTEXT_PROTOCOL_SMP: |
826 | case SCU_TASK_CONTEXT_PROTOCOL_SSP: | 826 | case SCU_TASK_CONTEXT_PROTOCOL_SSP: |
827 | /* SSP/SMP Frame */ | 827 | /* SSP/SMP Frame */ |
828 | tc->type.ssp.tag = ireq->io_tag; | 828 | tc->type.ssp.tag = ireq->io_tag; |
829 | tc->type.ssp.target_port_transfer_tag = 0xFFFF; | 829 | tc->type.ssp.target_port_transfer_tag = 0xFFFF; |
830 | break; | 830 | break; |
831 | 831 | ||
832 | case SCU_TASK_CONTEXT_PROTOCOL_STP: | 832 | case SCU_TASK_CONTEXT_PROTOCOL_STP: |
833 | /* STP/SATA Frame | 833 | /* STP/SATA Frame |
834 | * tc->type.stp.ncq_tag = ireq->ncq_tag; | 834 | * tc->type.stp.ncq_tag = ireq->ncq_tag; |
835 | */ | 835 | */ |
836 | break; | 836 | break; |
837 | 837 | ||
838 | case SCU_TASK_CONTEXT_PROTOCOL_NONE: | 838 | case SCU_TASK_CONTEXT_PROTOCOL_NONE: |
839 | /* @todo When do we set no protocol type? */ | 839 | /* @todo When do we set no protocol type? */ |
840 | break; | 840 | break; |
841 | 841 | ||
842 | default: | 842 | default: |
843 | /* This should never happen since we build the IO | 843 | /* This should never happen since we build the IO |
844 | * requests */ | 844 | * requests */ |
845 | break; | 845 | break; |
846 | } | 846 | } |
847 | 847 | ||
848 | /* Add to the post_context the io tag value */ | 848 | /* Add to the post_context the io tag value */ |
849 | ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag); | 849 | ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag); |
850 | 850 | ||
851 | /* Everything is good go ahead and change state */ | 851 | /* Everything is good go ahead and change state */ |
852 | sci_change_state(&ireq->sm, SCI_REQ_STARTED); | 852 | sci_change_state(&ireq->sm, SCI_REQ_STARTED); |
853 | 853 | ||
854 | return SCI_SUCCESS; | 854 | return SCI_SUCCESS; |
855 | } | 855 | } |
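
The post_context built in the constructors carries the command code and routing fields; start time only ORs in the task context index from the io_tag. A sketch of that composition, stressing that the shifts and field widths here are invented for illustration, not the SCU's actual layout:

	#include <stdio.h>
	#include <stdint.h>

	#define POST_TC		(0x1u << 28)	/* hypothetical command code */
	#define PEG_SHIFT	24		/* hypothetical field positions */
	#define PORT_SHIFT	16

	int main(void)
	{
		uint32_t post = POST_TC | (0u << PEG_SHIFT) | (1u << PORT_SHIFT);
		uint16_t io_tag = 0x1234;	/* pretend tag; low 12 bits = TCI */
		uint32_t tci = io_tag & 0xfff;

		post |= tci;			/* added when the request starts */
		printf("post_context = %#x\n", post);
		return 0;
	}
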
856 | 856 | ||
857 | enum sci_status | 857 | enum sci_status |
858 | sci_io_request_terminate(struct isci_request *ireq) | 858 | sci_io_request_terminate(struct isci_request *ireq) |
859 | { | 859 | { |
860 | enum sci_base_request_states state; | 860 | enum sci_base_request_states state; |
861 | 861 | ||
862 | state = ireq->sm.current_state_id; | 862 | state = ireq->sm.current_state_id; |
863 | 863 | ||
864 | switch (state) { | 864 | switch (state) { |
865 | case SCI_REQ_CONSTRUCTED: | 865 | case SCI_REQ_CONSTRUCTED: |
866 | /* Set to make sure no HW terminate posting is done: */ | 866 | /* Set to make sure no HW terminate posting is done: */ |
867 | set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags); | 867 | set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags); |
868 | ireq->scu_status = SCU_TASK_DONE_TASK_ABORT; | 868 | ireq->scu_status = SCU_TASK_DONE_TASK_ABORT; |
869 | ireq->sci_status = SCI_FAILURE_IO_TERMINATED; | 869 | ireq->sci_status = SCI_FAILURE_IO_TERMINATED; |
870 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 870 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
871 | return SCI_SUCCESS; | 871 | return SCI_SUCCESS; |
872 | case SCI_REQ_STARTED: | 872 | case SCI_REQ_STARTED: |
873 | case SCI_REQ_TASK_WAIT_TC_COMP: | 873 | case SCI_REQ_TASK_WAIT_TC_COMP: |
874 | case SCI_REQ_SMP_WAIT_RESP: | 874 | case SCI_REQ_SMP_WAIT_RESP: |
875 | case SCI_REQ_SMP_WAIT_TC_COMP: | 875 | case SCI_REQ_SMP_WAIT_TC_COMP: |
876 | case SCI_REQ_STP_UDMA_WAIT_TC_COMP: | 876 | case SCI_REQ_STP_UDMA_WAIT_TC_COMP: |
877 | case SCI_REQ_STP_UDMA_WAIT_D2H: | 877 | case SCI_REQ_STP_UDMA_WAIT_D2H: |
878 | case SCI_REQ_STP_NON_DATA_WAIT_H2D: | 878 | case SCI_REQ_STP_NON_DATA_WAIT_H2D: |
879 | case SCI_REQ_STP_NON_DATA_WAIT_D2H: | 879 | case SCI_REQ_STP_NON_DATA_WAIT_D2H: |
880 | case SCI_REQ_STP_PIO_WAIT_H2D: | 880 | case SCI_REQ_STP_PIO_WAIT_H2D: |
881 | case SCI_REQ_STP_PIO_WAIT_FRAME: | 881 | case SCI_REQ_STP_PIO_WAIT_FRAME: |
882 | case SCI_REQ_STP_PIO_DATA_IN: | 882 | case SCI_REQ_STP_PIO_DATA_IN: |
883 | case SCI_REQ_STP_PIO_DATA_OUT: | 883 | case SCI_REQ_STP_PIO_DATA_OUT: |
884 | case SCI_REQ_ATAPI_WAIT_H2D: | 884 | case SCI_REQ_ATAPI_WAIT_H2D: |
885 | case SCI_REQ_ATAPI_WAIT_PIO_SETUP: | 885 | case SCI_REQ_ATAPI_WAIT_PIO_SETUP: |
886 | case SCI_REQ_ATAPI_WAIT_D2H: | 886 | case SCI_REQ_ATAPI_WAIT_D2H: |
887 | case SCI_REQ_ATAPI_WAIT_TC_COMP: | 887 | case SCI_REQ_ATAPI_WAIT_TC_COMP: |
888 | /* Fall through and change state to ABORTING... */ | 888 | /* Fall through and change state to ABORTING... */ |
889 | case SCI_REQ_TASK_WAIT_TC_RESP: | 889 | case SCI_REQ_TASK_WAIT_TC_RESP: |
890 | /* The task frame was already confirmed to have been | 890 | /* The task frame was already confirmed to have been |
891 | * sent by the SCU HW. Since the state machine is | 891 | * sent by the SCU HW. Since the state machine is |
892 | * now only waiting for the task response itself, | 892 | * now only waiting for the task response itself, |
893 | * abort the request and complete it immediately | 893 | * abort the request and complete it immediately |
894 | * and don't wait for the task response. | 894 | * and don't wait for the task response. |
895 | */ | 895 | */ |
896 | sci_change_state(&ireq->sm, SCI_REQ_ABORTING); | 896 | sci_change_state(&ireq->sm, SCI_REQ_ABORTING); |
897 | /* Fall through and handle like ABORTING... */ | 897 | /* Fall through and handle like ABORTING... */ |
898 | case SCI_REQ_ABORTING: | 898 | case SCI_REQ_ABORTING: |
899 | if (!isci_remote_device_is_safe_to_abort(ireq->target_device)) | 899 | if (!isci_remote_device_is_safe_to_abort(ireq->target_device)) |
900 | set_bit(IREQ_PENDING_ABORT, &ireq->flags); | 900 | set_bit(IREQ_PENDING_ABORT, &ireq->flags); |
901 | else | 901 | else |
902 | clear_bit(IREQ_PENDING_ABORT, &ireq->flags); | 902 | clear_bit(IREQ_PENDING_ABORT, &ireq->flags); |
903 | /* If the request is only waiting on the remote device | 903 | /* If the request is only waiting on the remote device |
904 | * suspension, return SUCCESS so the caller will wait too. | 904 | * suspension, return SUCCESS so the caller will wait too. |
905 | */ | 905 | */ |
906 | return SCI_SUCCESS; | 906 | return SCI_SUCCESS; |
907 | case SCI_REQ_COMPLETED: | 907 | case SCI_REQ_COMPLETED: |
908 | default: | 908 | default: |
909 | dev_warn(&ireq->owning_controller->pdev->dev, | 909 | dev_warn(&ireq->owning_controller->pdev->dev, |
910 | "%s: SCIC IO Request requested to abort while in wrong " | 910 | "%s: SCIC IO Request requested to abort while in wrong " |
911 | "state %d\n", __func__, ireq->sm.current_state_id); | 911 | "state %d\n", __func__, ireq->sm.current_state_id); |
912 | break; | 912 | break; |
913 | } | 913 | } |
914 | 914 | ||
915 | return SCI_FAILURE_INVALID_STATE; | 915 | return SCI_FAILURE_INVALID_STATE; |
916 | } | 916 | } |
enum sci_status sci_request_complete(struct isci_request *ireq)
{
	enum sci_base_request_states state;
	struct isci_host *ihost = ireq->owning_controller;

	state = ireq->sm.current_state_id;
	if (WARN_ONCE(state != SCI_REQ_COMPLETED,
		      "isci: request completion from wrong state (%s)\n",
		      req_state_name(state)))
		return SCI_FAILURE_INVALID_STATE;

	if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
		sci_controller_release_frame(ihost,
					     ireq->saved_rx_frame_index);

	/* XXX can we just stop the machine and remove the 'final' state? */
	sci_change_state(&ireq->sm, SCI_REQ_FINAL);
	return SCI_SUCCESS;
}

enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
					     u32 event_code)
{
	enum sci_base_request_states state;
	struct isci_host *ihost = ireq->owning_controller;

	state = ireq->sm.current_state_id;

	if (state != SCI_REQ_STP_PIO_DATA_IN) {
		dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %s\n",
			 __func__, event_code, req_state_name(state));

		return SCI_FAILURE_INVALID_STATE;
	}

	switch (scu_get_event_specifier(event_code)) {
	case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
		/* We are waiting for data and the SCU has signaled R_ERR
		 * on the data frame. Go back to waiting for the D2H
		 * Register FIS.
		 */
		sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		return SCI_SUCCESS;
	default:
		dev_err(&ihost->pdev->dev,
			"%s: pio request unexpected event %#x\n",
			__func__, event_code);

		/* TODO: Should we fail the PIO request when we get an
		 * unexpected event?
		 */
		return SCI_FAILURE;
	}
}

/*
 * This function copies response data for requests returning response data
 * instead of sense data.
 * @ireq: This parameter specifies the request object for which to copy
 *    the response data.
 */
static void sci_io_request_copy_response(struct isci_request *ireq)
{
	void *resp_buf;
	u32 len;
	struct ssp_response_iu *ssp_response;
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	ssp_response = &ireq->ssp.rsp;

	resp_buf = &isci_tmf->resp.resp_iu;

	len = min_t(u32,
		    SSP_RESP_IU_MAX_SIZE,
		    be32_to_cpu(ssp_response->response_data_len));

	memcpy(resp_buf, ssp_response->resp_data, len);
}
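Editor's note: sci_io_request_copy_response() clamps the wire-reported response length to the IU maximum before copying, because response_data_len arrives big-endian from the target. A standalone sketch of the same clamp-then-copy pattern follows; the 280-byte maximum is a stand-in, not the driver's SSP_RESP_IU_MAX_SIZE.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RESP_IU_MAX 280u  /* stand-in maximum, not the real SSP_RESP_IU_MAX_SIZE */

/* Decode a big-endian 32-bit length as it appears on the wire. */
static uint32_t be32_decode(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

static uint32_t copy_response(uint8_t *dst, const uint8_t *resp_data,
			      const uint8_t be_len[4])
{
	uint32_t len = be32_decode(be_len);

	if (len > RESP_IU_MAX)  /* same effect as min_t(u32, ...) */
		len = RESP_IU_MAX;
	memcpy(dst, resp_data, len);
	return len;
}

int main(void)
{
	uint8_t dst[RESP_IU_MAX];
	uint8_t data[512] = { 0xab };
	uint8_t be_len[4] = { 0x00, 0x00, 0x02, 0x00 }; /* 512, will be clamped */

	printf("copied %u bytes\n", (unsigned)copy_response(dst, data, be_len));
	return 0;
}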
static enum sci_status
request_started_state_tc_event(struct isci_request *ireq,
			       u32 completion_code)
{
	struct ssp_response_iu *resp_iu;
	u8 datapres;

	/* TODO: Any SDMA return code other than 0 is bad; decode 0x003C0000
	 * to determine the SDMA status.
	 */
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
		/* There are times when the SCU hardware will return an early
		 * response because the io request specified more data than is
		 * returned by the target device (mode pages, inquiry data,
		 * etc.). We must check the response status to see if this is
		 * truly a failed request or a good request that just got
		 * completed early.
		 */
		struct ssp_response_iu *resp = &ireq->ssp.rsp;
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&ireq->ssp.rsp,
			       &ireq->ssp.rsp,
			       word_cnt);

		if (resp->status == 0) {
			ireq->scu_status = SCU_TASK_DONE_GOOD;
			ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
		} else {
			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		}
		break;
	}
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&ireq->ssp.rsp,
			       &ireq->ssp.rsp,
			       word_cnt);

		ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
		ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		break;
	}

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
		/* TODO: With TASK_DONE_RESP_LEN_ERR, is the response frame
		 * guaranteed to be received before this completion status is
		 * posted?
		 */
		resp_iu = &ireq->ssp.rsp;
		datapres = resp_iu->datapres;

		if (datapres == 1 || datapres == 2) {
			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		} else {
			ireq->scu_status = SCU_TASK_DONE_GOOD;
			ireq->sci_status = SCI_SUCCESS;
		}
		break;
	/* only the stp device gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
		if (ireq->protocol == SAS_PROTOCOL_STP) {
			ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
					   SCU_COMPLETION_TL_STATUS_SHIFT;
			ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
		} else {
			ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
					   SCU_COMPLETION_TL_STATUS_SHIFT;
			ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		}
		break;

	/* both stp and ssp devices get suspended */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
		ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				   SCU_COMPLETION_TL_STATUS_SHIFT;
		ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
		break;

	/* neither the ssp nor the stp device gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
	default:
		ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				   SCU_COMPLETION_TL_STATUS_SHIFT;
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		break;
	}

	/*
	 * TODO: This is probably wrong for ACK/NAK timeout conditions.
	 */

	/* In all cases we will treat this as the completion of the IO req. */
	sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
	return SCI_SUCCESS;
}
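Editor's note: the large switch above works because the transport-layer status is packed into a bit-field of the 32-bit completion code, and every case compares against the same shifted encoding. A standalone sketch of that mask/shift idiom follows; the field position and sample codes are invented for illustration (the real SCU_COMPLETION_TL_STATUS_SHIFT and friends live in the driver's headers).

#include <stdint.h>
#include <stdio.h>

/* Invented layout: TL status in bits 16..23 of the completion code. */
#define TL_STATUS_SHIFT 16
#define TL_STATUS_MASK  (0xffu << TL_STATUS_SHIFT)

#define MAKE_STATUS(s)   ((uint32_t)(s) << TL_STATUS_SHIFT)
#define GET_TL_STATUS(c) ((c) & TL_STATUS_MASK)

enum { TASK_DONE_GOOD = 0x00, TASK_DONE_CRC_ERR = 0x14 }; /* sample codes */

static const char *decode(uint32_t completion_code)
{
	switch (GET_TL_STATUS(completion_code)) {
	case MAKE_STATUS(TASK_DONE_GOOD):
		return "good";
	case MAKE_STATUS(TASK_DONE_CRC_ERR):
		return "crc error";
	default:
		/* the driver's fallback normalizes back to the raw status
		 * value with >> SHIFT; here we just classify it */
		return "other (raw status after shift)";
	}
}

int main(void)
{
	/* low bits carry other fields and do not disturb the decode */
	printf("%s\n", decode(MAKE_STATUS(TASK_DONE_GOOD) | 0x5));
	printf("%s\n", decode(MAKE_STATUS(TASK_DONE_CRC_ERR)));
	return 0;
}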
static enum sci_status
request_aborting_state_tc_event(struct isci_request *ireq,
				u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
	case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
		ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
		ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;

	default:
		/* Unless we get some strange error, wait for the task abort
		 * to complete.
		 * TODO: Should there be a state change for this completion?
		 */
		break;
	}

	return SCI_SUCCESS;
}

static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
						       u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
		/* Currently, the decision is to simply allow the task request
		 * to time out if the task IU wasn't received successfully.
		 * There is a potential for receiving multiple task responses
		 * if we decide to send the task IU again.
		 */
		dev_warn(&ireq->owning_controller->pdev->dev,
			 "%s: TaskRequest:0x%p CompletionCode:%x - "
			 "ACK/NAK timeout\n", __func__, ireq,
			 completion_code);

		sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
		break;
	default:
		/*
		 * All other completion statuses cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

static enum sci_status
smp_request_await_response_tc_event(struct isci_request *ireq,
				    u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		/* In the AWAIT RESPONSE state, any TC completion is
		 * unexpected, but if the TC has a success status, we
		 * complete the IO anyway.
		 */
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
		/* These statuses have been seen with a specific LSI
		 * expander, which sometimes is not able to send an smp
		 * response within 2 ms. This causes our hardware to break
		 * the connection and set the TC completion with one of
		 * these SMP_XXX_XX_ERR statuses. For this type of error,
		 * we ask the ihost user to retry the request.
		 */
		ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR;
		ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	default:
		/* All other completion statuses cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

static enum sci_status
smp_request_await_tc_event(struct isci_request *ireq,
			   u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	default:
		/* All other completion statuses cause the IO to be
		 * complete. If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req)
{
	struct scu_sgl_element *sgl;
	struct scu_sgl_element_pair *sgl_pair;
	struct isci_request *ireq = to_ireq(stp_req);
	struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl;

	sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
	if (!sgl_pair)
		sgl = NULL;
	else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) {
		if (sgl_pair->B.address_lower == 0 &&
		    sgl_pair->B.address_upper == 0) {
			sgl = NULL;
		} else {
			pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B;
			sgl = &sgl_pair->B;
		}
	} else {
		if (sgl_pair->next_pair_lower == 0 &&
		    sgl_pair->next_pair_upper == 0) {
			sgl = NULL;
		} else {
			pio_sgl->index++;
			pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A;
			sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
			sgl = &sgl_pair->A;
		}
	}

	return sgl;
}
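Editor's note: pio_sgl_next() advances through SGL element pairs: within a pair it moves from element A to B, and a zeroed B address or a zeroed next-pair link terminates the walk. Below is a standalone sketch of the same A/B advance over a plain array of pairs; the structs are simplified stand-ins for scu_sgl_element_pair, and as in the driver the walk returns the element after the current one.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct elem { uint32_t addr, len; };
struct pair { struct elem A, B; uint32_t next_lower, next_upper; };

struct cursor { size_t index; int use_b; }; /* current position */

/* Return the next element in A,B,A,B,... order, or NULL at the end. */
static struct elem *sgl_next(struct pair *pairs, size_t npairs, struct cursor *c)
{
	if (c->index >= npairs)
		return NULL;
	if (!c->use_b) {                       /* currently on A */
		struct pair *p = &pairs[c->index];
		if (p->B.addr == 0)
			return NULL;           /* zero B address terminates */
		c->use_b = 1;
		return &p->B;
	}
	if (pairs[c->index].next_lower == 0 && pairs[c->index].next_upper == 0)
		return NULL;                   /* zero link: no further pair */
	c->index++;
	c->use_b = 0;
	return &pairs[c->index].A;
}

int main(void)
{
	struct pair pairs[2] = {
		{ { 0x1000, 64 }, { 0x2000, 64 }, 1, 0 },
		{ { 0x3000, 64 }, { 0, 0 }, 0, 0 },
	};
	struct cursor c = { 0, 0 }; /* A of pair 0 is the current element */
	struct elem *e;

	while ((e = sgl_next(pairs, 2, &c)))
		printf("element at 0x%x, len %u\n",
		       (unsigned)e->addr, (unsigned)e->len);
	return 0;
}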
static enum sci_status
stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
					u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
		break;

	default:
		/* All other completion statuses cause the IO to be
		 * complete. If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

#define SCU_MAX_FRAME_BUFFER_SIZE  0x400  /* 1K is the maximum SCU frame data payload */

/* Transmit a DATA_FIS from (current sgl + offset) for the input
 * parameter length. The current sgl and offset are already stored in
 * the IO request.
 */
static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame(
	struct isci_request *ireq,
	u32 length)
{
	struct isci_stp_request *stp_req = &ireq->stp.req;
	struct scu_task_context *task_context = ireq->tc;
	struct scu_sgl_element_pair *sgl_pair;
	struct scu_sgl_element *current_sgl;

	/* Recycle the TC and reconstruct it for sending out a DATA FIS
	 * containing the data from current_sgl+offset for the input length.
	 */
	sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
	if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A)
		current_sgl = &sgl_pair->A;
	else
		current_sgl = &sgl_pair->B;

	/* update the TC */
	task_context->command_iu_upper = current_sgl->address_upper;
	task_context->command_iu_lower = current_sgl->address_lower;
	task_context->transfer_length_bytes = length;
	task_context->type.stp.fis_type = FIS_DATA;

	/* send the new TC out. */
	return sci_controller_continue_io(ireq);
}

static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
{
	struct isci_stp_request *stp_req = &ireq->stp.req;
	struct scu_sgl_element_pair *sgl_pair;
	enum sci_status status = SCI_SUCCESS;
	struct scu_sgl_element *sgl;
	u32 offset;
	u32 len = 0;

	offset = stp_req->sgl.offset;
	sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
	if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__))
		return SCI_FAILURE;

	if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) {
		sgl = &sgl_pair->A;
		len = sgl_pair->A.length - offset;
	} else {
		sgl = &sgl_pair->B;
		len = sgl_pair->B.length - offset;
	}

	if (stp_req->pio_len == 0)
		return SCI_SUCCESS;

	if (stp_req->pio_len >= len) {
		status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len);
		if (status != SCI_SUCCESS)
			return status;
		stp_req->pio_len -= len;

		/* update the current sgl, offset and save for future */
		sgl = pio_sgl_next(stp_req);
		offset = 0;
	} else if (stp_req->pio_len < len) {
		sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len);

		/* Sgl offset will be adjusted and saved for future */
		offset += stp_req->pio_len;
		sgl->address_lower += stp_req->pio_len;
		stp_req->pio_len = 0;
	}

	stp_req->sgl.offset = offset;

	return status;
}
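Editor's note: the transmit path above either sends the rest of the current SGL element (when more PIO data remains than the element holds) or just the remaining PIO bytes, saving the adjusted offset for the next pass. A standalone sketch of that split follows, with a stand-in send_chunk() in place of rebuilding the task context.

#include <stdio.h>
#include <stdint.h>

struct xfer { uint32_t elem_len, offset, pio_remaining; };

static void send_chunk(uint32_t elem_offset, uint32_t len)
{
	printf("send %u bytes at element offset %u\n",
	       (unsigned)len, (unsigned)elem_offset);
}

/* One pass of the transmit decision: either drain the element or
 * finish the request, mirroring the >= / < split in the driver. */
static void transmit_pass(struct xfer *x)
{
	uint32_t avail = x->elem_len - x->offset;

	if (x->pio_remaining >= avail) {
		send_chunk(x->offset, avail);            /* drain the element */
		x->pio_remaining -= avail;
		x->offset = 0;                           /* next element starts at 0 */
	} else {
		send_chunk(x->offset, x->pio_remaining); /* final partial chunk */
		x->offset += x->pio_remaining;           /* saved for the next FIS */
		x->pio_remaining = 0;
	}
}

int main(void)
{
	struct xfer x = { .elem_len = 1024, .offset = 0, .pio_remaining = 1500 };

	transmit_pass(&x); /* sends 1024 bytes, 476 left */
	transmit_pass(&x); /* sends 476 bytes into the next element */
	printf("remaining %u, saved offset %u\n",
	       (unsigned)x.pio_remaining, (unsigned)x.offset);
	return 0;
}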
/**
 *
 * @stp_req: The request that is used for the SGL processing.
 * @data_buf: The buffer of data to be copied.
 * @len: The length of the data transfer.
 *
 * Copy the data from the buffer for the specified length to the data
 * region specified by the IO request SGL. Returns an enum sci_status.
 */
static enum sci_status
sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
					     u8 *data_buf, u32 len)
{
	struct isci_request *ireq;
	u8 *src_addr;
	int copy_len;
	struct sas_task *task;
	struct scatterlist *sg;
	void *kaddr;
	int total_len = len;

	ireq = to_ireq(stp_req);
	task = isci_request_access_task(ireq);
	src_addr = data_buf;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		while (total_len > 0) {
			struct page *page = sg_page(sg);

			copy_len = min_t(int, total_len, sg_dma_len(sg));
			kaddr = kmap_atomic(page);
			memcpy(kaddr + sg->offset, src_addr, copy_len);
			kunmap_atomic(kaddr);
			total_len -= copy_len;
			src_addr += copy_len;
			sg = sg_next(sg);
		}
	} else {
		BUG_ON(task->total_xfer_len < total_len);
		memcpy(task->scatter, src_addr, total_len);
	}

	return SCI_SUCCESS;
}
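Editor's note: the copy helper walks the scatterlist one segment at a time, mapping each page and copying min(total_len, segment length) until the frame is drained. A standalone sketch of the same segment walk over plain buffers follows (no kmap, since page mapping is a kernel-only detail; the segment struct is a stand-in for struct scatterlist).

#include <stdio.h>
#include <string.h>
#include <stddef.h>

struct segment { char *buf; size_t len; };

/* Spread src across the segments, mirroring the min()/advance loop. */
static void copy_to_segments(struct segment *segs, size_t nsegs,
			     const char *src, size_t total_len)
{
	for (size_t i = 0; i < nsegs && total_len > 0; i++) {
		size_t copy_len = total_len < segs[i].len ? total_len : segs[i].len;

		memcpy(segs[i].buf, src, copy_len);
		src += copy_len;
		total_len -= copy_len;
	}
}

int main(void)
{
	char a[4] = { 0 }, b[8] = { 0 };
	struct segment segs[2] = { { a, sizeof(a) }, { b, sizeof(b) } };

	copy_to_segments(segs, 2, "hello world", 11);
	printf("%.4s|%.7s\n", a, b); /* prints "hell|o world" */
	return 0;
}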
/**
 *
 * @stp_req: The PIO DATA IN request that is to receive the data.
 * @data_buffer: The buffer to copy from.
 *
 * Copy the data buffer to the IO request data region. Returns an
 * enum sci_status.
 */
static enum sci_status sci_stp_request_pio_data_in_copy_data(
	struct isci_stp_request *stp_req,
	u8 *data_buffer)
{
	enum sci_status status;

	/*
	 * If there is less than 1K remaining in the transfer request
	 * copy just the data for the transfer */
	if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
		status = sci_stp_request_pio_data_in_copy_data_buffer(
			stp_req, data_buffer, stp_req->pio_len);

		if (status == SCI_SUCCESS)
			stp_req->pio_len = 0;
	} else {
		/* We are transferring the whole frame so copy */
		status = sci_stp_request_pio_data_in_copy_data_buffer(
			stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);

		if (status == SCI_SUCCESS)
			stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE;
	}

	return status;
}

static enum sci_status
stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
					      u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		break;

	default:
		/* All other completion statuses cause the IO to be
		 * complete. If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return status;
}

static enum sci_status
pio_data_out_tx_done_tc_event(struct isci_request *ireq,
			      u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;
	bool all_frames_transferred = false;
	struct isci_stp_request *stp_req = &ireq->stp.req;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		/* Transmit data */
		if (stp_req->pio_len != 0) {
			status = sci_stp_request_pio_data_out_transmit_data(ireq);
			if (status == SCI_SUCCESS) {
				if (stp_req->pio_len == 0)
					all_frames_transferred = true;
			}
		} else if (stp_req->pio_len == 0) {
			/*
			 * this will happen if all the data is written the
			 * first time after the PIO setup FIS is received
			 */
			all_frames_transferred = true;
		}

		/* all data transferred. */
		if (all_frames_transferred) {
			/*
			 * Change the state to SCI_REQ_STP_PIO_WAIT_FRAME
			 * and wait for a PIO_SETUP FIS or D2H Register FIS. */
			sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		}
		break;

	default:
		/*
		 * All other completion statuses cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return status;
}

static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq,
								  u32 frame_index)
{
	struct isci_host *ihost = ireq->owning_controller;
	struct dev_to_host_fis *frame_header;
	enum sci_status status;
	u32 *frame_buffer;

	status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
							  frame_index,
							  (void **)&frame_header);

	if ((status == SCI_SUCCESS) &&
	    (frame_header->fis_type == FIS_REGD2H)) {
		sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
							 frame_index,
							 (void **)&frame_buffer);

		sci_controller_copy_sata_response(&ireq->stp.rsp,
						  frame_header,
						  frame_buffer);
	}

	sci_controller_release_frame(ihost, frame_index);

	return status;
}

static enum sci_status process_unsolicited_fis(struct isci_request *ireq,
					       u32 frame_index)
{
	struct isci_host *ihost = ireq->owning_controller;
	enum sci_status status;
	struct dev_to_host_fis *frame_header;
	u32 *frame_buffer;

	status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
							  frame_index,
							  (void **)&frame_header);

	if (status != SCI_SUCCESS)
		return status;

	if (frame_header->fis_type != FIS_REGD2H) {
		dev_err(&ireq->isci_host->pdev->dev,
			"%s ERROR: invalid fis type 0x%X\n",
			__func__, frame_header->fis_type);
		return SCI_FAILURE;
	}

	sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
						 frame_index,
						 (void **)&frame_buffer);

	sci_controller_copy_sata_response(&ireq->stp.rsp,
					  (u32 *)frame_header,
					  frame_buffer);

	/* Frame has been decoded; return it to the controller */
	sci_controller_release_frame(ihost, frame_index);

	return status;
}

static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq,
						   u32 frame_index)
{
	struct sas_task *task = isci_request_access_task(ireq);
	enum sci_status status;

	status = process_unsolicited_fis(ireq, frame_index);

	if (status == SCI_SUCCESS) {
		if (ireq->stp.rsp.status & ATA_ERR)
			status = SCI_IO_FAILURE_RESPONSE_VALID;
	} else {
		status = SCI_IO_FAILURE_RESPONSE_VALID;
	}

	if (status != SCI_SUCCESS) {
		ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
		ireq->sci_status = status;
	} else {
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
	}

	/* the d2h unsolicited frame is the end of non-data commands */
	if (task->data_dir == DMA_NONE)
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);

	return status;
}
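Editor's note: the ATAPI D2H handler above collapses two failure sources into one "response valid" outcome: a framing failure from process_unsolicited_fis(), or the ATA error bit set in the returned status register. A standalone sketch of that decision follows; ATA_ERR_BIT is an invented stand-in (the real ATA_ERR comes from the kernel's ATA headers).

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define ATA_ERR_BIT 0x01 /* invented stand-in for ATA_ERR */

enum status { ST_GOOD, ST_RESPONSE_VALID };

static enum status classify_d2h(bool fis_ok, uint8_t ata_status)
{
	if (!fis_ok)
		return ST_RESPONSE_VALID;   /* the frame itself was bad */
	if (ata_status & ATA_ERR_BIT)
		return ST_RESPONSE_VALID;   /* device reported an error */
	return ST_GOOD;
}

int main(void)
{
	printf("%d %d %d\n",
	       classify_d2h(true, 0x00),        /* good */
	       classify_d2h(true, ATA_ERR_BIT), /* device error */
	       classify_d2h(false, 0x00));      /* framing error */
	return 0;
}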
1648 | 1648 | ||
1649 | static void scu_atapi_reconstruct_raw_frame_task_context(struct isci_request *ireq) | 1649 | static void scu_atapi_reconstruct_raw_frame_task_context(struct isci_request *ireq) |
1650 | { | 1650 | { |
1651 | struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev); | 1651 | struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev); |
1652 | void *atapi_cdb = ireq->ttype_ptr.io_task_ptr->ata_task.atapi_packet; | 1652 | void *atapi_cdb = ireq->ttype_ptr.io_task_ptr->ata_task.atapi_packet; |
1653 | struct scu_task_context *task_context = ireq->tc; | 1653 | struct scu_task_context *task_context = ireq->tc; |
1654 | 1654 | ||
1655 | /* fill in the SCU Task Context for a DATA fis containing CDB in Raw Frame | 1655 | /* fill in the SCU Task Context for a DATA fis containing CDB in Raw Frame |
1656 | * type. The TC for previous Packet fis was already there, we only need to | 1656 | * type. The TC for previous Packet fis was already there, we only need to |
1657 | * change the H2D fis content. | 1657 | * change the H2D fis content. |
1658 | */ | 1658 | */ |
1659 | memset(&ireq->stp.cmd, 0, sizeof(struct host_to_dev_fis)); | 1659 | memset(&ireq->stp.cmd, 0, sizeof(struct host_to_dev_fis)); |
1660 | memcpy(((u8 *)&ireq->stp.cmd + sizeof(u32)), atapi_cdb, ATAPI_CDB_LEN); | 1660 | memcpy(((u8 *)&ireq->stp.cmd + sizeof(u32)), atapi_cdb, ATAPI_CDB_LEN); |
1661 | memset(&(task_context->type.stp), 0, sizeof(struct stp_task_context)); | 1661 | memset(&(task_context->type.stp), 0, sizeof(struct stp_task_context)); |
1662 | task_context->type.stp.fis_type = FIS_DATA; | 1662 | task_context->type.stp.fis_type = FIS_DATA; |
1663 | task_context->transfer_length_bytes = dev->cdb_len; | 1663 | task_context->transfer_length_bytes = dev->cdb_len; |
1664 | } | 1664 | } |
1665 | 1665 | ||
1666 | static void scu_atapi_construct_task_context(struct isci_request *ireq) | 1666 | static void scu_atapi_construct_task_context(struct isci_request *ireq) |
1667 | { | 1667 | { |
1668 | struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev); | 1668 | struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev); |
1669 | struct sas_task *task = isci_request_access_task(ireq); | 1669 | struct sas_task *task = isci_request_access_task(ireq); |
1670 | struct scu_task_context *task_context = ireq->tc; | 1670 | struct scu_task_context *task_context = ireq->tc; |
1671 | int cdb_len = dev->cdb_len; | 1671 | int cdb_len = dev->cdb_len; |
1672 | 1672 | ||
1673 | /* reference: SSTL 1.13.4.2 | 1673 | /* reference: SSTL 1.13.4.2 |
1674 | * task_type, sata_direction | 1674 | * task_type, sata_direction |
1675 | */ | 1675 | */ |
1676 | if (task->data_dir == DMA_TO_DEVICE) { | 1676 | if (task->data_dir == DMA_TO_DEVICE) { |
1677 | task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_OUT; | 1677 | task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_OUT; |
1678 | task_context->sata_direction = 0; | 1678 | task_context->sata_direction = 0; |
1679 | } else { | 1679 | } else { |
1680 | /* todo: for NO_DATA command, we need to send out raw frame. */ | 1680 | /* todo: for NO_DATA command, we need to send out raw frame. */ |
1681 | task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_IN; | 1681 | task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_IN; |
1682 | task_context->sata_direction = 1; | 1682 | task_context->sata_direction = 1; |
1683 | } | 1683 | } |
1684 | 1684 | ||
1685 | memset(&task_context->type.stp, 0, sizeof(task_context->type.stp)); | 1685 | memset(&task_context->type.stp, 0, sizeof(task_context->type.stp)); |
1686 | task_context->type.stp.fis_type = FIS_DATA; | 1686 | task_context->type.stp.fis_type = FIS_DATA; |
1687 | 1687 | ||
1688 | memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); | 1688 | memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); |
1689 | memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len); | 1689 | memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len); |
1690 | task_context->ssp_command_iu_length = cdb_len / sizeof(u32); | 1690 | task_context->ssp_command_iu_length = cdb_len / sizeof(u32); |
1691 | 1691 | ||
1692 | /* task phase is set to TX_CMD */ | 1692 | /* task phase is set to TX_CMD */ |
1693 | task_context->task_phase = 0x1; | 1693 | task_context->task_phase = 0x1; |
1694 | 1694 | ||
1695 | /* retry counter */ | 1695 | /* retry counter */ |
1696 | task_context->stp_retry_count = 0; | 1696 | task_context->stp_retry_count = 0; |
1697 | 1697 | ||
1698 | /* data transfer size. */ | 1698 | /* data transfer size. */ |
1699 | task_context->transfer_length_bytes = task->total_xfer_len; | 1699 | task_context->transfer_length_bytes = task->total_xfer_len; |
1700 | 1700 | ||
1701 | /* setup sgl */ | 1701 | /* setup sgl */ |
1702 | sci_request_build_sgl(ireq); | 1702 | sci_request_build_sgl(ireq); |
1703 | } | 1703 | } |
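
Two details above are easy to miss: the task type and direction flag travel together, and ssp_command_iu_length is expressed in dwords, so a 12-byte ATAPI CDB becomes 3 and a 16-byte one becomes 4. A hedged sketch of both computations (the enum values are placeholders, not the SCU hardware encodings):

#include <stdint.h>
#include <stdio.h>

enum pkt_task_type { PACKET_DMA_OUT, PACKET_DMA_IN }; /* placeholders */

struct tc_sketch {
        enum pkt_task_type task_type;
        int sata_direction;     /* 0 = host-to-device, 1 = device-to-host */
        unsigned cmd_iu_dwords;
};

static struct tc_sketch build_tc(int to_device, unsigned cdb_len)
{
        struct tc_sketch tc;

        if (to_device) {
                tc.task_type = PACKET_DMA_OUT;
                tc.sata_direction = 0;
        } else {
                tc.task_type = PACKET_DMA_IN;
                tc.sata_direction = 1;
        }
        tc.cmd_iu_dwords = cdb_len / sizeof(uint32_t);
        return tc;
}

int main(void)
{
        struct tc_sketch tc = build_tc(0, 12);

        printf("12-byte CDB -> %u dwords, direction %d\n",
               tc.cmd_iu_dwords, tc.sata_direction);
        return 0;
}
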
1704 | 1704 | ||
1705 | enum sci_status | 1705 | enum sci_status |
1706 | sci_io_request_frame_handler(struct isci_request *ireq, | 1706 | sci_io_request_frame_handler(struct isci_request *ireq, |
1707 | u32 frame_index) | 1707 | u32 frame_index) |
1708 | { | 1708 | { |
1709 | struct isci_host *ihost = ireq->owning_controller; | 1709 | struct isci_host *ihost = ireq->owning_controller; |
1710 | struct isci_stp_request *stp_req = &ireq->stp.req; | 1710 | struct isci_stp_request *stp_req = &ireq->stp.req; |
1711 | enum sci_base_request_states state; | 1711 | enum sci_base_request_states state; |
1712 | enum sci_status status; | 1712 | enum sci_status status; |
1713 | ssize_t word_cnt; | 1713 | ssize_t word_cnt; |
1714 | 1714 | ||
1715 | state = ireq->sm.current_state_id; | 1715 | state = ireq->sm.current_state_id; |
1716 | switch (state) { | 1716 | switch (state) { |
1717 | case SCI_REQ_STARTED: { | 1717 | case SCI_REQ_STARTED: { |
1718 | struct ssp_frame_hdr ssp_hdr; | 1718 | struct ssp_frame_hdr ssp_hdr; |
1719 | void *frame_header; | 1719 | void *frame_header; |
1720 | 1720 | ||
1721 | sci_unsolicited_frame_control_get_header(&ihost->uf_control, | 1721 | sci_unsolicited_frame_control_get_header(&ihost->uf_control, |
1722 | frame_index, | 1722 | frame_index, |
1723 | &frame_header); | 1723 | &frame_header); |
1724 | 1724 | ||
1725 | word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32); | 1725 | word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32); |
1726 | sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt); | 1726 | sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt); |
1727 | 1727 | ||
1728 | if (ssp_hdr.frame_type == SSP_RESPONSE) { | 1728 | if (ssp_hdr.frame_type == SSP_RESPONSE) { |
1729 | struct ssp_response_iu *resp_iu; | 1729 | struct ssp_response_iu *resp_iu; |
1730 | ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); | 1730 | ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); |
1731 | 1731 | ||
1732 | sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, | 1732 | sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, |
1733 | frame_index, | 1733 | frame_index, |
1734 | (void **)&resp_iu); | 1734 | (void **)&resp_iu); |
1735 | 1735 | ||
1736 | sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt); | 1736 | sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt); |
1737 | 1737 | ||
1738 | resp_iu = &ireq->ssp.rsp; | 1738 | resp_iu = &ireq->ssp.rsp; |
1739 | 1739 | ||
1740 | if (resp_iu->datapres == 0x01 || | 1740 | if (resp_iu->datapres == 0x01 || |
1741 | resp_iu->datapres == 0x02) { | 1741 | resp_iu->datapres == 0x02) { |
1742 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; | 1742 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; |
1743 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; | 1743 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; |
1744 | } else { | 1744 | } else { |
1745 | ireq->scu_status = SCU_TASK_DONE_GOOD; | 1745 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
1746 | ireq->sci_status = SCI_SUCCESS; | 1746 | ireq->sci_status = SCI_SUCCESS; |
1747 | } | 1747 | } |
1748 | } else { | 1748 | } else { |
1749 | /* not a response frame, why did it get forwarded? */ | 1749 | /* not a response frame, why did it get forwarded? */ |
1750 | dev_err(&ihost->pdev->dev, | 1750 | dev_err(&ihost->pdev->dev, |
1751 | "%s: SCIC IO Request 0x%p received unexpected " | 1751 | "%s: SCIC IO Request 0x%p received unexpected " |
1752 | "frame %d type 0x%02x\n", __func__, ireq, | 1752 | "frame %d type 0x%02x\n", __func__, ireq, |
1753 | frame_index, ssp_hdr.frame_type); | 1753 | frame_index, ssp_hdr.frame_type); |
1754 | } | 1754 | } |
1755 | 1755 | ||
1756 | /* | 1756 | /* |
1757 | * In any case we are done with this frame buffer; return it to | 1757 | * In any case we are done with this frame buffer; return it to |
1758 | * the controller | 1758 | * the controller |
1759 | */ | 1759 | */ |
1760 | sci_controller_release_frame(ihost, frame_index); | 1760 | sci_controller_release_frame(ihost, frame_index); |
1761 | 1761 | ||
1762 | return SCI_SUCCESS; | 1762 | return SCI_SUCCESS; |
1763 | } | 1763 | } |
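
The datapres test above follows the SSP response IU format: 0x01 means response data is present, 0x02 means sense data is present, and either implies the command did not complete cleanly, hence the CHECK_RESPONSE downgrade. A tiny standalone model of that predicate:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DATAPRES_NONE     0x00
#define DATAPRES_RESPONSE 0x01 /* response data present */
#define DATAPRES_SENSE    0x02 /* sense data present */

/* mirrors the branch above: any present data forces a response check */
static bool ssp_needs_response_check(uint8_t datapres)
{
        return datapres == DATAPRES_RESPONSE || datapres == DATAPRES_SENSE;
}

int main(void)
{
        printf("datapres 0x01 -> check: %d\n",
               ssp_needs_response_check(DATAPRES_RESPONSE));
        printf("datapres 0x00 -> check: %d\n",
               ssp_needs_response_check(DATAPRES_NONE));
        return 0;
}
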
1764 | 1764 | ||
1765 | case SCI_REQ_TASK_WAIT_TC_RESP: | 1765 | case SCI_REQ_TASK_WAIT_TC_RESP: |
1766 | sci_io_request_copy_response(ireq); | 1766 | sci_io_request_copy_response(ireq); |
1767 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1767 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
1768 | sci_controller_release_frame(ihost, frame_index); | 1768 | sci_controller_release_frame(ihost, frame_index); |
1769 | return SCI_SUCCESS; | 1769 | return SCI_SUCCESS; |
1770 | 1770 | ||
1771 | case SCI_REQ_SMP_WAIT_RESP: { | 1771 | case SCI_REQ_SMP_WAIT_RESP: { |
1772 | struct sas_task *task = isci_request_access_task(ireq); | 1772 | struct sas_task *task = isci_request_access_task(ireq); |
1773 | struct scatterlist *sg = &task->smp_task.smp_resp; | 1773 | struct scatterlist *sg = &task->smp_task.smp_resp; |
1774 | void *frame_header, *kaddr; | 1774 | void *frame_header, *kaddr; |
1775 | u8 *rsp; | 1775 | u8 *rsp; |
1776 | 1776 | ||
1777 | sci_unsolicited_frame_control_get_header(&ihost->uf_control, | 1777 | sci_unsolicited_frame_control_get_header(&ihost->uf_control, |
1778 | frame_index, | 1778 | frame_index, |
1779 | &frame_header); | 1779 | &frame_header); |
1780 | kaddr = kmap_atomic(sg_page(sg)); | 1780 | kaddr = kmap_atomic(sg_page(sg)); |
1781 | rsp = kaddr + sg->offset; | 1781 | rsp = kaddr + sg->offset; |
1782 | sci_swab32_cpy(rsp, frame_header, 1); | 1782 | sci_swab32_cpy(rsp, frame_header, 1); |
1783 | 1783 | ||
1784 | if (rsp[0] == SMP_RESPONSE) { | 1784 | if (rsp[0] == SMP_RESPONSE) { |
1785 | void *smp_resp; | 1785 | void *smp_resp; |
1786 | 1786 | ||
1787 | sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, | 1787 | sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, |
1788 | frame_index, | 1788 | frame_index, |
1789 | &smp_resp); | 1789 | &smp_resp); |
1790 | 1790 | ||
1791 | word_cnt = (sg->length/4)-1; | 1791 | word_cnt = (sg->length/4)-1; |
1792 | if (word_cnt > 0) | 1792 | if (word_cnt > 0) |
1793 | word_cnt = min_t(unsigned int, word_cnt, | 1793 | word_cnt = min_t(unsigned int, word_cnt, |
1794 | SCU_UNSOLICITED_FRAME_BUFFER_SIZE/4); | 1794 | SCU_UNSOLICITED_FRAME_BUFFER_SIZE/4); |
1795 | sci_swab32_cpy(rsp + 4, smp_resp, word_cnt); | 1795 | sci_swab32_cpy(rsp + 4, smp_resp, word_cnt); |
1796 | 1796 | ||
1797 | ireq->scu_status = SCU_TASK_DONE_GOOD; | 1797 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
1798 | ireq->sci_status = SCI_SUCCESS; | 1798 | ireq->sci_status = SCI_SUCCESS; |
1799 | sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP); | 1799 | sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP); |
1800 | } else { | 1800 | } else { |
1801 | /* | 1801 | /* |
1802 | * This was not a response frame; why did it get | 1802 | * This was not a response frame; why did it get |
1803 | * forwarded? | 1803 | * forwarded? |
1804 | */ | 1804 | */ |
1805 | dev_err(&ihost->pdev->dev, | 1805 | dev_err(&ihost->pdev->dev, |
1806 | "%s: SCIC SMP Request 0x%p received unexpected " | 1806 | "%s: SCIC SMP Request 0x%p received unexpected " |
1807 | "frame %d type 0x%02x\n", | 1807 | "frame %d type 0x%02x\n", |
1808 | __func__, | 1808 | __func__, |
1809 | ireq, | 1809 | ireq, |
1810 | frame_index, | 1810 | frame_index, |
1811 | rsp[0]); | 1811 | rsp[0]); |
1812 | 1812 | ||
1813 | ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR; | 1813 | ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR; |
1814 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; | 1814 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; |
1815 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1815 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
1816 | } | 1816 | } |
1817 | kunmap_atomic(kaddr); | 1817 | kunmap_atomic(kaddr); |
1818 | 1818 | ||
1819 | sci_controller_release_frame(ihost, frame_index); | 1819 | sci_controller_release_frame(ihost, frame_index); |
1820 | 1820 | ||
1821 | return SCI_SUCCESS; | 1821 | return SCI_SUCCESS; |
1822 | } | 1822 | } |
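
The copy above skips the header dword it already swabbed into rsp[0..3]: word_cnt starts at (sg->length / 4) - 1 and, when positive, is clamped to the unsolicited-frame buffer size. A worked standalone example; the 1 KB buffer size here is an assumption standing in for SCU_UNSOLICITED_FRAME_BUFFER_SIZE:

#include <stdio.h>

#define FRAME_BUF_SIZE 1024 /* assumed; see SCU_UNSOLICITED_FRAME_BUFFER_SIZE */

static int smp_resp_word_cnt(unsigned sg_len)
{
        int word_cnt = (int)(sg_len / 4) - 1; /* header dword already copied */

        if (word_cnt > 0 && word_cnt > FRAME_BUF_SIZE / 4)
                word_cnt = FRAME_BUF_SIZE / 4; /* the min_t() clamp above */
        return word_cnt;
}

int main(void)
{
        printf("sg_len 60   -> %d dwords\n", smp_resp_word_cnt(60));
        printf("sg_len 8192 -> %d dwords (clamped)\n", smp_resp_word_cnt(8192));
        return 0;
}
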
1823 | 1823 | ||
1824 | case SCI_REQ_STP_UDMA_WAIT_TC_COMP: | 1824 | case SCI_REQ_STP_UDMA_WAIT_TC_COMP: |
1825 | return sci_stp_request_udma_general_frame_handler(ireq, | 1825 | return sci_stp_request_udma_general_frame_handler(ireq, |
1826 | frame_index); | 1826 | frame_index); |
1827 | 1827 | ||
1828 | case SCI_REQ_STP_UDMA_WAIT_D2H: | 1828 | case SCI_REQ_STP_UDMA_WAIT_D2H: |
1829 | /* Use the general frame handler to copy the response data */ | 1829 | /* Use the general frame handler to copy the response data */ |
1830 | status = sci_stp_request_udma_general_frame_handler(ireq, frame_index); | 1830 | status = sci_stp_request_udma_general_frame_handler(ireq, frame_index); |
1831 | 1831 | ||
1832 | if (status != SCI_SUCCESS) | 1832 | if (status != SCI_SUCCESS) |
1833 | return status; | 1833 | return status; |
1834 | 1834 | ||
1835 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; | 1835 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; |
1836 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; | 1836 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; |
1837 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1837 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
1838 | return SCI_SUCCESS; | 1838 | return SCI_SUCCESS; |
1839 | 1839 | ||
1840 | case SCI_REQ_STP_NON_DATA_WAIT_D2H: { | 1840 | case SCI_REQ_STP_NON_DATA_WAIT_D2H: { |
1841 | struct dev_to_host_fis *frame_header; | 1841 | struct dev_to_host_fis *frame_header; |
1842 | u32 *frame_buffer; | 1842 | u32 *frame_buffer; |
1843 | 1843 | ||
1844 | status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, | 1844 | status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, |
1845 | frame_index, | 1845 | frame_index, |
1846 | (void **)&frame_header); | 1846 | (void **)&frame_header); |
1847 | 1847 | ||
1848 | if (status != SCI_SUCCESS) { | 1848 | if (status != SCI_SUCCESS) { |
1849 | dev_err(&ihost->pdev->dev, | 1849 | dev_err(&ihost->pdev->dev, |
1850 | "%s: SCIC IO Request 0x%p could not get frame " | 1850 | "%s: SCIC IO Request 0x%p could not get frame " |
1851 | "header for frame index %d, status %x\n", | 1851 | "header for frame index %d, status %x\n", |
1852 | __func__, | 1852 | __func__, |
1853 | stp_req, | 1853 | stp_req, |
1854 | frame_index, | 1854 | frame_index, |
1855 | status); | 1855 | status); |
1856 | 1856 | ||
1857 | return status; | 1857 | return status; |
1858 | } | 1858 | } |
1859 | 1859 | ||
1860 | switch (frame_header->fis_type) { | 1860 | switch (frame_header->fis_type) { |
1861 | case FIS_REGD2H: | 1861 | case FIS_REGD2H: |
1862 | sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, | 1862 | sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, |
1863 | frame_index, | 1863 | frame_index, |
1864 | (void **)&frame_buffer); | 1864 | (void **)&frame_buffer); |
1865 | 1865 | ||
1866 | sci_controller_copy_sata_response(&ireq->stp.rsp, | 1866 | sci_controller_copy_sata_response(&ireq->stp.rsp, |
1867 | frame_header, | 1867 | frame_header, |
1868 | frame_buffer); | 1868 | frame_buffer); |
1869 | 1869 | ||
1870 | /* The command has completed with error */ | 1870 | /* The command has completed with error */ |
1871 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; | 1871 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; |
1872 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; | 1872 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; |
1873 | break; | 1873 | break; |
1874 | 1874 | ||
1875 | default: | 1875 | default: |
1876 | dev_warn(&ihost->pdev->dev, | 1876 | dev_warn(&ihost->pdev->dev, |
1877 | "%s: IO Request:0x%p Frame Id:%d protocol " | 1877 | "%s: IO Request:0x%p Frame Id:%d protocol " |
1878 | "violation occurred\n", __func__, stp_req, | 1878 | "violation occurred\n", __func__, stp_req, |
1879 | frame_index); | 1879 | frame_index); |
1880 | 1880 | ||
1881 | ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS; | 1881 | ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS; |
1882 | ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION; | 1882 | ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION; |
1883 | break; | 1883 | break; |
1884 | } | 1884 | } |
1885 | 1885 | ||
1886 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1886 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
1887 | 1887 | ||
1888 | /* Frame has been decoded; return it to the controller */ | 1888 | /* Frame has been decoded; return it to the controller */ |
1889 | sci_controller_release_frame(ihost, frame_index); | 1889 | sci_controller_release_frame(ihost, frame_index); |
1890 | 1890 | ||
1891 | return status; | 1891 | return status; |
1892 | } | 1892 | } |
1893 | 1893 | ||
1894 | case SCI_REQ_STP_PIO_WAIT_FRAME: { | 1894 | case SCI_REQ_STP_PIO_WAIT_FRAME: { |
1895 | struct sas_task *task = isci_request_access_task(ireq); | 1895 | struct sas_task *task = isci_request_access_task(ireq); |
1896 | struct dev_to_host_fis *frame_header; | 1896 | struct dev_to_host_fis *frame_header; |
1897 | u32 *frame_buffer; | 1897 | u32 *frame_buffer; |
1898 | 1898 | ||
1899 | status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, | 1899 | status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, |
1900 | frame_index, | 1900 | frame_index, |
1901 | (void **)&frame_header); | 1901 | (void **)&frame_header); |
1902 | 1902 | ||
1903 | if (status != SCI_SUCCESS) { | 1903 | if (status != SCI_SUCCESS) { |
1904 | dev_err(&ihost->pdev->dev, | 1904 | dev_err(&ihost->pdev->dev, |
1905 | "%s: SCIC IO Request 0x%p could not get frame " | 1905 | "%s: SCIC IO Request 0x%p could not get frame " |
1906 | "header for frame index %d, status %x\n", | 1906 | "header for frame index %d, status %x\n", |
1907 | __func__, stp_req, frame_index, status); | 1907 | __func__, stp_req, frame_index, status); |
1908 | return status; | 1908 | return status; |
1909 | } | 1909 | } |
1910 | 1910 | ||
1911 | switch (frame_header->fis_type) { | 1911 | switch (frame_header->fis_type) { |
1912 | case FIS_PIO_SETUP: | 1912 | case FIS_PIO_SETUP: |
1913 | /* Get from the frame buffer the PIO Setup Data */ | 1913 | /* Get from the frame buffer the PIO Setup Data */ |
1914 | sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, | 1914 | sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, |
1915 | frame_index, | 1915 | frame_index, |
1916 | (void **)&frame_buffer); | 1916 | (void **)&frame_buffer); |
1917 | 1917 | ||
1918 | /* Get the data from the PIO Setup. The SCU hardware | 1918 | /* Get the data from the PIO Setup. The SCU hardware |
1919 | * returns the first word in the frame_header and the rest | 1919 | * returns the first word in the frame_header and the rest |
1920 | * of the data is in the frame buffer, so we need to | 1920 | * of the data is in the frame buffer, so we need to |
1921 | * back up one dword | 1921 | * back up one dword |
1922 | */ | 1922 | */ |
1923 | 1923 | ||
1924 | /* transfer_count: first 16 bits in the 4th dword */ | 1924 | /* transfer_count: first 16 bits in the 4th dword */ |
1925 | stp_req->pio_len = frame_buffer[3] & 0xffff; | 1925 | stp_req->pio_len = frame_buffer[3] & 0xffff; |
1926 | 1926 | ||
1927 | /* status: 4th byte in the 3rd dword */ | 1927 | /* status: 4th byte in the 3rd dword */ |
1928 | stp_req->status = (frame_buffer[2] >> 24) & 0xff; | 1928 | stp_req->status = (frame_buffer[2] >> 24) & 0xff; |
1929 | 1929 | ||
1930 | sci_controller_copy_sata_response(&ireq->stp.rsp, | 1930 | sci_controller_copy_sata_response(&ireq->stp.rsp, |
1931 | frame_header, | 1931 | frame_header, |
1932 | frame_buffer); | 1932 | frame_buffer); |
1933 | 1933 | ||
1934 | ireq->stp.rsp.status = stp_req->status; | 1934 | ireq->stp.rsp.status = stp_req->status; |
1935 | 1935 | ||
1936 | /* The next state is dependent on whether the | 1936 | /* The next state is dependent on whether the |
1937 | * request was PIO Data-in or Data-out | 1937 | * request was PIO Data-in or Data-out |
1938 | */ | 1938 | */ |
1939 | if (task->data_dir == DMA_FROM_DEVICE) { | 1939 | if (task->data_dir == DMA_FROM_DEVICE) { |
1940 | sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN); | 1940 | sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN); |
1941 | } else if (task->data_dir == DMA_TO_DEVICE) { | 1941 | } else if (task->data_dir == DMA_TO_DEVICE) { |
1942 | /* Transmit data */ | 1942 | /* Transmit data */ |
1943 | status = sci_stp_request_pio_data_out_transmit_data(ireq); | 1943 | status = sci_stp_request_pio_data_out_transmit_data(ireq); |
1944 | if (status != SCI_SUCCESS) | 1944 | if (status != SCI_SUCCESS) |
1945 | break; | 1945 | break; |
1946 | sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT); | 1946 | sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT); |
1947 | } | 1947 | } |
1948 | break; | 1948 | break; |
1949 | 1949 | ||
1950 | case FIS_SETDEVBITS: | 1950 | case FIS_SETDEVBITS: |
1951 | sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); | 1951 | sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); |
1952 | break; | 1952 | break; |
1953 | 1953 | ||
1954 | case FIS_REGD2H: | 1954 | case FIS_REGD2H: |
1955 | if (frame_header->status & ATA_BUSY) { | 1955 | if (frame_header->status & ATA_BUSY) { |
1956 | /* | 1956 | /* |
1957 | * Now why is the drive sending a D2H Register | 1957 | * Now why is the drive sending a D2H Register |
1958 | * FIS when it is still busy? Do nothing since | 1958 | * FIS when it is still busy? Do nothing since |
1959 | * we are still in the right state. | 1959 | * we are still in the right state. |
1960 | */ | 1960 | */ |
1961 | dev_dbg(&ihost->pdev->dev, | 1961 | dev_dbg(&ihost->pdev->dev, |
1962 | "%s: SCIC PIO Request 0x%p received " | 1962 | "%s: SCIC PIO Request 0x%p received " |
1963 | "D2H Register FIS with BSY status " | 1963 | "D2H Register FIS with BSY status " |
1964 | "0x%x\n", | 1964 | "0x%x\n", |
1965 | __func__, | 1965 | __func__, |
1966 | stp_req, | 1966 | stp_req, |
1967 | frame_header->status); | 1967 | frame_header->status); |
1968 | break; | 1968 | break; |
1969 | } | 1969 | } |
1970 | 1970 | ||
1971 | sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, | 1971 | sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, |
1972 | frame_index, | 1972 | frame_index, |
1973 | (void **)&frame_buffer); | 1973 | (void **)&frame_buffer); |
1974 | 1974 | ||
1975 | sci_controller_copy_sata_response(&ireq->stp.rsp, | 1975 | sci_controller_copy_sata_response(&ireq->stp.rsp, |
1976 | frame_header, | 1976 | frame_header, |
1977 | frame_buffer); | 1977 | frame_buffer); |
1978 | 1978 | ||
1979 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; | 1979 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; |
1980 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; | 1980 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; |
1981 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 1981 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
1982 | break; | 1982 | break; |
1983 | 1983 | ||
1984 | default: | 1984 | default: |
1985 | /* FIXME: what do we do here? */ | 1985 | /* FIXME: what do we do here? */ |
1986 | break; | 1986 | break; |
1987 | } | 1987 | } |
1988 | 1988 | ||
1989 | /* Frame is decoded; return it to the controller */ | 1989 | /* Frame is decoded; return it to the controller */ |
1990 | sci_controller_release_frame(ihost, frame_index); | 1990 | sci_controller_release_frame(ihost, frame_index); |
1991 | 1991 | ||
1992 | return status; | 1992 | return status; |
1993 | } | 1993 | } |
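
The FIS_PIO_SETUP decode above pulls two fields straight out of raw dwords: the transfer count from the low 16 bits of the 4th dword and the status byte from the top byte of the 3rd dword (the hardware keeps the FIS's first dword in the frame header, which is why the buffer indices look shifted by one). A standalone sketch of the same extraction with made-up sample data:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* frame_buffer[] as the driver sees it: FIS dwords 1..N */
        uint32_t frame_buffer[4] = { 0, 0, 0x50000000, 0x00000200 };

        unsigned pio_len = frame_buffer[3] & 0xffff;       /* transfer count */
        unsigned status  = (frame_buffer[2] >> 24) & 0xff; /* ATA status    */

        printf("transfer %u bytes, status 0x%02x\n", pio_len, status);
        return 0;
}
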
1994 | 1994 | ||
1995 | case SCI_REQ_STP_PIO_DATA_IN: { | 1995 | case SCI_REQ_STP_PIO_DATA_IN: { |
1996 | struct dev_to_host_fis *frame_header; | 1996 | struct dev_to_host_fis *frame_header; |
1997 | struct sata_fis_data *frame_buffer; | 1997 | struct sata_fis_data *frame_buffer; |
1998 | 1998 | ||
1999 | status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, | 1999 | status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, |
2000 | frame_index, | 2000 | frame_index, |
2001 | (void **)&frame_header); | 2001 | (void **)&frame_header); |
2002 | 2002 | ||
2003 | if (status != SCI_SUCCESS) { | 2003 | if (status != SCI_SUCCESS) { |
2004 | dev_err(&ihost->pdev->dev, | 2004 | dev_err(&ihost->pdev->dev, |
2005 | "%s: SCIC IO Request 0x%p could not get frame " | 2005 | "%s: SCIC IO Request 0x%p could not get frame " |
2006 | "header for frame index %d, status %x\n", | 2006 | "header for frame index %d, status %x\n", |
2007 | __func__, | 2007 | __func__, |
2008 | stp_req, | 2008 | stp_req, |
2009 | frame_index, | 2009 | frame_index, |
2010 | status); | 2010 | status); |
2011 | return status; | 2011 | return status; |
2012 | } | 2012 | } |
2013 | 2013 | ||
2014 | if (frame_header->fis_type != FIS_DATA) { | 2014 | if (frame_header->fis_type != FIS_DATA) { |
2015 | dev_err(&ihost->pdev->dev, | 2015 | dev_err(&ihost->pdev->dev, |
2016 | "%s: SCIC PIO Request 0x%p received frame %d " | 2016 | "%s: SCIC PIO Request 0x%p received frame %d " |
2017 | "with fis type 0x%02x when expecting a data " | 2017 | "with fis type 0x%02x when expecting a data " |
2018 | "fis.\n", | 2018 | "fis.\n", |
2019 | __func__, | 2019 | __func__, |
2020 | stp_req, | 2020 | stp_req, |
2021 | frame_index, | 2021 | frame_index, |
2022 | frame_header->fis_type); | 2022 | frame_header->fis_type); |
2023 | 2023 | ||
2024 | ireq->scu_status = SCU_TASK_DONE_GOOD; | 2024 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
2025 | ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT; | 2025 | ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT; |
2026 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 2026 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
2027 | 2027 | ||
2028 | /* Frame is decoded; return it to the controller */ | 2028 | /* Frame is decoded; return it to the controller */ |
2029 | sci_controller_release_frame(ihost, frame_index); | 2029 | sci_controller_release_frame(ihost, frame_index); |
2030 | return status; | 2030 | return status; |
2031 | } | 2031 | } |
2032 | 2032 | ||
2033 | if (stp_req->sgl.index < 0) { | 2033 | if (stp_req->sgl.index < 0) { |
2034 | ireq->saved_rx_frame_index = frame_index; | 2034 | ireq->saved_rx_frame_index = frame_index; |
2035 | stp_req->pio_len = 0; | 2035 | stp_req->pio_len = 0; |
2036 | } else { | 2036 | } else { |
2037 | sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, | 2037 | sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, |
2038 | frame_index, | 2038 | frame_index, |
2039 | (void **)&frame_buffer); | 2039 | (void **)&frame_buffer); |
2040 | 2040 | ||
2041 | status = sci_stp_request_pio_data_in_copy_data(stp_req, | 2041 | status = sci_stp_request_pio_data_in_copy_data(stp_req, |
2042 | (u8 *)frame_buffer); | 2042 | (u8 *)frame_buffer); |
2043 | 2043 | ||
2044 | /* Frame is decoded; return it to the controller */ | 2044 | /* Frame is decoded; return it to the controller */ |
2045 | sci_controller_release_frame(ihost, frame_index); | 2045 | sci_controller_release_frame(ihost, frame_index); |
2046 | } | 2046 | } |
2047 | 2047 | ||
2048 | /* Check for the end of the transfer: are there more | 2048 | /* Check for the end of the transfer: are there more |
2049 | * bytes remaining for this data transfer? | 2049 | * bytes remaining for this data transfer? |
2050 | */ | 2050 | */ |
2051 | if (status != SCI_SUCCESS || stp_req->pio_len != 0) | 2051 | if (status != SCI_SUCCESS || stp_req->pio_len != 0) |
2052 | return status; | 2052 | return status; |
2053 | 2053 | ||
2054 | if ((stp_req->status & ATA_BUSY) == 0) { | 2054 | if ((stp_req->status & ATA_BUSY) == 0) { |
2055 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; | 2055 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; |
2056 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; | 2056 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; |
2057 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 2057 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
2058 | } else { | 2058 | } else { |
2059 | sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); | 2059 | sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); |
2060 | } | 2060 | } |
2061 | return status; | 2061 | return status; |
2062 | } | 2062 | } |
2063 | 2063 | ||
2064 | case SCI_REQ_ATAPI_WAIT_PIO_SETUP: { | 2064 | case SCI_REQ_ATAPI_WAIT_PIO_SETUP: { |
2065 | struct sas_task *task = isci_request_access_task(ireq); | 2065 | struct sas_task *task = isci_request_access_task(ireq); |
2066 | 2066 | ||
2067 | sci_controller_release_frame(ihost, frame_index); | 2067 | sci_controller_release_frame(ihost, frame_index); |
2068 | ireq->target_device->working_request = ireq; | 2068 | ireq->target_device->working_request = ireq; |
2069 | if (task->data_dir == DMA_NONE) { | 2069 | if (task->data_dir == DMA_NONE) { |
2070 | sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_TC_COMP); | 2070 | sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_TC_COMP); |
2071 | scu_atapi_reconstruct_raw_frame_task_context(ireq); | 2071 | scu_atapi_reconstruct_raw_frame_task_context(ireq); |
2072 | } else { | 2072 | } else { |
2073 | sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H); | 2073 | sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H); |
2074 | scu_atapi_construct_task_context(ireq); | 2074 | scu_atapi_construct_task_context(ireq); |
2075 | } | 2075 | } |
2076 | 2076 | ||
2077 | sci_controller_continue_io(ireq); | 2077 | sci_controller_continue_io(ireq); |
2078 | return SCI_SUCCESS; | 2078 | return SCI_SUCCESS; |
2079 | } | 2079 | } |
2080 | case SCI_REQ_ATAPI_WAIT_D2H: | 2080 | case SCI_REQ_ATAPI_WAIT_D2H: |
2081 | return atapi_d2h_reg_frame_handler(ireq, frame_index); | 2081 | return atapi_d2h_reg_frame_handler(ireq, frame_index); |
2082 | case SCI_REQ_ABORTING: | 2082 | case SCI_REQ_ABORTING: |
2083 | /* | 2083 | /* |
2084 | * TODO: Is it even possible to get an unsolicited frame in the | 2084 | * TODO: Is it even possible to get an unsolicited frame in the |
2085 | * aborting state? | 2085 | * aborting state? |
2086 | */ | 2086 | */ |
2087 | sci_controller_release_frame(ihost, frame_index); | 2087 | sci_controller_release_frame(ihost, frame_index); |
2088 | return SCI_SUCCESS; | 2088 | return SCI_SUCCESS; |
2089 | 2089 | ||
2090 | default: | 2090 | default: |
2091 | dev_warn(&ihost->pdev->dev, | 2091 | dev_warn(&ihost->pdev->dev, |
2092 | "%s: SCIC IO Request given unexpected frame %x while " | 2092 | "%s: SCIC IO Request given unexpected frame %x while " |
2093 | "in state %d\n", | 2093 | "in state %d\n", |
2094 | __func__, | 2094 | __func__, |
2095 | frame_index, | 2095 | frame_index, |
2096 | state); | 2096 | state); |
2097 | 2097 | ||
2098 | sci_controller_release_frame(ihost, frame_index); | 2098 | sci_controller_release_frame(ihost, frame_index); |
2099 | return SCI_FAILURE_INVALID_STATE; | 2099 | return SCI_FAILURE_INVALID_STATE; |
2100 | } | 2100 | } |
2101 | } | 2101 | } |
2102 | 2102 | ||
2103 | static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq, | 2103 | static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq, |
2104 | u32 completion_code) | 2104 | u32 completion_code) |
2105 | { | 2105 | { |
2106 | enum sci_status status = SCI_SUCCESS; | 2106 | enum sci_status status = SCI_SUCCESS; |
2107 | 2107 | ||
2108 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { | 2108 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { |
2109 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): | 2109 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): |
2110 | ireq->scu_status = SCU_TASK_DONE_GOOD; | 2110 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
2111 | ireq->sci_status = SCI_SUCCESS; | 2111 | ireq->sci_status = SCI_SUCCESS; |
2112 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 2112 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
2113 | break; | 2113 | break; |
2114 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS): | 2114 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS): |
2115 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): | 2115 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): |
2116 | /* We must check the response buffer to see if the D2H | 2116 | /* We must check the response buffer to see if the D2H |
2117 | * Register FIS was received before we got the TC | 2117 | * Register FIS was received before we got the TC |
2118 | * completion. | 2118 | * completion. |
2119 | */ | 2119 | */ |
2120 | if (ireq->stp.rsp.fis_type == FIS_REGD2H) { | 2120 | if (ireq->stp.rsp.fis_type == FIS_REGD2H) { |
2121 | sci_remote_device_suspend(ireq->target_device, | ||
2122 | SCI_SW_SUSPEND_NORMAL); | ||
2123 | |||
2121 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; | 2124 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; |
2122 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; | 2125 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; |
2123 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 2126 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
2124 | } else { | 2127 | } else { |
2125 | /* If we have an error completion status for the | 2128 | /* If we have an error completion status for the |
2126 | * TC then we can expect a D2H register FIS from | 2129 | * TC then we can expect a D2H register FIS from |
2127 | * the device so we must change state to wait | 2130 | * the device so we must change state to wait |
2128 | * for it | 2131 | * for it |
2129 | */ | 2132 | */ |
2130 | sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H); | 2133 | sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H); |
2131 | } | 2134 | } |
2132 | break; | 2135 | break; |
2133 | 2136 | ||
2134 | /* TODO Check to see if any of these completion statuses need to | 2137 | /* TODO Check to see if any of these completion statuses need to |
2135 | * wait for the device-to-host register FIS. | 2138 | * wait for the device-to-host register FIS. |
2136 | */ | 2139 | */ |
2137 | /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR | 2140 | /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR |
2138 | * - this comes only for B0 | 2141 | * - this comes only for B0 |
2139 | */ | 2142 | */ |
2140 | default: | 2143 | default: |
2141 | /* All other completion statuses cause the IO to be complete. */ | 2144 | /* All other completion statuses cause the IO to be complete. */ |
2142 | ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); | 2145 | ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); |
2143 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; | 2146 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; |
2144 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 2147 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
2145 | break; | 2148 | break; |
2146 | } | 2149 | } |
2147 | 2150 | ||
2148 | return status; | 2151 | return status; |
2149 | } | 2152 | } |
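
The hunk above is the substance of this commit: on an UNEXP_FIS or REG_ERR completion, if the D2H register FIS already landed in the response buffer, the remote device is software-suspended (SCI_SW_SUSPEND_NORMAL) before the request completes; if not, the state machine moves to SCI_REQ_STP_UDMA_WAIT_D2H and waits for it. A compact standalone model of that restored branch (stubbed; it does not call the driver's APIs):

#include <stdbool.h>
#include <stdio.h>

enum next_step { SUSPEND_AND_COMPLETE, WAIT_FOR_D2H };

/* suspend + complete only when the D2H FIS beat the TC completion */
static enum next_step udma_tc_error_step(bool d2h_received)
{
        return d2h_received ? SUSPEND_AND_COMPLETE : WAIT_FOR_D2H;
}

int main(void)
{
        printf("D2H already buffered -> %s\n",
               udma_tc_error_step(true) == SUSPEND_AND_COMPLETE ?
               "suspend device, complete request" : "wait");
        printf("no D2H yet           -> %s\n",
               udma_tc_error_step(false) == WAIT_FOR_D2H ?
               "wait for the D2H FIS" : "complete");
        return 0;
}
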
2150 | 2153 | ||
2151 | static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code, | 2154 | static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code, |
2152 | enum sci_base_request_states next) | 2155 | enum sci_base_request_states next) |
2153 | { | 2156 | { |
2154 | enum sci_status status = SCI_SUCCESS; | 2157 | enum sci_status status = SCI_SUCCESS; |
2155 | 2158 | ||
2156 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { | 2159 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { |
2157 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): | 2160 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): |
2158 | ireq->scu_status = SCU_TASK_DONE_GOOD; | 2161 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
2159 | ireq->sci_status = SCI_SUCCESS; | 2162 | ireq->sci_status = SCI_SUCCESS; |
2160 | sci_change_state(&ireq->sm, next); | 2163 | sci_change_state(&ireq->sm, next); |
2161 | break; | 2164 | break; |
2162 | default: | 2165 | default: |
2163 | /* All other completion statuses cause the IO to be complete. | 2166 | /* All other completion statuses cause the IO to be complete. |
2164 | * If a NAK was received, then it is up to the user to retry | 2167 | * If a NAK was received, then it is up to the user to retry |
2165 | * the request. | 2168 | * the request. |
2166 | */ | 2169 | */ |
2167 | ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); | 2170 | ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); |
2168 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; | 2171 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; |
2169 | 2172 | ||
2170 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 2173 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
2171 | break; | 2174 | break; |
2172 | } | 2175 | } |
2173 | 2176 | ||
2174 | return status; | 2177 | return status; |
2175 | } | 2178 | } |
2176 | 2179 | ||
2177 | static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ireq, | 2180 | static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ireq, |
2178 | u32 completion_code) | 2181 | u32 completion_code) |
2179 | { | 2182 | { |
2180 | struct isci_remote_device *idev = ireq->target_device; | 2183 | struct isci_remote_device *idev = ireq->target_device; |
2181 | struct dev_to_host_fis *d2h = &ireq->stp.rsp; | 2184 | struct dev_to_host_fis *d2h = &ireq->stp.rsp; |
2182 | enum sci_status status = SCI_SUCCESS; | 2185 | enum sci_status status = SCI_SUCCESS; |
2183 | 2186 | ||
2184 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { | 2187 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { |
2185 | case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): | 2188 | case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): |
2186 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 2189 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
2187 | break; | 2190 | break; |
2188 | 2191 | ||
2189 | case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): { | 2192 | case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): { |
2190 | u16 len = sci_req_tx_bytes(ireq); | 2193 | u16 len = sci_req_tx_bytes(ireq); |
2191 | 2194 | ||
2192 | /* likely non-error data underrun; work around the missing | 2195 | /* likely non-error data underrun; work around the missing |
2193 | * d2h frame from the controller | 2196 | * d2h frame from the controller |
2194 | */ | 2197 | */ |
2195 | if (d2h->fis_type != FIS_REGD2H) { | 2198 | if (d2h->fis_type != FIS_REGD2H) { |
2196 | d2h->fis_type = FIS_REGD2H; | 2199 | d2h->fis_type = FIS_REGD2H; |
2197 | d2h->flags = (1 << 6); | 2200 | d2h->flags = (1 << 6); |
2198 | d2h->status = 0x50; | 2201 | d2h->status = 0x50; |
2199 | d2h->error = 0; | 2202 | d2h->error = 0; |
2200 | d2h->lbal = 0; | 2203 | d2h->lbal = 0; |
2201 | d2h->byte_count_low = len & 0xff; | 2204 | d2h->byte_count_low = len & 0xff; |
2202 | d2h->byte_count_high = len >> 8; | 2205 | d2h->byte_count_high = len >> 8; |
2203 | d2h->device = 0xa0; | 2206 | d2h->device = 0xa0; |
2204 | d2h->lbal_exp = 0; | 2207 | d2h->lbal_exp = 0; |
2205 | d2h->lbam_exp = 0; | 2208 | d2h->lbam_exp = 0; |
2206 | d2h->lbah_exp = 0; | 2209 | d2h->lbah_exp = 0; |
2207 | d2h->_r_a = 0; | 2210 | d2h->_r_a = 0; |
2208 | d2h->sector_count = 0x3; | 2211 | d2h->sector_count = 0x3; |
2209 | d2h->sector_count_exp = 0; | 2212 | d2h->sector_count_exp = 0; |
2210 | d2h->_r_b = 0; | 2213 | d2h->_r_b = 0; |
2211 | d2h->_r_c = 0; | 2214 | d2h->_r_c = 0; |
2212 | d2h->_r_d = 0; | 2215 | d2h->_r_d = 0; |
2213 | } | 2216 | } |
2214 | 2217 | ||
2215 | ireq->scu_status = SCU_TASK_DONE_GOOD; | 2218 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
2216 | ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY; | 2219 | ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY; |
2217 | status = ireq->sci_status; | 2220 | status = ireq->sci_status; |
2218 | 2221 | ||
2219 | /* the hw will have suspended the rnc, so complete the | 2222 | /* the hw will have suspended the rnc, so complete the |
2220 | * request upon pending resume | 2223 | * request upon pending resume |
2221 | */ | 2224 | */ |
2222 | sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR); | 2225 | sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR); |
2223 | break; | 2226 | break; |
2224 | } | 2227 | } |
2225 | case (SCU_TASK_DONE_EXCESS_DATA << SCU_COMPLETION_TL_STATUS_SHIFT): | 2228 | case (SCU_TASK_DONE_EXCESS_DATA << SCU_COMPLETION_TL_STATUS_SHIFT): |
2226 | /* In this case, there is no UF coming after. | 2229 | /* In this case, there is no UF coming after. |
2227 | * complete the IO now. | 2230 | * complete the IO now. |
2228 | */ | 2231 | */ |
2229 | ireq->scu_status = SCU_TASK_DONE_GOOD; | 2232 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
2230 | ireq->sci_status = SCI_SUCCESS; | 2233 | ireq->sci_status = SCI_SUCCESS; |
2231 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); | 2234 | sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); |
2232 | break; | 2235 | break; |
2233 | 2236 | ||
2234 | default: | 2237 | default: |
2235 | if (d2h->fis_type == FIS_REGD2H) { | 2238 | if (d2h->fis_type == FIS_REGD2H) { |
2236 | /* UF received; change the device state to ATAPI_ERROR */ | 2239 | /* UF received; change the device state to ATAPI_ERROR */ |
2237 | status = ireq->sci_status; | 2240 | status = ireq->sci_status; |
2238 | sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR); | 2241 | sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR); |
2239 | } else { | 2242 | } else { |
2240 | /* If we receive any non-success TC status and no UF | 2243 | /* If we receive any non-success TC status and no UF |
2241 | * has been received yet, then a UF for the status FIS | 2244 | * has been received yet, then a UF for the status FIS |
2242 | * is coming after (XXX: suspect this is | 2245 | * is coming after (XXX: suspect this is |
2243 | * actually a protocol error or a bug like the | 2246 | * actually a protocol error or a bug like the |
2244 | * DONE_UNEXP_FIS case) | 2247 | * DONE_UNEXP_FIS case) |
2245 | */ | 2248 | */ |
2246 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; | 2249 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; |
2247 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; | 2250 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; |
2248 | 2251 | ||
2249 | sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H); | 2252 | sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H); |
2250 | } | 2253 | } |
2251 | break; | 2254 | break; |
2252 | } | 2255 | } |
2253 | 2256 | ||
2254 | return status; | 2257 | return status; |
2255 | } | 2258 | } |
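
When the controller drops the D2H frame on a benign underrun, the handler above fabricates one: status 0x50 (which reads as DRDY | DSC, i.e. device ready with no error; an inference about the intended bits), sector_count 0x3, and the residual transfer length split across byte_count_low/high. A standalone sketch of that length split and its reassembly:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t len = 0x1234;                 /* bytes actually transferred */
        uint8_t  byte_count_low  = len & 0xff; /* as in the handler above   */
        uint8_t  byte_count_high = len >> 8;

        /* a consumer reassembles the ATAPI byte count the same way */
        unsigned rebuilt = ((unsigned)byte_count_high << 8) | byte_count_low;

        printf("split 0x%04x -> high 0x%02x low 0x%02x -> 0x%04x\n",
               (unsigned)len, byte_count_high, byte_count_low, rebuilt);
        return 0;
}
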
2256 | 2259 | ||
2257 | static int sci_request_smp_completion_status_is_tx_suspend( | 2260 | static int sci_request_smp_completion_status_is_tx_suspend( |
2258 | unsigned int completion_status) | 2261 | unsigned int completion_status) |
2259 | { | 2262 | { |
2260 | switch (completion_status) { | 2263 | switch (completion_status) { |
2261 | case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: | 2264 | case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: |
2262 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: | 2265 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: |
2263 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: | 2266 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: |
2264 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: | 2267 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: |
2265 | case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: | 2268 | case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: |
2266 | case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: | 2269 | case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: |
2267 | return 1; | 2270 | return 1; |
2268 | } | 2271 | } |
2269 | return 0; | 2272 | return 0; |
2270 | } | 2273 | } |
2271 | 2274 | ||
2272 | static int sci_request_smp_completion_status_is_tx_rx_suspend( | 2275 | static int sci_request_smp_completion_status_is_tx_rx_suspend( |
2273 | unsigned int completion_status) | 2276 | unsigned int completion_status) |
2274 | { | 2277 | { |
2275 | return 0; /* There are no Tx/Rx SMP suspend conditions. */ | 2278 | return 0; /* There are no Tx/Rx SMP suspend conditions. */ |
2276 | } | 2279 | } |
2277 | 2280 | ||
2278 | static int sci_request_ssp_completion_status_is_tx_suspend( | 2281 | static int sci_request_ssp_completion_status_is_tx_suspend( |
2279 | unsigned int completion_status) | 2282 | unsigned int completion_status) |
2280 | { | 2283 | { |
2281 | switch (completion_status) { | 2284 | switch (completion_status) { |
2282 | case SCU_TASK_DONE_TX_RAW_CMD_ERR: | 2285 | case SCU_TASK_DONE_TX_RAW_CMD_ERR: |
2283 | case SCU_TASK_DONE_LF_ERR: | 2286 | case SCU_TASK_DONE_LF_ERR: |
2284 | case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: | 2287 | case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: |
2285 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: | 2288 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: |
2286 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: | 2289 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: |
2287 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: | 2290 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: |
2288 | case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: | 2291 | case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: |
2289 | case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: | 2292 | case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: |
2290 | case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: | 2293 | case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: |
2291 | case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: | 2294 | case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: |
2292 | case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: | 2295 | case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: |
2293 | return 1; | 2296 | return 1; |
2294 | } | 2297 | } |
2295 | return 0; | 2298 | return 0; |
2296 | } | 2299 | } |
2297 | 2300 | ||
2298 | static int sci_request_ssp_completion_status_is_tx_rx_suspend( | 2301 | static int sci_request_ssp_completion_status_is_tx_rx_suspend( |
2299 | unsigned int completion_status) | 2302 | unsigned int completion_status) |
2300 | { | 2303 | { |
2301 | return 0; /* There are no Tx/Rx SSP suspend conditions. */ | 2304 | return 0; /* There are no Tx/Rx SSP suspend conditions. */ |
2302 | } | 2305 | } |
2303 | 2306 | ||
2304 | static int sci_request_stpsata_completion_status_is_tx_suspend( | 2307 | static int sci_request_stpsata_completion_status_is_tx_suspend( |
2305 | unsigned int completion_status) | 2308 | unsigned int completion_status) |
2306 | { | 2309 | { |
2307 | switch (completion_status) { | 2310 | switch (completion_status) { |
2308 | case SCU_TASK_DONE_TX_RAW_CMD_ERR: | 2311 | case SCU_TASK_DONE_TX_RAW_CMD_ERR: |
2309 | case SCU_TASK_DONE_LL_R_ERR: | 2312 | case SCU_TASK_DONE_LL_R_ERR: |
2310 | case SCU_TASK_DONE_LL_PERR: | 2313 | case SCU_TASK_DONE_LL_PERR: |
2311 | case SCU_TASK_DONE_REG_ERR: | 2314 | case SCU_TASK_DONE_REG_ERR: |
2312 | case SCU_TASK_DONE_SDB_ERR: | 2315 | case SCU_TASK_DONE_SDB_ERR: |
2313 | case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: | 2316 | case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: |
2314 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: | 2317 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: |
2315 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: | 2318 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: |
2316 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: | 2319 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: |
2317 | case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: | 2320 | case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: |
2318 | case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: | 2321 | case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: |
2319 | case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: | 2322 | case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: |
2320 | case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: | 2323 | case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: |
2321 | case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: | 2324 | case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: |
2322 | return 1; | 2325 | return 1; |
2323 | } | 2326 | } |
2324 | return 0; | 2327 | return 0; |
2325 | } | 2328 | } |
2326 | 2329 | ||
2327 | 2330 | ||
2328 | static int sci_request_stpsata_completion_status_is_tx_rx_suspend( | 2331 | static int sci_request_stpsata_completion_status_is_tx_rx_suspend( |
2329 | unsigned int completion_status) | 2332 | unsigned int completion_status) |
2330 | { | 2333 | { |
2331 | switch (completion_status) { | 2334 | switch (completion_status) { |
2332 | case SCU_TASK_DONE_LF_ERR: | 2335 | case SCU_TASK_DONE_LF_ERR: |
2333 | case SCU_TASK_DONE_LL_SY_TERM: | 2336 | case SCU_TASK_DONE_LL_SY_TERM: |
2334 | case SCU_TASK_DONE_LL_LF_TERM: | 2337 | case SCU_TASK_DONE_LL_LF_TERM: |
2335 | case SCU_TASK_DONE_BREAK_RCVD: | 2338 | case SCU_TASK_DONE_BREAK_RCVD: |
2336 | case SCU_TASK_DONE_INV_FIS_LEN: | 2339 | case SCU_TASK_DONE_INV_FIS_LEN: |
2337 | case SCU_TASK_DONE_UNEXP_FIS: | 2340 | case SCU_TASK_DONE_UNEXP_FIS: |
2338 | case SCU_TASK_DONE_UNEXP_SDBFIS: | 2341 | case SCU_TASK_DONE_UNEXP_SDBFIS: |
2339 | case SCU_TASK_DONE_MAX_PLD_ERR: | 2342 | case SCU_TASK_DONE_MAX_PLD_ERR: |
2340 | return 1; | 2343 | return 1; |
2341 | } | 2344 | } |
2342 | return 0; | 2345 | return 0; |
2343 | } | 2346 | } |
2344 | 2347 | ||
2345 | static void sci_request_handle_suspending_completions( | 2348 | static void sci_request_handle_suspending_completions( |
2346 | struct isci_request *ireq, | 2349 | struct isci_request *ireq, |
2347 | u32 completion_code) | 2350 | u32 completion_code) |
2348 | { | 2351 | { |
2349 | int is_tx = 0; | 2352 | int is_tx = 0; |
2350 | int is_tx_rx = 0; | 2353 | int is_tx_rx = 0; |
2351 | 2354 | ||
2352 | switch (ireq->protocol) { | 2355 | switch (ireq->protocol) { |
2353 | case SAS_PROTOCOL_SMP: | 2356 | case SAS_PROTOCOL_SMP: |
2354 | is_tx = sci_request_smp_completion_status_is_tx_suspend( | 2357 | is_tx = sci_request_smp_completion_status_is_tx_suspend( |
2355 | completion_code); | 2358 | completion_code); |
2356 | is_tx_rx = sci_request_smp_completion_status_is_tx_rx_suspend( | 2359 | is_tx_rx = sci_request_smp_completion_status_is_tx_rx_suspend( |
2357 | completion_code); | 2360 | completion_code); |
2358 | break; | 2361 | break; |
2359 | case SAS_PROTOCOL_SSP: | 2362 | case SAS_PROTOCOL_SSP: |
2360 | is_tx = sci_request_ssp_completion_status_is_tx_suspend( | 2363 | is_tx = sci_request_ssp_completion_status_is_tx_suspend( |
2361 | completion_code); | 2364 | completion_code); |
2362 | is_tx_rx = sci_request_ssp_completion_status_is_tx_rx_suspend( | 2365 | is_tx_rx = sci_request_ssp_completion_status_is_tx_rx_suspend( |
2363 | completion_code); | 2366 | completion_code); |
2364 | break; | 2367 | break; |
2365 | case SAS_PROTOCOL_STP: | 2368 | case SAS_PROTOCOL_STP: |
2366 | is_tx = sci_request_stpsata_completion_status_is_tx_suspend( | 2369 | is_tx = sci_request_stpsata_completion_status_is_tx_suspend( |
2367 | completion_code); | 2370 | completion_code); |
2368 | is_tx_rx = | 2371 | is_tx_rx = |
2369 | sci_request_stpsata_completion_status_is_tx_rx_suspend( | 2372 | sci_request_stpsata_completion_status_is_tx_rx_suspend( |
2370 | completion_code); | 2373 | completion_code); |
2371 | break; | 2374 | break; |
2372 | default: | 2375 | default: |
2373 | dev_warn(&ireq->isci_host->pdev->dev, | 2376 | dev_warn(&ireq->isci_host->pdev->dev, |
2374 | "%s: request %p has no valid protocol\n", | 2377 | "%s: request %p has no valid protocol\n", |
2375 | __func__, ireq); | 2378 | __func__, ireq); |
2376 | break; | 2379 | break; |
2377 | } | 2380 | } |
2378 | if (is_tx || is_tx_rx) { | 2381 | if (is_tx || is_tx_rx) { |
2379 | BUG_ON(is_tx && is_tx_rx); | 2382 | BUG_ON(is_tx && is_tx_rx); |
2380 | 2383 | ||
2381 | sci_remote_node_context_suspend( | 2384 | sci_remote_node_context_suspend( |
2382 | &ireq->target_device->rnc, | 2385 | &ireq->target_device->rnc, |
2383 | SCI_HW_SUSPEND, | 2386 | SCI_HW_SUSPEND, |
2384 | (is_tx_rx) ? SCU_EVENT_TL_RNC_SUSPEND_TX_RX | 2387 | (is_tx_rx) ? SCU_EVENT_TL_RNC_SUSPEND_TX_RX |
2385 | : SCU_EVENT_TL_RNC_SUSPEND_TX); | 2388 | : SCU_EVENT_TL_RNC_SUSPEND_TX); |
2386 | } | 2389 | } |
2387 | } | 2390 | } |
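
The dispatcher above reduces each protocol's completion code to two booleans, asserts they are mutually exclusive, and maps the pair onto an RNC suspend event. A standalone sketch of the same shape, with the per-protocol predicate tables stubbed to trivial checks:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

enum suspend_event { SUSPEND_NONE, SUSPEND_TX, SUSPEND_TX_RX };

/* stubs standing in for the *_is_tx_suspend()/*_is_tx_rx_suspend() tables */
static bool is_tx_code(unsigned code)    { return code == 1; }
static bool is_tx_rx_code(unsigned code) { return code == 2; }

static enum suspend_event pick_suspend(unsigned code)
{
        bool is_tx = is_tx_code(code);
        bool is_tx_rx = is_tx_rx_code(code);

        if (!is_tx && !is_tx_rx)
                return SUSPEND_NONE;
        assert(!(is_tx && is_tx_rx)); /* mirrors the BUG_ON above */
        return is_tx_rx ? SUSPEND_TX_RX : SUSPEND_TX;
}

int main(void)
{
        printf("code 1 -> %d (TX)\n", pick_suspend(1));
        printf("code 2 -> %d (TX/RX)\n", pick_suspend(2));
        printf("code 9 -> %d (none)\n", pick_suspend(9));
        return 0;
}
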
2388 | 2391 | ||
2389 | enum sci_status | 2392 | enum sci_status |
2390 | sci_io_request_tc_completion(struct isci_request *ireq, | 2393 | sci_io_request_tc_completion(struct isci_request *ireq, |
2391 | u32 completion_code) | 2394 | u32 completion_code) |
2392 | { | 2395 | { |
2393 | enum sci_base_request_states state; | 2396 | enum sci_base_request_states state; |
2394 | struct isci_host *ihost = ireq->owning_controller; | 2397 | struct isci_host *ihost = ireq->owning_controller; |
2395 | 2398 | ||
2396 | state = ireq->sm.current_state_id; | 2399 | state = ireq->sm.current_state_id; |
2397 | 2400 | ||
2398 | /* Decode those completions that signal upcoming suspension events. */ | 2401 | /* Decode those completions that signal upcoming suspension events. */ |
2399 | sci_request_handle_suspending_completions( | 2402 | sci_request_handle_suspending_completions( |
2400 | ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code)); | 2403 | ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code)); |
2401 | 2404 | ||
2402 | switch (state) { | 2405 | switch (state) { |
2403 | case SCI_REQ_STARTED: | 2406 | case SCI_REQ_STARTED: |
2404 | return request_started_state_tc_event(ireq, completion_code); | 2407 | return request_started_state_tc_event(ireq, completion_code); |
2405 | 2408 | ||
2406 | case SCI_REQ_TASK_WAIT_TC_COMP: | 2409 | case SCI_REQ_TASK_WAIT_TC_COMP: |
2407 | return ssp_task_request_await_tc_event(ireq, | 2410 | return ssp_task_request_await_tc_event(ireq, |
2408 | completion_code); | 2411 | completion_code); |
2409 | 2412 | ||
2410 | case SCI_REQ_SMP_WAIT_RESP: | 2413 | case SCI_REQ_SMP_WAIT_RESP: |
2411 | return smp_request_await_response_tc_event(ireq, | 2414 | return smp_request_await_response_tc_event(ireq, |
2412 | completion_code); | 2415 | completion_code); |
2413 | 2416 | ||
2414 | case SCI_REQ_SMP_WAIT_TC_COMP: | 2417 | case SCI_REQ_SMP_WAIT_TC_COMP: |
2415 | return smp_request_await_tc_event(ireq, completion_code); | 2418 | return smp_request_await_tc_event(ireq, completion_code); |
2416 | 2419 | ||
2417 | case SCI_REQ_STP_UDMA_WAIT_TC_COMP: | 2420 | case SCI_REQ_STP_UDMA_WAIT_TC_COMP: |
2418 | return stp_request_udma_await_tc_event(ireq, | 2421 | return stp_request_udma_await_tc_event(ireq, |
2419 | completion_code); | 2422 | completion_code); |
2420 | 2423 | ||
2421 | case SCI_REQ_STP_NON_DATA_WAIT_H2D: | 2424 | case SCI_REQ_STP_NON_DATA_WAIT_H2D: |
2422 | return stp_request_non_data_await_h2d_tc_event(ireq, | 2425 | return stp_request_non_data_await_h2d_tc_event(ireq, |
2423 | completion_code); | 2426 | completion_code); |
2424 | 2427 | ||
2425 | case SCI_REQ_STP_PIO_WAIT_H2D: | 2428 | case SCI_REQ_STP_PIO_WAIT_H2D: |
2426 | return stp_request_pio_await_h2d_completion_tc_event(ireq, | 2429 | return stp_request_pio_await_h2d_completion_tc_event(ireq, |
2427 | completion_code); | 2430 | completion_code); |
2428 | 2431 | ||
2429 | case SCI_REQ_STP_PIO_DATA_OUT: | 2432 | case SCI_REQ_STP_PIO_DATA_OUT: |
2430 | return pio_data_out_tx_done_tc_event(ireq, completion_code); | 2433 | return pio_data_out_tx_done_tc_event(ireq, completion_code); |
2431 | 2434 | ||
2432 | case SCI_REQ_ABORTING: | 2435 | case SCI_REQ_ABORTING: |
2433 | return request_aborting_state_tc_event(ireq, | 2436 | return request_aborting_state_tc_event(ireq, |
2434 | completion_code); | 2437 | completion_code); |
2435 | 2438 | ||
2436 | case SCI_REQ_ATAPI_WAIT_H2D: | 2439 | case SCI_REQ_ATAPI_WAIT_H2D: |
2437 | return atapi_raw_completion(ireq, completion_code, | 2440 | return atapi_raw_completion(ireq, completion_code, |
2438 | SCI_REQ_ATAPI_WAIT_PIO_SETUP); | 2441 | SCI_REQ_ATAPI_WAIT_PIO_SETUP); |
2439 | 2442 | ||
2440 | case SCI_REQ_ATAPI_WAIT_TC_COMP: | 2443 | case SCI_REQ_ATAPI_WAIT_TC_COMP: |
2441 | return atapi_raw_completion(ireq, completion_code, | 2444 | return atapi_raw_completion(ireq, completion_code, |
2442 | SCI_REQ_ATAPI_WAIT_D2H); | 2445 | SCI_REQ_ATAPI_WAIT_D2H); |
2443 | 2446 | ||
2444 | case SCI_REQ_ATAPI_WAIT_D2H: | 2447 | case SCI_REQ_ATAPI_WAIT_D2H: |
2445 | return atapi_data_tc_completion_handler(ireq, completion_code); | 2448 | return atapi_data_tc_completion_handler(ireq, completion_code); |
2446 | 2449 | ||
2447 | default: | 2450 | default: |
2448 | dev_warn(&ihost->pdev->dev, "%s: %x in wrong state %s\n", | 2451 | dev_warn(&ihost->pdev->dev, "%s: %x in wrong state %s\n", |
2449 | __func__, completion_code, req_state_name(state)); | 2452 | __func__, completion_code, req_state_name(state)); |
2450 | return SCI_FAILURE_INVALID_STATE; | 2453 | return SCI_FAILURE_INVALID_STATE; |
2451 | } | 2454 | } |
2452 | } | 2455 | } |
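
Every TC completion funnels through this function: the suspension decoder runs first on the isolated transport-layer status, then the per-state handler is chosen. A sketch of the field-isolation idiom the SCU_GET_COMPLETION_TL_STATUS()/SCU_MAKE_COMPLETION_STATUS() pair implies, comparing the field in place rather than shifting it down; the 16-bit shift and 8-bit field width here are assumptions, not the driver's actual constants:

#include <stdint.h>
#include <stdio.h>

#define TL_STATUS_SHIFT 16                         /* assumed */
#define TL_STATUS_MASK  (0xffu << TL_STATUS_SHIFT) /* assumed width */
#define GET_TL_STATUS(c) ((c) & TL_STATUS_MASK)

#define DONE_GOOD 0x00u /* placeholder for SCU_TASK_DONE_GOOD */

int main(void)
{
        uint32_t completion = (DONE_GOOD << TL_STATUS_SHIFT) | 0x1f;

        /* field stays in place; both sides are shifted the same way */
        if (GET_TL_STATUS(completion) == (DONE_GOOD << TL_STATUS_SHIFT))
                printf("completion decodes as DONE_GOOD\n");
        return 0;
}
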
2453 | 2456 | ||
2454 | /** | 2457 | /** |
2455 | * isci_request_process_response_iu() - This function sets the status and | 2458 | * isci_request_process_response_iu() - This function sets the status and |
2456 | * response iu in the task struct from the request object for the upper | 2459 | * response iu in the task struct from the request object for the upper |
2457 | * layer driver. | 2460 | * layer driver. |
2458 | * @task: This parameter is the task struct from the upper layer driver. | 2461 | * @task: This parameter is the task struct from the upper layer driver. |
2459 | * @resp_iu: This parameter points to the response iu of the completed request. | 2462 | * @resp_iu: This parameter points to the response iu of the completed request. |
2460 | * @dev: This parameter specifies the linux device struct. | 2463 | * @dev: This parameter specifies the linux device struct. |
2461 | * | 2464 | * |
2462 | * none. | 2465 | * none. |
2463 | */ | 2466 | */ |
2464 | static void isci_request_process_response_iu( | 2467 | static void isci_request_process_response_iu( |
2465 | struct sas_task *task, | 2468 | struct sas_task *task, |
2466 | struct ssp_response_iu *resp_iu, | 2469 | struct ssp_response_iu *resp_iu, |
2467 | struct device *dev) | 2470 | struct device *dev) |
2468 | { | 2471 | { |
2469 | dev_dbg(dev, | 2472 | dev_dbg(dev, |
2470 | "%s: resp_iu = %p " | 2473 | "%s: resp_iu = %p " |
2471 | "resp_iu->status = 0x%x,\nresp_iu->datapres = %d " | 2474 | "resp_iu->status = 0x%x,\nresp_iu->datapres = %d " |
2472 | "resp_iu->response_data_len = %x, " | 2475 | "resp_iu->response_data_len = %x, " |
2473 | "resp_iu->sense_data_len = %x\nrepsonse data: ", | 2476 | "resp_iu->sense_data_len = %x\nrepsonse data: ", |
2474 | __func__, | 2477 | __func__, |
2475 | resp_iu, | 2478 | resp_iu, |
2476 | resp_iu->status, | 2479 | resp_iu->status, |
2477 | resp_iu->datapres, | 2480 | resp_iu->datapres, |
2478 | resp_iu->response_data_len, | 2481 | resp_iu->response_data_len, |
2479 | resp_iu->sense_data_len); | 2482 | resp_iu->sense_data_len); |
2480 | 2483 | ||
2481 | task->task_status.stat = resp_iu->status; | 2484 | task->task_status.stat = resp_iu->status; |
2482 | 2485 | ||
2483 | /* libsas updates the task status fields based on the response iu. */ | 2486 | /* libsas updates the task status fields based on the response iu. */ |
2484 | sas_ssp_task_response(dev, task, resp_iu); | 2487 | sas_ssp_task_response(dev, task, resp_iu); |
2485 | } | 2488 | } |
2486 | 2489 | ||
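/*
 * For reference (SAS SSP response IU format, not driver-specific logic):
 * the DATAPRES field consumed by sas_ssp_task_response() selects what the
 * IU carries -- 0 = no data (use the status byte alone), 1 = response
 * data, 2 = sense data.
 */
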
/**
 * isci_request_set_open_reject_status() - This function prepares the I/O
 *    completion for OPEN_REJECT conditions.
 * @request: This parameter is the completed isci_request object.
 * @task: This parameter is the sas_task for the completed I/O.
 * @response_ptr: This parameter specifies the service response for the I/O.
 * @status_ptr: This parameter specifies the exec status for the I/O.
 * @open_rej_reason: This parameter specifies the encoded reason for the
 *    abandon-class reject.
 *
 * none.
 */
static void isci_request_set_open_reject_status(
	struct isci_request *request,
	struct sas_task *task,
	enum service_response *response_ptr,
	enum exec_status *status_ptr,
	enum sas_open_rej_reason open_rej_reason)
{
	/* Task in the target is done. */
	set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
	*response_ptr = SAS_TASK_UNDELIVERED;
	*status_ptr = SAS_OPEN_REJECT;
	task->task_status.open_rej_reason = open_rej_reason;
}

/**
 * isci_request_handle_controller_specific_errors() - This function decodes
 *    controller-specific I/O completion error conditions.
 * @idev: This parameter is the remote device associated with the I/O.
 * @request: This parameter is the completed isci_request object.
 * @task: This parameter is the sas_task for the completed I/O.
 * @response_ptr: This parameter specifies the service response for the I/O.
 * @status_ptr: This parameter specifies the exec status for the I/O.
 *
 * none.
 */
static void isci_request_handle_controller_specific_errors(
	struct isci_remote_device *idev,
	struct isci_request *request,
	struct sas_task *task,
	enum service_response *response_ptr,
	enum exec_status *status_ptr)
{
	unsigned int cstatus;

	cstatus = request->scu_status;

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
		"- controller status = 0x%x\n",
		__func__, request, cstatus);

	/* Decode the controller-specific errors; most
	 * important is to recognize those conditions in which
	 * the target may still have a task outstanding that
	 * must be aborted.
	 *
	 * Note that there are SCU completion codes being
	 * named in the decode below for which SCIC has already
	 * done work to handle them in a way other than as
	 * a controller-specific completion code; these are left
	 * in the decode below for completeness' sake.
	 */
	switch (cstatus) {
	case SCU_TASK_DONE_DMASETUP_DIRERR:
	/* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
	case SCU_TASK_DONE_XFERCNT_ERR:
		/* Also SCU_TASK_DONE_SMP_UFI_ERR: */
		if (task->task_proto == SAS_PROTOCOL_SMP) {
			/* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
			*response_ptr = SAS_TASK_COMPLETE;

			/* See if the device has been/is being stopped. Note
			 * that we ignore the quiesce state, since we are
			 * concerned about the actual device state.
			 */
			if (!idev)
				*status_ptr = SAS_DEVICE_UNKNOWN;
			else
				*status_ptr = SAS_ABORTED_TASK;

			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
		} else {
			/* Task in the target is not done. */
			*response_ptr = SAS_TASK_UNDELIVERED;

			if (!idev)
				*status_ptr = SAS_DEVICE_UNKNOWN;
			else
				*status_ptr = SAM_STAT_TASK_ABORTED;

			clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
		}

		break;

	case SCU_TASK_DONE_CRC_ERR:
	case SCU_TASK_DONE_NAK_CMD_ERR:
	case SCU_TASK_DONE_EXCESS_DATA:
	case SCU_TASK_DONE_UNEXP_FIS:
	/* Also SCU_TASK_DONE_UNEXP_RESP: */
	case SCU_TASK_DONE_VIIT_ENTRY_NV:	/* TODO - conditions? */
	case SCU_TASK_DONE_IIT_ENTRY_NV:	/* TODO - conditions? */
	case SCU_TASK_DONE_RNCNV_OUTBOUND:	/* TODO - conditions? */
		/* These are conditions in which the target
		 * has completed the task, so that no cleanup
		 * is necessary.
		 */
		*response_ptr = SAS_TASK_COMPLETE;

		/* See if the device has been/is being stopped. Note
		 * that we ignore the quiesce state, since we are
		 * concerned about the actual device state.
		 */
		if (!idev)
			*status_ptr = SAS_DEVICE_UNKNOWN;
		else
			*status_ptr = SAS_ABORTED_TASK;

		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
		break;

	/* Note that the only open reject completion codes seen here will be
	 * abandon-class codes; all others are automatically retried in the SCU.
	 */
	case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			SAS_OREJ_WRONG_DEST);
		break;

	case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:

		/* Note - the return of AB0 will change when
		 * libsas implements detection of zone violations.
		 */
		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			SAS_OREJ_RESV_AB0);
		break;

	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			SAS_OREJ_RESV_AB1);
		break;

	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			SAS_OREJ_RESV_AB2);
		break;

	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			SAS_OREJ_RESV_AB3);
		break;

	case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			SAS_OREJ_BAD_DEST);
		break;

	case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			SAS_OREJ_STP_NORES);
		break;

	case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			SAS_OREJ_EPROTO);
		break;

	case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			SAS_OREJ_CONN_RATE);
		break;

	case SCU_TASK_DONE_LL_R_ERR:
	/* Also SCU_TASK_DONE_ACK_NAK_TO: */
	case SCU_TASK_DONE_LL_PERR:
	case SCU_TASK_DONE_LL_SY_TERM:
	/* Also SCU_TASK_DONE_NAK_ERR: */
	case SCU_TASK_DONE_LL_LF_TERM:
	/* Also SCU_TASK_DONE_DATA_LEN_ERR: */
	case SCU_TASK_DONE_LL_ABORT_ERR:
	case SCU_TASK_DONE_SEQ_INV_TYPE:
	/* Also SCU_TASK_DONE_UNEXP_XR: */
	case SCU_TASK_DONE_XR_IU_LEN_ERR:
	case SCU_TASK_DONE_INV_FIS_LEN:
	/* Also SCU_TASK_DONE_XR_WD_LEN: */
	case SCU_TASK_DONE_SDMA_ERR:
	case SCU_TASK_DONE_OFFSET_ERR:
	case SCU_TASK_DONE_MAX_PLD_ERR:
	case SCU_TASK_DONE_LF_ERR:
	case SCU_TASK_DONE_SMP_RESP_TO_ERR:	/* Escalate to dev reset? */
	case SCU_TASK_DONE_SMP_LL_RX_ERR:
	case SCU_TASK_DONE_UNEXP_DATA:
	case SCU_TASK_DONE_UNEXP_SDBFIS:
	case SCU_TASK_DONE_REG_ERR:
	case SCU_TASK_DONE_SDB_ERR:
	case SCU_TASK_DONE_TASK_ABORT:
	default:
		/* Task in the target is not done. */
		*response_ptr = SAS_TASK_UNDELIVERED;
		*status_ptr = SAM_STAT_TASK_ABORTED;

		if (task->task_proto == SAS_PROTOCOL_SMP)
			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
		else
			clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
		break;
	}
}

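/*
 * Summary of the decode above (restating, not adding, logic):
 *
 *	condition                      response               status
 *	target finished the task       SAS_TASK_COMPLETE      SAS_ABORTED_TASK
 *	target did not finish          SAS_TASK_UNDELIVERED   SAM_STAT_TASK_ABORTED
 *	abandon-class OPEN_REJECT      SAS_TASK_UNDELIVERED   SAS_OPEN_REJECT
 *
 * with SAS_DEVICE_UNKNOWN substituted when the isci_remote_device
 * reference is gone, and IREQ_COMPLETE_IN_TARGET recording whether a
 * task may still be outstanding in the target.
 */
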
static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (void *)&ts->buf[0];

	resp->frame_len = sizeof(*fis);
	memcpy(resp->ending_fis, fis, sizeof(*fis));
	ts->buf_valid_size = sizeof(*resp);

	/* If the device fault bit is set in the status register, then
	 * set the sense data and return.
	 */
	if (fis->status & ATA_DF)
		ts->stat = SAS_PROTO_RESPONSE;
	else if (fis->status & ATA_ERR)
		ts->stat = SAM_STAT_CHECK_CONDITION;
	else
		ts->stat = SAM_STAT_GOOD;

	ts->resp = SAS_TASK_COMPLETE;
}

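/*
 * For reference: the ending FIS copied above is the 20-byte D2H Register
 * FIS, so frame_len is sizeof(struct dev_to_host_fis) and the whole
 * ata_task_resp wrapper fits inside the fixed task_status buffer that
 * libsas inspects when stat is SAS_PROTO_RESPONSE.
 */
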
static void isci_request_io_request_complete(struct isci_host *ihost,
					     struct isci_request *request,
					     enum sci_io_status completion_status)
{
	struct sas_task *task = isci_request_access_task(request);
	struct ssp_response_iu *resp_iu;
	unsigned long task_flags;
	struct isci_remote_device *idev = request->target_device;
	enum service_response response = SAS_TASK_UNDELIVERED;
	enum exec_status status = SAS_ABORTED_TASK;

	dev_dbg(&ihost->pdev->dev,
		"%s: request = %p, task = %p,\n"
		"task->data_dir = %d completion_status = 0x%x\n",
		__func__,
		request,
		task,
		task->data_dir,
		completion_status);

	/* The request is done from an SCU HW perspective. */

	/* This is an active request being completed from the core. */
	switch (completion_status) {

	case SCI_IO_FAILURE_RESPONSE_VALID:
		dev_dbg(&ihost->pdev->dev,
			"%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
			__func__, request, task);

		if (sas_protocol_ata(task->task_proto)) {
			isci_process_stp_response(task, &request->stp.rsp);
		} else if (SAS_PROTOCOL_SSP == task->task_proto) {

			/* crack the iu response buffer. */
			resp_iu = &request->ssp.rsp;
			isci_request_process_response_iu(task, resp_iu,
							 &ihost->pdev->dev);

		} else if (SAS_PROTOCOL_SMP == task->task_proto) {

			dev_err(&ihost->pdev->dev,
				"%s: SCI_IO_FAILURE_RESPONSE_VALID: "
				"SAS_PROTOCOL_SMP protocol\n",
				__func__);

		} else
			dev_err(&ihost->pdev->dev,
				"%s: unknown protocol\n", __func__);

		/* use the task status set in the task struct by the
		 * isci_request_process_response_iu call.
		 */
		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
		response = task->task_status.resp;
		status = task->task_status.stat;
		break;

	case SCI_IO_SUCCESS:
	case SCI_IO_SUCCESS_IO_DONE_EARLY:

		response = SAS_TASK_COMPLETE;
		status = SAM_STAT_GOOD;
		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);

		if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) {

			/* This was an SSP / STP / SATA transfer.
			 * There is a possibility that less data than
			 * the maximum was transferred.
			 */
			u32 transferred_length = sci_req_tx_bytes(request);

			task->task_status.residual
				= task->total_xfer_len - transferred_length;

			/* If there were residual bytes, call this an
			 * underrun.
			 */
			if (task->task_status.residual != 0)
				status = SAS_DATA_UNDERRUN;

			dev_dbg(&ihost->pdev->dev,
				"%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
				__func__, status);

		} else
			dev_dbg(&ihost->pdev->dev, "%s: SCI_IO_SUCCESS\n",
				__func__);
		break;

	case SCI_IO_FAILURE_TERMINATED:

		dev_dbg(&ihost->pdev->dev,
			"%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
			__func__, request, task);

		/* The request was terminated explicitly. */
		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
		response = SAS_TASK_UNDELIVERED;

		/* See if the device has been/is being stopped. Note
		 * that we ignore the quiesce state, since we are
		 * concerned about the actual device state.
		 */
		if (!idev)
			status = SAS_DEVICE_UNKNOWN;
		else
			status = SAS_ABORTED_TASK;
		break;

	case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:

		isci_request_handle_controller_specific_errors(idev, request,
							       task, &response,
							       &status);
		break;

	case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
		/* This is a special case, in that the I/O completion
		 * is telling us that the device needs a reset.
		 * In order for the device reset condition to be
		 * noticed, the I/O has to be handled in the error
		 * handler.  Set the reset flag and cause the
		 * SCSI error thread to be scheduled.
		 */
		spin_lock_irqsave(&task->task_state_lock, task_flags);
		task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
		spin_unlock_irqrestore(&task->task_state_lock, task_flags);

		/* Fail the I/O. */
		response = SAS_TASK_UNDELIVERED;
		status = SAM_STAT_TASK_ABORTED;

		clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
		break;

	case SCI_FAILURE_RETRY_REQUIRED:

		/* Fail the I/O so it can be retried. */
		response = SAS_TASK_UNDELIVERED;
		if (!idev)
			status = SAS_DEVICE_UNKNOWN;
		else
			status = SAS_ABORTED_TASK;

		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
		break;

	default:
		/* Catch any otherwise unhandled error codes here. */
		dev_dbg(&ihost->pdev->dev,
			"%s: invalid completion code: 0x%x - "
			"isci_request = %p\n",
			__func__, completion_status, request);

		response = SAS_TASK_UNDELIVERED;

		/* See if the device has been/is being stopped. Note
		 * that we ignore the quiesce state, since we are
		 * concerned about the actual device state.
		 */
		if (!idev)
			status = SAS_DEVICE_UNKNOWN;
		else
			status = SAS_ABORTED_TASK;

		if (SAS_PROTOCOL_SMP == task->task_proto)
			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
		else
			clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
		break;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		if (task->data_dir == DMA_NONE)
			break;
		if (task->num_scatter == 0)
			/* 0 indicates a single dma address */
			dma_unmap_single(&ihost->pdev->dev,
					 request->zero_scatter_daddr,
					 task->total_xfer_len, task->data_dir);
		else /* unmap the sgl dma addresses */
			dma_unmap_sg(&ihost->pdev->dev, task->scatter,
				     request->num_sg_entries, task->data_dir);
		break;
	case SAS_PROTOCOL_SMP: {
		struct scatterlist *sg = &task->smp_task.smp_req;
		struct smp_req *smp_req;
		void *kaddr;

		dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE);

		/* need to swab it back in case the command buffer is re-used */
		kaddr = kmap_atomic(sg_page(sg));
		smp_req = kaddr + sg->offset;
		sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
		kunmap_atomic(kaddr);
		break;
	}
	default:
		break;
	}

	spin_lock_irqsave(&task->task_state_lock, task_flags);

	task->task_status.resp = response;
	task->task_status.stat = status;

	if (test_bit(IREQ_COMPLETE_IN_TARGET, &request->flags)) {
		/* Normal notification (task_done) */
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
					    SAS_TASK_STATE_PENDING);
	}
	spin_unlock_irqrestore(&task->task_state_lock, task_flags);

	/* Add to the completed list. */
	list_add(&request->completed_node, &ihost->requests_to_complete);

	/* complete the io request to the core. */
	sci_controller_complete_io(ihost, request->target_device, request);

	/* set terminated handle so it cannot be completed or
	 * terminated again, and to cause any calls into abort
	 * task to recognize the already completed case.
	 */
	set_bit(IREQ_TERMINATED, &request->flags);
}

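/*
 * Worked example for the SCI_IO_SUCCESS_IO_DONE_EARLY path above: a
 * request with total_xfer_len = 65536 that actually moved 61440 bytes
 * reports residual = 65536 - 61440 = 4096 and completes with
 * SAS_DATA_UNDERRUN rather than SAM_STAT_GOOD.
 */
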
static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
	struct domain_device *dev = ireq->target_device->domain_dev;
	enum sci_base_request_states state;
	struct sas_task *task;

	/* XXX as hch said always creating an internal sas_task for tmf
	 * requests would simplify the driver
	 */
	task = (test_bit(IREQ_TMF, &ireq->flags)) ? NULL : isci_request_access_task(ireq);

	/* all unaccelerated request types (non ssp or ncq) handled with
	 * substates
	 */
	if (!task && dev->dev_type == SAS_END_DEV) {
		state = SCI_REQ_TASK_WAIT_TC_COMP;
	} else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
		state = SCI_REQ_SMP_WAIT_RESP;
	} else if (task && sas_protocol_ata(task->task_proto) &&
		   !task->ata_task.use_ncq) {
		if (dev->sata_dev.command_set == ATAPI_COMMAND_SET &&
		    task->ata_task.fis.command == ATA_CMD_PACKET) {
			state = SCI_REQ_ATAPI_WAIT_H2D;
		} else if (task->data_dir == DMA_NONE) {
			state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
		} else if (task->ata_task.dma_xfer) {
			state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
		} else /* PIO */ {
			state = SCI_REQ_STP_PIO_WAIT_H2D;
		}
	} else {
		/* SSP or NCQ are fully accelerated, no substates */
		return;
	}
	sci_change_state(sm, state);
}

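/*
 * Start-state selection at a glance (a summary of the ladder above):
 *
 *	TMF (no sas_task) to an end device -> SCI_REQ_TASK_WAIT_TC_COMP
 *	SMP                                -> SCI_REQ_SMP_WAIT_RESP
 *	ATAPI packet command               -> SCI_REQ_ATAPI_WAIT_H2D
 *	STP non-data                       -> SCI_REQ_STP_NON_DATA_WAIT_H2D
 *	STP DMA (non-NCQ)                  -> SCI_REQ_STP_UDMA_WAIT_TC_COMP
 *	STP PIO                            -> SCI_REQ_STP_PIO_WAIT_H2D
 *	SSP and NCQ                        -> fully accelerated, no substate
 */
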
static void sci_request_completed_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
	struct isci_host *ihost = ireq->owning_controller;

	/* Tell the SCI_USER that the IO request is complete */
	if (!test_bit(IREQ_TMF, &ireq->flags))
		isci_request_io_request_complete(ihost, ireq,
						 ireq->sci_status);
	else
		isci_task_request_complete(ihost, ireq, ireq->sci_status);
}

static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);

	/* Setting the abort bit in the Task Context is required by the silicon. */
	ireq->tc->abort = 1;
}

static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
{
	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);

	ireq->target_device->working_request = ireq;
}

static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
{
	struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);

	ireq->target_device->working_request = ireq;
}

static const struct sci_base_state sci_request_state_table[] = {
	[SCI_REQ_INIT] = { },
	[SCI_REQ_CONSTRUCTED] = { },
	[SCI_REQ_STARTED] = {
		.enter_state = sci_request_started_state_enter,
	},
	[SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
		.enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter,
	},
	[SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
	[SCI_REQ_STP_PIO_WAIT_H2D] = {
		.enter_state = sci_stp_request_started_pio_await_h2d_completion_enter,
	},
	[SCI_REQ_STP_PIO_WAIT_FRAME] = { },
	[SCI_REQ_STP_PIO_DATA_IN] = { },
	[SCI_REQ_STP_PIO_DATA_OUT] = { },
	[SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
	[SCI_REQ_STP_UDMA_WAIT_D2H] = { },
	[SCI_REQ_TASK_WAIT_TC_COMP] = { },
	[SCI_REQ_TASK_WAIT_TC_RESP] = { },
	[SCI_REQ_SMP_WAIT_RESP] = { },
	[SCI_REQ_SMP_WAIT_TC_COMP] = { },
	[SCI_REQ_ATAPI_WAIT_H2D] = { },
	[SCI_REQ_ATAPI_WAIT_PIO_SETUP] = { },
	[SCI_REQ_ATAPI_WAIT_D2H] = { },
	[SCI_REQ_ATAPI_WAIT_TC_COMP] = { },
	[SCI_REQ_COMPLETED] = {
		.enter_state = sci_request_completed_state_enter,
	},
	[SCI_REQ_ABORTING] = {
		.enter_state = sci_request_aborting_state_enter,
	},
	[SCI_REQ_FINAL] = { },
};

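/*
 * A minimal sketch of how a table like sci_request_state_table is
 * typically consumed by the state-machine core; the field names here
 * (state_table, current_state_id, previous_state_id) are illustrative
 * assumptions, not a copy of the driver's actual sci_change_state():
 *
 *	void change_state_sketch(struct sci_base_state_machine *sm, u32 next)
 *	{
 *		const struct sci_base_state *entry;
 *
 *		sm->previous_state_id = sm->current_state_id;
 *		sm->current_state_id = next;
 *		entry = &sm->state_table[next];
 *		if (entry->enter_state)
 *			entry->enter_state(sm);
 *	}
 *
 * which is why only the states that need side effects on entry (STARTED,
 * the STP H2D waits, COMPLETED, ABORTING) define .enter_state above.
 */
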
static void
sci_general_request_construct(struct isci_host *ihost,
			      struct isci_remote_device *idev,
			      struct isci_request *ireq)
{
	sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);

	ireq->target_device = idev;
	ireq->protocol = SAS_PROTOCOL_NONE;
	ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;

	ireq->sci_status = SCI_SUCCESS;
	ireq->scu_status = 0;
	ireq->post_context = 0xFFFFFFFF;
}

static enum sci_status
sci_io_request_construct(struct isci_host *ihost,
			 struct isci_remote_device *idev,
			 struct isci_request *ireq)
{
	struct domain_device *dev = idev->domain_dev;
	enum sci_status status = SCI_SUCCESS;

	/* Build the common part of the request */
	sci_general_request_construct(ihost, idev, ireq);

	if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
		return SCI_FAILURE_INVALID_REMOTE_DEVICE;

	if (dev->dev_type == SAS_END_DEV)
		/* pass */;
	else if (dev_is_sata(dev))
		memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
	else if (dev_is_expander(dev))
		/* pass */;
	else
		return SCI_FAILURE_UNSUPPORTED_PROTOCOL;

	memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab));

	return status;
}

enum sci_status sci_task_request_construct(struct isci_host *ihost,
					   struct isci_remote_device *idev,
					   u16 io_tag, struct isci_request *ireq)
{
	struct domain_device *dev = idev->domain_dev;
	enum sci_status status = SCI_SUCCESS;

	/* Build the common part of the request */
	sci_general_request_construct(ihost, idev, ireq);

	if (dev->dev_type == SAS_END_DEV || dev_is_sata(dev)) {
		set_bit(IREQ_TMF, &ireq->flags);
		memset(ireq->tc, 0, sizeof(struct scu_task_context));

		/* Set the protocol indicator. */
		if (dev_is_sata(dev))
			ireq->protocol = SAS_PROTOCOL_STP;
		else
			ireq->protocol = SAS_PROTOCOL_SSP;
	} else
		status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;

	return status;
}

static enum sci_status isci_request_ssp_request_construct(
	struct isci_request *request)
{
	enum sci_status status;

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: request = %p\n",
		__func__,
		request);
	status = sci_io_request_construct_basic_ssp(request);
	return status;
}

static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq)
{
	struct sas_task *task = isci_request_access_task(ireq);
	struct host_to_dev_fis *fis = &ireq->stp.cmd;
	struct ata_queued_cmd *qc = task->uldd_task;
	enum sci_status status;

	dev_dbg(&ireq->isci_host->pdev->dev,
		"%s: ireq = %p\n",
		__func__,
		ireq);

	memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
	if (!task->ata_task.device_control_reg_update)
		fis->flags |= 0x80;
	fis->flags &= 0xF0;

	status = sci_io_request_construct_basic_sata(ireq);

	if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
		   qc->tf.command == ATA_CMD_FPDMA_READ)) {
		fis->sector_count = qc->tag << 3;
		ireq->tc->type.stp.ncq_tag = qc->tag;
	}

	return status;
}

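/*
 * Worked example for the FPDMA (NCQ) fixup above: the 5-bit queue tag
 * occupies bits 7:3 of the FIS count field, so a command with qc->tag = 5
 * is sent with fis->sector_count = 5 << 3 = 0x28, and the same tag is
 * programmed into the task context's ncq_tag field.
 */
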
3186 | static enum sci_status | 3189 | static enum sci_status |
3187 | sci_io_request_construct_smp(struct device *dev, | 3190 | sci_io_request_construct_smp(struct device *dev, |
3188 | struct isci_request *ireq, | 3191 | struct isci_request *ireq, |
3189 | struct sas_task *task) | 3192 | struct sas_task *task) |
3190 | { | 3193 | { |
3191 | struct scatterlist *sg = &task->smp_task.smp_req; | 3194 | struct scatterlist *sg = &task->smp_task.smp_req; |
3192 | struct isci_remote_device *idev; | 3195 | struct isci_remote_device *idev; |
3193 | struct scu_task_context *task_context; | 3196 | struct scu_task_context *task_context; |
3194 | struct isci_port *iport; | 3197 | struct isci_port *iport; |
3195 | struct smp_req *smp_req; | 3198 | struct smp_req *smp_req; |
3196 | void *kaddr; | 3199 | void *kaddr; |
3197 | u8 req_len; | 3200 | u8 req_len; |
3198 | u32 cmd; | 3201 | u32 cmd; |
3199 | 3202 | ||
3200 | kaddr = kmap_atomic(sg_page(sg)); | 3203 | kaddr = kmap_atomic(sg_page(sg)); |
3201 | smp_req = kaddr + sg->offset; | 3204 | smp_req = kaddr + sg->offset; |
3202 | /* | 3205 | /* |
3203 | * Look at the SMP requests' header fields; for certain SAS 1.x SMP | 3206 | * Look at the SMP requests' header fields; for certain SAS 1.x SMP |
3204 | * functions under SAS 2.0, a zero request length really indicates | 3207 | * functions under SAS 2.0, a zero request length really indicates |
3205 | * a non-zero default length. | 3208 | * a non-zero default length. |
3206 | */ | 3209 | */ |
3207 | if (smp_req->req_len == 0) { | 3210 | if (smp_req->req_len == 0) { |
3208 | switch (smp_req->func) { | 3211 | switch (smp_req->func) { |
3209 | case SMP_DISCOVER: | 3212 | case SMP_DISCOVER: |
3210 | case SMP_REPORT_PHY_ERR_LOG: | 3213 | case SMP_REPORT_PHY_ERR_LOG: |
3211 | case SMP_REPORT_PHY_SATA: | 3214 | case SMP_REPORT_PHY_SATA: |
3212 | case SMP_REPORT_ROUTE_INFO: | 3215 | case SMP_REPORT_ROUTE_INFO: |
3213 | smp_req->req_len = 2; | 3216 | smp_req->req_len = 2; |
3214 | break; | 3217 | break; |
3215 | case SMP_CONF_ROUTE_INFO: | 3218 | case SMP_CONF_ROUTE_INFO: |
3216 | case SMP_PHY_CONTROL: | 3219 | case SMP_PHY_CONTROL: |
3217 | case SMP_PHY_TEST_FUNCTION: | 3220 | case SMP_PHY_TEST_FUNCTION: |
3218 | smp_req->req_len = 9; | 3221 | smp_req->req_len = 9; |
3219 | break; | 3222 | break; |
3220 | /* Default - zero is a valid default for 2.0. */ | 3223 | /* Default - zero is a valid default for 2.0. */ |
3221 | } | 3224 | } |
3222 | } | 3225 | } |
3223 | req_len = smp_req->req_len; | 3226 | req_len = smp_req->req_len; |
3224 | sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); | 3227 | sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); |
3225 | cmd = *(u32 *) smp_req; | 3228 | cmd = *(u32 *) smp_req; |
3226 | kunmap_atomic(kaddr); | 3229 | kunmap_atomic(kaddr); |
3227 | 3230 | ||
3228 | if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE)) | 3231 | if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE)) |
3229 | return SCI_FAILURE; | 3232 | return SCI_FAILURE; |
3230 | 3233 | ||
3231 | ireq->protocol = SAS_PROTOCOL_SMP; | 3234 | ireq->protocol = SAS_PROTOCOL_SMP; |
3232 | 3235 | ||
3233 | /* byte swap the smp request. */ | 3236 | /* byte swap the smp request. */ |
3234 | 3237 | ||
3235 | task_context = ireq->tc; | 3238 | task_context = ireq->tc; |
3236 | 3239 | ||
3237 | idev = ireq->target_device; | 3240 | idev = ireq->target_device; |
3238 | iport = idev->owning_port; | 3241 | iport = idev->owning_port; |
3239 | 3242 | ||
3240 | /* | 3243 | /* |
3241 | * Fill in the TC with the its required data | 3244 | * Fill in the TC with the its required data |
3242 | * 00h | 3245 | * 00h |
3243 | */ | 3246 | */ |
3244 | task_context->priority = 0; | 3247 | task_context->priority = 0; |
3245 | task_context->initiator_request = 1; | 3248 | task_context->initiator_request = 1; |
3246 | task_context->connection_rate = idev->connection_rate; | 3249 | task_context->connection_rate = idev->connection_rate; |
3247 | task_context->protocol_engine_index = ISCI_PEG; | 3250 | task_context->protocol_engine_index = ISCI_PEG; |
3248 | task_context->logical_port_index = iport->physical_port_index; | 3251 | task_context->logical_port_index = iport->physical_port_index; |
3249 | task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP; | 3252 | task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP; |
3250 | task_context->abort = 0; | 3253 | task_context->abort = 0; |
3251 | task_context->valid = SCU_TASK_CONTEXT_VALID; | 3254 | task_context->valid = SCU_TASK_CONTEXT_VALID; |
3252 | task_context->context_type = SCU_TASK_CONTEXT_TYPE; | 3255 | task_context->context_type = SCU_TASK_CONTEXT_TYPE; |
3253 | 3256 | ||
3254 | /* 04h */ | 3257 | /* 04h */ |
3255 | task_context->remote_node_index = idev->rnc.remote_node_index; | 3258 | task_context->remote_node_index = idev->rnc.remote_node_index; |
3256 | task_context->command_code = 0; | 3259 | task_context->command_code = 0; |
3257 | task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST; | 3260 | task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST; |
3258 | 3261 | ||
3259 | /* 08h */ | 3262 | /* 08h */ |
3260 | task_context->link_layer_control = 0; | 3263 | task_context->link_layer_control = 0; |
3261 | task_context->do_not_dma_ssp_good_response = 1; | 3264 | task_context->do_not_dma_ssp_good_response = 1; |
3262 | task_context->strict_ordering = 0; | 3265 | task_context->strict_ordering = 0; |
3263 | task_context->control_frame = 1; | 3266 | task_context->control_frame = 1; |
3264 | task_context->timeout_enable = 0; | 3267 | task_context->timeout_enable = 0; |
3265 | task_context->block_guard_enable = 0; | 3268 | task_context->block_guard_enable = 0; |
3266 | 3269 | ||
3267 | /* 0ch */ | 3270 | /* 0ch */ |
3268 | task_context->address_modifier = 0; | 3271 | task_context->address_modifier = 0; |
3269 | 3272 | ||
3270 | /* 10h */ | 3273 | /* 10h */ |
3271 | task_context->ssp_command_iu_length = req_len; | 3274 | task_context->ssp_command_iu_length = req_len; |
3272 | 3275 | ||
3273 | /* 14h */ | 3276 | /* 14h */ |
3274 | task_context->transfer_length_bytes = 0; | 3277 | task_context->transfer_length_bytes = 0; |
3275 | 3278 | ||
3276 | /* | 3279 | /* |
3277 | * 18h ~ 30h, protocol specific | 3280 | * 18h ~ 30h, protocol specific |
3278 | * since the command IU has been built by the framework at this point, we | 3281 | * since the command IU has been built by the framework at this point, we |
3279 | * just copy the first DWord from the command IU to this location. */ | 3282 | * just copy the first DWord from the command IU to this location. */ |
3280 | memcpy(&task_context->type.smp, &cmd, sizeof(u32)); | 3283 | memcpy(&task_context->type.smp, &cmd, sizeof(u32)); |
3281 | 3284 | ||
3282 | /* | 3285 | /* |
3283 | * 40h | 3286 | * 40h |
3284 | * "For SMP you could program it to zero. We would prefer that way | 3287 | * "For SMP you could program it to zero. We would prefer that way |
3285 | * so that done code will be consistent." - Venki | 3288 | * so that done code will be consistent." - Venki |
3286 | */ | 3289 | */ |
3287 | task_context->task_phase = 0; | 3290 | task_context->task_phase = 0; |
3288 | 3291 | ||
3289 | ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | | 3292 | ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | |
3290 | (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | | 3293 | (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | |
3291 | (iport->physical_port_index << | 3294 | (iport->physical_port_index << |
3292 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | | 3295 | SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | |
3293 | ISCI_TAG_TCI(ireq->io_tag)); | 3296 | ISCI_TAG_TCI(ireq->io_tag)); |
3294 | /* | 3297 | /* |
3295 | * Copy the physical address of the command buffer to the SCU Task | 3298 | * Copy the physical address of the command buffer to the SCU Task |
3296 | * Context; the command buffer should not contain the command header. | 3299 | * Context; the command buffer should not contain the command header. |
3297 | */ | 3300 | */ |
3298 | task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg)); | 3301 | task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg)); |
3299 | task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32)); | 3302 | task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32)); |
3300 | 3303 | ||
3301 | /* SMP response comes as UF, so no need to set response IU address. */ | 3304 | /* SMP response comes as UF, so no need to set response IU address. */ |
3302 | task_context->response_iu_upper = 0; | 3305 | task_context->response_iu_upper = 0; |
3303 | task_context->response_iu_lower = 0; | 3306 | task_context->response_iu_lower = 0; |
3304 | 3307 | ||
3305 | sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); | 3308 | sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); |
3306 | 3309 | ||
3307 | return SCI_SUCCESS; | 3310 | return SCI_SUCCESS; |
3308 | } | 3311 | } |
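
The post_context dword built near the end of this function packs the protocol engine group, the logical port, and the task context index into a single 32-bit post command. Below is a minimal user-space sketch of that packing; the shift widths and TCI mask are illustrative assumptions, not the SCU's actual register layout.

/* Sketch of the post_context packing; field positions are assumed. */
#include <stdint.h>
#include <stdio.h>

#define POST_TC            (1u << 28) /* assumed "post task context" request type */
#define PEG_SHIFT          26         /* assumed protocol engine group shift */
#define LOGICAL_PORT_SHIFT 24         /* assumed logical port shift */
#define TCI_MASK           0xfffu     /* assumed task context index mask */

static uint32_t make_post_context(uint32_t peg, uint32_t port, uint16_t io_tag)
{
	/* route the command to one engine group and logical port,
	 * and name the task context slot being posted */
	return POST_TC |
	       (peg << PEG_SHIFT) |
	       (port << LOGICAL_PORT_SHIFT) |
	       ((uint32_t)io_tag & TCI_MASK);
}

int main(void)
{
	/* engine group 0, physical port 1, tag 0x020a */
	printf("post_context = 0x%08x\n", make_post_context(0, 1, 0x020a));
	return 0;
}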
3309 | 3312 | ||
3310 | /* | 3313 | /* |
3311 | * isci_smp_request_build() - This function builds the SMP request. | 3314 | * isci_smp_request_build() - This function builds the SMP request. |
3312 | * @ireq: This parameter points to the isci_request allocated in the | 3315 | * @ireq: This parameter points to the isci_request allocated in the |
3313 | * request construct function. | 3316 | * request construct function. |
3314 | * | 3317 | * |
3315 | * SCI_SUCCESS on successful completion, or a specific failure code. | 3318 | * SCI_SUCCESS on successful completion, or a specific failure code. |
3316 | */ | 3319 | */ |
3317 | static enum sci_status isci_smp_request_build(struct isci_request *ireq) | 3320 | static enum sci_status isci_smp_request_build(struct isci_request *ireq) |
3318 | { | 3321 | { |
3319 | struct sas_task *task = isci_request_access_task(ireq); | 3322 | struct sas_task *task = isci_request_access_task(ireq); |
3320 | struct device *dev = &ireq->isci_host->pdev->dev; | 3323 | struct device *dev = &ireq->isci_host->pdev->dev; |
3321 | enum sci_status status = SCI_FAILURE; | 3324 | enum sci_status status = SCI_FAILURE; |
3322 | 3325 | ||
3323 | status = sci_io_request_construct_smp(dev, ireq, task); | 3326 | status = sci_io_request_construct_smp(dev, ireq, task); |
3324 | if (status != SCI_SUCCESS) | 3327 | if (status != SCI_SUCCESS) |
3325 | dev_dbg(&ireq->isci_host->pdev->dev, | 3328 | dev_dbg(&ireq->isci_host->pdev->dev, |
3326 | "%s: failed with status = %d\n", | 3329 | "%s: failed with status = %d\n", |
3327 | __func__, | 3330 | __func__, |
3328 | status); | 3331 | status); |
3329 | 3332 | ||
3330 | return status; | 3333 | return status; |
3331 | } | 3334 | } |
3332 | 3335 | ||
3333 | /** | 3336 | /** |
3334 | * isci_io_request_build() - This function builds the io request object. | 3337 | * isci_io_request_build() - This function builds the io request object. |
3335 | * @ihost: This parameter specifies the ISCI host object | 3338 | * @ihost: This parameter specifies the ISCI host object |
3336 | * @request: This parameter points to the isci_request object allocated in the | 3339 | * @request: This parameter points to the isci_request object allocated in the |
3337 | * request construct function. | 3340 | * request construct function. |
3338 | * @idev: This parameter is the handle for the sci core's remote device | 3341 | * @idev: This parameter is the handle for the sci core's remote device |
3339 | * object that is the destination for this request. | 3342 | * object that is the destination for this request. |
3340 | * | 3343 | * |
3341 | * SCI_SUCCESS on successful completion, or a specific failure code. | 3344 | * SCI_SUCCESS on successful completion, or a specific failure code. |
3342 | */ | 3345 | */ |
3343 | static enum sci_status isci_io_request_build(struct isci_host *ihost, | 3346 | static enum sci_status isci_io_request_build(struct isci_host *ihost, |
3344 | struct isci_request *request, | 3347 | struct isci_request *request, |
3345 | struct isci_remote_device *idev) | 3348 | struct isci_remote_device *idev) |
3346 | { | 3349 | { |
3347 | enum sci_status status = SCI_SUCCESS; | 3350 | enum sci_status status = SCI_SUCCESS; |
3348 | struct sas_task *task = isci_request_access_task(request); | 3351 | struct sas_task *task = isci_request_access_task(request); |
3349 | 3352 | ||
3350 | dev_dbg(&ihost->pdev->dev, | 3353 | dev_dbg(&ihost->pdev->dev, |
3351 | "%s: idev = 0x%p; request = %p, " | 3354 | "%s: idev = 0x%p; request = %p, " |
3352 | "num_scatter = %d\n", | 3355 | "num_scatter = %d\n", |
3353 | __func__, | 3356 | __func__, |
3354 | idev, | 3357 | idev, |
3355 | request, | 3358 | request, |
3356 | task->num_scatter); | 3359 | task->num_scatter); |
3357 | 3360 | ||
3358 | /* map the sgl addresses, if present. | 3361 | /* map the sgl addresses, if present. |
3359 | * libata does the mapping for sata devices | 3362 | * libata does the mapping for sata devices |
3360 | * before we get the request. | 3363 | * before we get the request. |
3361 | */ | 3364 | */ |
3362 | if (task->num_scatter && | 3365 | if (task->num_scatter && |
3363 | !sas_protocol_ata(task->task_proto) && | 3366 | !sas_protocol_ata(task->task_proto) && |
3364 | !(SAS_PROTOCOL_SMP & task->task_proto)) { | 3367 | !(SAS_PROTOCOL_SMP & task->task_proto)) { |
3365 | 3368 | ||
3366 | request->num_sg_entries = dma_map_sg( | 3369 | request->num_sg_entries = dma_map_sg( |
3367 | &ihost->pdev->dev, | 3370 | &ihost->pdev->dev, |
3368 | task->scatter, | 3371 | task->scatter, |
3369 | task->num_scatter, | 3372 | task->num_scatter, |
3370 | task->data_dir | 3373 | task->data_dir |
3371 | ); | 3374 | ); |
3372 | 3375 | ||
3373 | if (request->num_sg_entries == 0) | 3376 | if (request->num_sg_entries == 0) |
3374 | return SCI_FAILURE_INSUFFICIENT_RESOURCES; | 3377 | return SCI_FAILURE_INSUFFICIENT_RESOURCES; |
3375 | } | 3378 | } |
3376 | 3379 | ||
3377 | status = sci_io_request_construct(ihost, idev, request); | 3380 | status = sci_io_request_construct(ihost, idev, request); |
3378 | 3381 | ||
3379 | if (status != SCI_SUCCESS) { | 3382 | if (status != SCI_SUCCESS) { |
3380 | dev_dbg(&ihost->pdev->dev, | 3383 | dev_dbg(&ihost->pdev->dev, |
3381 | "%s: failed request construct\n", | 3384 | "%s: failed request construct\n", |
3382 | __func__); | 3385 | __func__); |
3383 | return SCI_FAILURE; | 3386 | return SCI_FAILURE; |
3384 | } | 3387 | } |
3385 | 3388 | ||
3386 | switch (task->task_proto) { | 3389 | switch (task->task_proto) { |
3387 | case SAS_PROTOCOL_SMP: | 3390 | case SAS_PROTOCOL_SMP: |
3388 | status = isci_smp_request_build(request); | 3391 | status = isci_smp_request_build(request); |
3389 | break; | 3392 | break; |
3390 | case SAS_PROTOCOL_SSP: | 3393 | case SAS_PROTOCOL_SSP: |
3391 | status = isci_request_ssp_request_construct(request); | 3394 | status = isci_request_ssp_request_construct(request); |
3392 | break; | 3395 | break; |
3393 | case SAS_PROTOCOL_SATA: | 3396 | case SAS_PROTOCOL_SATA: |
3394 | case SAS_PROTOCOL_STP: | 3397 | case SAS_PROTOCOL_STP: |
3395 | case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: | 3398 | case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: |
3396 | status = isci_request_stp_request_construct(request); | 3399 | status = isci_request_stp_request_construct(request); |
3397 | break; | 3400 | break; |
3398 | default: | 3401 | default: |
3399 | dev_dbg(&ihost->pdev->dev, | 3402 | dev_dbg(&ihost->pdev->dev, |
3400 | "%s: unknown protocol\n", __func__); | 3403 | "%s: unknown protocol\n", __func__); |
3401 | return SCI_FAILURE; | 3404 | return SCI_FAILURE; |
3402 | } | 3405 | } |
3403 | 3406 | ||
3404 | return SCI_SUCCESS; | 3407 | return SCI_SUCCESS; |
3405 | } | 3408 | } |
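
The mapping guard at the top of isci_io_request_build() is easy to misread: the LLDD only DMA-maps scatterlists for SSP, because libata has already mapped SATA/STP payloads before the request arrives, and SMP requests are mapped inside sci_io_request_construct_smp(). A hedged user-space model of just that decision follows; the protocol bits are stand-ins, not the real libsas sas_protocol values.

/* User-space model of the mapping guard only; protocol bits are assumed. */
#include <stdbool.h>
#include <stdio.h>

#define PROTO_SATA 0x01u
#define PROTO_SMP  0x02u
#define PROTO_STP  0x04u
#define PROTO_SSP  0x08u

static bool lldd_must_map(unsigned int proto, unsigned int num_scatter)
{
	if (num_scatter == 0)
		return false;                 /* nothing to map */
	if (proto & (PROTO_SATA | PROTO_STP))
		return false;                 /* libata mapped it already */
	if (proto & PROTO_SMP)
		return false;                 /* mapped during SMP construction */
	return true;                          /* SSP: map it here */
}

int main(void)
{
	printf("SSP: %d\n", lldd_must_map(PROTO_SSP, 4));  /* 1 */
	printf("STP: %d\n", lldd_must_map(PROTO_STP, 4));  /* 0 */
	printf("SMP: %d\n", lldd_must_map(PROTO_SMP, 1));  /* 0 */
	return 0;
}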
3406 | 3409 | ||
3407 | static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag) | 3410 | static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag) |
3408 | { | 3411 | { |
3409 | struct isci_request *ireq; | 3412 | struct isci_request *ireq; |
3410 | 3413 | ||
3411 | ireq = ihost->reqs[ISCI_TAG_TCI(tag)]; | 3414 | ireq = ihost->reqs[ISCI_TAG_TCI(tag)]; |
3412 | ireq->io_tag = tag; | 3415 | ireq->io_tag = tag; |
3413 | ireq->io_request_completion = NULL; | 3416 | ireq->io_request_completion = NULL; |
3414 | ireq->flags = 0; | 3417 | ireq->flags = 0; |
3415 | ireq->num_sg_entries = 0; | 3418 | ireq->num_sg_entries = 0; |
3416 | INIT_LIST_HEAD(&ireq->completed_node); | 3419 | INIT_LIST_HEAD(&ireq->completed_node); |
3417 | 3420 | ||
3418 | return ireq; | 3421 | return ireq; |
3419 | } | 3422 | } |
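
isci_request_from_tag() shows the driver's O(1) allocation scheme: the low bits of the tag index a preallocated request pool, and the recycled slot is reinitialized on each use, so no allocation happens on the I/O path. A self-contained sketch of the same pattern; the pool size and tag mask are assumptions for illustration, not the driver's actual TCI width.

/* Model of tag-indexed request recycling; sizes are assumed. */
#include <stdint.h>
#include <stdio.h>

#define MAX_REQUESTS 256u                       /* assumed pool size (power of 2) */
#define TAG_TCI(tag) ((tag) & (MAX_REQUESTS - 1))

struct request {
	uint16_t io_tag;
	unsigned int num_sg_entries;
};

static struct request pool[MAX_REQUESTS];       /* preallocated, like ihost->reqs */

static struct request *request_from_tag(uint16_t tag)
{
	struct request *req = &pool[TAG_TCI(tag)];

	/* reinitialize the recycled slot on every use */
	req->io_tag = tag;
	req->num_sg_entries = 0;
	return req;
}

int main(void)
{
	struct request *req = request_from_tag(0x1105);

	printf("slot %u holds tag 0x%04x\n",
	       (unsigned int)TAG_TCI(req->io_tag), req->io_tag);
	return 0;
}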
3420 | 3423 | ||
3421 | static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost, | 3424 | static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost, |
3422 | struct sas_task *task, | 3425 | struct sas_task *task, |
3423 | u16 tag) | 3426 | u16 tag) |
3424 | { | 3427 | { |
3425 | struct isci_request *ireq; | 3428 | struct isci_request *ireq; |
3426 | 3429 | ||
3427 | ireq = isci_request_from_tag(ihost, tag); | 3430 | ireq = isci_request_from_tag(ihost, tag); |
3428 | ireq->ttype_ptr.io_task_ptr = task; | 3431 | ireq->ttype_ptr.io_task_ptr = task; |
3429 | clear_bit(IREQ_TMF, &ireq->flags); | 3432 | clear_bit(IREQ_TMF, &ireq->flags); |
3430 | task->lldd_task = ireq; | 3433 | task->lldd_task = ireq; |
3431 | 3434 | ||
3432 | return ireq; | 3435 | return ireq; |
3433 | } | 3436 | } |
3434 | 3437 | ||
3435 | struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost, | 3438 | struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost, |
3436 | struct isci_tmf *isci_tmf, | 3439 | struct isci_tmf *isci_tmf, |
3437 | u16 tag) | 3440 | u16 tag) |
3438 | { | 3441 | { |
3439 | struct isci_request *ireq; | 3442 | struct isci_request *ireq; |
3440 | 3443 | ||
3441 | ireq = isci_request_from_tag(ihost, tag); | 3444 | ireq = isci_request_from_tag(ihost, tag); |
3442 | ireq->ttype_ptr.tmf_task_ptr = isci_tmf; | 3445 | ireq->ttype_ptr.tmf_task_ptr = isci_tmf; |
3443 | set_bit(IREQ_TMF, &ireq->flags); | 3446 | set_bit(IREQ_TMF, &ireq->flags); |
3444 | 3447 | ||
3445 | return ireq; | 3448 | return ireq; |
3446 | } | 3449 | } |
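
The two constructors above differ only in which member of the ttype_ptr union they fill and whether the IREQ_TMF bit is set; consumers later check that flag to pick the valid union member. Here is a small illustrative model of the pattern, with names as stand-ins for ttype_ptr, IREQ_TMF, and the task types.

/* Illustrative flag-discriminated union; names are stand-ins. */
#include <stdio.h>

#define REQ_TMF (1ul << 0)

struct io_task  { const char *name; };
struct tmf_task { const char *name; };

struct request {
	unsigned long flags;
	union {
		struct io_task  *io;   /* valid when REQ_TMF is clear */
		struct tmf_task *tmf;  /* valid when REQ_TMF is set */
	} ttype;
};

static void describe(const struct request *req)
{
	if (req->flags & REQ_TMF)
		printf("TMF request: %s\n", req->ttype.tmf->name);
	else
		printf("I/O request: %s\n", req->ttype.io->name);
}

int main(void)
{
	struct tmf_task tmf = { "lun reset" };
	struct io_task io = { "read(10)" };
	struct request a = { .flags = REQ_TMF, .ttype.tmf = &tmf };
	struct request b = { .flags = 0, .ttype.io = &io };

	describe(&a);
	describe(&b);
	return 0;
}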
3447 | 3450 | ||
3448 | int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, | 3451 | int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, |
3449 | struct sas_task *task, u16 tag) | 3452 | struct sas_task *task, u16 tag) |
3450 | { | 3453 | { |
3451 | enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; | 3454 | enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; |
3452 | struct isci_request *ireq; | 3455 | struct isci_request *ireq; |
3453 | unsigned long flags; | 3456 | unsigned long flags; |
3454 | int ret = 0; | 3457 | int ret = 0; |
3455 | 3458 | ||
3456 | /* do common allocation and init of request object. */ | 3459 | /* do common allocation and init of request object. */ |
3457 | ireq = isci_io_request_from_tag(ihost, task, tag); | 3460 | ireq = isci_io_request_from_tag(ihost, task, tag); |
3458 | 3461 | ||
3459 | status = isci_io_request_build(ihost, ireq, idev); | 3462 | status = isci_io_request_build(ihost, ireq, idev); |
3460 | if (status != SCI_SUCCESS) { | 3463 | if (status != SCI_SUCCESS) { |
3461 | dev_dbg(&ihost->pdev->dev, | 3464 | dev_dbg(&ihost->pdev->dev, |
3462 | "%s: request_construct failed - status = 0x%x\n", | 3465 | "%s: request_construct failed - status = 0x%x\n", |
3463 | __func__, | 3466 | __func__, |
3464 | status); | 3467 | status); |
3465 | return status; | 3468 | return status; |
3466 | } | 3469 | } |
3467 | 3470 | ||
3468 | spin_lock_irqsave(&ihost->scic_lock, flags); | 3471 | spin_lock_irqsave(&ihost->scic_lock, flags); |
3469 | 3472 | ||
3470 | if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) { | 3473 | if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) { |
3471 | 3474 | ||
3472 | if (isci_task_is_ncq_recovery(task)) { | 3475 | if (isci_task_is_ncq_recovery(task)) { |
3473 | 3476 | ||
3474 | /* The device is in an NCQ recovery state. Issue the | 3477 | /* The device is in an NCQ recovery state. Issue the |
3475 | * request on the task side. Note that it will | 3478 | * request on the task side. Note that it will |
3476 | * complete on the I/O request side because the | 3479 | * complete on the I/O request side because the |
3477 | * request was built that way (i.e. | 3480 | * request was built that way (i.e. |
3478 | * ireq->is_task_management_request is false). | 3481 | * ireq->is_task_management_request is false). |
3479 | */ | 3482 | */ |
3480 | status = sci_controller_start_task(ihost, | 3483 | status = sci_controller_start_task(ihost, |
3481 | idev, | 3484 | idev, |
3482 | ireq); | 3485 | ireq); |
3483 | } else { | 3486 | } else { |
3484 | status = SCI_FAILURE; | 3487 | status = SCI_FAILURE; |
3485 | } | 3488 | } |
3486 | } else { | 3489 | } else { |
3487 | /* send the request; the I/O tag was already assigned above. */ | 3490 | /* send the request; the I/O tag was already assigned above. */ |
3488 | status = sci_controller_start_io(ihost, idev, | 3491 | status = sci_controller_start_io(ihost, idev, |
3489 | ireq); | 3492 | ireq); |
3490 | } | 3493 | } |
3491 | 3494 | ||
3492 | if (status != SCI_SUCCESS && | 3495 | if (status != SCI_SUCCESS && |
3493 | status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { | 3496 | status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { |
3494 | dev_dbg(&ihost->pdev->dev, | 3497 | dev_dbg(&ihost->pdev->dev, |
3495 | "%s: failed request start (0x%x)\n", | 3498 | "%s: failed request start (0x%x)\n", |
3496 | __func__, status); | 3499 | __func__, status); |
3497 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 3500 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
3498 | return status; | 3501 | return status; |
3499 | } | 3502 | } |
3500 | /* Either I/O started OK, or the core has signaled that | 3503 | /* Either I/O started OK, or the core has signaled that |
3501 | * the device needs a target reset. | 3504 | * the device needs a target reset. |
3502 | */ | 3505 | */ |
3503 | if (status != SCI_SUCCESS) { | 3506 | if (status != SCI_SUCCESS) { |
3504 | /* The request did not really start in the | 3507 | /* The request did not really start in the |
3505 | * hardware, so clear the request handle | 3508 | * hardware, so clear the request handle |
3506 | * here so no terminations will be done. | 3509 | * here so no terminations will be done. |
3507 | */ | 3510 | */ |
3508 | set_bit(IREQ_TERMINATED, &ireq->flags); | 3511 | set_bit(IREQ_TERMINATED, &ireq->flags); |
3509 | } | 3512 | } |
3510 | spin_unlock_irqrestore(&ihost->scic_lock, flags); | 3513 | spin_unlock_irqrestore(&ihost->scic_lock, flags); |
3511 | 3514 | ||
3512 | if (status == | 3515 | if (status == |
3513 | SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { | 3516 | SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { |
3514 | /* Signal libsas that we need the SCSI error | 3517 | /* Signal libsas that we need the SCSI error |
3515 | * handler thread to work on this I/O and that | 3518 | * handler thread to work on this I/O and that |
3516 | * we want a device reset. | 3519 | * we want a device reset. |
3517 | */ | 3520 | */ |
3518 | spin_lock_irqsave(&task->task_state_lock, flags); | 3521 | spin_lock_irqsave(&task->task_state_lock, flags); |
3519 | task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; | 3522 | task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; |
3520 | spin_unlock_irqrestore(&task->task_state_lock, flags); | 3523 | spin_unlock_irqrestore(&task->task_state_lock, flags); |
3521 | 3524 | ||
3522 | /* Cause this task to be scheduled in the SCSI error | 3525 | /* Cause this task to be scheduled in the SCSI error |
3523 | * handler thread. | 3526 | * handler thread. |
3524 | */ | 3527 | */ |
3525 | sas_task_abort(task); | 3528 | sas_task_abort(task); |
3526 | 3529 | ||
3527 | /* Change the status, since we are holding | 3530 | /* Change the status, since we are holding |
3528 | * the I/O until it is managed by the SCSI | 3531 | * the I/O until it is managed by the SCSI |
3529 | * error handler. | 3532 | * error handler. |
3530 | */ | 3533 | */ |
3531 | status = SCI_SUCCESS; | 3534 | status = SCI_SUCCESS; |
3532 | } | 3535 | } |
3533 | 3536 | ||
3534 | return ret; | 3537 | return ret; |
3535 | } | 3538 | } |
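
isci_request_execute() treats SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED as a soft outcome: the request is marked terminated (it never reached the hardware), the task is flagged and queued to the SCSI error handler, and the status is rewritten to success because error handling now owns the I/O. A condensed user-space model of that decision flow; the status values are illustrative, not the sci_status codes.

/* Condensed model of the start-status handling; values are assumed. */
#include <stdio.h>

enum status { ST_SUCCESS, ST_RESET_REQUIRED, ST_FAILURE };

static enum status handle_start_status(enum status status)
{
	if (status != ST_SUCCESS && status != ST_RESET_REQUIRED)
		return status;                     /* hard failure: report as-is */

	if (status == ST_RESET_REQUIRED) {
		puts("mark request terminated");   /* never reached the hardware */
		puts("flag task: needs device reset");
		puts("queue task to the SCSI error handler");
		status = ST_SUCCESS;               /* the error handler owns it now */
	}
	return status;
}

int main(void)
{
	printf("reset-required path -> %d\n",
	       handle_start_status(ST_RESET_REQUIRED));
	printf("failure path        -> %d\n",
	       handle_start_status(ST_FAILURE));
	return 0;
}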
3536 | 3539 |