Commit 726980d56908f2e230624394f03743689db3110c

Authored by Jeff Skirvin
Committed by Dan Williams
1 parent ac78ed0f78

isci: Terminate outstanding TCs on TX/RX RNC suspensions.

TCs must only be terminated when RNCs are suspended.
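
The constraint is one of ordering: a task context (TC) terminate is posted only after the remote node context (RNC) has reported a TX or TX/RX suspension. As a minimal standalone sketch of that invariant (the state names and helpers below are hypothetical stand-ins, not the driver's actual API):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical RNC states, loosely mirroring the TX/RX suspension
 * events (SCU_EVENT_TYPE_RNC_SUSPEND_TX[_RX]) handled in host.c. */
enum rnc_state { RNC_READY, RNC_TX_SUSPENDED, RNC_TX_RX_SUSPENDED };

static bool rnc_is_suspended(enum rnc_state s)
{
        return s == RNC_TX_SUSPENDED || s == RNC_TX_RX_SUSPENDED;
}

/* Post a TC terminate only when the RNC is suspended; otherwise the
 * caller must wait for the suspension event and retry. */
static bool try_terminate_tc(enum rnc_state s, int io_tag)
{
        if (!rnc_is_suspended(s)) {
                printf("tag %d: RNC not suspended, deferring terminate\n", io_tag);
                return false;
        }
        printf("tag %d: posting TC terminate\n", io_tag);
        return true;
}

int main(void)
{
        try_terminate_tc(RNC_READY, 1);           /* deferred */
        try_terminate_tc(RNC_TX_RX_SUSPENDED, 1); /* terminate posted */
        return 0;
}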

Signed-off-by: Jeff Skirvin <jeffrey.d.skirvin@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>

Showing 7 changed files with 188 additions and 32 deletions

drivers/scsi/isci/host.c
/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/circ_buf.h>
#include <linux/device.h>
#include <scsi/sas.h>
#include "host.h"
#include "isci.h"
#include "port.h"
#include "probe_roms.h"
#include "remote_device.h"
#include "request.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "registers.h"
#include "scu_remote_node_context.h"
#include "scu_task_context.h"

#define SCU_CONTEXT_RAM_INIT_STALL_TIME      200

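/* The device context capacity register reports each limit as (count - 1);
 * hence the decoders below mask and shift out the field, then add one.
 */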
#define smu_max_ports(dcc_value) \
        (\
                (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
                 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
        )

#define smu_max_task_contexts(dcc_value)        \
        (\
                (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
                 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
        )

#define smu_max_rncs(dcc_value) \
        (\
                (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
                 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
        )

#define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT      100

/**
 *
 *
 * The number of milliseconds to wait while a given phy is consuming power
 * before allowing another set of phys to consume power. Ultimately, this will
 * be specified by OEM parameter.
 */
#define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500

/**
 * NORMALIZE_PUT_POINTER() -
 *
 * This macro will normalize the completion queue put pointer so its value can
 * be used as an array index.
 */
#define NORMALIZE_PUT_POINTER(x) \
        ((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)


/**
 * NORMALIZE_EVENT_POINTER() -
 *
 * This macro will normalize the completion queue event entry so its value can
 * be used as an index.
 */
#define NORMALIZE_EVENT_POINTER(x) \
        (\
                ((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
                >> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
        )

/**
 * NORMALIZE_GET_POINTER() -
 *
 * This macro will normalize the completion queue get pointer so its value can
 * be used as an index into an array.
 */
#define NORMALIZE_GET_POINTER(x) \
        ((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)

/**
 * NORMALIZE_GET_POINTER_CYCLE_BIT() -
 *
 * This macro will normalize the completion queue cycle pointer so it matches
 * the completion queue cycle bit.
 */
#define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
        ((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))

/**
 * COMPLETION_QUEUE_CYCLE_BIT() -
 *
 * This macro will return the cycle bit of the completion queue entry.
 */
#define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)

/* Init the state machine and call the state entry function (if any) */
void sci_init_sm(struct sci_base_state_machine *sm,
                 const struct sci_base_state *state_table, u32 initial_state)
{
        sci_state_transition_t handler;

        sm->initial_state_id    = initial_state;
        sm->previous_state_id   = initial_state;
        sm->current_state_id    = initial_state;
        sm->state_table         = state_table;

        handler = sm->state_table[initial_state].enter_state;
        if (handler)
                handler(sm);
}

/* Call the state exit fn, update the current state, call the state entry fn */
void sci_change_state(struct sci_base_state_machine *sm, u32 next_state)
{
        sci_state_transition_t handler;

        handler = sm->state_table[sm->current_state_id].exit_state;
        if (handler)
                handler(sm);

        sm->previous_state_id = sm->current_state_id;
        sm->current_state_id = next_state;

        handler = sm->state_table[sm->current_state_id].enter_state;
        if (handler)
                handler(sm);
}

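/* A completion queue entry at the get index is consumable only while its
 * cycle bit agrees with the cycle bit carried in the software get pointer
 * (see NORMALIZE_GET_POINTER_CYCLE_BIT above); at init every entry is
 * seeded with the opposite bit so the queue reads as empty.
 */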
static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost)
{
        u32 get_value = ihost->completion_queue_get;
        u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;

        if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
            COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]))
                return true;

        return false;
}

static bool sci_controller_isr(struct isci_host *ihost)
{
        if (sci_controller_completion_queue_has_entries(ihost))
                return true;

        /* we have a spurious interrupt; it could be that we have already
         * emptied the completion queue from a previous interrupt
         * FIXME: really!?
         */
        writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);

        /* There is a race in the hardware that could cause us not to be
         * notified of an interrupt completion if we do not take this
         * step.  We will mask then unmask the interrupts so if there is
         * another interrupt pending the clearing of the interrupt
         * source we get the next interrupt message.
         */
        spin_lock(&ihost->scic_lock);
        if (test_bit(IHOST_IRQ_ENABLED, &ihost->flags)) {
                writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
                writel(0, &ihost->smu_registers->interrupt_mask);
        }
        spin_unlock(&ihost->scic_lock);

        return false;
}

irqreturn_t isci_msix_isr(int vec, void *data)
{
        struct isci_host *ihost = data;

        if (sci_controller_isr(ihost))
                tasklet_schedule(&ihost->completion_tasklet);

        return IRQ_HANDLED;
}

static bool sci_controller_error_isr(struct isci_host *ihost)
{
        u32 interrupt_status;

        interrupt_status =
                readl(&ihost->smu_registers->interrupt_status);
        interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);

        if (interrupt_status != 0) {
                /*
                 * There is an error interrupt pending, so let it through and
                 * handle it in the callback */
                return true;
        }

        /*
         * There is a race in the hardware that could cause us not to be notified
         * of an interrupt completion if we do not take this step.  We will mask
         * then unmask the error interrupts so if there was another interrupt
         * pending we will be notified.
         * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */
        writel(0xff, &ihost->smu_registers->interrupt_mask);
        writel(0, &ihost->smu_registers->interrupt_mask);

        return false;
}

static void sci_controller_task_completion(struct isci_host *ihost, u32 ent)
{
        u32 index = SCU_GET_COMPLETION_INDEX(ent);
        struct isci_request *ireq = ihost->reqs[index];

        /* Make sure that we really want to process this IO request */
        if (test_bit(IREQ_ACTIVE, &ireq->flags) &&
            ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
            ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index])
                /* Yep, this is a valid io request; pass it along to the
                 * io request handler
                 */
                sci_io_request_tc_completion(ireq, ent);
}

static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent)
{
        u32 index;
        struct isci_request *ireq;
        struct isci_remote_device *idev;

        index = SCU_GET_COMPLETION_INDEX(ent);

        switch (scu_get_command_request_type(ent)) {
        case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
        case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
                ireq = ihost->reqs[index];
                dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n",
                         __func__, ent, ireq);
                /* @todo For a post TC operation we need to fail the IO
                 * request
                 */
                break;
        case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
        case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
        case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
                idev = ihost->device_table[index];
                dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n",
                         __func__, ent, idev);
                /* @todo For a port RNC operation we need to fail the
                 * device
                 */
                break;
        default:
                dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n",
                         __func__, ent);
                break;
        }
}

static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent)
{
        u32 index;
        u32 frame_index;

        struct scu_unsolicited_frame_header *frame_header;
        struct isci_phy *iphy;
        struct isci_remote_device *idev;

        enum sci_status result = SCI_FAILURE;

        frame_index = SCU_GET_FRAME_INDEX(ent);

        frame_header = ihost->uf_control.buffers.array[frame_index].header;
        ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;

        if (SCU_GET_FRAME_ERROR(ent)) {
                /*
                 * / @todo If the IAF frame or SIGNATURE FIS frame has an error, will
                 * / this cause a problem? We expect the phy initialization will
                 * / fail if there is an error in the frame. */
                sci_controller_release_frame(ihost, frame_index);
                return;
        }

        if (frame_header->is_address_frame) {
                index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
                iphy = &ihost->phys[index];
                result = sci_phy_frame_handler(iphy, frame_index);
        } else {

                index = SCU_GET_COMPLETION_INDEX(ent);

                if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
                        /*
                         * This is a signature FIS or a frame from a direct attached SATA
                         * device that has not yet been created. In either case forward
                         * the frame to the PE and let it take care of the frame data. */
                        index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
                        iphy = &ihost->phys[index];
                        result = sci_phy_frame_handler(iphy, frame_index);
                } else {
                        if (index < ihost->remote_node_entries)
                                idev = ihost->device_table[index];
                        else
                                idev = NULL;

                        if (idev != NULL)
                                result = sci_remote_device_frame_handler(idev, frame_index);
                        else
                                sci_controller_release_frame(ihost, frame_index);
                }
        }

        if (result != SCI_SUCCESS) {
                /*
                 * / @todo Is there any reason to report some additional error message
                 * / when we get this failure notification? */
        }
}

static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
{
        struct isci_remote_device *idev;
        struct isci_request *ireq;
        struct isci_phy *iphy;
        u32 index;

        index = SCU_GET_COMPLETION_INDEX(ent);

        switch (scu_get_event_type(ent)) {
        case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
                /* / @todo The driver did something wrong and we need to fix the condition. */
                dev_err(&ihost->pdev->dev,
                        "%s: SCIC Controller 0x%p received SMU command error "
                        "0x%x\n",
                        __func__,
                        ihost,
                        ent);
                break;

        case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
        case SCU_EVENT_TYPE_SMU_ERROR:
        case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
                /*
                 * / @todo This is a hardware failure and it's likely that we want to
                 * / reset the controller. */
                dev_err(&ihost->pdev->dev,
                        "%s: SCIC Controller 0x%p received fatal controller "
                        "event 0x%x\n",
                        __func__,
                        ihost,
                        ent);
                break;

        case SCU_EVENT_TYPE_TRANSPORT_ERROR:
                ireq = ihost->reqs[index];
                sci_io_request_event_handler(ireq, ent);
                break;

        case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
                switch (scu_get_event_specifier(ent)) {
                case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
                case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
                        ireq = ihost->reqs[index];
                        if (ireq != NULL)
                                sci_io_request_event_handler(ireq, ent);
                        else
                                dev_warn(&ihost->pdev->dev,
                                         "%s: SCIC Controller 0x%p received "
                                         "event 0x%x for io request object "
                                         "that doesn't exist.\n",
                                         __func__,
                                         ihost,
                                         ent);

                        break;

                case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
                        idev = ihost->device_table[index];
                        if (idev != NULL)
                                sci_remote_device_event_handler(idev, ent);
                        else
                                dev_warn(&ihost->pdev->dev,
                                         "%s: SCIC Controller 0x%p received "
                                         "event 0x%x for remote device object "
                                         "that doesn't exist.\n",
                                         __func__,
                                         ihost,
                                         ent);

                        break;
                }
                break;

        case SCU_EVENT_TYPE_BROADCAST_CHANGE:
                /*
                 * direct the broadcast change event to the phy first and then let
                 * the phy redirect the broadcast change to the port object */
        case SCU_EVENT_TYPE_ERR_CNT_EVENT:
                /*
                 * direct error counter event to the phy object since that is where
                 * we get the event notification. This is a type 4 event. */
        case SCU_EVENT_TYPE_OSSP_EVENT:
                index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
                iphy = &ihost->phys[index];
                sci_phy_event_handler(iphy, ent);
                break;

        case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
        case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
        case SCU_EVENT_TYPE_RNC_OPS_MISC:
                if (index < ihost->remote_node_entries) {
                        idev = ihost->device_table[index];

                        if (idev != NULL)
                                sci_remote_device_event_handler(idev, ent);
                } else
                        dev_err(&ihost->pdev->dev,
                                "%s: SCIC Controller 0x%p received event 0x%x "
                                "for remote device object 0x%0x that doesn't "
                                "exist.\n",
                                __func__,
                                ihost,
                                ent,
                                index);

                break;

        default:
                dev_warn(&ihost->pdev->dev,
                         "%s: SCIC Controller received unknown event code %x\n",
                         __func__,
                         ent);
                break;
        }
}

static void sci_controller_process_completions(struct isci_host *ihost)
{
        u32 completion_count = 0;
        u32 ent;
        u32 get_index;
        u32 get_cycle;
        u32 event_get;
        u32 event_cycle;

        dev_dbg(&ihost->pdev->dev,
                "%s: completion queue beginning get:0x%08x\n",
                __func__,
                ihost->completion_queue_get);

        /* Get the component parts of the completion queue */
        get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get);
        get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get;

        event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get);
        event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get;

        while (
                NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
                == COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])
               ) {
                completion_count++;

                ent = ihost->completion_queue[get_index];

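                /* ((get_index + 1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) is
                 * nonzero only when the increment carries out of the index
                 * field, i.e. once per wrap; shifted into the cycle-bit
                 * position, the XOR below toggles get_cycle exactly then,
                 * while the masked add wraps get_index back to zero.
                 */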
                /* increment the get pointer and check for rollover to toggle the cycle bit */
                get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) <<
                             (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT);
                get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1);

                dev_dbg(&ihost->pdev->dev,
                        "%s: completion queue entry:0x%08x\n",
                        __func__,
                        ent);

                switch (SCU_GET_COMPLETION_TYPE(ent)) {
                case SCU_COMPLETION_TYPE_TASK:
                        sci_controller_task_completion(ihost, ent);
                        break;

                case SCU_COMPLETION_TYPE_SDMA:
                        sci_controller_sdma_completion(ihost, ent);
                        break;

                case SCU_COMPLETION_TYPE_UFI:
                        sci_controller_unsolicited_frame(ihost, ent);
                        break;

                case SCU_COMPLETION_TYPE_EVENT:
                        sci_controller_event_completion(ihost, ent);
                        break;

                case SCU_COMPLETION_TYPE_NOTIFY: {
                        event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
                                       (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
                        event_get = (event_get+1) & (SCU_MAX_EVENTS-1);

                        sci_controller_event_completion(ihost, ent);
                        break;
                }
                default:
                        dev_warn(&ihost->pdev->dev,
                                 "%s: SCIC Controller received unknown "
                                 "completion type %x\n",
                                 __func__,
                                 ent);
                        break;
                }
        }

        /* Update the get register if we completed one or more entries */
        if (completion_count > 0) {
                ihost->completion_queue_get =
                        SMU_CQGR_GEN_BIT(ENABLE) |
                        SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
                        event_cycle |
                        SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) |
                        get_cycle |
                        SMU_CQGR_GEN_VAL(POINTER, get_index);

                writel(ihost->completion_queue_get,
                       &ihost->smu_registers->completion_queue_get);

        }

        dev_dbg(&ihost->pdev->dev,
                "%s: completion queue ending get:0x%08x\n",
                __func__,
                ihost->completion_queue_get);

}

static void sci_controller_error_handler(struct isci_host *ihost)
{
        u32 interrupt_status;

        interrupt_status =
                readl(&ihost->smu_registers->interrupt_status);

        if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
            sci_controller_completion_queue_has_entries(ihost)) {

                sci_controller_process_completions(ihost);
                writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status);
        } else {
                dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__,
                        interrupt_status);

                sci_change_state(&ihost->sm, SCIC_FAILED);

                return;
        }

        /* If we don't process any completions I am not sure that we want to do this.
         * We are in the middle of a hardware fault and should probably be reset.
         */
        writel(0, &ihost->smu_registers->interrupt_mask);
}

irqreturn_t isci_intx_isr(int vec, void *data)
{
        irqreturn_t ret = IRQ_NONE;
        struct isci_host *ihost = data;

        if (sci_controller_isr(ihost)) {
                writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
                tasklet_schedule(&ihost->completion_tasklet);
                ret = IRQ_HANDLED;
        } else if (sci_controller_error_isr(ihost)) {
                spin_lock(&ihost->scic_lock);
                sci_controller_error_handler(ihost);
                spin_unlock(&ihost->scic_lock);
                ret = IRQ_HANDLED;
        }

        return ret;
}

irqreturn_t isci_error_isr(int vec, void *data)
{
        struct isci_host *ihost = data;

        if (sci_controller_error_isr(ihost))
                sci_controller_error_handler(ihost);

        return IRQ_HANDLED;
}

/**
 * isci_host_start_complete() - This function is called by the core library,
 *    through the ISCI Module, to indicate controller start status.
 * @ihost: This parameter specifies the ISCI host object
 * @completion_status: This parameter specifies the completion status from the
 *    core library.
 *
 */
static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
{
        if (completion_status != SCI_SUCCESS)
                dev_info(&ihost->pdev->dev,
                         "controller start timed out, continuing...\n");
        clear_bit(IHOST_START_PENDING, &ihost->flags);
        wake_up(&ihost->eventq);
}

int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
        struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
        struct isci_host *ihost = ha->lldd_ha;

        if (test_bit(IHOST_START_PENDING, &ihost->flags))
                return 0;

        sas_drain_work(ha);

        return 1;
}

/**
 * sci_controller_get_suggested_start_timeout() - This method returns the
 *    suggested sci_controller_start() timeout amount.  The user is free to
 *    use any timeout value, but this method provides the suggested minimum
 *    start timeout value.  The returned value is based upon empirical
 *    information determined as a result of interoperability testing.
 * @ihost: the handle to the controller object for which to return the
 *    suggested start timeout.
 *
 * This method returns the number of milliseconds for the suggested start
 * operation timeout.
 */
static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
{
        /* Validate the user supplied parameters. */
        if (!ihost)
                return 0;

        /*
         * The suggested minimum timeout value for a controller start operation:
         *
         *     Signature FIS Timeout
         *   + Phy Start Timeout
         *   + Number of Phy Spin Up Intervals
         *   ---------------------------------
         *   Number of milliseconds for the controller start operation.
         *
         * NOTE: The number of phy spin up intervals will be equivalent
         *       to the number of phys divided by the number of phys allowed
         *       per interval - 1 (once OEM parameters are supported).
         *       Currently we assume only 1 phy per interval. */

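        /* e.g., if SCI_MAX_PHYS is 4, this evaluates to the signature FIS
         * timeout plus 100 ms of phy start timeout plus 3 * 500 ms of power
         * control intervals.
         */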
699 return SCIC_SDS_SIGNATURE_FIS_TIMEOUT 699 return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
700 + SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 700 + SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
701 + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); 701 + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
702 } 702 }
703 703
704 static void sci_controller_enable_interrupts(struct isci_host *ihost) 704 static void sci_controller_enable_interrupts(struct isci_host *ihost)
705 { 705 {
706 set_bit(IHOST_IRQ_ENABLED, &ihost->flags); 706 set_bit(IHOST_IRQ_ENABLED, &ihost->flags);
707 writel(0, &ihost->smu_registers->interrupt_mask); 707 writel(0, &ihost->smu_registers->interrupt_mask);
708 } 708 }
709 709
710 void sci_controller_disable_interrupts(struct isci_host *ihost) 710 void sci_controller_disable_interrupts(struct isci_host *ihost)
711 { 711 {
712 clear_bit(IHOST_IRQ_ENABLED, &ihost->flags); 712 clear_bit(IHOST_IRQ_ENABLED, &ihost->flags);
713 writel(0xffffffff, &ihost->smu_registers->interrupt_mask); 713 writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
714 readl(&ihost->smu_registers->interrupt_mask); /* flush */ 714 readl(&ihost->smu_registers->interrupt_mask); /* flush */
715 } 715 }
716 716
717 static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost) 717 static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
718 { 718 {
719 u32 port_task_scheduler_value; 719 u32 port_task_scheduler_value;
720 720
721 port_task_scheduler_value = 721 port_task_scheduler_value =
722 readl(&ihost->scu_registers->peg0.ptsg.control); 722 readl(&ihost->scu_registers->peg0.ptsg.control);
723 port_task_scheduler_value |= 723 port_task_scheduler_value |=
724 (SCU_PTSGCR_GEN_BIT(ETM_ENABLE) | 724 (SCU_PTSGCR_GEN_BIT(ETM_ENABLE) |
725 SCU_PTSGCR_GEN_BIT(PTSG_ENABLE)); 725 SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
726 writel(port_task_scheduler_value, 726 writel(port_task_scheduler_value,
727 &ihost->scu_registers->peg0.ptsg.control); 727 &ihost->scu_registers->peg0.ptsg.control);
728 } 728 }
729 729
730 static void sci_controller_assign_task_entries(struct isci_host *ihost) 730 static void sci_controller_assign_task_entries(struct isci_host *ihost)
731 { 731 {
732 u32 task_assignment; 732 u32 task_assignment;
733 733
734 /* 734 /*
735 * Assign all the TCs to function 0 735 * Assign all the TCs to function 0
736 * TODO: Do we actually need to read this register to write it back? 736 * TODO: Do we actually need to read this register to write it back?
737 */ 737 */
738 738
739 task_assignment = 739 task_assignment =
740 readl(&ihost->smu_registers->task_context_assignment[0]); 740 readl(&ihost->smu_registers->task_context_assignment[0]);
741 741
742 task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) | 742 task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
743 (SMU_TCA_GEN_VAL(ENDING, ihost->task_context_entries - 1)) | 743 (SMU_TCA_GEN_VAL(ENDING, ihost->task_context_entries - 1)) |
744 (SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE)); 744 (SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));
745 745
746 writel(task_assignment, 746 writel(task_assignment,
747 &ihost->smu_registers->task_context_assignment[0]); 747 &ihost->smu_registers->task_context_assignment[0]);
748 748
749 } 749 }
750 750
751 static void sci_controller_initialize_completion_queue(struct isci_host *ihost) 751 static void sci_controller_initialize_completion_queue(struct isci_host *ihost)
752 { 752 {
753 u32 index; 753 u32 index;
754 u32 completion_queue_control_value; 754 u32 completion_queue_control_value;
755 u32 completion_queue_get_value; 755 u32 completion_queue_get_value;
756 u32 completion_queue_put_value; 756 u32 completion_queue_put_value;
757 757
758 ihost->completion_queue_get = 0; 758 ihost->completion_queue_get = 0;
759 759
760 completion_queue_control_value = 760 completion_queue_control_value =
761 (SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) | 761 (SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) |
762 SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1)); 762 SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1));
763 763
764 writel(completion_queue_control_value, 764 writel(completion_queue_control_value,
765 &ihost->smu_registers->completion_queue_control); 765 &ihost->smu_registers->completion_queue_control);
766 766
767 767
768 /* Set the completion queue get pointer and enable the queue */ 768 /* Set the completion queue get pointer and enable the queue */
769 completion_queue_get_value = ( 769 completion_queue_get_value = (
770 (SMU_CQGR_GEN_VAL(POINTER, 0)) 770 (SMU_CQGR_GEN_VAL(POINTER, 0))
771 | (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0)) 771 | (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
772 | (SMU_CQGR_GEN_BIT(ENABLE)) 772 | (SMU_CQGR_GEN_BIT(ENABLE))
773 | (SMU_CQGR_GEN_BIT(EVENT_ENABLE)) 773 | (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
774 ); 774 );
775 775
776 writel(completion_queue_get_value, 776 writel(completion_queue_get_value,
777 &ihost->smu_registers->completion_queue_get); 777 &ihost->smu_registers->completion_queue_get);
778 778
779 /* Set the completion queue put pointer */ 779 /* Set the completion queue put pointer */
780 completion_queue_put_value = ( 780 completion_queue_put_value = (
781 (SMU_CQPR_GEN_VAL(POINTER, 0)) 781 (SMU_CQPR_GEN_VAL(POINTER, 0))
782 | (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0)) 782 | (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0))
783 ); 783 );
784 784
785 writel(completion_queue_put_value, 785 writel(completion_queue_put_value,
786 &ihost->smu_registers->completion_queue_put); 786 &ihost->smu_registers->completion_queue_put);
787 787
788 /* Initialize the cycle bit of the completion queue entries */ 788 /* Initialize the cycle bit of the completion queue entries */
789 for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) { 789 for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) {
790 /* 790 /*
791 * If get.cycle_bit != completion_queue.cycle_bit 791 * If get.cycle_bit != completion_queue.cycle_bit
792 * its not a valid completion queue entry 792 * its not a valid completion queue entry
793 * so at system start all entries are invalid */ 793 * so at system start all entries are invalid */
794 ihost->completion_queue[index] = 0x80000000; 794 ihost->completion_queue[index] = 0x80000000;
795 } 795 }
796 } 796 }
797 797
798 static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost) 798 static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost)
799 { 799 {
800 u32 frame_queue_control_value; 800 u32 frame_queue_control_value;
801 u32 frame_queue_get_value; 801 u32 frame_queue_get_value;
802 u32 frame_queue_put_value; 802 u32 frame_queue_put_value;
803 803
804 /* Write the queue size */ 804 /* Write the queue size */
805 frame_queue_control_value = 805 frame_queue_control_value =
806 SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES); 806 SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES);
807 807
808 writel(frame_queue_control_value, 808 writel(frame_queue_control_value,
809 &ihost->scu_registers->sdma.unsolicited_frame_queue_control); 809 &ihost->scu_registers->sdma.unsolicited_frame_queue_control);
810 810
811 /* Setup the get pointer for the unsolicited frame queue */ 811 /* Setup the get pointer for the unsolicited frame queue */
812 frame_queue_get_value = ( 812 frame_queue_get_value = (
813 SCU_UFQGP_GEN_VAL(POINTER, 0) 813 SCU_UFQGP_GEN_VAL(POINTER, 0)
814 | SCU_UFQGP_GEN_BIT(ENABLE_BIT) 814 | SCU_UFQGP_GEN_BIT(ENABLE_BIT)
815 ); 815 );
816 816
817 writel(frame_queue_get_value, 817 writel(frame_queue_get_value,
818 &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); 818 &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
819 /* Setup the put pointer for the unsolicited frame queue */ 819 /* Setup the put pointer for the unsolicited frame queue */
820 frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0); 820 frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
821 writel(frame_queue_put_value, 821 writel(frame_queue_put_value,
822 &ihost->scu_registers->sdma.unsolicited_frame_put_pointer); 822 &ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
823 } 823 }
824 824
825 void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status) 825 void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
826 { 826 {
827 if (ihost->sm.current_state_id == SCIC_STARTING) { 827 if (ihost->sm.current_state_id == SCIC_STARTING) {
828 /* 828 /*
829 * We move into the ready state, because some of the phys/ports 829 * We move into the ready state, because some of the phys/ports
830 * may be up and operational. 830 * may be up and operational.
831 */ 831 */
832 sci_change_state(&ihost->sm, SCIC_READY); 832 sci_change_state(&ihost->sm, SCIC_READY);
833 833
834 isci_host_start_complete(ihost, status); 834 isci_host_start_complete(ihost, status);
835 } 835 }
836 } 836 }
837 837
838 static bool is_phy_starting(struct isci_phy *iphy) 838 static bool is_phy_starting(struct isci_phy *iphy)
839 { 839 {
840 enum sci_phy_states state; 840 enum sci_phy_states state;
841 841
842 state = iphy->sm.current_state_id; 842 state = iphy->sm.current_state_id;
843 switch (state) { 843 switch (state) {
844 case SCI_PHY_STARTING: 844 case SCI_PHY_STARTING:
845 case SCI_PHY_SUB_INITIAL: 845 case SCI_PHY_SUB_INITIAL:
846 case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN: 846 case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
847 case SCI_PHY_SUB_AWAIT_IAF_UF: 847 case SCI_PHY_SUB_AWAIT_IAF_UF:
848 case SCI_PHY_SUB_AWAIT_SAS_POWER: 848 case SCI_PHY_SUB_AWAIT_SAS_POWER:
849 case SCI_PHY_SUB_AWAIT_SATA_POWER: 849 case SCI_PHY_SUB_AWAIT_SATA_POWER:
850 case SCI_PHY_SUB_AWAIT_SATA_PHY_EN: 850 case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
851 case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: 851 case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
852 case SCI_PHY_SUB_AWAIT_OSSP_EN: 852 case SCI_PHY_SUB_AWAIT_OSSP_EN:
853 case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: 853 case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
854 case SCI_PHY_SUB_FINAL: 854 case SCI_PHY_SUB_FINAL:
855 return true; 855 return true;
856 default: 856 default:
857 return false; 857 return false;
858 } 858 }
859 } 859 }
860 860
bool is_controller_start_complete(struct isci_host *ihost)
{
	int i;

	for (i = 0; i < SCI_MAX_PHYS; i++) {
		struct isci_phy *iphy = &ihost->phys[i];
		u32 state = iphy->sm.current_state_id;

		/* in apc mode we need to check every phy, in
		 * mpc mode we only need to check phys that have
		 * been configured into a port
		 */
		if (is_port_config_apc(ihost))
			/* pass */;
		else if (!phy_get_non_dummy_port(iphy))
			continue;

		/* The controller start operation is complete iff:
		 * - all links have been given an opportunity to start
		 * - each phy either has no indication of a connected
		 *   device, or has an indication of a connected device
		 *   that has finished the link training process
		 */
		if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
		    (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
		    (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
		    (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask))
			return false;
	}

	return true;
}

/**
 * sci_controller_start_next_phy - start phy
 * @ihost: controller
 *
 * If all the phys have been started, then attempt to transition the
 * controller to the READY state and inform the user
 * (sci_cb_controller_start_complete()).
 */
static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
{
	struct sci_oem_params *oem = &ihost->oem_parameters;
	struct isci_phy *iphy;
	enum sci_status status;

	status = SCI_SUCCESS;

	if (ihost->phy_startup_timer_pending)
		return status;

	if (ihost->next_phy_to_start >= SCI_MAX_PHYS) {
		if (is_controller_start_complete(ihost)) {
			sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
			sci_del_timer(&ihost->phy_timer);
			ihost->phy_startup_timer_pending = false;
		}
	} else {
		iphy = &ihost->phys[ihost->next_phy_to_start];

		if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
			if (phy_get_non_dummy_port(iphy) == NULL) {
				ihost->next_phy_to_start++;

				/* Caution: recursion ahead, be forewarned.
				 *
				 * The phy was never added to a port in MPC
				 * mode, so start the next phy in sequence.
				 * This phy will never go link up and will not
				 * draw power; the OEM parameters either
				 * configured the phy incorrectly for the port
				 * or never assigned it to a port.
				 */
				return sci_controller_start_next_phy(ihost);
			}
		}

		status = sci_phy_start(iphy);

		if (status == SCI_SUCCESS) {
			sci_mod_timer(&ihost->phy_timer,
				      SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);
			ihost->phy_startup_timer_pending = true;
		} else {
			dev_warn(&ihost->pdev->dev,
				 "%s: Controller start operation failed "
				 "to start phy %d because of status "
				 "%d.\n",
				 __func__,
				 ihost->phys[ihost->next_phy_to_start].phy_index,
				 status);
		}

		ihost->next_phy_to_start++;
	}

	return status;
}

static void phy_startup_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer);
	unsigned long flags;
	enum sci_status status;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (tmr->cancel)
		goto done;

	ihost->phy_startup_timer_pending = false;

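	/*
	 * Retry until a phy start succeeds; this is bounded because each
	 * failed sci_phy_start() advances ihost->next_phy_to_start, and once
	 * that index passes SCI_MAX_PHYS sci_controller_start_next_phy()
	 * returns SCI_SUCCESS.
	 */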
	do {
		status = sci_controller_start_next_phy(ihost);
	} while (status != SCI_SUCCESS);

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}

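/*
 * CIRC_CNT() (from <linux/circ_buf.h>) evaluates to (head - tail) masked to
 * the buffer size (which must be a power of 2), so this returns how many
 * task contexts are currently allocated out of the circular TCi pool.
 */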
static u16 isci_tci_active(struct isci_host *ihost)
{
	return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
}

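/*
 * The caller (isci_host_scan_start() below) holds ihost->scic_lock across
 * this routine, so the TCi/RNi pool setup and queue initialization are not
 * racing the completion path.
 */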
static enum sci_status sci_controller_start(struct isci_host *ihost,
					    u32 timeout)
{
	enum sci_status result;
	u16 index;

	if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
			 __func__, ihost->sm.current_state_id);
		return SCI_FAILURE_INVALID_STATE;
	}

	/* Build the TCi free pool */
	BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8);
	ihost->tci_head = 0;
	ihost->tci_tail = 0;
	for (index = 0; index < ihost->task_context_entries; index++)
		isci_tci_free(ihost, index);

	/* Build the RNi free pool */
	sci_remote_node_table_initialize(&ihost->available_remote_nodes,
					 ihost->remote_node_entries);

	/*
	 * Before anything else let's make sure we will not be
	 * interrupted by the hardware.
	 */
	sci_controller_disable_interrupts(ihost);

	/* Enable the port task scheduler */
	sci_controller_enable_port_task_scheduler(ihost);

	/* Assign all the task entries to ihost physical function */
	sci_controller_assign_task_entries(ihost);

	/* Now initialize the completion queue */
	sci_controller_initialize_completion_queue(ihost);

	/* Initialize the unsolicited frame queue for use */
	sci_controller_initialize_unsolicited_frame_queue(ihost);

	/* Start all of the ports on this controller */
	for (index = 0; index < ihost->logical_port_entries; index++) {
		struct isci_port *iport = &ihost->ports[index];

		result = sci_port_start(iport);
		if (result)
			return result;
	}

	sci_controller_start_next_phy(ihost);

	sci_mod_timer(&ihost->timer, timeout);

	sci_change_state(&ihost->sm, SCIC_STARTING);

	return SCI_SUCCESS;
}

void isci_host_scan_start(struct Scsi_Host *shost)
{
	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
	unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);

	set_bit(IHOST_START_PENDING, &ihost->flags);

	spin_lock_irq(&ihost->scic_lock);
	sci_controller_start(ihost, tmo);
	sci_controller_enable_interrupts(ihost);
	spin_unlock_irq(&ihost->scic_lock);
}

static void isci_host_stop_complete(struct isci_host *ihost)
{
	sci_controller_disable_interrupts(ihost);
	clear_bit(IHOST_STOP_PENDING, &ihost->flags);
	wake_up(&ihost->eventq);
}

static void sci_controller_completion_handler(struct isci_host *ihost)
{
	/* Empty out the completion queue */
	if (sci_controller_completion_queue_has_entries(ihost))
		sci_controller_process_completions(ihost);

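	/*
	 * The back-to-back mask writes below first set and then clear the
	 * mask register; presumably the transient mask forces the interrupt
	 * line to be re-armed so a completion that raced with this handler
	 * still raises a fresh interrupt.
	 */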
	/* Clear the interrupt and enable all interrupts again */
	writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
	/* Could we write the value of SMU_ISR_COMPLETION? */
	writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
	writel(0, &ihost->smu_registers->interrupt_mask);
}

/**
 * isci_host_completion_routine() - This function is the delayed service
 *    routine that calls the sci core library's completion handler. It's
 *    scheduled as a tasklet from the interrupt service routine when
 *    interrupts are in use, or set as the timeout function in polled mode.
 * @data: This parameter specifies the ISCI host object
 *
 */
void isci_host_completion_routine(unsigned long data)
{
	struct isci_host *ihost = (struct isci_host *)data;
	struct list_head completed_request_list;
	struct list_head errored_request_list;
	struct list_head *current_position;
	struct list_head *next_position;
	struct isci_request *request;
	struct isci_request *next_request;
	struct sas_task *task;
	u16 active;

	INIT_LIST_HEAD(&completed_request_list);
	INIT_LIST_HEAD(&errored_request_list);

	spin_lock_irq(&ihost->scic_lock);

	sci_controller_completion_handler(ihost);

	/* Take the lists of completed I/Os from the host. */

	list_splice_init(&ihost->requests_to_complete,
			 &completed_request_list);

	/* Take the list of errored I/Os from the host. */
	list_splice_init(&ihost->requests_to_errorback,
			 &errored_request_list);

	spin_unlock_irq(&ihost->scic_lock);

	/* Process any completions in the lists. */
	list_for_each_safe(current_position, next_position,
			   &completed_request_list) {

		request = list_entry(current_position, struct isci_request,
				     completed_node);
		task = isci_request_access_task(request);

		/* Normal notification (task_done) */
		dev_dbg(&ihost->pdev->dev,
			"%s: Normal - request/task = %p/%p\n",
			__func__,
			request,
			task);

		/* Return the task to libsas */
		if (task != NULL) {

			task->lldd_task = NULL;
			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {

				/* If the task is already in the abort path,
				 * the task_done callback cannot be called.
				 */
				task->task_done(task);
			}
		}

		spin_lock_irq(&ihost->scic_lock);
		isci_free_tag(ihost, request->io_tag);
		spin_unlock_irq(&ihost->scic_lock);
	}
	list_for_each_entry_safe(request, next_request, &errored_request_list,
				 completed_node) {

		task = isci_request_access_task(request);

		/* Use sas_task_abort */
		dev_warn(&ihost->pdev->dev,
			 "%s: Error - request/task = %p/%p\n",
			 __func__,
			 request,
			 task);

		if (task != NULL) {

			/* Put the task into the abort path if it's not there
			 * already.
			 */
			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
				sas_task_abort(task);

		} else {
			/* This is a case where the request has completed with a
			 * status such that it needed further target servicing,
			 * but the sas_task reference has already been removed
			 * from the request. Since it was errored, it was not
			 * being aborted, so there is nothing to do except free
			 * it.
			 */

			spin_lock_irq(&ihost->scic_lock);
			/* Remove the request from the remote device's list
			 * of pending requests.
			 */
			list_del_init(&request->dev_node);
			isci_free_tag(ihost, request->io_tag);
			spin_unlock_irq(&ihost->scic_lock);
		}
	}

	/* the coalescence timeout doubles at each encoding step, so
	 * update it based on the ilog2 value of the outstanding requests
	 */
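	/*
	 * Each increment of the TIMER field doubles the coalesce timeout
	 * (see the encoding table in
	 * sci_controller_set_interrupt_coalescence() below), so
	 * ISCI_COALESCE_BASE + ilog2(active) lengthens the timeout
	 * logarithmically with load.  Note this assumes active != 0 here;
	 * ilog2(0) is undefined.
	 */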
	active = isci_tci_active(ihost);
	writel(SMU_ICC_GEN_VAL(NUMBER, active) |
	       SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)),
	       &ihost->smu_registers->interrupt_coalesce_control);
}

/**
 * sci_controller_stop() - This method will stop an individual controller
 *    object. This method will invoke the associated user callback upon
 *    completion.  The completion callback is called when the following
 *    conditions are met: -# the method return status is SCI_SUCCESS. -# the
 *    controller has been quiesced. This method will ensure that all IO
 *    requests are quiesced, phys are stopped, and all additional operation by
 *    the hardware is halted.
 * @ihost: the handle to the controller object to stop.
 * @timeout: This parameter specifies the number of milliseconds in which the
 *    stop operation should complete.
 *
 * The controller must be in the READY state. Indicate if the controller stop
 * method succeeded or failed in some way. SCI_SUCCESS if the stop operation
 * successfully began. SCI_FAILURE_INVALID_STATE if the controller is not in
 * the READY state.
 */
static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
{
	if (ihost->sm.current_state_id != SCIC_READY) {
		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
			 __func__, ihost->sm.current_state_id);
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_mod_timer(&ihost->timer, timeout);
	sci_change_state(&ihost->sm, SCIC_STOPPING);
	return SCI_SUCCESS;
}

/**
 * sci_controller_reset() - This method will reset the supplied core
 *    controller regardless of the state of said controller.  This operation is
 *    considered destructive.  In other words, all current operations are wiped
 *    out.  No IO completions for outstanding devices occur.  Outstanding IO
 *    requests are not aborted or completed at the actual remote device.
 * @ihost: the handle to the controller object to reset.
 *
 * Indicate if the controller reset method succeeded or failed in some way.
 * SCI_SUCCESS if the reset operation successfully started.
 * SCI_FAILURE_INVALID_STATE if the controller is in a state from which a
 * reset is not permitted.
 */
static enum sci_status sci_controller_reset(struct isci_host *ihost)
{
	switch (ihost->sm.current_state_id) {
	case SCIC_RESET:
	case SCIC_READY:
	case SCIC_STOPPING:
	case SCIC_FAILED:
		/*
		 * The reset operation is not a graceful cleanup, just
		 * perform the state transition.
		 */
		sci_change_state(&ihost->sm, SCIC_RESETTING);
		return SCI_SUCCESS;
	default:
		dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
			 __func__, ihost->sm.current_state_id);
		return SCI_FAILURE_INVALID_STATE;
	}
}

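/*
 * SCI_FAILURE_INVALID_STATE from sci_phy_stop() is tolerated below,
 * presumably because a phy that is already stopped (or was never started)
 * reports that status; only other failures make the overall stop fail.
 */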
static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
{
	u32 index;
	enum sci_status status;
	enum sci_status phy_status;

	status = SCI_SUCCESS;

	for (index = 0; index < SCI_MAX_PHYS; index++) {
		phy_status = sci_phy_stop(&ihost->phys[index]);

		if (phy_status != SCI_SUCCESS &&
		    phy_status != SCI_FAILURE_INVALID_STATE) {
			status = SCI_FAILURE;

			dev_warn(&ihost->pdev->dev,
				 "%s: Controller stop operation failed to stop "
				 "phy %d because of status %d.\n",
				 __func__,
				 ihost->phys[index].phy_index, phy_status);
		}
	}

	return status;
}


/**
 * isci_host_deinit - shutdown frame reception and dma
 * @ihost: host to take down
 *
 * This is called in either the driver shutdown or the suspend path.  In
 * the shutdown case libsas went through port teardown and normal device
 * removal (i.e. physical links stayed up to service scsi_device removal
 * commands).  In the suspend case we disable the hardware without
 * notifying libsas of the link down events, since we want libsas to
 * remember the domain across the suspend/resume cycle.
 */
void isci_host_deinit(struct isci_host *ihost)
{
	int i;

	/* disable output data selects */
	for (i = 0; i < isci_gpio_count(ihost); i++)
		writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);

	set_bit(IHOST_STOP_PENDING, &ihost->flags);

	spin_lock_irq(&ihost->scic_lock);
	sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT);
	spin_unlock_irq(&ihost->scic_lock);

	wait_for_stop(ihost);

	/* phy stop is after controller stop to allow port and device to
	 * go idle before shutting down the phys, but the expectation is
	 * that i/o has been shut off well before we reach this
	 * function.
	 */
	sci_controller_stop_phys(ihost);

	/* disable sgpio: where the above wait should give time for the
	 * enclosure to sample the gpios going inactive
	 */
	writel(0, &ihost->scu_registers->peg0.sgpio.interface_control);

	spin_lock_irq(&ihost->scic_lock);
	sci_controller_reset(ihost);
	spin_unlock_irq(&ihost->scic_lock);

	/* Cancel any/all outstanding port timers */
	for (i = 0; i < ihost->logical_port_entries; i++) {
		struct isci_port *iport = &ihost->ports[i];
		del_timer_sync(&iport->timer.timer);
	}

	/* Cancel any/all outstanding phy timers */
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		struct isci_phy *iphy = &ihost->phys[i];
		del_timer_sync(&iphy->sata_timer.timer);
	}

	del_timer_sync(&ihost->port_agent.timer.timer);

	del_timer_sync(&ihost->power_control.timer.timer);

	del_timer_sync(&ihost->timer.timer);

	del_timer_sync(&ihost->phy_timer.timer);
}

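/*
 * The BAR index is doubled when indexing pcim_iomap_table() below because
 * the controller's BARs appear to be 64-bit, and each 64-bit BAR consumes
 * two slots in the 32-bit PCI BAR array (an assumption based on the usual
 * reason for this pattern).
 */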
static void __iomem *scu_base(struct isci_host *isci_host)
{
	struct pci_dev *pdev = isci_host->pdev;
	int id = isci_host->id;

	return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
}

static void __iomem *smu_base(struct isci_host *isci_host)
{
	struct pci_dev *pdev = isci_host->pdev;
	int id = isci_host->id;

	return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
}

static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	sci_change_state(&ihost->sm, SCIC_RESET);
}

static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	sci_del_timer(&ihost->timer);
}

#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS	853
#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS	1280
#define INTERRUPT_COALESCE_TIMEOUT_MAX_US			2700000
#define INTERRUPT_COALESCE_NUMBER_MAX				256
#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN			7
#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX			28

/**
 * sci_controller_set_interrupt_coalescence() - This method allows the user to
 *    configure the interrupt coalescence.
 * @ihost: This parameter represents the handle to the controller object
 *    for which its interrupt coalesce register is overridden.
 * @coalesce_number: Used to control the number of entries in the Completion
 *    Queue before an interrupt is generated. If the number of entries exceeds
 *    this number, an interrupt will be generated. The valid range of the
 *    input is [0, 256]. A setting of 0 results in coalescing being disabled.
 * @coalesce_timeout: Timeout value in microseconds. The valid range of the
 *    input is [0, 2700000]. A setting of 0 is allowed and results in no
 *    interrupt coalescing timeout.
 *
 * Indicate if the user successfully set the interrupt coalesce parameters.
 * SCI_SUCCESS The user successfully updated the interrupt coalescence.
 * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
 */
static enum sci_status
sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
					 u32 coalesce_number,
					 u32 coalesce_timeout)
{
	u8 timeout_encode = 0;
	u32 min = 0;
	u32 max = 0;

	/* Check if the input parameters fall in the range. */
	if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
		return SCI_FAILURE_INVALID_PARAMETER_VALUE;

	/*
	 * Defined encoding for interrupt coalescing timeout:
	 *   Value   Min      Max     Units
	 *   -----   ---      ---     -----
	 *   0       -        -       Disabled
	 *   1       13.3     20.0    ns
	 *   2       26.7     40.0
	 *   3       53.3     80.0
	 *   4       106.7    160.0
	 *   5       213.3    320.0
	 *   6       426.7    640.0
	 *   7       853.3    1280.0
	 *   8       1.7      2.6     us
	 *   9       3.4      5.1
	 *   10      6.8      10.2
	 *   11      13.7     20.5
	 *   12      27.3     41.0
	 *   13      54.6     81.9
	 *   14      109.2    163.8
	 *   15      218.5    327.7
	 *   16      436.9    655.4
	 *   17      873.8    1310.7
	 *   18      1.7      2.6     ms
	 *   19      3.5      5.2
	 *   20      7.0      10.5
	 *   21      14.0     21.0
	 *   22      28.0     41.9
	 *   23      55.9     83.9
	 *   24      111.8    167.8
	 *   25      223.7    335.5
	 *   26      447.4    671.1
	 *   27      894.8    1342.2
	 *   28      1.8      2.7     s
	 *   Others  Undefined
	 */

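	/*
	 * Worked example (illustrative): a requested coalesce_timeout of
	 * 30 us becomes 3000 in 10 ns units.  Starting from encode 7 with
	 * [min, max) = [85, 128), the window doubles each step until
	 * encode 12 gives [2720, 4096), which contains 3000 -- matching
	 * the 27.3-41.0 us row in the table above.
	 */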
	/*
	 * Use the table above to decide the encode of interrupt coalescing
	 * timeout value for register writing.
	 */
	if (coalesce_timeout == 0)
		timeout_encode = 0;
	else {
		/* make the timeout value in unit of (10 ns). */
		coalesce_timeout = coalesce_timeout * 100;
		min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
		max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;

		/* get the encode of timeout for register writing. */
		for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
		     timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
		     timeout_encode++) {
			if (min <= coalesce_timeout && max > coalesce_timeout)
				break;
			else if (coalesce_timeout >= max && coalesce_timeout < min * 2
				 && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
				if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
					break;
				else {
					timeout_encode++;
					break;
				}
			} else {
				max = max * 2;
				min = min * 2;
			}
		}

		if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
			/* the value is out of range. */
			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
	}

	writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
	       SMU_ICC_GEN_VAL(TIMER, timeout_encode),
	       &ihost->smu_registers->interrupt_coalesce_control);


	ihost->interrupt_coalesce_number = (u16)coalesce_number;
	ihost->interrupt_coalesce_timeout = coalesce_timeout / 100;

	return SCI_SUCCESS;
}


static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
	u32 val;

	/* enable clock gating for power control of the scu unit */
	val = readl(&ihost->smu_registers->clock_gating_control);
	val &= ~(SMU_CGUCR_GEN_BIT(REGCLK_ENABLE) |
		 SMU_CGUCR_GEN_BIT(TXCLK_ENABLE) |
		 SMU_CGUCR_GEN_BIT(XCLK_ENABLE));
	val |= SMU_CGUCR_GEN_BIT(IDLE_ENABLE);
	writel(val, &ihost->smu_registers->clock_gating_control);

	/* set the default interrupt coalescence number and timeout value. */
	sci_controller_set_interrupt_coalescence(ihost, 0, 0);
}

static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	/* disable interrupt coalescence. */
	sci_controller_set_interrupt_coalescence(ihost, 0, 0);
}

static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
{
	u32 index;
	enum sci_status port_status;
	enum sci_status status = SCI_SUCCESS;

	for (index = 0; index < ihost->logical_port_entries; index++) {
		struct isci_port *iport = &ihost->ports[index];

		port_status = sci_port_stop(iport);

		if ((port_status != SCI_SUCCESS) &&
		    (port_status != SCI_FAILURE_INVALID_STATE)) {
			status = SCI_FAILURE;

			dev_warn(&ihost->pdev->dev,
				 "%s: Controller stop operation failed to "
				 "stop port %d because of status %d.\n",
				 __func__,
				 iport->logical_port_index,
				 port_status);
		}
	}

	return status;
}

static enum sci_status sci_controller_stop_devices(struct isci_host *ihost)
{
	u32 index;
	enum sci_status status;
	enum sci_status device_status;

	status = SCI_SUCCESS;

	for (index = 0; index < ihost->remote_node_entries; index++) {
		if (ihost->device_table[index] != NULL) {
			/* @todo What timeout value do we want to provide to this request? */
			device_status = sci_remote_device_stop(ihost->device_table[index], 0);

			if ((device_status != SCI_SUCCESS) &&
			    (device_status != SCI_FAILURE_INVALID_STATE)) {
				dev_warn(&ihost->pdev->dev,
					 "%s: Controller stop operation failed "
					 "to stop device 0x%p because of "
					 "status %d.\n",
					 __func__,
					 ihost->device_table[index], device_status);
			}
		}
	}

	return status;
}

static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	sci_controller_stop_devices(ihost);
	sci_controller_stop_ports(ihost);

	if (!sci_controller_has_remote_devices_stopping(ihost))
		isci_host_stop_complete(ihost);
}

static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	sci_del_timer(&ihost->timer);
}

static void sci_controller_reset_hardware(struct isci_host *ihost)
{
	/* Disable interrupts so we don't take any spurious interrupts */
	sci_controller_disable_interrupts(ihost);

	/* Reset the SCU */
	writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control);

	/* Delay for 1ms before clearing the CQP and UFQPR. */
	udelay(1000);

	/* The write to the CQGR clears the CQP */
	writel(0x00000000, &ihost->smu_registers->completion_queue_get);

	/* The write to the UFQGP clears the UFQPR */
	writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);

	/* clear all interrupts */
	writel(~SMU_INTERRUPT_STATUS_RESERVED_MASK, &ihost->smu_registers->interrupt_status);
}

static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	sci_controller_reset_hardware(ihost);
	sci_change_state(&ihost->sm, SCIC_RESET);
}

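/*
 * States with an empty initializer below have no enter/exit actions; the
 * state machine core just records the transition for them.
 */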
static const struct sci_base_state sci_controller_state_table[] = {
	[SCIC_INITIAL] = {
		.enter_state = sci_controller_initial_state_enter,
	},
	[SCIC_RESET] = {},
	[SCIC_INITIALIZING] = {},
	[SCIC_INITIALIZED] = {},
	[SCIC_STARTING] = {
		.exit_state = sci_controller_starting_state_exit,
	},
	[SCIC_READY] = {
		.enter_state = sci_controller_ready_state_enter,
		.exit_state = sci_controller_ready_state_exit,
	},
	[SCIC_RESETTING] = {
		.enter_state = sci_controller_resetting_state_enter,
	},
	[SCIC_STOPPING] = {
		.enter_state = sci_controller_stopping_state_enter,
		.exit_state = sci_controller_stopping_state_exit,
	},
	[SCIC_FAILED] = {}
};

static void controller_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer);
	struct sci_base_state_machine *sm = &ihost->sm;
	unsigned long flags;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (tmr->cancel)
		goto done;

	if (sm->current_state_id == SCIC_STARTING)
		sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
	else if (sm->current_state_id == SCIC_STOPPING) {
		sci_change_state(sm, SCIC_FAILED);
		isci_host_stop_complete(ihost);
	} else /* @todo Now what do we want to do in this case? */
		dev_err(&ihost->pdev->dev,
			"%s: Controller timer fired when controller was not "
			"in a state being timed.\n",
			__func__);

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}

static enum sci_status sci_controller_construct(struct isci_host *ihost,
						void __iomem *scu_base,
						void __iomem *smu_base)
{
	u8 i;

	sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);

	ihost->scu_registers = scu_base;
	ihost->smu_registers = smu_base;

	sci_port_configuration_agent_construct(&ihost->port_agent);

	/* Construct the ports for this controller */
	for (i = 0; i < SCI_MAX_PORTS; i++)
		sci_port_construct(&ihost->ports[i], i, ihost);
	sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost);

	/* Construct the phys for this controller */
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		/* Add all the PHYs to the dummy port */
		sci_phy_construct(&ihost->phys[i],
				  &ihost->ports[SCI_MAX_PORTS], i);
	}

	ihost->invalid_phy_mask = 0;

	sci_init_timer(&ihost->timer, controller_timeout);

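	/*
	 * Construction finishes with a reset: from SCIC_INITIAL the state
	 * machine has already moved to SCIC_RESET, so this transition runs
	 * through SCIC_RESETTING (resetting the hardware) and lands back in
	 * SCIC_RESET on success.
	 */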
	return sci_controller_reset(ihost);
}

1712 int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version) 1712 int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version)
1713 { 1713 {
1714 int i; 1714 int i;
1715 1715
1716 for (i = 0; i < SCI_MAX_PORTS; i++) 1716 for (i = 0; i < SCI_MAX_PORTS; i++)
1717 if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX) 1717 if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
1718 return -EINVAL; 1718 return -EINVAL;
1719 1719
1720 for (i = 0; i < SCI_MAX_PHYS; i++) 1720 for (i = 0; i < SCI_MAX_PHYS; i++)
1721 if (oem->phys[i].sas_address.high == 0 && 1721 if (oem->phys[i].sas_address.high == 0 &&
1722 oem->phys[i].sas_address.low == 0) 1722 oem->phys[i].sas_address.low == 0)
1723 return -EINVAL; 1723 return -EINVAL;
1724 1724
1725 if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) { 1725 if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
1726 for (i = 0; i < SCI_MAX_PHYS; i++) 1726 for (i = 0; i < SCI_MAX_PHYS; i++)
1727 if (oem->ports[i].phy_mask != 0) 1727 if (oem->ports[i].phy_mask != 0)
1728 return -EINVAL; 1728 return -EINVAL;
1729 } else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) { 1729 } else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
1730 u8 phy_mask = 0; 1730 u8 phy_mask = 0;
1731 1731
1732 for (i = 0; i < SCI_MAX_PHYS; i++) 1732 for (i = 0; i < SCI_MAX_PHYS; i++)
1733 phy_mask |= oem->ports[i].phy_mask; 1733 phy_mask |= oem->ports[i].phy_mask;
1734 1734
1735 if (phy_mask == 0) 1735 if (phy_mask == 0)
1736 return -EINVAL; 1736 return -EINVAL;
1737 } else 1737 } else
1738 return -EINVAL; 1738 return -EINVAL;
1739 1739
1740 if (oem->controller.max_concurr_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT || 1740 if (oem->controller.max_concurr_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT ||
1741 oem->controller.max_concurr_spin_up < 1) 1741 oem->controller.max_concurr_spin_up < 1)
1742 return -EINVAL; 1742 return -EINVAL;
1743 1743
1744 if (oem->controller.do_enable_ssc) { 1744 if (oem->controller.do_enable_ssc) {
1745 if (version < ISCI_ROM_VER_1_1 && oem->controller.do_enable_ssc != 1) 1745 if (version < ISCI_ROM_VER_1_1 && oem->controller.do_enable_ssc != 1)
1746 return -EINVAL; 1746 return -EINVAL;
1747 1747
1748 if (version >= ISCI_ROM_VER_1_1) { 1748 if (version >= ISCI_ROM_VER_1_1) {
1749 u8 test = oem->controller.ssc_sata_tx_spread_level; 1749 u8 test = oem->controller.ssc_sata_tx_spread_level;
1750 1750
1751 switch (test) { 1751 switch (test) {
1752 case 0: 1752 case 0:
1753 case 2: 1753 case 2:
1754 case 3: 1754 case 3:
1755 case 6: 1755 case 6:
1756 case 7: 1756 case 7:
1757 break; 1757 break;
1758 default: 1758 default:
1759 return -EINVAL; 1759 return -EINVAL;
1760 } 1760 }
1761 1761
1762 test = oem->controller.ssc_sas_tx_spread_level; 1762 test = oem->controller.ssc_sas_tx_spread_level;
1763 if (oem->controller.ssc_sas_tx_type == 0) { 1763 if (oem->controller.ssc_sas_tx_type == 0) {
1764 switch (test) { 1764 switch (test) {
1765 case 0: 1765 case 0:
1766 case 2: 1766 case 2:
1767 case 3: 1767 case 3:
1768 break; 1768 break;
1769 default: 1769 default:
1770 return -EINVAL; 1770 return -EINVAL;
1771 } 1771 }
1772 } else if (oem->controller.ssc_sas_tx_type == 1) { 1772 } else if (oem->controller.ssc_sas_tx_type == 1) {
1773 switch (test) { 1773 switch (test) {
1774 case 0: 1774 case 0:
1775 case 3: 1775 case 3:
1776 case 6: 1776 case 6:
1777 break; 1777 break;
1778 default: 1778 default:
1779 return -EINVAL; 1779 return -EINVAL;
1780 } 1780 }
1781 } 1781 }
1782 } 1782 }
1783 } 1783 }
1784 1784
1785 return 0; 1785 return 0;
1786 } 1786 }
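
For reference, a minimal standalone sketch of the port-mode/phy-mask rule the validator enforces above: automatic configuration requires every per-port phy mask to be zero, while manual configuration requires the union of the masks to be non-zero. The helper name and the 4-port/4-phy sizing are illustrative assumptions, not driver API.

	#include <stdio.h>

	#define N_PORTS      4
	#define PHY_MASK_MAX 0xF

	/* Hypothetical checker mirroring the mode/mask rule above. */
	static int phy_masks_valid(int automatic, const unsigned char mask[N_PORTS])
	{
		unsigned char any = 0;
		int i;

		for (i = 0; i < N_PORTS; i++) {
			if (mask[i] > PHY_MASK_MAX)
				return 0;		/* phy bit outside 0..3 */
			any |= mask[i];
		}
		return automatic ? (any == 0) : (any != 0);
	}

	int main(void)
	{
		unsigned char manual[N_PORTS] = { 0x3, 0xC, 0x0, 0x0 };

		printf("manual mode ok: %d\n", phy_masks_valid(0, manual)); /* 1 */
		printf("auto mode ok:   %d\n", phy_masks_valid(1, manual)); /* 0 */
		return 0;
	}
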
1787 1787
1788 static u8 max_spin_up(struct isci_host *ihost) 1788 static u8 max_spin_up(struct isci_host *ihost)
1789 { 1789 {
1790 if (ihost->user_parameters.max_concurr_spinup) 1790 if (ihost->user_parameters.max_concurr_spinup)
1791 return min_t(u8, ihost->user_parameters.max_concurr_spinup, 1791 return min_t(u8, ihost->user_parameters.max_concurr_spinup,
1792 MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT); 1792 MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT);
1793 else 1793 else
1794 return min_t(u8, ihost->oem_parameters.controller.max_concurr_spin_up, 1794 return min_t(u8, ihost->oem_parameters.controller.max_concurr_spin_up,
1795 MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT); 1795 MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT);
1796 } 1796 }
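
The precedence in max_spin_up() reduces to: a non-zero user setting overrides the OEM value, and either is clamped to the hardware ceiling. A tiny sketch of the same arithmetic (names are illustrative):

	static unsigned char spin_up_limit(unsigned char user, unsigned char oem,
					   unsigned char hw_max)
	{
		unsigned char v = user ? user : oem;	/* user overrides OEM */

		return v < hw_max ? v : hw_max;		/* clamp to hardware max */
	}
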
1797 1797
1798 static void power_control_timeout(unsigned long data) 1798 static void power_control_timeout(unsigned long data)
1799 { 1799 {
1800 struct sci_timer *tmr = (struct sci_timer *)data; 1800 struct sci_timer *tmr = (struct sci_timer *)data;
1801 struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer); 1801 struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer);
1802 struct isci_phy *iphy; 1802 struct isci_phy *iphy;
1803 unsigned long flags; 1803 unsigned long flags;
1804 u8 i; 1804 u8 i;
1805 1805
1806 spin_lock_irqsave(&ihost->scic_lock, flags); 1806 spin_lock_irqsave(&ihost->scic_lock, flags);
1807 1807
1808 if (tmr->cancel) 1808 if (tmr->cancel)
1809 goto done; 1809 goto done;
1810 1810
1811 ihost->power_control.phys_granted_power = 0; 1811 ihost->power_control.phys_granted_power = 0;
1812 1812
1813 if (ihost->power_control.phys_waiting == 0) { 1813 if (ihost->power_control.phys_waiting == 0) {
1814 ihost->power_control.timer_started = false; 1814 ihost->power_control.timer_started = false;
1815 goto done; 1815 goto done;
1816 } 1816 }
1817 1817
1818 for (i = 0; i < SCI_MAX_PHYS; i++) { 1818 for (i = 0; i < SCI_MAX_PHYS; i++) {
1819 1819
1820 if (ihost->power_control.phys_waiting == 0) 1820 if (ihost->power_control.phys_waiting == 0)
1821 break; 1821 break;
1822 1822
1823 iphy = ihost->power_control.requesters[i]; 1823 iphy = ihost->power_control.requesters[i];
1824 if (iphy == NULL) 1824 if (iphy == NULL)
1825 continue; 1825 continue;
1826 1826
1827 if (ihost->power_control.phys_granted_power >= max_spin_up(ihost)) 1827 if (ihost->power_control.phys_granted_power >= max_spin_up(ihost))
1828 break; 1828 break;
1829 1829
1830 ihost->power_control.requesters[i] = NULL; 1830 ihost->power_control.requesters[i] = NULL;
1831 ihost->power_control.phys_waiting--; 1831 ihost->power_control.phys_waiting--;
1832 ihost->power_control.phys_granted_power++; 1832 ihost->power_control.phys_granted_power++;
1833 sci_phy_consume_power_handler(iphy); 1833 sci_phy_consume_power_handler(iphy);
1834 1834
1835 if (iphy->protocol == SAS_PROTOCOL_SSP) { 1835 if (iphy->protocol == SAS_PROTOCOL_SSP) {
1836 u8 j; 1836 u8 j;
1837 1837
1838 for (j = 0; j < SCI_MAX_PHYS; j++) { 1838 for (j = 0; j < SCI_MAX_PHYS; j++) {
1839 struct isci_phy *requester = ihost->power_control.requesters[j]; 1839 struct isci_phy *requester = ihost->power_control.requesters[j];
1840 1840
1841 /* 1841 /*
1842 * Search the power_control queue to see if there are other phys 1842 * Search the power_control queue to see if there are other phys
1843 * attached to the same remote device. If found, take all of 1843 * attached to the same remote device. If found, take all of
1844 * them out of await_sas_power state. 1844 * them out of await_sas_power state.
1845 */ 1845 */
1846 if (requester != NULL && requester != iphy) { 1846 if (requester != NULL && requester != iphy) {
1847 u8 other = memcmp(requester->frame_rcvd.iaf.sas_addr, 1847 u8 other = memcmp(requester->frame_rcvd.iaf.sas_addr,
1848 iphy->frame_rcvd.iaf.sas_addr, 1848 iphy->frame_rcvd.iaf.sas_addr,
1849 sizeof(requester->frame_rcvd.iaf.sas_addr)); 1849 sizeof(requester->frame_rcvd.iaf.sas_addr));
1850 1850
1851 if (other == 0) { 1851 if (other == 0) {
1852 ihost->power_control.requesters[j] = NULL; 1852 ihost->power_control.requesters[j] = NULL;
1853 ihost->power_control.phys_waiting--; 1853 ihost->power_control.phys_waiting--;
1854 sci_phy_consume_power_handler(requester); 1854 sci_phy_consume_power_handler(requester);
1855 } 1855 }
1856 } 1856 }
1857 } 1857 }
1858 } 1858 }
1859 } 1859 }
1860 1860
1861 /* 1861 /*
1862 * It doesn't matter if the power list is empty, we need to start the 1862 * It doesn't matter if the power list is empty, we need to start the
1863 * timer in case another phy becomes ready. 1863 * timer in case another phy becomes ready.
1864 */ 1864 */
1865 sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); 1865 sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
1866 ihost->power_control.timer_started = true; 1866 ihost->power_control.timer_started = true;
1867 1867
1868 done: 1868 done:
1869 spin_unlock_irqrestore(&ihost->scic_lock, flags); 1869 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1870 } 1870 }
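
A simplified standalone model of the staggered spin-up policy implemented by the timer above: each interval resets the grant budget and releases up to max_spin_up() queued phys. The structure and names here are illustrative, not the driver's types.

	#include <stdio.h>

	#define MAX_PHYS 4

	struct power_model {
		int waiting[MAX_PHYS];	/* 1 = phy queued for power */
		int granted;		/* grants issued this interval */
	};

	static void interval_tick(struct power_model *pm, int budget)
	{
		int i;

		pm->granted = 0;	/* new interval, fresh budget */
		for (i = 0; i < MAX_PHYS && pm->granted < budget; i++) {
			if (!pm->waiting[i])
				continue;
			pm->waiting[i] = 0;	/* phy may spin up its device */
			pm->granted++;
		}
	}

	int main(void)
	{
		struct power_model pm = { .waiting = { 1, 1, 1, 1 } };

		interval_tick(&pm, 2);	/* grants phys 0 and 1 only */
		printf("still waiting: %d %d %d %d\n", pm.waiting[0],
		       pm.waiting[1], pm.waiting[2], pm.waiting[3]);
		return 0;
	}
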
1871 1871
1872 void sci_controller_power_control_queue_insert(struct isci_host *ihost, 1872 void sci_controller_power_control_queue_insert(struct isci_host *ihost,
1873 struct isci_phy *iphy) 1873 struct isci_phy *iphy)
1874 { 1874 {
1875 BUG_ON(iphy == NULL); 1875 BUG_ON(iphy == NULL);
1876 1876
1877 if (ihost->power_control.phys_granted_power < max_spin_up(ihost)) { 1877 if (ihost->power_control.phys_granted_power < max_spin_up(ihost)) {
1878 ihost->power_control.phys_granted_power++; 1878 ihost->power_control.phys_granted_power++;
1879 sci_phy_consume_power_handler(iphy); 1879 sci_phy_consume_power_handler(iphy);
1880 1880
1881 /* 1881 /*
1882 * stop and start the power_control timer. When the timer fires, the 1882 * stop and start the power_control timer. When the timer fires, the
1899 1883 * phys_granted_power count will be set to 0 1883 * phys_granted_power count will be set to 0
1884 */ 1884 */
1885 if (ihost->power_control.timer_started) 1885 if (ihost->power_control.timer_started)
1886 sci_del_timer(&ihost->power_control.timer); 1886 sci_del_timer(&ihost->power_control.timer);
1887 1887
1888 sci_mod_timer(&ihost->power_control.timer, 1888 sci_mod_timer(&ihost->power_control.timer,
1889 SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL); 1889 SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
1890 ihost->power_control.timer_started = true; 1890 ihost->power_control.timer_started = true;
1891 1891
1892 } else { 1892 } else {
1893 /* 1893 /*
1894 * If phys attached to the same SAS address as this phy are 1894 * If phys attached to the same SAS address as this phy are
1895 * already in the READY state, this phy does not need to wait. 1895 * already in the READY state, this phy does not need to wait.
1896 */ 1896 */
1897 u8 i; 1897 u8 i;
1898 struct isci_phy *current_phy; 1898 struct isci_phy *current_phy;
1899 1899
1900 for (i = 0; i < SCI_MAX_PHYS; i++) { 1900 for (i = 0; i < SCI_MAX_PHYS; i++) {
1901 u8 other; 1901 u8 other;
1902 current_phy = &ihost->phys[i]; 1902 current_phy = &ihost->phys[i];
1903 1903
1904 other = memcmp(current_phy->frame_rcvd.iaf.sas_addr, 1904 other = memcmp(current_phy->frame_rcvd.iaf.sas_addr,
1905 iphy->frame_rcvd.iaf.sas_addr, 1905 iphy->frame_rcvd.iaf.sas_addr,
1906 sizeof(current_phy->frame_rcvd.iaf.sas_addr)); 1906 sizeof(current_phy->frame_rcvd.iaf.sas_addr));
1907 1907
1908 if (current_phy->sm.current_state_id == SCI_PHY_READY && 1908 if (current_phy->sm.current_state_id == SCI_PHY_READY &&
1909 current_phy->protocol == SAS_PROTOCOL_SSP && 1909 current_phy->protocol == SAS_PROTOCOL_SSP &&
1910 other == 0) { 1910 other == 0) {
1911 sci_phy_consume_power_handler(iphy); 1911 sci_phy_consume_power_handler(iphy);
1912 break; 1912 break;
1913 } 1913 }
1914 } 1914 }
1915 1915
1916 if (i == SCI_MAX_PHYS) { 1916 if (i == SCI_MAX_PHYS) {
1917 /* Add the phy to the waiting list */ 1917 /* Add the phy to the waiting list */
1918 ihost->power_control.requesters[iphy->phy_index] = iphy; 1918 ihost->power_control.requesters[iphy->phy_index] = iphy;
1919 ihost->power_control.phys_waiting++; 1919 ihost->power_control.phys_waiting++;
1920 } 1920 }
1921 } 1921 }
1922 } 1922 }
1923 1923
1924 void sci_controller_power_control_queue_remove(struct isci_host *ihost, 1924 void sci_controller_power_control_queue_remove(struct isci_host *ihost,
1925 struct isci_phy *iphy) 1925 struct isci_phy *iphy)
1926 { 1926 {
1927 BUG_ON(iphy == NULL); 1927 BUG_ON(iphy == NULL);
1928 1928
1929 if (ihost->power_control.requesters[iphy->phy_index]) 1929 if (ihost->power_control.requesters[iphy->phy_index])
1930 ihost->power_control.phys_waiting--; 1930 ihost->power_control.phys_waiting--;
1931 1931
1932 ihost->power_control.requesters[iphy->phy_index] = NULL; 1932 ihost->power_control.requesters[iphy->phy_index] = NULL;
1933 } 1933 }
1934 1934
1935 static int is_long_cable(int phy, unsigned char selection_byte) 1935 static int is_long_cable(int phy, unsigned char selection_byte)
1936 { 1936 {
1937 return !!(selection_byte & (1 << phy)); 1937 return !!(selection_byte & (1 << phy));
1938 } 1938 }
1939 1939
1940 static int is_medium_cable(int phy, unsigned char selection_byte) 1940 static int is_medium_cable(int phy, unsigned char selection_byte)
1941 { 1941 {
1942 return !!(selection_byte & (1 << (phy + 4))); 1942 return !!(selection_byte & (1 << (phy + 4)));
1943 } 1943 }
1944 1944
1945 static enum cable_selections decode_selection_byte( 1945 static enum cable_selections decode_selection_byte(
1946 int phy, 1946 int phy,
1947 unsigned char selection_byte) 1947 unsigned char selection_byte)
1948 { 1948 {
1949 return ((selection_byte & (1 << phy)) ? 1 : 0) 1949 return ((selection_byte & (1 << phy)) ? 1 : 0)
1950 + (selection_byte & (1 << (phy + 4)) ? 2 : 0); 1950 + (selection_byte & (1 << (phy + 4)) ? 2 : 0);
1951 } 1951 }
1952 1952
1953 static unsigned char *to_cable_select(struct isci_host *ihost) 1953 static unsigned char *to_cable_select(struct isci_host *ihost)
1954 { 1954 {
1955 if (is_cable_select_overridden()) 1955 if (is_cable_select_overridden())
1956 return ((unsigned char *)&cable_selection_override) 1956 return ((unsigned char *)&cable_selection_override)
1957 + ihost->id; 1957 + ihost->id;
1958 else 1958 else
1959 return &ihost->oem_parameters.controller.cable_selection_mask; 1959 return &ihost->oem_parameters.controller.cable_selection_mask;
1960 } 1960 }
1961 1961
1962 enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy) 1962 enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy)
1963 { 1963 {
1964 return decode_selection_byte(phy, *to_cable_select(ihost)); 1964 return decode_selection_byte(phy, *to_cable_select(ihost));
1965 } 1965 }
1966 1966
1967 char *lookup_cable_names(enum cable_selections selection) 1967 char *lookup_cable_names(enum cable_selections selection)
1968 { 1968 {
1969 static char *cable_names[] = { 1969 static char *cable_names[] = {
1970 [short_cable] = "short", 1970 [short_cable] = "short",
1971 [long_cable] = "long", 1971 [long_cable] = "long",
1972 [medium_cable] = "medium", 1972 [medium_cable] = "medium",
1973 [undefined_cable] = "<undefined, assumed long>" /* bit 0==1 */ 1973 [undefined_cable] = "<undefined, assumed long>" /* bit 0==1 */
1974 }; 1974 };
1975 return (selection <= undefined_cable) ? cable_names[selection] 1975 return (selection <= undefined_cable) ? cable_names[selection]
1976 : cable_names[undefined_cable]; 1976 : cable_names[undefined_cable];
1977 } 1977 }
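
The selection byte encodes two bits per phy: bit n marks a long cable and bit n+4 a medium cable, so the decoded value indexes the enum (0 short, 1 long, 2 medium, 3 undefined, assumed long). A standalone decode example under those assumptions:

	#include <stdio.h>

	static int decode(int phy, unsigned char sel)
	{
		return ((sel >> phy) & 1) + (((sel >> (phy + 4)) & 1) << 1);
	}

	int main(void)
	{
		unsigned char sel = 0x21;	/* bit 0: phy0 long; bit 5: phy1 medium */

		printf("phy0=%d phy1=%d phy2=%d\n",
		       decode(0, sel), decode(1, sel), decode(2, sel));
		/* prints: phy0=1 phy1=2 phy2=0 */
		return 0;
	}
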
1978 1978
1979 #define AFE_REGISTER_WRITE_DELAY 10 1979 #define AFE_REGISTER_WRITE_DELAY 10
1980 1980
1981 static void sci_controller_afe_initialization(struct isci_host *ihost) 1981 static void sci_controller_afe_initialization(struct isci_host *ihost)
1982 { 1982 {
1983 struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe; 1983 struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
1984 const struct sci_oem_params *oem = &ihost->oem_parameters; 1984 const struct sci_oem_params *oem = &ihost->oem_parameters;
1985 struct pci_dev *pdev = ihost->pdev; 1985 struct pci_dev *pdev = ihost->pdev;
1986 u32 afe_status; 1986 u32 afe_status;
1987 u32 phy_id; 1987 u32 phy_id;
1988 unsigned char cable_selection_mask = *to_cable_select(ihost); 1988 unsigned char cable_selection_mask = *to_cable_select(ihost);
1989 1989
1990 /* Clear DFX Status registers */ 1990 /* Clear DFX Status registers */
1991 writel(0x0081000f, &afe->afe_dfx_master_control0); 1991 writel(0x0081000f, &afe->afe_dfx_master_control0);
1992 udelay(AFE_REGISTER_WRITE_DELAY); 1992 udelay(AFE_REGISTER_WRITE_DELAY);
1993 1993
1994 if (is_b0(pdev) || is_c0(pdev) || is_c1(pdev)) { 1994 if (is_b0(pdev) || is_c0(pdev) || is_c1(pdev)) {
1995 /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement 1995 /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
1996 * Timer, PM Stagger Timer 1996 * Timer, PM Stagger Timer
1997 */ 1997 */
1998 writel(0x0007FFFF, &afe->afe_pmsn_master_control2); 1998 writel(0x0007FFFF, &afe->afe_pmsn_master_control2);
1999 udelay(AFE_REGISTER_WRITE_DELAY); 1999 udelay(AFE_REGISTER_WRITE_DELAY);
2000 } 2000 }
2001 2001
2002 /* Configure bias currents to normal */ 2002 /* Configure bias currents to normal */
2003 if (is_a2(pdev)) 2003 if (is_a2(pdev))
2004 writel(0x00005A00, &afe->afe_bias_control); 2004 writel(0x00005A00, &afe->afe_bias_control);
2005 else if (is_b0(pdev) || is_c0(pdev)) 2005 else if (is_b0(pdev) || is_c0(pdev))
2006 writel(0x00005F00, &afe->afe_bias_control); 2006 writel(0x00005F00, &afe->afe_bias_control);
2007 else if (is_c1(pdev)) 2007 else if (is_c1(pdev))
2008 writel(0x00005500, &afe->afe_bias_control); 2008 writel(0x00005500, &afe->afe_bias_control);
2009 2009
2010 udelay(AFE_REGISTER_WRITE_DELAY); 2010 udelay(AFE_REGISTER_WRITE_DELAY);
2011 2011
2012 /* Enable PLL */ 2012 /* Enable PLL */
2013 if (is_a2(pdev)) 2013 if (is_a2(pdev))
2014 writel(0x80040908, &afe->afe_pll_control0); 2014 writel(0x80040908, &afe->afe_pll_control0);
2015 else if (is_b0(pdev) || is_c0(pdev)) 2015 else if (is_b0(pdev) || is_c0(pdev))
2016 writel(0x80040A08, &afe->afe_pll_control0); 2016 writel(0x80040A08, &afe->afe_pll_control0);
2017 else if (is_c1(pdev)) { 2017 else if (is_c1(pdev)) {
2018 writel(0x80000B08, &afe->afe_pll_control0); 2018 writel(0x80000B08, &afe->afe_pll_control0);
2019 udelay(AFE_REGISTER_WRITE_DELAY); 2019 udelay(AFE_REGISTER_WRITE_DELAY);
2020 writel(0x00000B08, &afe->afe_pll_control0); 2020 writel(0x00000B08, &afe->afe_pll_control0);
2021 udelay(AFE_REGISTER_WRITE_DELAY); 2021 udelay(AFE_REGISTER_WRITE_DELAY);
2022 writel(0x80000B08, &afe->afe_pll_control0); 2022 writel(0x80000B08, &afe->afe_pll_control0);
2023 } 2023 }
2024 2024
2025 udelay(AFE_REGISTER_WRITE_DELAY); 2025 udelay(AFE_REGISTER_WRITE_DELAY);
2026 2026
2027 /* Wait for the PLL to lock */ 2027 /* Wait for the PLL to lock */
2028 do { 2028 do {
2029 afe_status = readl(&afe->afe_common_block_status); 2029 afe_status = readl(&afe->afe_common_block_status);
2030 udelay(AFE_REGISTER_WRITE_DELAY); 2030 udelay(AFE_REGISTER_WRITE_DELAY);
2031 } while ((afe_status & 0x00001000) == 0); 2031 } while ((afe_status & 0x00001000) == 0);
2032 2032
2033 if (is_a2(pdev)) { 2033 if (is_a2(pdev)) {
2034 /* Shorten SAS SNW lock time (RxLock timer value from 76 2034 /* Shorten SAS SNW lock time (RxLock timer value from 76
2035 * us to 50 us) 2035 * us to 50 us)
2036 */ 2036 */
2037 writel(0x7bcc96ad, &afe->afe_pmsn_master_control0); 2037 writel(0x7bcc96ad, &afe->afe_pmsn_master_control0);
2038 udelay(AFE_REGISTER_WRITE_DELAY); 2038 udelay(AFE_REGISTER_WRITE_DELAY);
2039 } 2039 }
2040 2040
2041 for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) { 2041 for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
2042 struct scu_afe_transceiver *xcvr = &afe->scu_afe_xcvr[phy_id]; 2042 struct scu_afe_transceiver *xcvr = &afe->scu_afe_xcvr[phy_id];
2043 const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id]; 2043 const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
2044 int cable_length_long = 2044 int cable_length_long =
2045 is_long_cable(phy_id, cable_selection_mask); 2045 is_long_cable(phy_id, cable_selection_mask);
2046 int cable_length_medium = 2046 int cable_length_medium =
2047 is_medium_cable(phy_id, cable_selection_mask); 2047 is_medium_cable(phy_id, cable_selection_mask);
2048 2048
2049 if (is_a2(pdev)) { 2049 if (is_a2(pdev)) {
2050 /* All defaults, except the Receive Word 2050 /* All defaults, except the Receive Word
2051 * Alignment/Comma Detect Enable....(0xe800) 2051 * Alignment/Comma Detect Enable....(0xe800)
2052 */ 2052 */
2053 writel(0x00004512, &xcvr->afe_xcvr_control0); 2053 writel(0x00004512, &xcvr->afe_xcvr_control0);
2054 udelay(AFE_REGISTER_WRITE_DELAY); 2054 udelay(AFE_REGISTER_WRITE_DELAY);
2055 2055
2056 writel(0x0050100F, &xcvr->afe_xcvr_control1); 2056 writel(0x0050100F, &xcvr->afe_xcvr_control1);
2057 udelay(AFE_REGISTER_WRITE_DELAY); 2057 udelay(AFE_REGISTER_WRITE_DELAY);
2058 } else if (is_b0(pdev)) { 2058 } else if (is_b0(pdev)) {
2059 /* Configure transmitter SSC parameters */ 2059 /* Configure transmitter SSC parameters */
2060 writel(0x00030000, &xcvr->afe_tx_ssc_control); 2060 writel(0x00030000, &xcvr->afe_tx_ssc_control);
2061 udelay(AFE_REGISTER_WRITE_DELAY); 2061 udelay(AFE_REGISTER_WRITE_DELAY);
2062 } else if (is_c0(pdev)) { 2062 } else if (is_c0(pdev)) {
2063 /* Configure transmitter SSC parameters */ 2063 /* Configure transmitter SSC parameters */
2064 writel(0x00010202, &xcvr->afe_tx_ssc_control); 2064 writel(0x00010202, &xcvr->afe_tx_ssc_control);
2065 udelay(AFE_REGISTER_WRITE_DELAY); 2065 udelay(AFE_REGISTER_WRITE_DELAY);
2066 2066
2067 /* All defaults, except the Receive Word 2067 /* All defaults, except the Receive Word
2068 * Alignment/Comma Detect Enable....(0xe800) 2068 * Alignment/Comma Detect Enable....(0xe800)
2069 */ 2069 */
2070 writel(0x00014500, &xcvr->afe_xcvr_control0); 2070 writel(0x00014500, &xcvr->afe_xcvr_control0);
2071 udelay(AFE_REGISTER_WRITE_DELAY); 2071 udelay(AFE_REGISTER_WRITE_DELAY);
2072 } else if (is_c1(pdev)) { 2072 } else if (is_c1(pdev)) {
2073 /* Configure transmitter SSC parameters */ 2073 /* Configure transmitter SSC parameters */
2074 writel(0x00010202, &xcvr->afe_tx_ssc_control); 2074 writel(0x00010202, &xcvr->afe_tx_ssc_control);
2075 udelay(AFE_REGISTER_WRITE_DELAY); 2075 udelay(AFE_REGISTER_WRITE_DELAY);
2076 2076
2077 /* All defaults, except the Receive Word 2077 /* All defaults, except the Receive Word
2078 * Alignament/Comma Detect Enable....(0xe800) 2078 * Alignament/Comma Detect Enable....(0xe800)
2079 */ 2079 */
2080 writel(0x0001C500, &xcvr->afe_xcvr_control0); 2080 writel(0x0001C500, &xcvr->afe_xcvr_control0);
2081 udelay(AFE_REGISTER_WRITE_DELAY); 2081 udelay(AFE_REGISTER_WRITE_DELAY);
2082 } 2082 }
2083 2083
2084 /* Power up TX and RX out from power down (PWRDNTX and 2084 /* Power up TX and RX out from power down (PWRDNTX and
2085 * PWRDNRX) & increase TX int & ext bias 20%....(0xe85c) 2085 * PWRDNRX) & increase TX int & ext bias 20%....(0xe85c)
2086 */ 2086 */
2087 if (is_a2(pdev)) 2087 if (is_a2(pdev))
2088 writel(0x000003F0, &xcvr->afe_channel_control); 2088 writel(0x000003F0, &xcvr->afe_channel_control);
2089 else if (is_b0(pdev)) { 2089 else if (is_b0(pdev)) {
2090 writel(0x000003D7, &xcvr->afe_channel_control); 2090 writel(0x000003D7, &xcvr->afe_channel_control);
2091 udelay(AFE_REGISTER_WRITE_DELAY); 2091 udelay(AFE_REGISTER_WRITE_DELAY);
2092 2092
2093 writel(0x000003D4, &xcvr->afe_channel_control); 2093 writel(0x000003D4, &xcvr->afe_channel_control);
2094 } else if (is_c0(pdev)) { 2094 } else if (is_c0(pdev)) {
2095 writel(0x000001E7, &xcvr->afe_channel_control); 2095 writel(0x000001E7, &xcvr->afe_channel_control);
2096 udelay(AFE_REGISTER_WRITE_DELAY); 2096 udelay(AFE_REGISTER_WRITE_DELAY);
2097 2097
2098 writel(0x000001E4, &xcvr->afe_channel_control); 2098 writel(0x000001E4, &xcvr->afe_channel_control);
2099 } else if (is_c1(pdev)) { 2099 } else if (is_c1(pdev)) {
2100 writel(cable_length_long ? 0x000002F7 : 0x000001F7, 2100 writel(cable_length_long ? 0x000002F7 : 0x000001F7,
2101 &xcvr->afe_channel_control); 2101 &xcvr->afe_channel_control);
2102 udelay(AFE_REGISTER_WRITE_DELAY); 2102 udelay(AFE_REGISTER_WRITE_DELAY);
2103 2103
2104 writel(cable_length_long ? 0x000002F4 : 0x000001F4, 2104 writel(cable_length_long ? 0x000002F4 : 0x000001F4,
2105 &xcvr->afe_channel_control); 2105 &xcvr->afe_channel_control);
2106 } 2106 }
2107 udelay(AFE_REGISTER_WRITE_DELAY); 2107 udelay(AFE_REGISTER_WRITE_DELAY);
2108 2108
2109 if (is_a2(pdev)) { 2109 if (is_a2(pdev)) {
2110 /* Enable TX equalization (0xe824) */ 2110 /* Enable TX equalization (0xe824) */
2111 writel(0x00040000, &xcvr->afe_tx_control); 2111 writel(0x00040000, &xcvr->afe_tx_control);
2112 udelay(AFE_REGISTER_WRITE_DELAY); 2112 udelay(AFE_REGISTER_WRITE_DELAY);
2113 } 2113 }
2114 2114
2115 if (is_a2(pdev) || is_b0(pdev)) 2115 if (is_a2(pdev) || is_b0(pdev))
2116 /* RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, 2116 /* RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0,
2117 * TPD=0x0(TX Power On), RDD=0x0(RX Detect 2117 * TPD=0x0(TX Power On), RDD=0x0(RX Detect
2118 * Enabled) ....(0xe800) 2118 * Enabled) ....(0xe800)
2119 */ 2119 */
2120 writel(0x00004100, &xcvr->afe_xcvr_control0); 2120 writel(0x00004100, &xcvr->afe_xcvr_control0);
2121 else if (is_c0(pdev)) 2121 else if (is_c0(pdev))
2122 writel(0x00014100, &xcvr->afe_xcvr_control0); 2122 writel(0x00014100, &xcvr->afe_xcvr_control0);
2123 else if (is_c1(pdev)) 2123 else if (is_c1(pdev))
2124 writel(0x0001C100, &xcvr->afe_xcvr_control0); 2124 writel(0x0001C100, &xcvr->afe_xcvr_control0);
2125 udelay(AFE_REGISTER_WRITE_DELAY); 2125 udelay(AFE_REGISTER_WRITE_DELAY);
2126 2126
2127 /* Leave DFE/FFE on */ 2127 /* Leave DFE/FFE on */
2128 if (is_a2(pdev)) 2128 if (is_a2(pdev))
2129 writel(0x3F11103F, &xcvr->afe_rx_ssc_control0); 2129 writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
2130 else if (is_b0(pdev)) { 2130 else if (is_b0(pdev)) {
2131 writel(0x3F11103F, &xcvr->afe_rx_ssc_control0); 2131 writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
2132 udelay(AFE_REGISTER_WRITE_DELAY); 2132 udelay(AFE_REGISTER_WRITE_DELAY);
2133 /* Enable TX equalization (0xe824) */ 2133 /* Enable TX equalization (0xe824) */
2134 writel(0x00040000, &xcvr->afe_tx_control); 2134 writel(0x00040000, &xcvr->afe_tx_control);
2135 } else if (is_c0(pdev)) { 2135 } else if (is_c0(pdev)) {
2136 writel(0x01400C0F, &xcvr->afe_rx_ssc_control1); 2136 writel(0x01400C0F, &xcvr->afe_rx_ssc_control1);
2137 udelay(AFE_REGISTER_WRITE_DELAY); 2137 udelay(AFE_REGISTER_WRITE_DELAY);
2138 2138
2139 writel(0x3F6F103F, &xcvr->afe_rx_ssc_control0); 2139 writel(0x3F6F103F, &xcvr->afe_rx_ssc_control0);
2140 udelay(AFE_REGISTER_WRITE_DELAY); 2140 udelay(AFE_REGISTER_WRITE_DELAY);
2141 2141
2142 /* Enable TX equalization (0xe824) */ 2142 /* Enable TX equalization (0xe824) */
2143 writel(0x00040000, &xcvr->afe_tx_control); 2143 writel(0x00040000, &xcvr->afe_tx_control);
2144 } else if (is_c1(pdev)) { 2144 } else if (is_c1(pdev)) {
2145 writel(cable_length_long ? 0x01500C0C : 2145 writel(cable_length_long ? 0x01500C0C :
2146 cable_length_medium ? 0x01400C0D : 0x02400C0D, 2146 cable_length_medium ? 0x01400C0D : 0x02400C0D,
2147 &xcvr->afe_xcvr_control1); 2147 &xcvr->afe_xcvr_control1);
2148 udelay(AFE_REGISTER_WRITE_DELAY); 2148 udelay(AFE_REGISTER_WRITE_DELAY);
2149 2149
2150 writel(0x000003E0, &xcvr->afe_dfx_rx_control1); 2150 writel(0x000003E0, &xcvr->afe_dfx_rx_control1);
2151 udelay(AFE_REGISTER_WRITE_DELAY); 2151 udelay(AFE_REGISTER_WRITE_DELAY);
2152 2152
2153 writel(cable_length_long ? 0x33091C1F : 2153 writel(cable_length_long ? 0x33091C1F :
2154 cable_length_medium ? 0x3315181F : 0x2B17161F, 2154 cable_length_medium ? 0x3315181F : 0x2B17161F,
2155 &xcvr->afe_rx_ssc_control0); 2155 &xcvr->afe_rx_ssc_control0);
2156 udelay(AFE_REGISTER_WRITE_DELAY); 2156 udelay(AFE_REGISTER_WRITE_DELAY);
2157 2157
2158 /* Enable TX equalization (0xe824) */ 2158 /* Enable TX equalization (0xe824) */
2159 writel(0x00040000, &xcvr->afe_tx_control); 2159 writel(0x00040000, &xcvr->afe_tx_control);
2160 } 2160 }
2161 2161
2162 udelay(AFE_REGISTER_WRITE_DELAY); 2162 udelay(AFE_REGISTER_WRITE_DELAY);
2163 2163
2164 writel(oem_phy->afe_tx_amp_control0, &xcvr->afe_tx_amp_control0); 2164 writel(oem_phy->afe_tx_amp_control0, &xcvr->afe_tx_amp_control0);
2165 udelay(AFE_REGISTER_WRITE_DELAY); 2165 udelay(AFE_REGISTER_WRITE_DELAY);
2166 2166
2167 writel(oem_phy->afe_tx_amp_control1, &xcvr->afe_tx_amp_control1); 2167 writel(oem_phy->afe_tx_amp_control1, &xcvr->afe_tx_amp_control1);
2168 udelay(AFE_REGISTER_WRITE_DELAY); 2168 udelay(AFE_REGISTER_WRITE_DELAY);
2169 2169
2170 writel(oem_phy->afe_tx_amp_control2, &xcvr->afe_tx_amp_control2); 2170 writel(oem_phy->afe_tx_amp_control2, &xcvr->afe_tx_amp_control2);
2171 udelay(AFE_REGISTER_WRITE_DELAY); 2171 udelay(AFE_REGISTER_WRITE_DELAY);
2172 2172
2173 writel(oem_phy->afe_tx_amp_control3, &xcvr->afe_tx_amp_control3); 2173 writel(oem_phy->afe_tx_amp_control3, &xcvr->afe_tx_amp_control3);
2174 udelay(AFE_REGISTER_WRITE_DELAY); 2174 udelay(AFE_REGISTER_WRITE_DELAY);
2175 } 2175 }
2176 2176
2177 /* Transfer control to the PEs */ 2177 /* Transfer control to the PEs */
2178 writel(0x00010f00, &afe->afe_dfx_master_control0); 2178 writel(0x00010f00, &afe->afe_dfx_master_control0);
2179 udelay(AFE_REGISTER_WRITE_DELAY); 2179 udelay(AFE_REGISTER_WRITE_DELAY);
2180 } 2180 }
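
Every AFE register write above is followed by the same settling delay. A hypothetical kernel-style wrapper (not present in the driver) would make that invariant explicit:

	static void afe_writel(u32 val, void __iomem *reg)
	{
		writel(val, reg);			/* program the AFE register */
		udelay(AFE_REGISTER_WRITE_DELAY);	/* 10 us settle per write */
	}
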
2181 2181
2182 static void sci_controller_initialize_power_control(struct isci_host *ihost) 2182 static void sci_controller_initialize_power_control(struct isci_host *ihost)
2183 { 2183 {
2184 sci_init_timer(&ihost->power_control.timer, power_control_timeout); 2184 sci_init_timer(&ihost->power_control.timer, power_control_timeout);
2185 2185
2186 memset(ihost->power_control.requesters, 0, 2186 memset(ihost->power_control.requesters, 0,
2187 sizeof(ihost->power_control.requesters)); 2187 sizeof(ihost->power_control.requesters));
2188 2188
2189 ihost->power_control.phys_waiting = 0; 2189 ihost->power_control.phys_waiting = 0;
2190 ihost->power_control.phys_granted_power = 0; 2190 ihost->power_control.phys_granted_power = 0;
2191 } 2191 }
2192 2192
2193 static enum sci_status sci_controller_initialize(struct isci_host *ihost) 2193 static enum sci_status sci_controller_initialize(struct isci_host *ihost)
2194 { 2194 {
2195 struct sci_base_state_machine *sm = &ihost->sm; 2195 struct sci_base_state_machine *sm = &ihost->sm;
2196 enum sci_status result = SCI_FAILURE; 2196 enum sci_status result = SCI_FAILURE;
2197 unsigned long i, state, val; 2197 unsigned long i, state, val;
2198 2198
2199 if (ihost->sm.current_state_id != SCIC_RESET) { 2199 if (ihost->sm.current_state_id != SCIC_RESET) {
2200 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", 2200 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2201 __func__, ihost->sm.current_state_id); 2201 __func__, ihost->sm.current_state_id);
2202 return SCI_FAILURE_INVALID_STATE; 2202 return SCI_FAILURE_INVALID_STATE;
2203 } 2203 }
2204 2204
2205 sci_change_state(sm, SCIC_INITIALIZING); 2205 sci_change_state(sm, SCIC_INITIALIZING);
2206 2206
2207 sci_init_timer(&ihost->phy_timer, phy_startup_timeout); 2207 sci_init_timer(&ihost->phy_timer, phy_startup_timeout);
2208 2208
2209 ihost->next_phy_to_start = 0; 2209 ihost->next_phy_to_start = 0;
2210 ihost->phy_startup_timer_pending = false; 2210 ihost->phy_startup_timer_pending = false;
2211 2211
2212 sci_controller_initialize_power_control(ihost); 2212 sci_controller_initialize_power_control(ihost);
2213 2213
2214 /* 2214 /*
2215 * There is nothing to do here for B0 since we do not have to 2215 * There is nothing to do here for B0 since we do not have to
2216 * program the AFE registers. 2216 * program the AFE registers.
2217 * @todo The AFE settings are supposed to be correct for the B0 but 2217 * @todo The AFE settings are supposed to be correct for the B0 but
2218 * presently they seem to be wrong. */ 2218 * presently they seem to be wrong. */
2219 sci_controller_afe_initialization(ihost); 2219 sci_controller_afe_initialization(ihost);
2220 2220
2221 2221
2222 /* Take the hardware out of reset */ 2222 /* Take the hardware out of reset */
2223 writel(0, &ihost->smu_registers->soft_reset_control); 2223 writel(0, &ihost->smu_registers->soft_reset_control);
2224 2224
2225 /* 2225 /*
2226 * @todo Provide meaningful error code for hardware failure 2226 * @todo Provide meaningful error code for hardware failure
2227 * result = SCI_FAILURE_CONTROLLER_HARDWARE; */ 2227 * result = SCI_FAILURE_CONTROLLER_HARDWARE; */
2228 for (i = 100; i >= 1; i--) { 2228 for (i = 100; i >= 1; i--) {
2229 u32 status; 2229 u32 status;
2230 2230
2231 /* Loop until the hardware reports success */ 2231 /* Loop until the hardware reports success */
2232 udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME); 2232 udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
2233 status = readl(&ihost->smu_registers->control_status); 2233 status = readl(&ihost->smu_registers->control_status);
2234 2234
2235 if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED) 2235 if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED)
2236 break; 2236 break;
2237 } 2237 }
2238 if (i == 0) 2238 if (i == 0)
2239 goto out; 2239 goto out;
2240 2240
2241 /* 2241 /*
2242 * Determine the actual device capacities that the 2242 * Determine the actual device capacities that the
2243 * hardware will support */ 2243 * hardware will support */
2244 val = readl(&ihost->smu_registers->device_context_capacity); 2244 val = readl(&ihost->smu_registers->device_context_capacity);
2245 2245
2246 /* Record the smaller of the two capacity values */ 2246 /* Record the smaller of the two capacity values */
2247 ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS); 2247 ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS);
2248 ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS); 2248 ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS);
2249 ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES); 2249 ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES);
2250 2250
2251 /* 2251 /*
2252 * Make all PEs that are unassigned match up with the 2252 * Make all PEs that are unassigned match up with the
2253 * logical ports 2253 * logical ports
2254 */ 2254 */
2255 for (i = 0; i < ihost->logical_port_entries; i++) { 2255 for (i = 0; i < ihost->logical_port_entries; i++) {
2256 struct scu_port_task_scheduler_group_registers __iomem 2256 struct scu_port_task_scheduler_group_registers __iomem
2257 *ptsg = &ihost->scu_registers->peg0.ptsg; 2257 *ptsg = &ihost->scu_registers->peg0.ptsg;
2258 2258
2259 writel(i, &ptsg->protocol_engine[i]); 2259 writel(i, &ptsg->protocol_engine[i]);
2260 } 2260 }
2261 2261
2262 /* Initialize hardware PCI Relaxed ordering in DMA engines */ 2262 /* Initialize hardware PCI Relaxed ordering in DMA engines */
2263 val = readl(&ihost->scu_registers->sdma.pdma_configuration); 2263 val = readl(&ihost->scu_registers->sdma.pdma_configuration);
2264 val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE); 2264 val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2265 writel(val, &ihost->scu_registers->sdma.pdma_configuration); 2265 writel(val, &ihost->scu_registers->sdma.pdma_configuration);
2266 2266
2267 val = readl(&ihost->scu_registers->sdma.cdma_configuration); 2267 val = readl(&ihost->scu_registers->sdma.cdma_configuration);
2268 val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE); 2268 val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2269 writel(val, &ihost->scu_registers->sdma.cdma_configuration); 2269 writel(val, &ihost->scu_registers->sdma.cdma_configuration);
2270 2270
2271 /* 2271 /*
2272 * Initialize the PHYs before the PORTs because the PHY registers 2272 * Initialize the PHYs before the PORTs because the PHY registers
2273 * are accessed during the port initialization. 2273 * are accessed during the port initialization.
2274 */ 2274 */
2275 for (i = 0; i < SCI_MAX_PHYS; i++) { 2275 for (i = 0; i < SCI_MAX_PHYS; i++) {
2276 result = sci_phy_initialize(&ihost->phys[i], 2276 result = sci_phy_initialize(&ihost->phys[i],
2277 &ihost->scu_registers->peg0.pe[i].tl, 2277 &ihost->scu_registers->peg0.pe[i].tl,
2278 &ihost->scu_registers->peg0.pe[i].ll); 2278 &ihost->scu_registers->peg0.pe[i].ll);
2279 if (result != SCI_SUCCESS) 2279 if (result != SCI_SUCCESS)
2280 goto out; 2280 goto out;
2281 } 2281 }
2282 2282
2283 for (i = 0; i < ihost->logical_port_entries; i++) { 2283 for (i = 0; i < ihost->logical_port_entries; i++) {
2284 struct isci_port *iport = &ihost->ports[i]; 2284 struct isci_port *iport = &ihost->ports[i];
2285 2285
2286 iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i]; 2286 iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i];
2287 iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0]; 2287 iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0];
2288 iport->viit_registers = &ihost->scu_registers->peg0.viit[i]; 2288 iport->viit_registers = &ihost->scu_registers->peg0.viit[i];
2289 } 2289 }
2290 2290
2291 result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent); 2291 result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent);
2292 2292
2293 out: 2293 out:
2294 /* Advance the controller state machine */ 2294 /* Advance the controller state machine */
2295 if (result == SCI_SUCCESS) 2295 if (result == SCI_SUCCESS)
2296 state = SCIC_INITIALIZED; 2296 state = SCIC_INITIALIZED;
2297 else 2297 else
2298 state = SCIC_FAILED; 2298 state = SCIC_FAILED;
2299 sci_change_state(sm, state); 2299 sci_change_state(sm, state);
2300 2300
2301 return result; 2301 return result;
2302 } 2302 }
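
The RAM-init wait above is the bounded-poll idiom: sample up to a fixed number of times with a delay between samples, and treat loop exhaustion as a hardware failure. A standalone sketch of the same shape (names are illustrative):

	#include <stdbool.h>

	static bool poll_until(bool (*done)(void), void (*wait_one)(void), int tries)
	{
		while (tries-- > 0) {
			wait_one();		/* e.g. a udelay() between samples */
			if (done())
				return true;
		}
		return false;			/* caller maps exhaustion to an error */
	}
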
2303 2303
2304 static int sci_controller_dma_alloc(struct isci_host *ihost) 2304 static int sci_controller_dma_alloc(struct isci_host *ihost)
2305 { 2305 {
2306 struct device *dev = &ihost->pdev->dev; 2306 struct device *dev = &ihost->pdev->dev;
2307 size_t size; 2307 size_t size;
2308 int i; 2308 int i;
2309 2309
2310 /* detect re-initialization */ 2310 /* detect re-initialization */
2311 if (ihost->completion_queue) 2311 if (ihost->completion_queue)
2312 return 0; 2312 return 0;
2313 2313
2314 size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32); 2314 size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
2315 ihost->completion_queue = dmam_alloc_coherent(dev, size, &ihost->cq_dma, 2315 ihost->completion_queue = dmam_alloc_coherent(dev, size, &ihost->cq_dma,
2316 GFP_KERNEL); 2316 GFP_KERNEL);
2317 if (!ihost->completion_queue) 2317 if (!ihost->completion_queue)
2318 return -ENOMEM; 2318 return -ENOMEM;
2319 2319
2320 size = ihost->remote_node_entries * sizeof(union scu_remote_node_context); 2320 size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
2321 ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &ihost->rnc_dma, 2321 ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &ihost->rnc_dma,
2322 GFP_KERNEL); 2322 GFP_KERNEL);
2323 2323
2324 if (!ihost->remote_node_context_table) 2324 if (!ihost->remote_node_context_table)
2325 return -ENOMEM; 2325 return -ENOMEM;
2326 2326
2327 size = ihost->task_context_entries * sizeof(struct scu_task_context); 2327 size = ihost->task_context_entries * sizeof(struct scu_task_context);
2328 ihost->task_context_table = dmam_alloc_coherent(dev, size, &ihost->tc_dma, 2328 ihost->task_context_table = dmam_alloc_coherent(dev, size, &ihost->tc_dma,
2329 GFP_KERNEL); 2329 GFP_KERNEL);
2330 if (!ihost->task_context_table) 2330 if (!ihost->task_context_table)
2331 return -ENOMEM; 2331 return -ENOMEM;
2332 2332
2333 size = SCI_UFI_TOTAL_SIZE; 2333 size = SCI_UFI_TOTAL_SIZE;
2334 ihost->ufi_buf = dmam_alloc_coherent(dev, size, &ihost->ufi_dma, GFP_KERNEL); 2334 ihost->ufi_buf = dmam_alloc_coherent(dev, size, &ihost->ufi_dma, GFP_KERNEL);
2335 if (!ihost->ufi_buf) 2335 if (!ihost->ufi_buf)
2336 return -ENOMEM; 2336 return -ENOMEM;
2337 2337
2338 for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) { 2338 for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
2339 struct isci_request *ireq; 2339 struct isci_request *ireq;
2340 dma_addr_t dma; 2340 dma_addr_t dma;
2341 2341
2342 ireq = dmam_alloc_coherent(dev, sizeof(*ireq), &dma, GFP_KERNEL); 2342 ireq = dmam_alloc_coherent(dev, sizeof(*ireq), &dma, GFP_KERNEL);
2343 if (!ireq) 2343 if (!ireq)
2344 return -ENOMEM; 2344 return -ENOMEM;
2345 2345
2346 ireq->tc = &ihost->task_context_table[i]; 2346 ireq->tc = &ihost->task_context_table[i];
2347 ireq->owning_controller = ihost; 2347 ireq->owning_controller = ihost;
2348 spin_lock_init(&ireq->state_lock); 2348 spin_lock_init(&ireq->state_lock);
2349 ireq->request_daddr = dma; 2349 ireq->request_daddr = dma;
2350 ireq->isci_host = ihost; 2350 ireq->isci_host = ihost;
2351 ihost->reqs[i] = ireq; 2351 ihost->reqs[i] = ireq;
2352 } 2352 }
2353 2353
2354 return 0; 2354 return 0;
2355 } 2355 }
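
The error paths above return -ENOMEM without unwinding earlier allocations because dmam_alloc_coherent() is device-managed: everything it hands out is released automatically when the device detaches. A minimal kernel-style sketch of that pattern (hypothetical function, real API):

	static int example_dma_setup(struct device *dev, void **a, void **b,
				     dma_addr_t *da, dma_addr_t *db)
	{
		*a = dmam_alloc_coherent(dev, PAGE_SIZE, da, GFP_KERNEL);
		if (!*a)
			return -ENOMEM;

		*b = dmam_alloc_coherent(dev, PAGE_SIZE, db, GFP_KERNEL);
		if (!*b)
			return -ENOMEM;	/* *a is freed automatically on detach */

		return 0;
	}
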
2356 2356
2357 static int sci_controller_mem_init(struct isci_host *ihost) 2357 static int sci_controller_mem_init(struct isci_host *ihost)
2358 { 2358 {
2359 int err = sci_controller_dma_alloc(ihost); 2359 int err = sci_controller_dma_alloc(ihost);
2360 2360
2361 if (err) 2361 if (err)
2362 return err; 2362 return err;
2363 2363
2364 writel(lower_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_lower); 2364 writel(lower_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_lower);
2365 writel(upper_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_upper); 2365 writel(upper_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_upper);
2366 2366
2367 writel(lower_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_lower); 2367 writel(lower_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_lower);
2368 writel(upper_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_upper); 2368 writel(upper_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_upper);
2369 2369
2370 writel(lower_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_lower); 2370 writel(lower_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_lower);
2371 writel(upper_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_upper); 2371 writel(upper_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_upper);
2372 2372
2373 sci_unsolicited_frame_control_construct(ihost); 2373 sci_unsolicited_frame_control_construct(ihost);
2374 2374
2375 /* 2375 /*
2376 * Inform the silicon as to the location of the UF headers and 2376 * Inform the silicon as to the location of the UF headers and
2377 * address table. 2377 * address table.
2378 */ 2378 */
2379 writel(lower_32_bits(ihost->uf_control.headers.physical_address), 2379 writel(lower_32_bits(ihost->uf_control.headers.physical_address),
2380 &ihost->scu_registers->sdma.uf_header_base_address_lower); 2380 &ihost->scu_registers->sdma.uf_header_base_address_lower);
2381 writel(upper_32_bits(ihost->uf_control.headers.physical_address), 2381 writel(upper_32_bits(ihost->uf_control.headers.physical_address),
2382 &ihost->scu_registers->sdma.uf_header_base_address_upper); 2382 &ihost->scu_registers->sdma.uf_header_base_address_upper);
2383 2383
2384 writel(lower_32_bits(ihost->uf_control.address_table.physical_address), 2384 writel(lower_32_bits(ihost->uf_control.address_table.physical_address),
2385 &ihost->scu_registers->sdma.uf_address_table_lower); 2385 &ihost->scu_registers->sdma.uf_address_table_lower);
2386 writel(upper_32_bits(ihost->uf_control.address_table.physical_address), 2386 writel(upper_32_bits(ihost->uf_control.address_table.physical_address),
2387 &ihost->scu_registers->sdma.uf_address_table_upper); 2387 &ihost->scu_registers->sdma.uf_address_table_upper);
2388 2388
2389 return 0; 2389 return 0;
2390 } 2390 }
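
Each 64-bit DMA address above is programmed as two 32-bit register writes via lower_32_bits()/upper_32_bits(). The split is plain shift arithmetic, shown standalone:

	#include <stdint.h>

	static uint32_t lo32(uint64_t addr) { return (uint32_t)addr; }
	static uint32_t hi32(uint64_t addr) { return (uint32_t)(addr >> 32); }
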
2391 2391
2392 /** 2392 /**
2393 * isci_host_init - (re-)initialize hardware and internal (private) state 2393 * isci_host_init - (re-)initialize hardware and internal (private) state
2394 * @ihost: host to init 2394 * @ihost: host to init
2395 * 2395 *
2396 * Any public facing objects (like asd_sas_port and asd_sas_phys), or 2396 * Any public facing objects (like asd_sas_port and asd_sas_phys), or
2397 * one-time initialization objects like locks and waitqueues, are 2397 * one-time initialization objects like locks and waitqueues, are
2398 * not touched (they are initialized in isci_host_alloc) 2398 * not touched (they are initialized in isci_host_alloc)
2399 */ 2399 */
2400 int isci_host_init(struct isci_host *ihost) 2400 int isci_host_init(struct isci_host *ihost)
2401 { 2401 {
2402 int i, err; 2402 int i, err;
2403 enum sci_status status; 2403 enum sci_status status;
2404 2404
2405 spin_lock_irq(&ihost->scic_lock); 2405 spin_lock_irq(&ihost->scic_lock);
2406 status = sci_controller_construct(ihost, scu_base(ihost), smu_base(ihost)); 2406 status = sci_controller_construct(ihost, scu_base(ihost), smu_base(ihost));
2407 spin_unlock_irq(&ihost->scic_lock); 2407 spin_unlock_irq(&ihost->scic_lock);
2408 if (status != SCI_SUCCESS) { 2408 if (status != SCI_SUCCESS) {
2409 dev_err(&ihost->pdev->dev, 2409 dev_err(&ihost->pdev->dev,
2410 "%s: sci_controller_construct failed - status = %x\n", 2410 "%s: sci_controller_construct failed - status = %x\n",
2411 __func__, 2411 __func__,
2412 status); 2412 status);
2413 return -ENODEV; 2413 return -ENODEV;
2414 } 2414 }
2415 2415
2416 spin_lock_irq(&ihost->scic_lock); 2416 spin_lock_irq(&ihost->scic_lock);
2417 status = sci_controller_initialize(ihost); 2417 status = sci_controller_initialize(ihost);
2418 spin_unlock_irq(&ihost->scic_lock); 2418 spin_unlock_irq(&ihost->scic_lock);
2419 if (status != SCI_SUCCESS) { 2419 if (status != SCI_SUCCESS) {
2420 dev_warn(&ihost->pdev->dev, 2420 dev_warn(&ihost->pdev->dev,
2421 "%s: sci_controller_initialize failed -" 2421 "%s: sci_controller_initialize failed -"
2422 " status = 0x%x\n", 2422 " status = 0x%x\n",
2423 __func__, status); 2423 __func__, status);
2424 return -ENODEV; 2424 return -ENODEV;
2425 } 2425 }
2426 2426
2427 err = sci_controller_mem_init(ihost); 2427 err = sci_controller_mem_init(ihost);
2428 if (err) 2428 if (err)
2429 return err; 2429 return err;
2430 2430
2431 /* enable sgpio */ 2431 /* enable sgpio */
2432 writel(1, &ihost->scu_registers->peg0.sgpio.interface_control); 2432 writel(1, &ihost->scu_registers->peg0.sgpio.interface_control);
2433 for (i = 0; i < isci_gpio_count(ihost); i++) 2433 for (i = 0; i < isci_gpio_count(ihost); i++)
2434 writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]); 2434 writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
2435 writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code); 2435 writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code);
2436 2436
2437 return 0; 2437 return 0;
2438 } 2438 }
2439 2439
2440 void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport, 2440 void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
2441 struct isci_phy *iphy) 2441 struct isci_phy *iphy)
2442 { 2442 {
2443 switch (ihost->sm.current_state_id) { 2443 switch (ihost->sm.current_state_id) {
2444 case SCIC_STARTING: 2444 case SCIC_STARTING:
2445 sci_del_timer(&ihost->phy_timer); 2445 sci_del_timer(&ihost->phy_timer);
2446 ihost->phy_startup_timer_pending = false; 2446 ihost->phy_startup_timer_pending = false;
2447 ihost->port_agent.link_up_handler(ihost, &ihost->port_agent, 2447 ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
2448 iport, iphy); 2448 iport, iphy);
2449 sci_controller_start_next_phy(ihost); 2449 sci_controller_start_next_phy(ihost);
2450 break; 2450 break;
2451 case SCIC_READY: 2451 case SCIC_READY:
2452 ihost->port_agent.link_up_handler(ihost, &ihost->port_agent, 2452 ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
2453 iport, iphy); 2453 iport, iphy);
2454 break; 2454 break;
2455 default: 2455 default:
2456 dev_dbg(&ihost->pdev->dev, 2456 dev_dbg(&ihost->pdev->dev,
2457 "%s: SCIC Controller linkup event from phy %d in " 2457 "%s: SCIC Controller linkup event from phy %d in "
2458 "unexpected state %d\n", __func__, iphy->phy_index, 2458 "unexpected state %d\n", __func__, iphy->phy_index,
2459 ihost->sm.current_state_id); 2459 ihost->sm.current_state_id);
2460 } 2460 }
2461 } 2461 }
2462 2462
2463 void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport, 2463 void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
2464 struct isci_phy *iphy) 2464 struct isci_phy *iphy)
2465 { 2465 {
2466 switch (ihost->sm.current_state_id) { 2466 switch (ihost->sm.current_state_id) {
2467 case SCIC_STARTING: 2467 case SCIC_STARTING:
2468 case SCIC_READY: 2468 case SCIC_READY:
2469 ihost->port_agent.link_down_handler(ihost, &ihost->port_agent, 2469 ihost->port_agent.link_down_handler(ihost, &ihost->port_agent,
2470 iport, iphy); 2470 iport, iphy);
2471 break; 2471 break;
2472 default: 2472 default:
2473 dev_dbg(&ihost->pdev->dev, 2473 dev_dbg(&ihost->pdev->dev,
2474 "%s: SCIC Controller linkdown event from phy %d in " 2474 "%s: SCIC Controller linkdown event from phy %d in "
2475 "unexpected state %d\n", 2475 "unexpected state %d\n",
2476 __func__, 2476 __func__,
2477 iphy->phy_index, 2477 iphy->phy_index,
2478 ihost->sm.current_state_id); 2478 ihost->sm.current_state_id);
2479 } 2479 }
2480 } 2480 }
2481 2481
2482 bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost) 2482 bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
2483 { 2483 {
2484 u32 index; 2484 u32 index;
2485 2485
2486 for (index = 0; index < ihost->remote_node_entries; index++) { 2486 for (index = 0; index < ihost->remote_node_entries; index++) {
2487 if ((ihost->device_table[index] != NULL) && 2487 if ((ihost->device_table[index] != NULL) &&
2488 (ihost->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING)) 2488 (ihost->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING))
2489 return true; 2489 return true;
2490 } 2490 }
2491 2491
2492 return false; 2492 return false;
2493 } 2493 }
2494 2494
2495 void sci_controller_remote_device_stopped(struct isci_host *ihost, 2495 void sci_controller_remote_device_stopped(struct isci_host *ihost,
2496 struct isci_remote_device *idev) 2496 struct isci_remote_device *idev)
2497 { 2497 {
2498 if (ihost->sm.current_state_id != SCIC_STOPPING) { 2498 if (ihost->sm.current_state_id != SCIC_STOPPING) {
2499 dev_dbg(&ihost->pdev->dev, 2499 dev_dbg(&ihost->pdev->dev,
2500 "SCIC Controller 0x%p remote device stopped event " 2500 "SCIC Controller 0x%p remote device stopped event "
2501 "from device 0x%p in unexpected state %d\n", 2501 "from device 0x%p in unexpected state %d\n",
2502 ihost, idev, 2502 ihost, idev,
2503 ihost->sm.current_state_id); 2503 ihost->sm.current_state_id);
2504 return; 2504 return;
2505 } 2505 }
2506 2506
2507 if (!sci_controller_has_remote_devices_stopping(ihost)) 2507 if (!sci_controller_has_remote_devices_stopping(ihost))
2508 isci_host_stop_complete(ihost); 2508 isci_host_stop_complete(ihost);
2509 } 2509 }
2510 2510
2511 void sci_controller_post_request(struct isci_host *ihost, u32 request) 2511 void sci_controller_post_request(struct isci_host *ihost, u32 request)
2512 { 2512 {
2513 dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n", 2513 dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n",
2514 __func__, ihost->id, request); 2514 __func__, ihost->id, request);
2515 2515
2516 writel(request, &ihost->smu_registers->post_context_port); 2516 writel(request, &ihost->smu_registers->post_context_port);
2517 } 2517 }
2518 2518
2519 struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag) 2519 struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
2520 { 2520 {
2521 u16 task_index; 2521 u16 task_index;
2522 u16 task_sequence; 2522 u16 task_sequence;
2523 2523
2524 task_index = ISCI_TAG_TCI(io_tag); 2524 task_index = ISCI_TAG_TCI(io_tag);
2525 2525
2526 if (task_index < ihost->task_context_entries) { 2526 if (task_index < ihost->task_context_entries) {
2527 struct isci_request *ireq = ihost->reqs[task_index]; 2527 struct isci_request *ireq = ihost->reqs[task_index];
2528 2528
2529 if (test_bit(IREQ_ACTIVE, &ireq->flags)) { 2529 if (test_bit(IREQ_ACTIVE, &ireq->flags)) {
2530 task_sequence = ISCI_TAG_SEQ(io_tag); 2530 task_sequence = ISCI_TAG_SEQ(io_tag);
2531 2531
2532 if (task_sequence == ihost->io_request_sequence[task_index]) 2532 if (task_sequence == ihost->io_request_sequence[task_index])
2533 return ireq; 2533 return ireq;
2534 } 2534 }
2535 } 2535 }
2536 2536
2537 return NULL; 2537 return NULL;
2538 } 2538 }
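
A tag is only honored above if its TCI indexes an active request and its sequence matches the controller's current sequence for that slot. A standalone sketch of that check; the field widths (4-bit sequence in the high bits, TCI in the low bits) are assumptions for illustration, not the driver's exact macros:

	#include <stdint.h>
	#include <stdbool.h>

	#define TAG(seq, tci) ((uint16_t)(((seq) << 12) | (tci)))
	#define TAG_SEQ(tag)  (((tag) >> 12) & 0xF)
	#define TAG_TCI(tag)  ((tag) & 0xFFF)

	static bool tag_current(uint16_t tag, const uint8_t *seq_table, bool active)
	{
		return active && TAG_SEQ(tag) == seq_table[TAG_TCI(tag)];
	}
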
2539 2539
2540 /** 2540 /**
2541 * This method allocates a remote node index and reserves the remote node 2541 * This method allocates a remote node index and reserves the remote node
2542 * context space for use. This method can fail if there are no more remote 2542 * context space for use. This method can fail if there are no more remote
2543 * node indexes available. 2543 * node indexes available.
2544 * @ihost: This is the controller object which contains the set of 2544 * @ihost: This is the controller object which contains the set of
2545 * free remote node ids 2545 * free remote node ids
2546 * @idev: This is the device object which is requesting a remote node 2546 * @idev: This is the device object which is requesting a remote node
2547 * id 2547 * id
2548 * @node_id: This is the remote node id that is assigned to the device if one 2548 * @node_id: This is the remote node id that is assigned to the device if one
2549 * is available 2549 * is available
2550 * 2550 *
2551 * Returns SCI_FAILURE_INSUFFICIENT_RESOURCES if there is no remote 2551 * Returns SCI_FAILURE_INSUFFICIENT_RESOURCES if there is no remote
2552 * node index available. 2552 * node index available.
2553 */ 2553 */
2554 enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost, 2554 enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
2555 struct isci_remote_device *idev, 2555 struct isci_remote_device *idev,
2556 u16 *node_id) 2556 u16 *node_id)
2557 { 2557 {
2558 u16 node_index; 2558 u16 node_index;
2559 u32 remote_node_count = sci_remote_device_node_count(idev); 2559 u32 remote_node_count = sci_remote_device_node_count(idev);
2560 2560
2561 node_index = sci_remote_node_table_allocate_remote_node( 2561 node_index = sci_remote_node_table_allocate_remote_node(
2562 &ihost->available_remote_nodes, remote_node_count 2562 &ihost->available_remote_nodes, remote_node_count
2563 ); 2563 );
2564 2564
2565 if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { 2565 if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
2566 ihost->device_table[node_index] = idev; 2566 ihost->device_table[node_index] = idev;
2567 2567
2568 *node_id = node_index; 2568 *node_id = node_index;
2569 2569
2570 return SCI_SUCCESS; 2570 return SCI_SUCCESS;
2571 } 2571 }
2572 2572
2573 return SCI_FAILURE_INSUFFICIENT_RESOURCES; 2573 return SCI_FAILURE_INSUFFICIENT_RESOURCES;
2574 } 2574 }
2575 2575
2576 void sci_controller_free_remote_node_context(struct isci_host *ihost, 2576 void sci_controller_free_remote_node_context(struct isci_host *ihost,
2577 struct isci_remote_device *idev, 2577 struct isci_remote_device *idev,
2578 u16 node_id) 2578 u16 node_id)
2579 { 2579 {
2580 u32 remote_node_count = sci_remote_device_node_count(idev); 2580 u32 remote_node_count = sci_remote_device_node_count(idev);
2581 2581
2582 if (ihost->device_table[node_id] == idev) { 2582 if (ihost->device_table[node_id] == idev) {
2583 ihost->device_table[node_id] = NULL; 2583 ihost->device_table[node_id] = NULL;
2584 2584
2585 sci_remote_node_table_release_remote_node_index( 2585 sci_remote_node_table_release_remote_node_index(
2586 &ihost->available_remote_nodes, remote_node_count, node_id 2586 &ihost->available_remote_nodes, remote_node_count, node_id
2587 ); 2587 );
2588 } 2588 }
2589 } 2589 }
2590 2590
2591 void sci_controller_copy_sata_response(void *response_buffer, 2591 void sci_controller_copy_sata_response(void *response_buffer,
2592 void *frame_header, 2592 void *frame_header,
2593 void *frame_buffer) 2593 void *frame_buffer)
2594 { 2594 {
2595 /* XXX type safety? */ 2595 /* XXX type safety? */
2596 memcpy(response_buffer, frame_header, sizeof(u32)); 2596 memcpy(response_buffer, frame_header, sizeof(u32));
2597 2597
2598 memcpy(response_buffer + sizeof(u32), 2598 memcpy(response_buffer + sizeof(u32),
2599 frame_buffer, 2599 frame_buffer,
2600 sizeof(struct dev_to_host_fis) - sizeof(u32)); 2600 sizeof(struct dev_to_host_fis) - sizeof(u32));
2601 } 2601 }
2602 2602
2603 void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index) 2603 void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index)
2604 { 2604 {
2605 if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index)) 2605 if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index))
2606 writel(ihost->uf_control.get, 2606 writel(ihost->uf_control.get,
2607 &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); 2607 &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
2608 } 2608 }
2609 2609
2610 void isci_tci_free(struct isci_host *ihost, u16 tci) 2610 void isci_tci_free(struct isci_host *ihost, u16 tci)
2611 { 2611 {
2612 u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1); 2612 u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1);
2613 2613
2614 ihost->tci_pool[tail] = tci; 2614 ihost->tci_pool[tail] = tci;
2615 ihost->tci_tail = tail + 1; 2615 ihost->tci_tail = tail + 1;
2616 } 2616 }
2617 2617
2618 static u16 isci_tci_alloc(struct isci_host *ihost) 2618 static u16 isci_tci_alloc(struct isci_host *ihost)
2619 { 2619 {
2620 u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1); 2620 u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1);
2621 u16 tci = ihost->tci_pool[head]; 2621 u16 tci = ihost->tci_pool[head];
2622 2622
2623 ihost->tci_head = head + 1; 2623 ihost->tci_head = head + 1;
2624 return tci; 2624 return tci;
2625 } 2625 }
2626 2626
2627 static u16 isci_tci_space(struct isci_host *ihost) 2627 static u16 isci_tci_space(struct isci_host *ihost)
2628 { 2628 {
2629 return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS); 2629 return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
2630 } 2630 }
2631 2631
2632 u16 isci_alloc_tag(struct isci_host *ihost) 2632 u16 isci_alloc_tag(struct isci_host *ihost)
2633 { 2633 {
2634 if (isci_tci_space(ihost)) { 2634 if (isci_tci_space(ihost)) {
2635 u16 tci = isci_tci_alloc(ihost); 2635 u16 tci = isci_tci_alloc(ihost);
2636 u8 seq = ihost->io_request_sequence[tci]; 2636 u8 seq = ihost->io_request_sequence[tci];
2637 2637
2638 return ISCI_TAG(seq, tci); 2638 return ISCI_TAG(seq, tci);
2639 } 2639 }
2640 2640
2641 return SCI_CONTROLLER_INVALID_IO_TAG; 2641 return SCI_CONTROLLER_INVALID_IO_TAG;
2642 } 2642 }
2643 2643
2644 enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag) 2644 enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag)
2645 { 2645 {
2646 u16 tci = ISCI_TAG_TCI(io_tag); 2646 u16 tci = ISCI_TAG_TCI(io_tag);
2647 u16 seq = ISCI_TAG_SEQ(io_tag); 2647 u16 seq = ISCI_TAG_SEQ(io_tag);
2648 2648
2649 /* prevent tail from passing head */ 2649 /* prevent tail from passing head */
2650 if (isci_tci_active(ihost) == 0) 2650 if (isci_tci_active(ihost) == 0)
2651 return SCI_FAILURE_INVALID_IO_TAG; 2651 return SCI_FAILURE_INVALID_IO_TAG;
2652 2652
2653 if (seq == ihost->io_request_sequence[tci]) { 2653 if (seq == ihost->io_request_sequence[tci]) {
2654 ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1); 2654 ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1);
2655 2655
2656 isci_tci_free(ihost, tci); 2656 isci_tci_free(ihost, tci);
2657 2657
2658 return SCI_SUCCESS; 2658 return SCI_SUCCESS;
2659 } 2659 }
2660 return SCI_FAILURE_INVALID_IO_TAG; 2660 return SCI_FAILURE_INVALID_IO_TAG;
2661 } 2661 }
2662 2662
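isci_tci_free(), isci_tci_alloc() and isci_tci_space() treat tci_pool[] as a circular buffer whose head/tail indices are masked only on access, while isci_alloc_tag()/isci_free_tag() pair each TCI with a per-slot sequence number so that a stale tag (already freed, possibly recycled) is rejected. A condensed, self-contained sketch of the same scheme; the sizes and the tag encoding here are illustrative, not the driver's ISCI_TAG layout:

#include <linux/types.h>

#define MAX_REQS 256			/* power of two, like SCI_MAX_IO_REQUESTS */
#define MAX_SEQ  8			/* sequence numbers per TCI slot */

static u16 pool[MAX_REQS];		/* free TCIs; seeded 0..MAX_REQS-1 at init */
static u16 head, tail;			/* indices masked on access, never wrapped */
static u8 seq_of[MAX_REQS];		/* live sequence number for each TCI */

static u16 alloc_tag(void)		/* caller verifies space first, as isci does */
{
	u16 tci = pool[head++ & (MAX_REQS - 1)];

	return (seq_of[tci] << 8) | tci;	/* illustrative tag encoding */
}

static int free_tag(u16 tag)
{
	u16 tci = tag & 0xff;

	if ((tag >> 8) != seq_of[tci])
		return -1;			/* stale tag, already recycled */
	seq_of[tci] = (seq_of[tci] + 1) & (MAX_SEQ - 1);
	pool[tail++ & (MAX_REQS - 1)] = tci;
	return 0;
}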
2663 enum sci_status sci_controller_start_io(struct isci_host *ihost, 2663 enum sci_status sci_controller_start_io(struct isci_host *ihost,
2664 struct isci_remote_device *idev, 2664 struct isci_remote_device *idev,
2665 struct isci_request *ireq) 2665 struct isci_request *ireq)
2666 { 2666 {
2667 enum sci_status status; 2667 enum sci_status status;
2668 2668
2669 if (ihost->sm.current_state_id != SCIC_READY) { 2669 if (ihost->sm.current_state_id != SCIC_READY) {
2670 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", 2670 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2671 __func__, ihost->sm.current_state_id); 2671 __func__, ihost->sm.current_state_id);
2672 return SCI_FAILURE_INVALID_STATE; 2672 return SCI_FAILURE_INVALID_STATE;
2673 } 2673 }
2674 2674
2675 status = sci_remote_device_start_io(ihost, idev, ireq); 2675 status = sci_remote_device_start_io(ihost, idev, ireq);
2676 if (status != SCI_SUCCESS) 2676 if (status != SCI_SUCCESS)
2677 return status; 2677 return status;
2678 2678
2679 set_bit(IREQ_ACTIVE, &ireq->flags); 2679 set_bit(IREQ_ACTIVE, &ireq->flags);
2680 sci_controller_post_request(ihost, ireq->post_context); 2680 sci_controller_post_request(ihost, ireq->post_context);
2681 return SCI_SUCCESS; 2681 return SCI_SUCCESS;
2682 } 2682 }
2683 2683
2684 enum sci_status sci_controller_terminate_request(struct isci_host *ihost, 2684 enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
2685 struct isci_remote_device *idev, 2685 struct isci_remote_device *idev,
2686 struct isci_request *ireq) 2686 struct isci_request *ireq)
2687 { 2687 {
2688 /* terminate an ongoing (i.e. started) core IO request. This does not 2688 /* terminate an ongoing (i.e. started) core IO request. This does not
2689 * abort the IO request at the target, but rather removes the IO 2689 * abort the IO request at the target, but rather removes the IO
2690 * request from the host controller. 2690 * request from the host controller.
2691 */ 2691 */
2692 enum sci_status status; 2692 enum sci_status status;
2693 2693
2694 if (ihost->sm.current_state_id != SCIC_READY) { 2694 if (ihost->sm.current_state_id != SCIC_READY) {
2695 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", 2695 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2696 __func__, ihost->sm.current_state_id); 2696 __func__, ihost->sm.current_state_id);
2697 return SCI_FAILURE_INVALID_STATE; 2697 return SCI_FAILURE_INVALID_STATE;
2698 } 2698 }
2699
2700 status = sci_io_request_terminate(ireq); 2699 status = sci_io_request_terminate(ireq);
2701 if (status != SCI_SUCCESS) 2700 if ((status == SCI_SUCCESS) &&
2702 return status; 2701 !test_bit(IREQ_PENDING_ABORT, &ireq->flags) &&
2703 2702 !test_and_set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags)) {
2704 /* 2703 /* Utilize the original post context command and OR in the
2705 * Utilize the original post context command and OR in the POST_TC_ABORT 2704 * POST_TC_ABORT request sub-type.
2706 * request sub-type. 2705 */
2707 */ 2706 sci_controller_post_request(
2708 sci_controller_post_request(ihost, 2707 ihost, ireq->post_context |
2709 ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT); 2708 SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
2710 return SCI_SUCCESS; 2709 }
2710 return status;
2711 } 2711 }
2712 2712
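This hunk is the behavioral core of the patch. The old code (left pane) posted the TC abort whenever sci_io_request_terminate() succeeded; the new code (right pane) skips the post while IREQ_PENDING_ABORT is set, presumably so that the suspension-driven terminate path added elsewhere in this patch owns that abort, and uses IREQ_TC_ABORT_POSTED so the abort TC is posted at most once. The once-only guarantee comes from test_and_set_bit(), which atomically sets the bit and returns its previous value; condensed:

	/* only the first caller sees the bit clear and posts the abort TC */
	if (!test_and_set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags))
		sci_controller_post_request(ihost, ireq->post_context |
				SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);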
2713 /** 2713 /**
2714 * sci_controller_complete_io() - This method will perform core specific 2714 * sci_controller_complete_io() - This method will perform core specific
2715 * completion operations for an IO request. After this method is invoked, 2715 * completion operations for an IO request. After this method is invoked,
2716 * the user should consider the IO request as invalid until it is properly 2716 * the user should consider the IO request as invalid until it is properly
2717 * reused (i.e. re-constructed). 2717 * reused (i.e. re-constructed).
2718 * @ihost: The handle to the controller object for which to complete the 2718 * @ihost: The handle to the controller object for which to complete the
2719 * IO request. 2719 * IO request.
2720 * @idev: The handle to the remote device object for which to complete 2720 * @idev: The handle to the remote device object for which to complete
2721 * the IO request. 2721 * the IO request.
2722 * @ireq: the handle to the io request object to complete. 2722 * @ireq: the handle to the io request object to complete.
2723 */ 2723 */
2724 enum sci_status sci_controller_complete_io(struct isci_host *ihost, 2724 enum sci_status sci_controller_complete_io(struct isci_host *ihost,
2725 struct isci_remote_device *idev, 2725 struct isci_remote_device *idev,
2726 struct isci_request *ireq) 2726 struct isci_request *ireq)
2727 { 2727 {
2728 enum sci_status status; 2728 enum sci_status status;
2729 u16 index; 2729 u16 index;
2730 2730
2731 switch (ihost->sm.current_state_id) { 2731 switch (ihost->sm.current_state_id) {
2732 case SCIC_STOPPING: 2732 case SCIC_STOPPING:
2733 /* XXX: Implement this function */ 2733 /* XXX: Implement this function */
2734 return SCI_FAILURE; 2734 return SCI_FAILURE;
2735 case SCIC_READY: 2735 case SCIC_READY:
2736 status = sci_remote_device_complete_io(ihost, idev, ireq); 2736 status = sci_remote_device_complete_io(ihost, idev, ireq);
2737 if (status != SCI_SUCCESS) 2737 if (status != SCI_SUCCESS)
2738 return status; 2738 return status;
2739 2739
2740 index = ISCI_TAG_TCI(ireq->io_tag); 2740 index = ISCI_TAG_TCI(ireq->io_tag);
2741 clear_bit(IREQ_ACTIVE, &ireq->flags); 2741 clear_bit(IREQ_ACTIVE, &ireq->flags);
2742 return SCI_SUCCESS; 2742 return SCI_SUCCESS;
2743 default: 2743 default:
2744 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", 2744 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2745 __func__, ihost->sm.current_state_id); 2745 __func__, ihost->sm.current_state_id);
2746 return SCI_FAILURE_INVALID_STATE; 2746 return SCI_FAILURE_INVALID_STATE;
2747 } 2747 }
2748 2748
2749 } 2749 }
2750 2750
2751 enum sci_status sci_controller_continue_io(struct isci_request *ireq) 2751 enum sci_status sci_controller_continue_io(struct isci_request *ireq)
2752 { 2752 {
2753 struct isci_host *ihost = ireq->owning_controller; 2753 struct isci_host *ihost = ireq->owning_controller;
2754 2754
2755 if (ihost->sm.current_state_id != SCIC_READY) { 2755 if (ihost->sm.current_state_id != SCIC_READY) {
2756 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", 2756 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2757 __func__, ihost->sm.current_state_id); 2757 __func__, ihost->sm.current_state_id);
2758 return SCI_FAILURE_INVALID_STATE; 2758 return SCI_FAILURE_INVALID_STATE;
2759 } 2759 }
2760 2760
2761 set_bit(IREQ_ACTIVE, &ireq->flags); 2761 set_bit(IREQ_ACTIVE, &ireq->flags);
2762 sci_controller_post_request(ihost, ireq->post_context); 2762 sci_controller_post_request(ihost, ireq->post_context);
2763 return SCI_SUCCESS; 2763 return SCI_SUCCESS;
2764 } 2764 }
2765 2765
2766 /** 2766 /**
2767 * sci_controller_start_task() - This method is called by the SCIC user to 2767 * sci_controller_start_task() - This method is called by the SCIC user to
2768 * send/start a framework task management request. 2768 * send/start a framework task management request.
2769 * @controller: the handle to the controller object for which to start the task 2769 * @controller: the handle to the controller object for which to start the task
2770 * management request. 2770 * management request.
2771 * @remote_device: the handle to the remote device object for which to start 2771 * @remote_device: the handle to the remote device object for which to start
2772 * the task management request. 2772 * the task management request.
2773 * @task_request: the handle to the task request object to start. 2773 * @task_request: the handle to the task request object to start.
2774 */ 2774 */
2775 enum sci_task_status sci_controller_start_task(struct isci_host *ihost, 2775 enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
2776 struct isci_remote_device *idev, 2776 struct isci_remote_device *idev,
2777 struct isci_request *ireq) 2777 struct isci_request *ireq)
2778 { 2778 {
2779 enum sci_status status; 2779 enum sci_status status;
2780 2780
2781 if (ihost->sm.current_state_id != SCIC_READY) { 2781 if (ihost->sm.current_state_id != SCIC_READY) {
2782 dev_warn(&ihost->pdev->dev, 2782 dev_warn(&ihost->pdev->dev,
2783 "%s: SCIC Controller starting task from invalid " 2783 "%s: SCIC Controller starting task from invalid "
2784 "state\n", 2784 "state\n",
2785 __func__); 2785 __func__);
2786 return SCI_TASK_FAILURE_INVALID_STATE; 2786 return SCI_TASK_FAILURE_INVALID_STATE;
2787 } 2787 }
2788 2788
2789 status = sci_remote_device_start_task(ihost, idev, ireq); 2789 status = sci_remote_device_start_task(ihost, idev, ireq);
2790 switch (status) { 2790 switch (status) {
2791 case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS: 2791 case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
2792 set_bit(IREQ_ACTIVE, &ireq->flags); 2792 set_bit(IREQ_ACTIVE, &ireq->flags);
2793 2793
2794 /* 2794 /*
2795 * We will let framework know this task request started successfully, 2795 * We will let framework know this task request started successfully,
2796 * although core is still working on starting the request (to post tc when 2796 * although core is still working on starting the request (to post tc when
2797 * RNC is resumed). 2797 * RNC is resumed).
2798 */ 2798 */
2799 return SCI_SUCCESS; 2799 return SCI_SUCCESS;
2800 case SCI_SUCCESS: 2800 case SCI_SUCCESS:
2801 set_bit(IREQ_ACTIVE, &ireq->flags); 2801 set_bit(IREQ_ACTIVE, &ireq->flags);
2802 sci_controller_post_request(ihost, ireq->post_context); 2802 sci_controller_post_request(ihost, ireq->post_context);
2803 break; 2803 break;
2804 default: 2804 default:
2805 break; 2805 break;
2806 } 2806 }
2807 2807
2808 return status; 2808 return status;
2809 } 2809 }
2810 2810
2811 static int sci_write_gpio_tx_gp(struct isci_host *ihost, u8 reg_index, u8 reg_count, u8 *write_data) 2811 static int sci_write_gpio_tx_gp(struct isci_host *ihost, u8 reg_index, u8 reg_count, u8 *write_data)
2812 { 2812 {
2813 int d; 2813 int d;
2814 2814
2815 /* no support for TX_GP_CFG */ 2815 /* no support for TX_GP_CFG */
2816 if (reg_index == 0) 2816 if (reg_index == 0)
2817 return -EINVAL; 2817 return -EINVAL;
2818 2818
2819 for (d = 0; d < isci_gpio_count(ihost); d++) { 2819 for (d = 0; d < isci_gpio_count(ihost); d++) {
2820 u32 val = 0x444; /* all ODx.n clear */ 2820 u32 val = 0x444; /* all ODx.n clear */
2821 int i; 2821 int i;
2822 2822
2823 for (i = 0; i < 3; i++) { 2823 for (i = 0; i < 3; i++) {
2824 int bit = (i << 2) + 2; 2824 int bit = (i << 2) + 2;
2825 2825
2826 bit = try_test_sas_gpio_gp_bit(to_sas_gpio_od(d, i), 2826 bit = try_test_sas_gpio_gp_bit(to_sas_gpio_od(d, i),
2827 write_data, reg_index, 2827 write_data, reg_index,
2828 reg_count); 2828 reg_count);
2829 if (bit < 0) 2829 if (bit < 0)
2830 break; 2830 break;
2831 2831
2832 /* if od is set, clear the 'invert' bit */ 2832 /* if od is set, clear the 'invert' bit */
2833 val &= ~(bit << ((i << 2) + 2)); 2833 val &= ~(bit << ((i << 2) + 2));
2834 } 2834 }
2835 2835
2836 if (i < 3) 2836 if (i < 3)
2837 break; 2837 break;
2838 writel(val, &ihost->scu_registers->peg0.sgpio.output_data_select[d]); 2838 writel(val, &ihost->scu_registers->peg0.sgpio.output_data_select[d]);
2839 } 2839 }
2840 2840
2841 /* unless reg_index is > 1, we should always be able to write at 2841 /* unless reg_index is > 1, we should always be able to write at
2842 * least one register 2842 * least one register
2843 */ 2843 */
2844 return d > 0; 2844 return d > 0;
2845 } 2845 }
2846 2846
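In sci_write_gpio_tx_gp() above, the 0x444 initializer sets one bit per ODx.n signal at positions 2, 6 and 10, i.e. (i << 2) + 2 for i = 0..2, and each position is cleared again when the corresponding OD bit from write_data is set. The arithmetic in isolation, with illustrative OD values:

	u32 val = 0x444;		/* 0b0100_0100_0100: bits 2, 6, 10 set */
	int i, od[3] = { 1, 0, 1 };	/* example OD values for one drive */

	for (i = 0; i < 3; i++)
		if (od[i])		/* OD set: clear that signal's bit */
			val &= ~(1 << ((i << 2) + 2));
	/* val is now 0x040: only the od[1] == 0 position is still set */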
2847 int isci_gpio_write(struct sas_ha_struct *sas_ha, u8 reg_type, u8 reg_index, 2847 int isci_gpio_write(struct sas_ha_struct *sas_ha, u8 reg_type, u8 reg_index,
2848 u8 reg_count, u8 *write_data) 2848 u8 reg_count, u8 *write_data)
2849 { 2849 {
2850 struct isci_host *ihost = sas_ha->lldd_ha; 2850 struct isci_host *ihost = sas_ha->lldd_ha;
2851 int written; 2851 int written;
2852 2852
2853 switch (reg_type) { 2853 switch (reg_type) {
2854 case SAS_GPIO_REG_TX_GP: 2854 case SAS_GPIO_REG_TX_GP:
2855 written = sci_write_gpio_tx_gp(ihost, reg_index, reg_count, write_data); 2855 written = sci_write_gpio_tx_gp(ihost, reg_index, reg_count, write_data);
2856 break; 2856 break;
2857 default: 2857 default:
2858 written = -EINVAL; 2858 written = -EINVAL;
2859 } 2859 }
2860 2860
2861 return written; 2861 return written;
2862 } 2862 }
drivers/scsi/isci/remote_device.c
1 /* 1 /*
2 * This file is provided under a dual BSD/GPLv2 license. When using or 2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license. 3 * redistributing this file, you may do so under either license.
4 * 4 *
5 * GPL LICENSE SUMMARY 5 * GPL LICENSE SUMMARY
6 * 6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as 10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 * 12 *
13 * This program is distributed in the hope that it will be useful, but 13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of 14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details. 16 * General Public License for more details.
17 * 17 *
18 * You should have received a copy of the GNU General Public License 18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software 19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL. 22 * in the file called LICENSE.GPL.
23 * 23 *
24 * BSD LICENSE 24 * BSD LICENSE
25 * 25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved. 27 * All rights reserved.
28 * 28 *
29 * Redistribution and use in source and binary forms, with or without 29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions 30 * modification, are permitted provided that the following conditions
31 * are met: 31 * are met:
32 * 32 *
33 * * Redistributions of source code must retain the above copyright 33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer. 34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright 35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in 36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the 37 * the documentation and/or other materials provided with the
38 * distribution. 38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its 39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived 40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission. 41 * from this software without specific prior written permission.
42 * 42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */ 54 */
55 #include <scsi/sas.h> 55 #include <scsi/sas.h>
56 #include <linux/bitops.h> 56 #include <linux/bitops.h>
57 #include "isci.h" 57 #include "isci.h"
58 #include "port.h" 58 #include "port.h"
59 #include "remote_device.h" 59 #include "remote_device.h"
60 #include "request.h" 60 #include "request.h"
61 #include "remote_node_context.h" 61 #include "remote_node_context.h"
62 #include "scu_event_codes.h" 62 #include "scu_event_codes.h"
63 #include "task.h" 63 #include "task.h"
64 64
65 #undef C 65 #undef C
66 #define C(a) (#a) 66 #define C(a) (#a)
67 const char *dev_state_name(enum sci_remote_device_states state) 67 const char *dev_state_name(enum sci_remote_device_states state)
68 { 68 {
69 static const char * const strings[] = REMOTE_DEV_STATES; 69 static const char * const strings[] = REMOTE_DEV_STATES;
70 70
71 return strings[state]; 71 return strings[state];
72 } 72 }
73 #undef C 73 #undef C
74 74
75 /** 75 /**
76 * isci_remote_device_not_ready() - This function is called by the ihost when 76 * isci_remote_device_not_ready() - This function is called by the ihost when
77 * the remote device is not ready. We mark the isci device as ready (not 77 * the remote device is not ready. We mark the isci device as ready (not
78 * "ready_for_io") and signal the waiting proccess. 78 * "ready_for_io") and signal the waiting proccess.
79 * @isci_host: This parameter specifies the isci host object. 79 * @isci_host: This parameter specifies the isci host object.
80 * @isci_device: This parameter specifies the remote device 80 * @isci_device: This parameter specifies the remote device
81 * 81 *
82 * sci_lock is held on entrance to this function. 82 * sci_lock is held on entrance to this function.
83 */ 83 */
84 static void isci_remote_device_not_ready(struct isci_host *ihost, 84 static void isci_remote_device_not_ready(struct isci_host *ihost,
85 struct isci_remote_device *idev, u32 reason) 85 struct isci_remote_device *idev, u32 reason)
86 { 86 {
87 struct isci_request *ireq; 87 struct isci_request *ireq;
88 88
89 dev_dbg(&ihost->pdev->dev, 89 dev_dbg(&ihost->pdev->dev,
90 "%s: isci_device = %p\n", __func__, idev); 90 "%s: isci_device = %p\n", __func__, idev);
91 91
92 switch (reason) { 92 switch (reason) {
93 case SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED: 93 case SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED:
94 set_bit(IDEV_GONE, &idev->flags); 94 set_bit(IDEV_GONE, &idev->flags);
95 break; 95 break;
96 case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED: 96 case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
97 set_bit(IDEV_IO_NCQERROR, &idev->flags); 97 set_bit(IDEV_IO_NCQERROR, &idev->flags);
98 98
99 /* Kill all outstanding requests for the device. */ 99 /* Kill all outstanding requests for the device. */
100 list_for_each_entry(ireq, &idev->reqs_in_process, dev_node) { 100 list_for_each_entry(ireq, &idev->reqs_in_process, dev_node) {
101 101
102 dev_dbg(&ihost->pdev->dev, 102 dev_dbg(&ihost->pdev->dev,
103 "%s: isci_device = %p request = %p\n", 103 "%s: isci_device = %p request = %p\n",
104 __func__, idev, ireq); 104 __func__, idev, ireq);
105 105
106 sci_controller_terminate_request(ihost, 106 sci_controller_terminate_request(ihost,
107 idev, 107 idev,
108 ireq); 108 ireq);
109 } 109 }
110 /* Fall through into the default case... */ 110 /* Fall through into the default case... */
111 default: 111 default:
112 clear_bit(IDEV_IO_READY, &idev->flags); 112 clear_bit(IDEV_IO_READY, &idev->flags);
113 break; 113 break;
114 } 114 }
115 } 115 }
116 116
117 /** 117 /**
118 * isci_remote_device_ready() - This function is called by the ihost when the 118 * isci_remote_device_ready() - This function is called by the ihost when the
119 * remote device is ready. We mark the isci device as ready and signal the 119 * remote device is ready. We mark the isci device as ready and signal the
120 * waiting process. 120 * waiting process.
121 * @ihost: our valid isci_host 121 * @ihost: our valid isci_host
122 * @idev: remote device 122 * @idev: remote device
123 * 123 *
124 */ 124 */
125 static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev) 125 static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev)
126 { 126 {
127 dev_dbg(&ihost->pdev->dev, 127 dev_dbg(&ihost->pdev->dev,
128 "%s: idev = %p\n", __func__, idev); 128 "%s: idev = %p\n", __func__, idev);
129 129
130 clear_bit(IDEV_IO_NCQERROR, &idev->flags); 130 clear_bit(IDEV_IO_NCQERROR, &idev->flags);
131 set_bit(IDEV_IO_READY, &idev->flags); 131 set_bit(IDEV_IO_READY, &idev->flags);
132 if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags)) 132 if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags))
133 wake_up(&ihost->eventq); 133 wake_up(&ihost->eventq);
134 } 134 }
135 135
136 static enum sci_status sci_remote_device_suspend(
137 struct isci_remote_device *idev)
138 {
139 return sci_remote_node_context_suspend(
140 &idev->rnc,
141 SCI_SOFTWARE_SUSPENSION,
142 SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT,
143 NULL, NULL);
144 }
145
146 enum sci_status isci_remote_device_suspend(
147 struct isci_host *ihost,
148 struct isci_remote_device *idev)
149 {
150 enum sci_status status;
151 unsigned long flags;
152
153 spin_lock_irqsave(&ihost->scic_lock, flags);
154
155 if (isci_lookup_device(idev->domain_dev) == NULL) {
156 spin_unlock_irqrestore(&ihost->scic_lock, flags);
157 status = SCI_FAILURE;
158 } else {
159 status = sci_remote_device_suspend(idev);
160 spin_unlock_irqrestore(&ihost->scic_lock, flags);
161 if (status == SCI_SUCCESS) {
162 wait_event(ihost->eventq,
163 test_bit(IDEV_TXRX_SUSPENDED, &idev->flags)
164 || !test_bit(IDEV_ALLOCATED, &idev->flags));
165
166 status = test_bit(IDEV_TXRX_SUSPENDED, &idev->flags)
167 ? SCI_SUCCESS : SCI_FAILURE;
168 dev_dbg(&ihost->pdev->dev,
169 "%s: idev=%p, wait done, device is %s\n",
170 __func__, idev,
171 test_bit(IDEV_TXRX_SUSPENDED, &idev->flags)
172 ? "<suspended>" : "<deallocated!>");
173
174 }
175 isci_put_device(idev);
176 }
177 return status;
178 }
179
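The new isci_remote_device_suspend() wrapper holds scic_lock only across the device lookup and the RNC suspend request, then sleeps on eventq until the device either reports IDEV_TXRX_SUSPENDED or is deallocated. A sketch of the intended call pattern from sleeping context; the caller function here is illustrative, not part of the patch:

	static int example_quiesce_device(struct isci_host *ihost,
					  struct isci_remote_device *idev)
	{
		/* may sleep waiting for the TX/RX suspension to complete */
		if (isci_remote_device_suspend(ihost, idev) != SCI_SUCCESS)
			return -ENODEV;	/* suspend rejected or device gone */

		/* the RNC is suspended: outstanding TCs may now be terminated */
		return 0;
	}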
136 /* called once the remote node context is ready to be freed. 180 /* called once the remote node context is ready to be freed.
137 * The remote device can now report that its stop operation is complete. 181 * The remote device can now report that its stop operation is complete.
138 */ 182 */
139 static void rnc_destruct_done(void *_dev) 183 static void rnc_destruct_done(void *_dev)
140 { 184 {
141 struct isci_remote_device *idev = _dev; 185 struct isci_remote_device *idev = _dev;
142 186
143 BUG_ON(idev->started_request_count != 0); 187 BUG_ON(idev->started_request_count != 0);
144 sci_change_state(&idev->sm, SCI_DEV_STOPPED); 188 sci_change_state(&idev->sm, SCI_DEV_STOPPED);
145 } 189 }
146 190
147 static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_device *idev) 191 static enum sci_status sci_remote_device_terminate_requests_checkabort(
192 struct isci_remote_device *idev,
193 int check_abort_pending)
148 { 194 {
149 struct isci_host *ihost = idev->owning_port->owning_controller; 195 struct isci_host *ihost = idev->owning_port->owning_controller;
150 enum sci_status status = SCI_SUCCESS; 196 enum sci_status status = SCI_SUCCESS;
151 u32 i; 197 u32 i;
152 198
153 for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) { 199 for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
154 struct isci_request *ireq = ihost->reqs[i]; 200 struct isci_request *ireq = ihost->reqs[i];
155 enum sci_status s; 201 enum sci_status s;
156 202
157 if (!test_bit(IREQ_ACTIVE, &ireq->flags) || 203 if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
158 ireq->target_device != idev) 204 (ireq->target_device != idev) ||
205 (check_abort_pending && !test_bit(IREQ_PENDING_ABORT,
206 &ireq->flags)))
159 continue; 207 continue;
160 208
161 s = sci_controller_terminate_request(ihost, idev, ireq); 209 s = sci_controller_terminate_request(ihost, idev, ireq);
162 if (s != SCI_SUCCESS) 210 if (s != SCI_SUCCESS)
163 status = s; 211 status = s;
164 } 212 }
165 213
166 return status; 214 return status;
167 } 215 }
168 216
217 enum sci_status sci_remote_device_terminate_requests(
218 struct isci_remote_device *idev)
219 {
220 return sci_remote_device_terminate_requests_checkabort(idev, 0);
221 }
222
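With check_abort_pending == 0, the exported sci_remote_device_terminate_requests() sweeps every active request targeted at the device; a value of 1 restricts the sweep to requests already flagged IREQ_PENDING_ABORT. A hypothetical abort-only counterpart (not part of this patch) would just flip that argument:

	static enum sci_status example_terminate_aborting_requests(
		struct isci_remote_device *idev)
	{
		/* terminate only requests already marked IREQ_PENDING_ABORT */
		return sci_remote_device_terminate_requests_checkabort(idev, 1);
	}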
169 enum sci_status sci_remote_device_stop(struct isci_remote_device *idev, 223 enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
170 u32 timeout) 224 u32 timeout)
171 { 225 {
172 struct sci_base_state_machine *sm = &idev->sm; 226 struct sci_base_state_machine *sm = &idev->sm;
173 enum sci_remote_device_states state = sm->current_state_id; 227 enum sci_remote_device_states state = sm->current_state_id;
174 228
175 switch (state) { 229 switch (state) {
176 case SCI_DEV_INITIAL: 230 case SCI_DEV_INITIAL:
177 case SCI_DEV_FAILED: 231 case SCI_DEV_FAILED:
178 case SCI_DEV_FINAL: 232 case SCI_DEV_FINAL:
179 default: 233 default:
180 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", 234 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
181 __func__, dev_state_name(state)); 235 __func__, dev_state_name(state));
182 return SCI_FAILURE_INVALID_STATE; 236 return SCI_FAILURE_INVALID_STATE;
183 case SCI_DEV_STOPPED: 237 case SCI_DEV_STOPPED:
184 return SCI_SUCCESS; 238 return SCI_SUCCESS;
185 case SCI_DEV_STARTING: 239 case SCI_DEV_STARTING:
186 /* device not started so there had better be no requests */ 240 /* device not started so there had better be no requests */
187 BUG_ON(idev->started_request_count != 0); 241 BUG_ON(idev->started_request_count != 0);
188 sci_remote_node_context_destruct(&idev->rnc, 242 sci_remote_node_context_destruct(&idev->rnc,
189 rnc_destruct_done, idev); 243 rnc_destruct_done, idev);
190 /* Transition to the stopping state and wait for the 244 /* Transition to the stopping state and wait for the
191 * remote node to complete being posted and invalidated. 245 * remote node to complete being posted and invalidated.
192 */ 246 */
193 sci_change_state(sm, SCI_DEV_STOPPING); 247 sci_change_state(sm, SCI_DEV_STOPPING);
194 return SCI_SUCCESS; 248 return SCI_SUCCESS;
195 case SCI_DEV_READY: 249 case SCI_DEV_READY:
196 case SCI_STP_DEV_IDLE: 250 case SCI_STP_DEV_IDLE:
197 case SCI_STP_DEV_CMD: 251 case SCI_STP_DEV_CMD:
198 case SCI_STP_DEV_NCQ: 252 case SCI_STP_DEV_NCQ:
199 case SCI_STP_DEV_NCQ_ERROR: 253 case SCI_STP_DEV_NCQ_ERROR:
200 case SCI_STP_DEV_AWAIT_RESET: 254 case SCI_STP_DEV_AWAIT_RESET:
201 case SCI_SMP_DEV_IDLE: 255 case SCI_SMP_DEV_IDLE:
202 case SCI_SMP_DEV_CMD: 256 case SCI_SMP_DEV_CMD:
203 sci_change_state(sm, SCI_DEV_STOPPING); 257 sci_change_state(sm, SCI_DEV_STOPPING);
204 if (idev->started_request_count == 0) { 258 if (idev->started_request_count == 0) {
205 sci_remote_node_context_destruct(&idev->rnc, 259 sci_remote_node_context_destruct(&idev->rnc,
206 rnc_destruct_done, idev); 260 rnc_destruct_done, idev);
207 return SCI_SUCCESS; 261 return SCI_SUCCESS;
208 } else 262 } else
209 return sci_remote_device_terminate_requests(idev); 263 return sci_remote_device_terminate_requests(idev);
210 break; 264 break;
211 case SCI_DEV_STOPPING: 265 case SCI_DEV_STOPPING:
212 /* All requests should have been terminated, but if there is an 266 /* All requests should have been terminated, but if there is an
213 * attempt to stop a device already in the stopping state, then 267 * attempt to stop a device already in the stopping state, then
214 * try again to terminate. 268 * try again to terminate.
215 */ 269 */
216 return sci_remote_device_terminate_requests(idev); 270 return sci_remote_device_terminate_requests(idev);
217 case SCI_DEV_RESETTING: 271 case SCI_DEV_RESETTING:
218 sci_change_state(sm, SCI_DEV_STOPPING); 272 sci_change_state(sm, SCI_DEV_STOPPING);
219 return SCI_SUCCESS; 273 return SCI_SUCCESS;
220 } 274 }
221 } 275 }
222 276
223 enum sci_status sci_remote_device_reset(struct isci_remote_device *idev) 277 enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
224 { 278 {
225 struct sci_base_state_machine *sm = &idev->sm; 279 struct sci_base_state_machine *sm = &idev->sm;
226 enum sci_remote_device_states state = sm->current_state_id; 280 enum sci_remote_device_states state = sm->current_state_id;
227 281
228 switch (state) { 282 switch (state) {
229 case SCI_DEV_INITIAL: 283 case SCI_DEV_INITIAL:
230 case SCI_DEV_STOPPED: 284 case SCI_DEV_STOPPED:
231 case SCI_DEV_STARTING: 285 case SCI_DEV_STARTING:
232 case SCI_SMP_DEV_IDLE: 286 case SCI_SMP_DEV_IDLE:
233 case SCI_SMP_DEV_CMD: 287 case SCI_SMP_DEV_CMD:
234 case SCI_DEV_STOPPING: 288 case SCI_DEV_STOPPING:
235 case SCI_DEV_FAILED: 289 case SCI_DEV_FAILED:
236 case SCI_DEV_RESETTING: 290 case SCI_DEV_RESETTING:
237 case SCI_DEV_FINAL: 291 case SCI_DEV_FINAL:
238 default: 292 default:
239 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", 293 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
240 __func__, dev_state_name(state)); 294 __func__, dev_state_name(state));
241 return SCI_FAILURE_INVALID_STATE; 295 return SCI_FAILURE_INVALID_STATE;
242 case SCI_DEV_READY: 296 case SCI_DEV_READY:
243 case SCI_STP_DEV_IDLE: 297 case SCI_STP_DEV_IDLE:
244 case SCI_STP_DEV_CMD: 298 case SCI_STP_DEV_CMD:
245 case SCI_STP_DEV_NCQ: 299 case SCI_STP_DEV_NCQ:
246 case SCI_STP_DEV_NCQ_ERROR: 300 case SCI_STP_DEV_NCQ_ERROR:
247 case SCI_STP_DEV_AWAIT_RESET: 301 case SCI_STP_DEV_AWAIT_RESET:
248 sci_change_state(sm, SCI_DEV_RESETTING); 302 sci_change_state(sm, SCI_DEV_RESETTING);
249 return SCI_SUCCESS; 303 return SCI_SUCCESS;
250 } 304 }
251 } 305 }
252 306
253 enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev) 307 enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev)
254 { 308 {
255 struct sci_base_state_machine *sm = &idev->sm; 309 struct sci_base_state_machine *sm = &idev->sm;
256 enum sci_remote_device_states state = sm->current_state_id; 310 enum sci_remote_device_states state = sm->current_state_id;
257 311
258 if (state != SCI_DEV_RESETTING) { 312 if (state != SCI_DEV_RESETTING) {
259 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", 313 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
260 __func__, dev_state_name(state)); 314 __func__, dev_state_name(state));
261 return SCI_FAILURE_INVALID_STATE; 315 return SCI_FAILURE_INVALID_STATE;
262 } 316 }
263 317
264 sci_change_state(sm, SCI_DEV_READY); 318 sci_change_state(sm, SCI_DEV_READY);
265 return SCI_SUCCESS; 319 return SCI_SUCCESS;
266 } 320 }
267 321
268 enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev)
269 {
270 return sci_remote_node_context_suspend(&idev->rnc,
271 SCI_SOFTWARE_SUSPENSION,
272 SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT,
273 NULL, NULL);
274 }
275
276 enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev, 322 enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
277 u32 frame_index) 323 u32 frame_index)
278 { 324 {
279 struct sci_base_state_machine *sm = &idev->sm; 325 struct sci_base_state_machine *sm = &idev->sm;
280 enum sci_remote_device_states state = sm->current_state_id; 326 enum sci_remote_device_states state = sm->current_state_id;
281 struct isci_host *ihost = idev->owning_port->owning_controller; 327 struct isci_host *ihost = idev->owning_port->owning_controller;
282 enum sci_status status; 328 enum sci_status status;
283 329
284 switch (state) { 330 switch (state) {
285 case SCI_DEV_INITIAL: 331 case SCI_DEV_INITIAL:
286 case SCI_DEV_STOPPED: 332 case SCI_DEV_STOPPED:
287 case SCI_DEV_STARTING: 333 case SCI_DEV_STARTING:
288 case SCI_STP_DEV_IDLE: 334 case SCI_STP_DEV_IDLE:
289 case SCI_SMP_DEV_IDLE: 335 case SCI_SMP_DEV_IDLE:
290 case SCI_DEV_FINAL: 336 case SCI_DEV_FINAL:
291 default: 337 default:
292 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", 338 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
293 __func__, dev_state_name(state)); 339 __func__, dev_state_name(state));
294 /* Return the frame back to the controller */ 340 /* Return the frame back to the controller */
295 sci_controller_release_frame(ihost, frame_index); 341 sci_controller_release_frame(ihost, frame_index);
296 return SCI_FAILURE_INVALID_STATE; 342 return SCI_FAILURE_INVALID_STATE;
297 case SCI_DEV_READY: 343 case SCI_DEV_READY:
298 case SCI_STP_DEV_NCQ_ERROR: 344 case SCI_STP_DEV_NCQ_ERROR:
299 case SCI_STP_DEV_AWAIT_RESET: 345 case SCI_STP_DEV_AWAIT_RESET:
300 case SCI_DEV_STOPPING: 346 case SCI_DEV_STOPPING:
301 case SCI_DEV_FAILED: 347 case SCI_DEV_FAILED:
302 case SCI_DEV_RESETTING: { 348 case SCI_DEV_RESETTING: {
303 struct isci_request *ireq; 349 struct isci_request *ireq;
304 struct ssp_frame_hdr hdr; 350 struct ssp_frame_hdr hdr;
305 void *frame_header; 351 void *frame_header;
306 ssize_t word_cnt; 352 ssize_t word_cnt;
307 353
308 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, 354 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
309 frame_index, 355 frame_index,
310 &frame_header); 356 &frame_header);
311 if (status != SCI_SUCCESS) 357 if (status != SCI_SUCCESS)
312 return status; 358 return status;
313 359
314 word_cnt = sizeof(hdr) / sizeof(u32); 360 word_cnt = sizeof(hdr) / sizeof(u32);
315 sci_swab32_cpy(&hdr, frame_header, word_cnt); 361 sci_swab32_cpy(&hdr, frame_header, word_cnt);
316 362
317 ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag)); 363 ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag));
318 if (ireq && ireq->target_device == idev) { 364 if (ireq && ireq->target_device == idev) {
319 /* The IO request is now in charge of releasing the frame */ 365 /* The IO request is now in charge of releasing the frame */
320 status = sci_io_request_frame_handler(ireq, frame_index); 366 status = sci_io_request_frame_handler(ireq, frame_index);
321 } else { 367 } else {
322 /* We could not map this tag to a valid IO 368 /* We could not map this tag to a valid IO
323 * request. Just toss the frame and continue 369 * request. Just toss the frame and continue
324 */ 370 */
325 sci_controller_release_frame(ihost, frame_index); 371 sci_controller_release_frame(ihost, frame_index);
326 } 372 }
327 break; 373 break;
328 } 374 }
329 case SCI_STP_DEV_NCQ: { 375 case SCI_STP_DEV_NCQ: {
330 struct dev_to_host_fis *hdr; 376 struct dev_to_host_fis *hdr;
331 377
332 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, 378 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
333 frame_index, 379 frame_index,
334 (void **)&hdr); 380 (void **)&hdr);
335 if (status != SCI_SUCCESS) 381 if (status != SCI_SUCCESS)
336 return status; 382 return status;
337 383
338 if (hdr->fis_type == FIS_SETDEVBITS && 384 if (hdr->fis_type == FIS_SETDEVBITS &&
339 (hdr->status & ATA_ERR)) { 385 (hdr->status & ATA_ERR)) {
340 idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED; 386 idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
341 387
342 /* TODO Check sactive and complete associated IO if any. */ 388 /* TODO Check sactive and complete associated IO if any. */
343 sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR); 389 sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR);
344 } else if (hdr->fis_type == FIS_REGD2H && 390 } else if (hdr->fis_type == FIS_REGD2H &&
345 (hdr->status & ATA_ERR)) { 391 (hdr->status & ATA_ERR)) {
346 /* 392 /*
347 * Some devices return D2H FIS when an NCQ error is detected. 393 * Some devices return D2H FIS when an NCQ error is detected.
348 * Treat this like an SDB error FIS ready reason. 394 * Treat this like an SDB error FIS ready reason.
349 */ 395 */
350 idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED; 396 idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
351 sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR); 397 sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR);
352 } else 398 } else
353 status = SCI_FAILURE; 399 status = SCI_FAILURE;
354 400
355 sci_controller_release_frame(ihost, frame_index); 401 sci_controller_release_frame(ihost, frame_index);
356 break; 402 break;
357 } 403 }
358 case SCI_STP_DEV_CMD: 404 case SCI_STP_DEV_CMD:
359 case SCI_SMP_DEV_CMD: 405 case SCI_SMP_DEV_CMD:
360 /* The device does not process any UF received from the hardware while 406 /* The device does not process any UF received from the hardware while
361 * in this state. All unsolicited frames are forwarded to the io request 407 * in this state. All unsolicited frames are forwarded to the io request
362 * object. 408 * object.
363 */ 409 */
364 status = sci_io_request_frame_handler(idev->working_request, frame_index); 410 status = sci_io_request_frame_handler(idev->working_request, frame_index);
365 break; 411 break;
366 } 412 }
367 413
368 return status; 414 return status;
369 } 415 }
370 416
371 static bool is_remote_device_ready(struct isci_remote_device *idev) 417 static bool is_remote_device_ready(struct isci_remote_device *idev)
372 { 418 {
373 419
374 struct sci_base_state_machine *sm = &idev->sm; 420 struct sci_base_state_machine *sm = &idev->sm;
375 enum sci_remote_device_states state = sm->current_state_id; 421 enum sci_remote_device_states state = sm->current_state_id;
376 422
377 switch (state) { 423 switch (state) {
378 case SCI_DEV_READY: 424 case SCI_DEV_READY:
379 case SCI_STP_DEV_IDLE: 425 case SCI_STP_DEV_IDLE:
380 case SCI_STP_DEV_CMD: 426 case SCI_STP_DEV_CMD:
381 case SCI_STP_DEV_NCQ: 427 case SCI_STP_DEV_NCQ:
382 case SCI_STP_DEV_NCQ_ERROR: 428 case SCI_STP_DEV_NCQ_ERROR:
383 case SCI_STP_DEV_AWAIT_RESET: 429 case SCI_STP_DEV_AWAIT_RESET:
384 case SCI_SMP_DEV_IDLE: 430 case SCI_SMP_DEV_IDLE:
385 case SCI_SMP_DEV_CMD: 431 case SCI_SMP_DEV_CMD:
386 return true; 432 return true;
387 default: 433 default:
388 return false; 434 return false;
389 } 435 }
390 } 436 }
391 437
392 /* 438 /*
393 * called once the remote node context has transitioned to a ready 439 * called once the remote node context has transitioned to a ready
394 * state (after suspending RX and/or TX due to early D2H fis) 440 * state (after suspending RX and/or TX due to early D2H fis)
395 */ 441 */
396 static void atapi_remote_device_resume_done(void *_dev) 442 static void atapi_remote_device_resume_done(void *_dev)
397 { 443 {
398 struct isci_remote_device *idev = _dev; 444 struct isci_remote_device *idev = _dev;
399 struct isci_request *ireq = idev->working_request; 445 struct isci_request *ireq = idev->working_request;
400 446
401 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 447 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
402 } 448 }
403 449
404 enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev, 450 enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
405 u32 event_code) 451 u32 event_code)
406 { 452 {
407 enum sci_status status; 453 enum sci_status status;
408 454
409 switch (scu_get_event_type(event_code)) { 455 switch (scu_get_event_type(event_code)) {
410 case SCU_EVENT_TYPE_RNC_OPS_MISC: 456 case SCU_EVENT_TYPE_RNC_OPS_MISC:
411 case SCU_EVENT_TYPE_RNC_SUSPEND_TX: 457 case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
412 case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: 458 case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
413 status = sci_remote_node_context_event_handler(&idev->rnc, event_code); 459 status = sci_remote_node_context_event_handler(&idev->rnc, event_code);
414 break; 460 break;
415 case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT: 461 case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
416 if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) { 462 if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) {
417 status = SCI_SUCCESS; 463 status = SCI_SUCCESS;
418 464
419 /* Suspend the associated RNC */ 465 /* Suspend the associated RNC */
420 sci_remote_node_context_suspend( 466 sci_remote_node_context_suspend(
421 &idev->rnc, 467 &idev->rnc,
422 SCI_SOFTWARE_SUSPENSION, 468 SCI_SOFTWARE_SUSPENSION,
423 SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT, 469 SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT,
424 NULL, NULL); 470 NULL, NULL);
425 471
426 dev_dbg(scirdev_to_dev(idev), 472 dev_dbg(scirdev_to_dev(idev),
427 "%s: device: %p event code: %x: %s\n", 473 "%s: device: %p event code: %x: %s\n",
428 __func__, idev, event_code, 474 __func__, idev, event_code,
429 is_remote_device_ready(idev) 475 is_remote_device_ready(idev)
430 ? "I_T_Nexus_Timeout event" 476 ? "I_T_Nexus_Timeout event"
431 : "I_T_Nexus_Timeout event in wrong state"); 477 : "I_T_Nexus_Timeout event in wrong state");
432 478
433 break; 479 break;
434 } 480 }
435 /* Else, fall through and treat as unhandled... */ 481 /* Else, fall through and treat as unhandled... */
436 default: 482 default:
437 dev_dbg(scirdev_to_dev(idev), 483 dev_dbg(scirdev_to_dev(idev),
438 "%s: device: %p event code: %x: %s\n", 484 "%s: device: %p event code: %x: %s\n",
439 __func__, idev, event_code, 485 __func__, idev, event_code,
440 is_remote_device_ready(idev) 486 is_remote_device_ready(idev)
441 ? "unexpected event" 487 ? "unexpected event"
442 : "unexpected event in wrong state"); 488 : "unexpected event in wrong state");
443 status = SCI_FAILURE_INVALID_STATE; 489 status = SCI_FAILURE_INVALID_STATE;
444 break; 490 break;
445 } 491 }
446 492
447 if (status != SCI_SUCCESS) 493 if (status != SCI_SUCCESS)
448 return status; 494 return status;
449 495
450 return status; 496 return status;
451 } 497 }
452 498
453 static void sci_remote_device_start_request(struct isci_remote_device *idev, 499 static void sci_remote_device_start_request(struct isci_remote_device *idev,
454 struct isci_request *ireq, 500 struct isci_request *ireq,
455 enum sci_status status) 501 enum sci_status status)
456 { 502 {
457 struct isci_port *iport = idev->owning_port; 503 struct isci_port *iport = idev->owning_port;
458 504
459 /* cleanup requests that failed after starting on the port */ 505 /* cleanup requests that failed after starting on the port */
460 if (status != SCI_SUCCESS) 506 if (status != SCI_SUCCESS)
461 sci_port_complete_io(iport, idev, ireq); 507 sci_port_complete_io(iport, idev, ireq);
462 else { 508 else {
463 kref_get(&idev->kref); 509 kref_get(&idev->kref);
464 idev->started_request_count++; 510 idev->started_request_count++;
465 } 511 }
466 } 512 }
467 513
468 enum sci_status sci_remote_device_start_io(struct isci_host *ihost, 514 enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
469 struct isci_remote_device *idev, 515 struct isci_remote_device *idev,
470 struct isci_request *ireq) 516 struct isci_request *ireq)
471 { 517 {
472 struct sci_base_state_machine *sm = &idev->sm; 518 struct sci_base_state_machine *sm = &idev->sm;
473 enum sci_remote_device_states state = sm->current_state_id; 519 enum sci_remote_device_states state = sm->current_state_id;
474 struct isci_port *iport = idev->owning_port; 520 struct isci_port *iport = idev->owning_port;
475 enum sci_status status; 521 enum sci_status status;
476 522
477 switch (state) { 523 switch (state) {
478 case SCI_DEV_INITIAL: 524 case SCI_DEV_INITIAL:
479 case SCI_DEV_STOPPED: 525 case SCI_DEV_STOPPED:
480 case SCI_DEV_STARTING: 526 case SCI_DEV_STARTING:
481 case SCI_STP_DEV_NCQ_ERROR: 527 case SCI_STP_DEV_NCQ_ERROR:
482 case SCI_DEV_STOPPING: 528 case SCI_DEV_STOPPING:
483 case SCI_DEV_FAILED: 529 case SCI_DEV_FAILED:
484 case SCI_DEV_RESETTING: 530 case SCI_DEV_RESETTING:
485 case SCI_DEV_FINAL: 531 case SCI_DEV_FINAL:
486 default: 532 default:
487 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", 533 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
488 __func__, dev_state_name(state)); 534 __func__, dev_state_name(state));
489 return SCI_FAILURE_INVALID_STATE; 535 return SCI_FAILURE_INVALID_STATE;
490 case SCI_DEV_READY: 536 case SCI_DEV_READY:
491 /* attempt to start an io request for this device object. The remote 537 /* attempt to start an io request for this device object. The remote
492 * device object will issue the start request for the io and if 538 * device object will issue the start request for the io and if
493 * successful it will start the request for the port object then 539 * successful it will start the request for the port object then
494 * increment its own request count. 540 * increment its own request count.
495 */ 541 */
496 status = sci_port_start_io(iport, idev, ireq); 542 status = sci_port_start_io(iport, idev, ireq);
497 if (status != SCI_SUCCESS) 543 if (status != SCI_SUCCESS)
498 return status; 544 return status;
499 545
500 status = sci_remote_node_context_start_io(&idev->rnc, ireq); 546 status = sci_remote_node_context_start_io(&idev->rnc, ireq);
501 if (status != SCI_SUCCESS) 547 if (status != SCI_SUCCESS)
502 break; 548 break;
503 549
504 status = sci_request_start(ireq); 550 status = sci_request_start(ireq);
505 break; 551 break;
506 case SCI_STP_DEV_IDLE: { 552 case SCI_STP_DEV_IDLE: {
507 /* handle the start io operation for a sata device that is in 553 /* handle the start io operation for a sata device that is in
508 * the command idle state. - Evaluate the type of IO request to 554 * the command idle state. - Evaluate the type of IO request to
509 * be started - If it is an NCQ request, change to the NCQ substate - 555 * be started - If it is an NCQ request, change to the NCQ substate -
510 * If it is any other command, change to the CMD substate 556 * If it is any other command, change to the CMD substate
511 * 557 *
512 * If this is a softreset we may want to have a different 558 * If this is a softreset we may want to have a different
513 * substate. 559 * substate.
514 */ 560 */
515 enum sci_remote_device_states new_state; 561 enum sci_remote_device_states new_state;
516 struct sas_task *task = isci_request_access_task(ireq); 562 struct sas_task *task = isci_request_access_task(ireq);
517 563
518 status = sci_port_start_io(iport, idev, ireq); 564 status = sci_port_start_io(iport, idev, ireq);
519 if (status != SCI_SUCCESS) 565 if (status != SCI_SUCCESS)
520 return status; 566 return status;
521 567
522 status = sci_remote_node_context_start_io(&idev->rnc, ireq); 568 status = sci_remote_node_context_start_io(&idev->rnc, ireq);
523 if (status != SCI_SUCCESS) 569 if (status != SCI_SUCCESS)
524 break; 570 break;
525 571
526 status = sci_request_start(ireq); 572 status = sci_request_start(ireq);
527 if (status != SCI_SUCCESS) 573 if (status != SCI_SUCCESS)
528 break; 574 break;
529 575
530 if (task->ata_task.use_ncq) 576 if (task->ata_task.use_ncq)
531 new_state = SCI_STP_DEV_NCQ; 577 new_state = SCI_STP_DEV_NCQ;
532 else { 578 else {
533 idev->working_request = ireq; 579 idev->working_request = ireq;
534 new_state = SCI_STP_DEV_CMD; 580 new_state = SCI_STP_DEV_CMD;
535 } 581 }
536 sci_change_state(sm, new_state); 582 sci_change_state(sm, new_state);
537 break; 583 break;
538 } 584 }
539 case SCI_STP_DEV_NCQ: { 585 case SCI_STP_DEV_NCQ: {
540 struct sas_task *task = isci_request_access_task(ireq); 586 struct sas_task *task = isci_request_access_task(ireq);
541 587
542 if (task->ata_task.use_ncq) { 588 if (task->ata_task.use_ncq) {
543 status = sci_port_start_io(iport, idev, ireq); 589 status = sci_port_start_io(iport, idev, ireq);
544 if (status != SCI_SUCCESS) 590 if (status != SCI_SUCCESS)
545 return status; 591 return status;
546 592
547 status = sci_remote_node_context_start_io(&idev->rnc, ireq); 593 status = sci_remote_node_context_start_io(&idev->rnc, ireq);
548 if (status != SCI_SUCCESS) 594 if (status != SCI_SUCCESS)
549 break; 595 break;
550 596
551 status = sci_request_start(ireq); 597 status = sci_request_start(ireq);
552 } else 598 } else
553 return SCI_FAILURE_INVALID_STATE; 599 return SCI_FAILURE_INVALID_STATE;
554 break; 600 break;
555 } 601 }
556 case SCI_STP_DEV_AWAIT_RESET: 602 case SCI_STP_DEV_AWAIT_RESET:
557 return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; 603 return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
558 case SCI_SMP_DEV_IDLE: 604 case SCI_SMP_DEV_IDLE:
559 status = sci_port_start_io(iport, idev, ireq); 605 status = sci_port_start_io(iport, idev, ireq);
560 if (status != SCI_SUCCESS) 606 if (status != SCI_SUCCESS)
561 return status; 607 return status;
562 608
563 status = sci_remote_node_context_start_io(&idev->rnc, ireq); 609 status = sci_remote_node_context_start_io(&idev->rnc, ireq);
564 if (status != SCI_SUCCESS) 610 if (status != SCI_SUCCESS)
565 break; 611 break;
566 612
567 status = sci_request_start(ireq); 613 status = sci_request_start(ireq);
568 if (status != SCI_SUCCESS) 614 if (status != SCI_SUCCESS)
569 break; 615 break;
570 616
571 idev->working_request = ireq; 617 idev->working_request = ireq;
572 sci_change_state(&idev->sm, SCI_SMP_DEV_CMD); 618 sci_change_state(&idev->sm, SCI_SMP_DEV_CMD);
573 break; 619 break;
574 case SCI_STP_DEV_CMD: 620 case SCI_STP_DEV_CMD:
575 case SCI_SMP_DEV_CMD: 621 case SCI_SMP_DEV_CMD:
576 /* device is already handling a command; it cannot accept new commands 622 /* device is already handling a command; it cannot accept new commands
577 * until this one is complete. 623 * until this one is complete.
578 */ 624 */
579 return SCI_FAILURE_INVALID_STATE; 625 return SCI_FAILURE_INVALID_STATE;
580 } 626 }
581 627
582 sci_remote_device_start_request(idev, ireq, status); 628 sci_remote_device_start_request(idev, ireq, status);
583 return status; 629 return status;
584 } 630 }
585 631
586 static enum sci_status common_complete_io(struct isci_port *iport, 632 static enum sci_status common_complete_io(struct isci_port *iport,
587 struct isci_remote_device *idev, 633 struct isci_remote_device *idev,
588 struct isci_request *ireq) 634 struct isci_request *ireq)
589 { 635 {
590 enum sci_status status; 636 enum sci_status status;
591 637
592 status = sci_request_complete(ireq); 638 status = sci_request_complete(ireq);
593 if (status != SCI_SUCCESS) 639 if (status != SCI_SUCCESS)
594 return status; 640 return status;
595 641
596 status = sci_port_complete_io(iport, idev, ireq); 642 status = sci_port_complete_io(iport, idev, ireq);
597 if (status != SCI_SUCCESS) 643 if (status != SCI_SUCCESS)
598 return status; 644 return status;
599 645
600 sci_remote_device_decrement_request_count(idev); 646 sci_remote_device_decrement_request_count(idev);
601 return status; 647 return status;
602 } 648 }
603 649
604 enum sci_status sci_remote_device_complete_io(struct isci_host *ihost, 650 enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
605 struct isci_remote_device *idev, 651 struct isci_remote_device *idev,
606 struct isci_request *ireq) 652 struct isci_request *ireq)
607 { 653 {
608 struct sci_base_state_machine *sm = &idev->sm; 654 struct sci_base_state_machine *sm = &idev->sm;
609 enum sci_remote_device_states state = sm->current_state_id; 655 enum sci_remote_device_states state = sm->current_state_id;
610 struct isci_port *iport = idev->owning_port; 656 struct isci_port *iport = idev->owning_port;
611 enum sci_status status; 657 enum sci_status status;
612 658
613 switch (state) { 659 switch (state) {
614 case SCI_DEV_INITIAL: 660 case SCI_DEV_INITIAL:
615 case SCI_DEV_STOPPED: 661 case SCI_DEV_STOPPED:
616 case SCI_DEV_STARTING: 662 case SCI_DEV_STARTING:
617 case SCI_STP_DEV_IDLE: 663 case SCI_STP_DEV_IDLE:
618 case SCI_SMP_DEV_IDLE: 664 case SCI_SMP_DEV_IDLE:
619 case SCI_DEV_FAILED: 665 case SCI_DEV_FAILED:
620 case SCI_DEV_FINAL: 666 case SCI_DEV_FINAL:
621 default: 667 default:
622 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", 668 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
623 __func__, dev_state_name(state)); 669 __func__, dev_state_name(state));
624 return SCI_FAILURE_INVALID_STATE; 670 return SCI_FAILURE_INVALID_STATE;
625 case SCI_DEV_READY: 671 case SCI_DEV_READY:
626 case SCI_STP_DEV_AWAIT_RESET: 672 case SCI_STP_DEV_AWAIT_RESET:
627 case SCI_DEV_RESETTING: 673 case SCI_DEV_RESETTING:
628 status = common_complete_io(iport, idev, ireq); 674 status = common_complete_io(iport, idev, ireq);
629 break; 675 break;
630 case SCI_STP_DEV_CMD: 676 case SCI_STP_DEV_CMD:
631 case SCI_STP_DEV_NCQ: 677 case SCI_STP_DEV_NCQ:
632 case SCI_STP_DEV_NCQ_ERROR: 678 case SCI_STP_DEV_NCQ_ERROR:
633 case SCI_STP_DEV_ATAPI_ERROR: 679 case SCI_STP_DEV_ATAPI_ERROR:
634 status = common_complete_io(iport, idev, ireq); 680 status = common_complete_io(iport, idev, ireq);
635 if (status != SCI_SUCCESS) 681 if (status != SCI_SUCCESS)
636 break; 682 break;
637 683
638 if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { 684 if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
639 /* This request caused a hardware error; the device needs a LUN reset. 685 /* This request caused a hardware error; the device needs a LUN reset.
640 * Force the state machine into the AWAIT_RESET state so the remaining 686 * Force the state machine into the AWAIT_RESET state so the remaining
641 * IOs reach the RNC state handler and are completed by the RNC with a 687 * IOs reach the RNC state handler and are completed by the RNC with a
642 * status of "DEVICE_RESET_REQUIRED" instead of "INVALID STATE". 688 * status of "DEVICE_RESET_REQUIRED" instead of "INVALID STATE".
643 */ 689 */
644 sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET); 690 sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
645 } else if (idev->started_request_count == 0) 691 } else if (idev->started_request_count == 0)
646 sci_change_state(sm, SCI_STP_DEV_IDLE); 692 sci_change_state(sm, SCI_STP_DEV_IDLE);
647 break; 693 break;
648 case SCI_SMP_DEV_CMD: 694 case SCI_SMP_DEV_CMD:
649 status = common_complete_io(iport, idev, ireq); 695 status = common_complete_io(iport, idev, ireq);
650 if (status != SCI_SUCCESS) 696 if (status != SCI_SUCCESS)
651 break; 697 break;
652 sci_change_state(sm, SCI_SMP_DEV_IDLE); 698 sci_change_state(sm, SCI_SMP_DEV_IDLE);
653 break; 699 break;
654 case SCI_DEV_STOPPING: 700 case SCI_DEV_STOPPING:
655 status = common_complete_io(iport, idev, ireq); 701 status = common_complete_io(iport, idev, ireq);
656 if (status != SCI_SUCCESS) 702 if (status != SCI_SUCCESS)
657 break; 703 break;
658 704
659 if (idev->started_request_count == 0) 705 if (idev->started_request_count == 0)
660 sci_remote_node_context_destruct(&idev->rnc, 706 sci_remote_node_context_destruct(&idev->rnc,
661 rnc_destruct_done, 707 rnc_destruct_done,
662 idev); 708 idev);
663 break; 709 break;
664 } 710 }
665 711
666 if (status != SCI_SUCCESS) 712 if (status != SCI_SUCCESS)
667 dev_err(scirdev_to_dev(idev), 713 dev_err(scirdev_to_dev(idev),
668 "%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x " 714 "%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
669 "could not complete\n", __func__, iport, 715 "could not complete\n", __func__, iport,
670 idev, ireq, status); 716 idev, ireq, status);
671 else 717 else
672 isci_put_device(idev); 718 isci_put_device(idev);
673 719
674 return status; 720 return status;
675 } 721 }
676 722
677 static void sci_remote_device_continue_request(void *dev) 723 static void sci_remote_device_continue_request(void *dev)
678 { 724 {
679 struct isci_remote_device *idev = dev; 725 struct isci_remote_device *idev = dev;
680 726
681 /* we need to check if this request is still valid to continue. */ 727 /* we need to check if this request is still valid to continue. */
682 if (idev->working_request) 728 if (idev->working_request)
683 sci_controller_continue_io(idev->working_request); 729 sci_controller_continue_io(idev->working_request);
684 } 730 }
685 731
686 enum sci_status sci_remote_device_start_task(struct isci_host *ihost, 732 enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
687 struct isci_remote_device *idev, 733 struct isci_remote_device *idev,
688 struct isci_request *ireq) 734 struct isci_request *ireq)
689 { 735 {
690 struct sci_base_state_machine *sm = &idev->sm; 736 struct sci_base_state_machine *sm = &idev->sm;
691 enum sci_remote_device_states state = sm->current_state_id; 737 enum sci_remote_device_states state = sm->current_state_id;
692 struct isci_port *iport = idev->owning_port; 738 struct isci_port *iport = idev->owning_port;
693 enum sci_status status; 739 enum sci_status status;
694 740
695 switch (state) { 741 switch (state) {
696 case SCI_DEV_INITIAL: 742 case SCI_DEV_INITIAL:
697 case SCI_DEV_STOPPED: 743 case SCI_DEV_STOPPED:
698 case SCI_DEV_STARTING: 744 case SCI_DEV_STARTING:
699 case SCI_SMP_DEV_IDLE: 745 case SCI_SMP_DEV_IDLE:
700 case SCI_SMP_DEV_CMD: 746 case SCI_SMP_DEV_CMD:
701 case SCI_DEV_STOPPING: 747 case SCI_DEV_STOPPING:
702 case SCI_DEV_FAILED: 748 case SCI_DEV_FAILED:
703 case SCI_DEV_RESETTING: 749 case SCI_DEV_RESETTING:
704 case SCI_DEV_FINAL: 750 case SCI_DEV_FINAL:
705 default: 751 default:
706 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", 752 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
707 __func__, dev_state_name(state)); 753 __func__, dev_state_name(state));
708 return SCI_FAILURE_INVALID_STATE; 754 return SCI_FAILURE_INVALID_STATE;
709 case SCI_STP_DEV_IDLE: 755 case SCI_STP_DEV_IDLE:
710 case SCI_STP_DEV_CMD: 756 case SCI_STP_DEV_CMD:
711 case SCI_STP_DEV_NCQ: 757 case SCI_STP_DEV_NCQ:
712 case SCI_STP_DEV_NCQ_ERROR: 758 case SCI_STP_DEV_NCQ_ERROR:
713 case SCI_STP_DEV_AWAIT_RESET: 759 case SCI_STP_DEV_AWAIT_RESET:
714 status = sci_port_start_io(iport, idev, ireq); 760 status = sci_port_start_io(iport, idev, ireq);
715 if (status != SCI_SUCCESS) 761 if (status != SCI_SUCCESS)
716 return status; 762 return status;
717 763
718 status = sci_remote_node_context_start_task(&idev->rnc, ireq); 764 status = sci_remote_node_context_start_task(&idev->rnc, ireq);
719 if (status != SCI_SUCCESS) 765 if (status != SCI_SUCCESS)
720 goto out; 766 goto out;
721 767
722 status = sci_request_start(ireq); 768 status = sci_request_start(ireq);
723 if (status != SCI_SUCCESS) 769 if (status != SCI_SUCCESS)
724 goto out; 770 goto out;
725 771
726 /* Note: If the remote device state is not IDLE this will 772 /* Note: If the remote device state is not IDLE this will
727 * replace the request that probably resulted in the task 773 * replace the request that probably resulted in the task
728 * management request. 774 * management request.
729 */ 775 */
730 idev->working_request = ireq; 776 idev->working_request = ireq;
731 sci_change_state(sm, SCI_STP_DEV_CMD); 777 sci_change_state(sm, SCI_STP_DEV_CMD);
732 778
733 /* The remote node context must cleanup the TCi to NCQ mapping 779 /* The remote node context must cleanup the TCi to NCQ mapping
734 * table. The only way to do this correctly is to either write 780 * table. The only way to do this correctly is to either write
735 * to the TLCR register or to invalidate and repost the RNC. In 781 * to the TLCR register or to invalidate and repost the RNC. In
736 * either case the remote node context state machine will take 782 * either case the remote node context state machine will take
737 * the correct action when the remote node context is suspended 783 * the correct action when the remote node context is suspended
738 * and later resumed. 784 * and later resumed.
739 */ 785 */
740 sci_remote_node_context_suspend( 786 sci_remote_node_context_suspend(
741 &idev->rnc, SCI_SOFTWARE_SUSPENSION, 787 &idev->rnc, SCI_SOFTWARE_SUSPENSION,
742 SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT, NULL, NULL); 788 SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT, NULL, NULL);
743 sci_remote_node_context_resume( 789 sci_remote_node_context_resume(
744 &idev->rnc, sci_remote_device_continue_request, idev); 790 &idev->rnc, sci_remote_device_continue_request, idev);
745 791
746 out: 792 out:
747 sci_remote_device_start_request(idev, ireq, status); 793 sci_remote_device_start_request(idev, ireq, status);
748 /* We need to let the controller start request handler know that 794 /* We need to let the controller start request handler know that
749 * it can't post the TC yet. We will provide a callback function to 795 * it can't post the TC yet. We will provide a callback function to
750 * post the TC when the RNC gets resumed. 796 * post the TC when the RNC gets resumed.
751 */ 797 */
752 return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS; 798 return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
753 case SCI_DEV_READY: 799 case SCI_DEV_READY:
754 status = sci_port_start_io(iport, idev, ireq); 800 status = sci_port_start_io(iport, idev, ireq);
755 if (status != SCI_SUCCESS) 801 if (status != SCI_SUCCESS)
756 return status; 802 return status;
757 803
758 status = sci_remote_node_context_start_task(&idev->rnc, ireq); 804 status = sci_remote_node_context_start_task(&idev->rnc, ireq);
759 if (status != SCI_SUCCESS) 805 if (status != SCI_SUCCESS)
760 break; 806 break;
761 807
762 status = sci_request_start(ireq); 808 status = sci_request_start(ireq);
763 break; 809 break;
764 } 810 }
765 sci_remote_device_start_request(idev, ireq, status); 811 sci_remote_device_start_request(idev, ireq, status);
766 812
767 return status; 813 return status;
768 } 814 }
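The ordering in the STP task-management path above is the heart of this change: the request is started, but the actual task context is only posted after the RNC has been suspended and resumed, via the sci_remote_device_continue_request() callback. Below is a minimal sketch of that deferred-post control flow, using hypothetical mock_rnc/mock_request types rather than the driver's structures:

#include <stdio.h>

struct mock_request {
	int posted;
};

typedef void (*resume_cb_t)(void *arg);

struct mock_rnc {
	int suspended;
	resume_cb_t cb;		/* deferred work to run once resumed */
	void *cb_arg;
};

static void rnc_suspend(struct mock_rnc *rnc)
{
	rnc->suspended = 1;	/* posting a TC is now unsafe */
}

static void rnc_resume(struct mock_rnc *rnc, resume_cb_t cb, void *arg)
{
	rnc->cb = cb;
	rnc->cb_arg = arg;
	/* In the driver a hardware completion triggers the callback
	 * later; in this sketch the resume completes inline. */
	rnc->suspended = 0;
	if (rnc->cb)
		rnc->cb(rnc->cb_arg);
}

static void continue_request(void *arg)
{
	struct mock_request *ireq = arg;

	ireq->posted = 1;	/* the TC is only posted here */
	printf("TC posted after RNC resume\n");
}

int main(void)
{
	struct mock_rnc rnc = { 0 };
	struct mock_request ireq = { 0 };

	rnc_suspend(&rnc);			/* no TC may be posted */
	rnc_resume(&rnc, continue_request, &ireq);
	return ireq.posted ? 0 : 1;
}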
769 815
770 void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request) 816 void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request)
771 { 817 {
772 struct isci_port *iport = idev->owning_port; 818 struct isci_port *iport = idev->owning_port;
773 u32 context; 819 u32 context;
774 820
775 context = request | 821 context = request |
776 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | 822 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
777 (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 823 (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
778 idev->rnc.remote_node_index; 824 idev->rnc.remote_node_index;
779 825
780 sci_controller_post_request(iport->owning_controller, context); 826 sci_controller_post_request(iport->owning_controller, context);
781 } 827 }
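The context word built above packs the request code, protocol engine group, logical port index, and remote node index into one u32 before handing it to sci_controller_post_request(). A standalone sketch of the same composition follows; the DEMO_* shift values are made up for illustration, since the real field positions come from the SCU_CONTEXT_COMMAND_* definitions:

#include <stdio.h>
#include <stdint.h>

/* Illustrative field positions only: the values below are hypothetical,
 * not the hardware's actual layout. */
#define DEMO_PEG_SHIFT	27
#define DEMO_PORT_SHIFT	25

static uint32_t make_context(uint32_t request, uint32_t peg,
			     uint32_t port, uint32_t rni)
{
	return request |
	       (peg << DEMO_PEG_SHIFT) |
	       (port << DEMO_PORT_SHIFT) |
	       rni;			/* remote node index in the low bits */
}

int main(void)
{
	/* protocol engine group 0, logical port 1, remote node index 5 */
	uint32_t ctx = make_context(0, 0, 1, 5);

	printf("post-request context: 0x%08x\n", ctx);	/* 0x02000005 */
	return 0;
}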
782 828
783 /* called once the remote node context has transitioned to a 829 /* called once the remote node context has transitioned to a
784 * ready state. This is the indication that the remote device object can also 830 * ready state. This is the indication that the remote device object can also
785 * transition to ready. 831 * transition to ready.
786 */ 832 */
787 static void remote_device_resume_done(void *_dev) 833 static void remote_device_resume_done(void *_dev)
788 { 834 {
789 struct isci_remote_device *idev = _dev; 835 struct isci_remote_device *idev = _dev;
790 836
791 if (is_remote_device_ready(idev)) 837 if (is_remote_device_ready(idev))
792 return; 838 return;
793 839
794 /* go 'ready' if we are not already in a ready state */ 840 /* go 'ready' if we are not already in a ready state */
795 sci_change_state(&idev->sm, SCI_DEV_READY); 841 sci_change_state(&idev->sm, SCI_DEV_READY);
796 } 842 }
797 843
798 static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev) 844 static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
799 { 845 {
800 struct isci_remote_device *idev = _dev; 846 struct isci_remote_device *idev = _dev;
801 struct isci_host *ihost = idev->owning_port->owning_controller; 847 struct isci_host *ihost = idev->owning_port->owning_controller;
802 848
803 /* For NCQ operation we do not issue an isci_remote_device_not_ready(). 849 /* For NCQ operation we do not issue an isci_remote_device_not_ready().
804 * As a result, avoid sending the ready notification. 850 * As a result, avoid sending the ready notification.
805 */ 851 */
806 if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ) 852 if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ)
807 isci_remote_device_ready(ihost, idev); 853 isci_remote_device_ready(ihost, idev);
808 } 854 }
809 855
810 static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm) 856 static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
811 { 857 {
812 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); 858 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
813 859
814 /* Initial state is a transitional state to the stopped state */ 860 /* Initial state is a transitional state to the stopped state */
815 sci_change_state(&idev->sm, SCI_DEV_STOPPED); 861 sci_change_state(&idev->sm, SCI_DEV_STOPPED);
816 } 862 }
817 863
818 /** 864 /**
819 * sci_remote_device_destruct() - free remote node context and destruct 865 * sci_remote_device_destruct() - free remote node context and destruct
820 * @remote_device: This parameter specifies the remote device to be destructed. 866 * @remote_device: This parameter specifies the remote device to be destructed.
821 * 867 *
822 * Remote device objects are a limited resource. As such, they must be 868 * Remote device objects are a limited resource. As such, they must be
823 * protected. Thus calls to construct and destruct are mutually exclusive and 869 * protected. Thus calls to construct and destruct are mutually exclusive and
824 * non-reentrant. The return value indicates whether the device was 870 * non-reentrant. The return value indicates whether the device was
825 * successfully destructed or if some failure occurred. SCI_SUCCESS is 871 * successfully destructed or if some failure occurred. SCI_SUCCESS is
826 * returned if the device is successfully destructed. 872 * returned if the device is successfully destructed.
827 * SCI_FAILURE_INVALID_REMOTE_DEVICE is returned if the supplied 873 * SCI_FAILURE_INVALID_REMOTE_DEVICE is returned if the supplied
828 * device isn't valid (e.g. it's already been destroyed, the handle isn't 874 * device isn't valid (e.g. it's already been destroyed, the handle isn't
829 * valid, etc.). 875 * valid, etc.).
830 */ 876 */
831 static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev) 877 static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev)
832 { 878 {
833 struct sci_base_state_machine *sm = &idev->sm; 879 struct sci_base_state_machine *sm = &idev->sm;
834 enum sci_remote_device_states state = sm->current_state_id; 880 enum sci_remote_device_states state = sm->current_state_id;
835 struct isci_host *ihost; 881 struct isci_host *ihost;
836 882
837 if (state != SCI_DEV_STOPPED) { 883 if (state != SCI_DEV_STOPPED) {
838 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", 884 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
839 __func__, dev_state_name(state)); 885 __func__, dev_state_name(state));
840 return SCI_FAILURE_INVALID_STATE; 886 return SCI_FAILURE_INVALID_STATE;
841 } 887 }
842 888
843 ihost = idev->owning_port->owning_controller; 889 ihost = idev->owning_port->owning_controller;
844 sci_controller_free_remote_node_context(ihost, idev, 890 sci_controller_free_remote_node_context(ihost, idev,
845 idev->rnc.remote_node_index); 891 idev->rnc.remote_node_index);
846 idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX; 892 idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
847 sci_change_state(sm, SCI_DEV_FINAL); 893 sci_change_state(sm, SCI_DEV_FINAL);
848 894
849 return SCI_SUCCESS; 895 return SCI_SUCCESS;
850 } 896 }
851 897
852 /** 898 /**
853 * isci_remote_device_deconstruct() - This function frees an isci_remote_device. 899 * isci_remote_device_deconstruct() - This function frees an isci_remote_device.
854 * @ihost: This parameter specifies the isci host object. 900 * @ihost: This parameter specifies the isci host object.
855 * @idev: This parameter specifies the remote device to be freed. 901 * @idev: This parameter specifies the remote device to be freed.
856 * 902 *
857 */ 903 */
858 static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev) 904 static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev)
859 { 905 {
860 dev_dbg(&ihost->pdev->dev, 906 dev_dbg(&ihost->pdev->dev,
861 "%s: isci_device = %p\n", __func__, idev); 907 "%s: isci_device = %p\n", __func__, idev);
862 908
863 /* There should not be any outstanding I/Os. All paths to 909 /* There should not be any outstanding I/Os. All paths to
864 * here should go through isci_remote_device_nuke_requests. 910 * here should go through isci_remote_device_nuke_requests.
865 * If we hit this condition, we will need a way to complete 911 * If we hit this condition, we will need a way to complete
866 * io requests in process */ 912 * io requests in process */
867 BUG_ON(!list_empty(&idev->reqs_in_process)); 913 BUG_ON(!list_empty(&idev->reqs_in_process));
868 914
869 sci_remote_device_destruct(idev); 915 sci_remote_device_destruct(idev);
870 list_del_init(&idev->node); 916 list_del_init(&idev->node);
871 isci_put_device(idev); 917 isci_put_device(idev);
872 } 918 }
873 919
874 static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm) 920 static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
875 { 921 {
876 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); 922 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
877 struct isci_host *ihost = idev->owning_port->owning_controller; 923 struct isci_host *ihost = idev->owning_port->owning_controller;
878 u32 prev_state; 924 u32 prev_state;
879 925
880 /* If we are entering from the stopping state let the SCI User know that 926 /* If we are entering from the stopping state let the SCI User know that
881 * the stop operation has completed. 927 * the stop operation has completed.
882 */ 928 */
883 prev_state = idev->sm.previous_state_id; 929 prev_state = idev->sm.previous_state_id;
884 if (prev_state == SCI_DEV_STOPPING) 930 if (prev_state == SCI_DEV_STOPPING)
885 isci_remote_device_deconstruct(ihost, idev); 931 isci_remote_device_deconstruct(ihost, idev);
886 932
887 sci_controller_remote_device_stopped(ihost, idev); 933 sci_controller_remote_device_stopped(ihost, idev);
888 } 934 }
889 935
890 static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm) 936 static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
891 { 937 {
892 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); 938 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
893 struct isci_host *ihost = idev->owning_port->owning_controller; 939 struct isci_host *ihost = idev->owning_port->owning_controller;
894 940
895 isci_remote_device_not_ready(ihost, idev, 941 isci_remote_device_not_ready(ihost, idev,
896 SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED); 942 SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED);
897 } 943 }
898 944
899 static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm) 945 static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
900 { 946 {
901 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); 947 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
902 struct isci_host *ihost = idev->owning_port->owning_controller; 948 struct isci_host *ihost = idev->owning_port->owning_controller;
903 struct domain_device *dev = idev->domain_dev; 949 struct domain_device *dev = idev->domain_dev;
904 950
905 if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) { 951 if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
906 sci_change_state(&idev->sm, SCI_STP_DEV_IDLE); 952 sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
907 } else if (dev_is_expander(dev)) { 953 } else if (dev_is_expander(dev)) {
908 sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE); 954 sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
909 } else 955 } else
910 isci_remote_device_ready(ihost, idev); 956 isci_remote_device_ready(ihost, idev);
911 } 957 }
912 958
913 static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm) 959 static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
914 { 960 {
915 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); 961 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
916 struct domain_device *dev = idev->domain_dev; 962 struct domain_device *dev = idev->domain_dev;
917 963
918 if (dev->dev_type == SAS_END_DEV) { 964 if (dev->dev_type == SAS_END_DEV) {
919 struct isci_host *ihost = idev->owning_port->owning_controller; 965 struct isci_host *ihost = idev->owning_port->owning_controller;
920 966
921 isci_remote_device_not_ready(ihost, idev, 967 isci_remote_device_not_ready(ihost, idev,
922 SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED); 968 SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED);
923 } 969 }
924 } 970 }
925 971
926 static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm) 972 static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
927 { 973 {
928 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); 974 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
929 struct isci_host *ihost = idev->owning_port->owning_controller; 975 struct isci_host *ihost = idev->owning_port->owning_controller;
930 976
931 dev_dbg(&ihost->pdev->dev, 977 dev_dbg(&ihost->pdev->dev,
932 "%s: isci_device = %p\n", __func__, idev); 978 "%s: isci_device = %p\n", __func__, idev);
933 979
934 sci_remote_node_context_suspend( 980 sci_remote_node_context_suspend(
935 &idev->rnc, SCI_SOFTWARE_SUSPENSION, 981 &idev->rnc, SCI_SOFTWARE_SUSPENSION,
936 SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT, NULL, NULL); 982 SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT, NULL, NULL);
937 } 983 }
938 984
939 static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm) 985 static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
940 { 986 {
941 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); 987 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
942 struct isci_host *ihost = idev->owning_port->owning_controller; 988 struct isci_host *ihost = idev->owning_port->owning_controller;
943 989
944 dev_dbg(&ihost->pdev->dev, 990 dev_dbg(&ihost->pdev->dev,
945 "%s: isci_device = %p\n", __func__, idev); 991 "%s: isci_device = %p\n", __func__, idev);
946 992
947 sci_remote_node_context_resume(&idev->rnc, NULL, NULL); 993 sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
948 } 994 }
949 995
950 static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) 996 static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
951 { 997 {
952 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); 998 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
953 999
954 idev->working_request = NULL; 1000 idev->working_request = NULL;
955 if (sci_remote_node_context_is_ready(&idev->rnc)) { 1001 if (sci_remote_node_context_is_ready(&idev->rnc)) {
956 /* 1002 /*
957 * Since the RNC is ready, it's alright to finish completion 1003 * Since the RNC is ready, it's alright to finish completion
958 * processing (e.g. signal the remote device is ready). */ 1004 * processing (e.g. signal the remote device is ready). */
959 sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev); 1005 sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev);
960 } else { 1006 } else {
961 sci_remote_node_context_resume(&idev->rnc, 1007 sci_remote_node_context_resume(&idev->rnc,
962 sci_stp_remote_device_ready_idle_substate_resume_complete_handler, 1008 sci_stp_remote_device_ready_idle_substate_resume_complete_handler,
963 idev); 1009 idev);
964 } 1010 }
965 } 1011 }
966 1012
967 static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) 1013 static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
968 { 1014 {
969 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); 1015 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
970 struct isci_host *ihost = idev->owning_port->owning_controller; 1016 struct isci_host *ihost = idev->owning_port->owning_controller;
971 1017
972 BUG_ON(idev->working_request == NULL); 1018 BUG_ON(idev->working_request == NULL);
973 1019
974 isci_remote_device_not_ready(ihost, idev, 1020 isci_remote_device_not_ready(ihost, idev,
975 SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED); 1021 SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED);
976 } 1022 }
977 1023
978 static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm) 1024 static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
979 { 1025 {
980 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); 1026 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
981 struct isci_host *ihost = idev->owning_port->owning_controller; 1027 struct isci_host *ihost = idev->owning_port->owning_controller;
982 1028
983 if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED) 1029 if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED)
984 isci_remote_device_not_ready(ihost, idev, 1030 isci_remote_device_not_ready(ihost, idev,
985 idev->not_ready_reason); 1031 idev->not_ready_reason);
986 } 1032 }
987 1033
988 static void sci_stp_remote_device_atapi_error_substate_enter( 1034 static void sci_stp_remote_device_atapi_error_substate_enter(
989 struct sci_base_state_machine *sm) 1035 struct sci_base_state_machine *sm)
990 { 1036 {
991 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); 1037 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
992 1038
993 /* This state is entered when an I/O is decoded with an error 1039 /* This state is entered when an I/O is decoded with an error
994 * condition. By this point the RNC expected suspension state is set. 1040 * condition. By this point the RNC expected suspension state is set.
995 * The error conditions suspend the device, so unsuspend here if 1041 * The error conditions suspend the device, so unsuspend here if
996 * possible. 1042 * possible.
997 */ 1043 */
998 sci_remote_node_context_resume(&idev->rnc, 1044 sci_remote_node_context_resume(&idev->rnc,
999 atapi_remote_device_resume_done, 1045 atapi_remote_device_resume_done,
1000 idev); 1046 idev);
1001 } 1047 }
1002 1048
1003 static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm) 1049 static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
1004 { 1050 {
1005 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); 1051 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1006 struct isci_host *ihost = idev->owning_port->owning_controller; 1052 struct isci_host *ihost = idev->owning_port->owning_controller;
1007 1053
1008 isci_remote_device_ready(ihost, idev); 1054 isci_remote_device_ready(ihost, idev);
1009 } 1055 }
1010 1056
1011 static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm) 1057 static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
1012 { 1058 {
1013 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); 1059 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1014 struct isci_host *ihost = idev->owning_port->owning_controller; 1060 struct isci_host *ihost = idev->owning_port->owning_controller;
1015 1061
1016 BUG_ON(idev->working_request == NULL); 1062 BUG_ON(idev->working_request == NULL);
1017 1063
1018 isci_remote_device_not_ready(ihost, idev, 1064 isci_remote_device_not_ready(ihost, idev,
1019 SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED); 1065 SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED);
1020 } 1066 }
1021 1067
1022 static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm) 1068 static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
1023 { 1069 {
1024 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); 1070 struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
1025 1071
1026 idev->working_request = NULL; 1072 idev->working_request = NULL;
1027 } 1073 }
1028 1074
1029 static const struct sci_base_state sci_remote_device_state_table[] = { 1075 static const struct sci_base_state sci_remote_device_state_table[] = {
1030 [SCI_DEV_INITIAL] = { 1076 [SCI_DEV_INITIAL] = {
1031 .enter_state = sci_remote_device_initial_state_enter, 1077 .enter_state = sci_remote_device_initial_state_enter,
1032 }, 1078 },
1033 [SCI_DEV_STOPPED] = { 1079 [SCI_DEV_STOPPED] = {
1034 .enter_state = sci_remote_device_stopped_state_enter, 1080 .enter_state = sci_remote_device_stopped_state_enter,
1035 }, 1081 },
1036 [SCI_DEV_STARTING] = { 1082 [SCI_DEV_STARTING] = {
1037 .enter_state = sci_remote_device_starting_state_enter, 1083 .enter_state = sci_remote_device_starting_state_enter,
1038 }, 1084 },
1039 [SCI_DEV_READY] = { 1085 [SCI_DEV_READY] = {
1040 .enter_state = sci_remote_device_ready_state_enter, 1086 .enter_state = sci_remote_device_ready_state_enter,
1041 .exit_state = sci_remote_device_ready_state_exit 1087 .exit_state = sci_remote_device_ready_state_exit
1042 }, 1088 },
1043 [SCI_STP_DEV_IDLE] = { 1089 [SCI_STP_DEV_IDLE] = {
1044 .enter_state = sci_stp_remote_device_ready_idle_substate_enter, 1090 .enter_state = sci_stp_remote_device_ready_idle_substate_enter,
1045 }, 1091 },
1046 [SCI_STP_DEV_CMD] = { 1092 [SCI_STP_DEV_CMD] = {
1047 .enter_state = sci_stp_remote_device_ready_cmd_substate_enter, 1093 .enter_state = sci_stp_remote_device_ready_cmd_substate_enter,
1048 }, 1094 },
1049 [SCI_STP_DEV_NCQ] = { }, 1095 [SCI_STP_DEV_NCQ] = { },
1050 [SCI_STP_DEV_NCQ_ERROR] = { 1096 [SCI_STP_DEV_NCQ_ERROR] = {
1051 .enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter, 1097 .enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
1052 }, 1098 },
1053 [SCI_STP_DEV_ATAPI_ERROR] = { 1099 [SCI_STP_DEV_ATAPI_ERROR] = {
1054 .enter_state = sci_stp_remote_device_atapi_error_substate_enter, 1100 .enter_state = sci_stp_remote_device_atapi_error_substate_enter,
1055 }, 1101 },
1056 [SCI_STP_DEV_AWAIT_RESET] = { }, 1102 [SCI_STP_DEV_AWAIT_RESET] = { },
1057 [SCI_SMP_DEV_IDLE] = { 1103 [SCI_SMP_DEV_IDLE] = {
1058 .enter_state = sci_smp_remote_device_ready_idle_substate_enter, 1104 .enter_state = sci_smp_remote_device_ready_idle_substate_enter,
1059 }, 1105 },
1060 [SCI_SMP_DEV_CMD] = { 1106 [SCI_SMP_DEV_CMD] = {
1061 .enter_state = sci_smp_remote_device_ready_cmd_substate_enter, 1107 .enter_state = sci_smp_remote_device_ready_cmd_substate_enter,
1062 .exit_state = sci_smp_remote_device_ready_cmd_substate_exit, 1108 .exit_state = sci_smp_remote_device_ready_cmd_substate_exit,
1063 }, 1109 },
1064 [SCI_DEV_STOPPING] = { }, 1110 [SCI_DEV_STOPPING] = { },
1065 [SCI_DEV_FAILED] = { }, 1111 [SCI_DEV_FAILED] = { },
1066 [SCI_DEV_RESETTING] = { 1112 [SCI_DEV_RESETTING] = {
1067 .enter_state = sci_remote_device_resetting_state_enter, 1113 .enter_state = sci_remote_device_resetting_state_enter,
1068 .exit_state = sci_remote_device_resetting_state_exit 1114 .exit_state = sci_remote_device_resetting_state_exit
1069 }, 1115 },
1070 [SCI_DEV_FINAL] = { }, 1116 [SCI_DEV_FINAL] = { },
1071 }; 1117 };
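The state table above only supplies optional enter/exit hooks; transitions are requested imperatively through sci_change_state(), which runs the old state's exit hook, records the previous state, and then runs the new state's enter hook. Below is a simplified, self-contained sketch of that table-driven pattern (the demo_* types stand in for sci_base_state_machine and are not the driver's API):

#include <stdio.h>

enum demo_state { DEMO_INITIAL, DEMO_STOPPED, DEMO_FINAL, DEMO_STATE_COUNT };

struct demo_sm;
typedef void (*state_hook_t)(struct demo_sm *sm);

struct demo_state_desc {
	state_hook_t enter_state;	/* optional, may be NULL */
	state_hook_t exit_state;	/* optional, may be NULL */
};

struct demo_sm {
	enum demo_state current;
	enum demo_state previous;
	const struct demo_state_desc *table;
};

static void demo_change_state(struct demo_sm *sm, enum demo_state next)
{
	const struct demo_state_desc *cur = &sm->table[sm->current];

	if (cur->exit_state)
		cur->exit_state(sm);		/* leave the old state */
	sm->previous = sm->current;
	sm->current = next;
	if (sm->table[next].enter_state)
		sm->table[next].enter_state(sm);	/* enter the new one */
}

static void stopped_enter(struct demo_sm *sm)
{
	printf("entered STOPPED (from state %d)\n", sm->previous);
}

static const struct demo_state_desc demo_table[DEMO_STATE_COUNT] = {
	[DEMO_INITIAL] = { },				/* no hooks */
	[DEMO_STOPPED] = { .enter_state = stopped_enter },
	[DEMO_FINAL]   = { },
};

int main(void)
{
	struct demo_sm sm = { DEMO_INITIAL, DEMO_INITIAL, demo_table };

	demo_change_state(&sm, DEMO_STOPPED);
	return 0;
}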
1072 1118
1073 /** 1119 /**
1074 * sci_remote_device_construct() - common construction 1120 * sci_remote_device_construct() - common construction
1075 * @sci_port: SAS/SATA port through which this device is accessed. 1121 * @sci_port: SAS/SATA port through which this device is accessed.
1076 * @sci_dev: remote device to construct 1122 * @sci_dev: remote device to construct
1077 * 1123 *
1078 * This routine just performs benign initialization and does not 1124 * This routine just performs benign initialization and does not
1079 * allocate the remote_node_context which is left to 1125 * allocate the remote_node_context which is left to
1080 * sci_remote_device_[de]a_construct(). sci_remote_device_destruct() 1126 * sci_remote_device_[de]a_construct(). sci_remote_device_destruct()
1081 * frees the remote_node_context(s) for the device. 1127 * frees the remote_node_context(s) for the device.
1082 */ 1128 */
1083 static void sci_remote_device_construct(struct isci_port *iport, 1129 static void sci_remote_device_construct(struct isci_port *iport,
1084 struct isci_remote_device *idev) 1130 struct isci_remote_device *idev)
1085 { 1131 {
1086 idev->owning_port = iport; 1132 idev->owning_port = iport;
1087 idev->started_request_count = 0; 1133 idev->started_request_count = 0;
1088 1134
1089 sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL); 1135 sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL);
1090 1136
1091 sci_remote_node_context_construct(&idev->rnc, 1137 sci_remote_node_context_construct(&idev->rnc,
1092 SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX); 1138 SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
1093 } 1139 }
1094 1140
1095 /** 1141 /**
1096 * sci_remote_device_da_construct() - construct direct attached device. 1142 * sci_remote_device_da_construct() - construct direct attached device.
1097 * 1143 *
1098 * The information (e.g. IAF, Signature FIS, etc.) necessary to build 1144 * The information (e.g. IAF, Signature FIS, etc.) necessary to build
1099 * the device is known to the SCI Core since it is contained in the 1145 * the device is known to the SCI Core since it is contained in the
1100 * sci_phy object. Remote node context(s) is/are a global resource 1146 * sci_phy object. Remote node context(s) is/are a global resource
1101 * allocated by this routine, freed by sci_remote_device_destruct(). 1147 * allocated by this routine, freed by sci_remote_device_destruct().
1102 * 1148 *
1103 * Returns: 1149 * Returns:
1104 * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed. 1150 * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
1105 * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to 1151 * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
1106 * sata-only controller instance. 1152 * sata-only controller instance.
1107 * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted. 1153 * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
1108 */ 1154 */
1109 static enum sci_status sci_remote_device_da_construct(struct isci_port *iport, 1155 static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
1110 struct isci_remote_device *idev) 1156 struct isci_remote_device *idev)
1111 { 1157 {
1112 enum sci_status status; 1158 enum sci_status status;
1113 struct sci_port_properties properties; 1159 struct sci_port_properties properties;
1114 1160
1115 sci_remote_device_construct(iport, idev); 1161 sci_remote_device_construct(iport, idev);
1116 1162
1117 sci_port_get_properties(iport, &properties); 1163 sci_port_get_properties(iport, &properties);
1118 /* Get accurate port width from port's phy mask for a DA device. */ 1164 /* Get accurate port width from port's phy mask for a DA device. */
1119 idev->device_port_width = hweight32(properties.phy_mask); 1165 idev->device_port_width = hweight32(properties.phy_mask);
1120 1166
1121 status = sci_controller_allocate_remote_node_context(iport->owning_controller, 1167 status = sci_controller_allocate_remote_node_context(iport->owning_controller,
1122 idev, 1168 idev,
1123 &idev->rnc.remote_node_index); 1169 &idev->rnc.remote_node_index);
1124 1170
1125 if (status != SCI_SUCCESS) 1171 if (status != SCI_SUCCESS)
1126 return status; 1172 return status;
1127 1173
1128 idev->connection_rate = sci_port_get_max_allowed_speed(iport); 1174 idev->connection_rate = sci_port_get_max_allowed_speed(iport);
1129 1175
1130 return SCI_SUCCESS; 1176 return SCI_SUCCESS;
1131 } 1177 }
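The direct-attached port width above is just the population count of the port's phy mask, so a port formed from phys 0-3 (phy_mask 0x0f) has width 4. A quick standalone illustration; popcount32() plays the role of the kernel's hweight32():

#include <stdio.h>
#include <stdint.h>

/* Same idea as the kernel's hweight32(): count the set bits. */
static unsigned int popcount32(uint32_t v)
{
	unsigned int n = 0;

	for (; v; v &= v - 1)	/* clear the lowest set bit each pass */
		n++;
	return n;
}

int main(void)
{
	printf("phy_mask 0x0f -> width %u\n", popcount32(0x0f));	/* 4 */
	printf("phy_mask 0x05 -> width %u\n", popcount32(0x05));	/* 2 */
	return 0;
}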
1132 1178
1133 /** 1179 /**
1134 * sci_remote_device_ea_construct() - construct expander attached device 1180 * sci_remote_device_ea_construct() - construct expander attached device
1135 * 1181 *
1136 * Remote node context(s) is/are a global resource allocated by this 1182 * Remote node context(s) is/are a global resource allocated by this
1137 * routine, freed by sci_remote_device_destruct(). 1183 * routine, freed by sci_remote_device_destruct().
1138 * 1184 *
1139 * Returns: 1185 * Returns:
1140 * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed. 1186 * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
1141 * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to 1187 * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
1142 * sata-only controller instance. 1188 * sata-only controller instance.
1143 * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted. 1189 * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
1144 */ 1190 */
1145 static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport, 1191 static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
1146 struct isci_remote_device *idev) 1192 struct isci_remote_device *idev)
1147 { 1193 {
1148 struct domain_device *dev = idev->domain_dev; 1194 struct domain_device *dev = idev->domain_dev;
1149 enum sci_status status; 1195 enum sci_status status;
1150 1196
1151 sci_remote_device_construct(iport, idev); 1197 sci_remote_device_construct(iport, idev);
1152 1198
1153 status = sci_controller_allocate_remote_node_context(iport->owning_controller, 1199 status = sci_controller_allocate_remote_node_context(iport->owning_controller,
1154 idev, 1200 idev,
1155 &idev->rnc.remote_node_index); 1201 &idev->rnc.remote_node_index);
1156 if (status != SCI_SUCCESS) 1202 if (status != SCI_SUCCESS)
1157 return status; 1203 return status;
1158 1204
1159 /* For SAS-2 the physical link rate is actually a logical link 1205 /* For SAS-2 the physical link rate is actually a logical link
1160 * rate that incorporates multiplexing. The SCU doesn't 1206 * rate that incorporates multiplexing. The SCU doesn't
1161 * incorporate multiplexing and for the purposes of the 1207 * incorporate multiplexing and for the purposes of the
1162 * connection the logical link rate is the same as the 1208 * connection the logical link rate is the same as the
1163 * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay 1209 * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay
1164 * one another, so this code works for both situations. 1210 * one another, so this code works for both situations.
1165 */ 1211 */
1166 idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport), 1212 idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
1167 dev->linkrate); 1213 dev->linkrate);
1168 1214
1169 /* @todo Should I assign the port width by reading all of the phys on the port? */ 1215 /* @todo Should I assign the port width by reading all of the phys on the port? */
1170 idev->device_port_width = 1; 1216 idev->device_port_width = 1;
1171 1217
1172 return SCI_SUCCESS; 1218 return SCI_SUCCESS;
1173 } 1219 }
1174 1220
1175 /** 1221 /**
1176 * sci_remote_device_start() - This method will start the supplied remote 1222 * sci_remote_device_start() - This method will start the supplied remote
1177 * device. This method enables normal IO requests to flow through to the 1223 * device. This method enables normal IO requests to flow through to the
1178 * remote device. 1224 * remote device.
1179 * @remote_device: This parameter specifies the device to be started. 1225 * @remote_device: This parameter specifies the device to be started.
1180 * @timeout: This parameter specifies the number of milliseconds in which the 1226 * @timeout: This parameter specifies the number of milliseconds in which the
1181 * start operation should complete. 1227 * start operation should complete.
1182 * 1228 *
1183 * Returns an indication of whether the device was successfully started. 1229 * Returns an indication of whether the device was successfully started.
1184 * SCI_SUCCESS is returned if the device was successfully started. 1230 * SCI_SUCCESS is returned if the device was successfully started.
1185 * SCI_FAILURE_INVALID_PHY is returned if the user attempts to start 1231 * SCI_FAILURE_INVALID_PHY is returned if the user attempts to start
1186 * the device when there have been no phys added to it. 1232 * the device when there have been no phys added to it.
1187 */ 1233 */
1188 static enum sci_status sci_remote_device_start(struct isci_remote_device *idev, 1234 static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
1189 u32 timeout) 1235 u32 timeout)
1190 { 1236 {
1191 struct sci_base_state_machine *sm = &idev->sm; 1237 struct sci_base_state_machine *sm = &idev->sm;
1192 enum sci_remote_device_states state = sm->current_state_id; 1238 enum sci_remote_device_states state = sm->current_state_id;
1193 enum sci_status status; 1239 enum sci_status status;
1194 1240
1195 if (state != SCI_DEV_STOPPED) { 1241 if (state != SCI_DEV_STOPPED) {
1196 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", 1242 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
1197 __func__, dev_state_name(state)); 1243 __func__, dev_state_name(state));
1198 return SCI_FAILURE_INVALID_STATE; 1244 return SCI_FAILURE_INVALID_STATE;
1199 } 1245 }
1200 1246
1201 status = sci_remote_node_context_resume(&idev->rnc, 1247 status = sci_remote_node_context_resume(&idev->rnc,
1202 remote_device_resume_done, 1248 remote_device_resume_done,
1203 idev); 1249 idev);
1204 if (status != SCI_SUCCESS) 1250 if (status != SCI_SUCCESS)
1205 return status; 1251 return status;
1206 1252
1207 sci_change_state(sm, SCI_DEV_STARTING); 1253 sci_change_state(sm, SCI_DEV_STARTING);
1208 1254
1209 return SCI_SUCCESS; 1255 return SCI_SUCCESS;
1210 } 1256 }
1211 1257
1212 static enum sci_status isci_remote_device_construct(struct isci_port *iport, 1258 static enum sci_status isci_remote_device_construct(struct isci_port *iport,
1213 struct isci_remote_device *idev) 1259 struct isci_remote_device *idev)
1214 { 1260 {
1215 struct isci_host *ihost = iport->isci_host; 1261 struct isci_host *ihost = iport->isci_host;
1216 struct domain_device *dev = idev->domain_dev; 1262 struct domain_device *dev = idev->domain_dev;
1217 enum sci_status status; 1263 enum sci_status status;
1218 1264
1219 if (dev->parent && dev_is_expander(dev->parent)) 1265 if (dev->parent && dev_is_expander(dev->parent))
1220 status = sci_remote_device_ea_construct(iport, idev); 1266 status = sci_remote_device_ea_construct(iport, idev);
1221 else 1267 else
1222 status = sci_remote_device_da_construct(iport, idev); 1268 status = sci_remote_device_da_construct(iport, idev);
1223 1269
1224 if (status != SCI_SUCCESS) { 1270 if (status != SCI_SUCCESS) {
1225 dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n", 1271 dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n",
1226 __func__, status); 1272 __func__, status);
1227 1273
1228 return status; 1274 return status;
1229 } 1275 }
1230 1276
1231 /* start the device. */ 1277 /* start the device. */
1232 status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT); 1278 status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT);
1233 1279
1234 if (status != SCI_SUCCESS) 1280 if (status != SCI_SUCCESS)
1235 dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n", 1281 dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n",
1236 status); 1282 status);
1237 1283
1238 return status; 1284 return status;
1239 } 1285 }
1240 1286
1241 void isci_remote_device_nuke_requests(struct isci_host *ihost, struct isci_remote_device *idev) 1287 void isci_remote_device_nuke_requests(struct isci_host *ihost, struct isci_remote_device *idev)
1242 { 1288 {
1243 DECLARE_COMPLETION_ONSTACK(aborted_task_completion); 1289 DECLARE_COMPLETION_ONSTACK(aborted_task_completion);
1244 1290
1245 dev_dbg(&ihost->pdev->dev, 1291 dev_dbg(&ihost->pdev->dev,
1246 "%s: idev = %p\n", __func__, idev); 1292 "%s: idev = %p\n", __func__, idev);
1247 1293
1248 /* Cleanup all requests pending for this device. */ 1294 /* Cleanup all requests pending for this device. */
1249 isci_terminate_pending_requests(ihost, idev); 1295 isci_terminate_pending_requests(ihost, idev);
1250 1296
1251 dev_dbg(&ihost->pdev->dev, 1297 dev_dbg(&ihost->pdev->dev,
1252 "%s: idev = %p, done\n", __func__, idev); 1298 "%s: idev = %p, done\n", __func__, idev);
1253 } 1299 }
1254 1300
1255 /** 1301 /**
1256 * This function builds the isci_remote_device when a libsas dev_found message 1302 * This function builds the isci_remote_device when a libsas dev_found message
1257 * is received. 1303 * is received.
1258 * @isci_host: This parameter specifies the isci host object. 1304 * @isci_host: This parameter specifies the isci host object.
1259 * @port: This parameter specifies the isci_port connected to this device. 1305 * @port: This parameter specifies the isci_port connected to this device.
1260 * 1306 *
1261 * pointer to new isci_remote_device. 1307 * pointer to new isci_remote_device.
1262 */ 1308 */
1263 static struct isci_remote_device * 1309 static struct isci_remote_device *
1264 isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport) 1310 isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
1265 { 1311 {
1266 struct isci_remote_device *idev; 1312 struct isci_remote_device *idev;
1267 int i; 1313 int i;
1268 1314
1269 for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) { 1315 for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
1270 idev = &ihost->devices[i]; 1316 idev = &ihost->devices[i];
1271 if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags)) 1317 if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags))
1272 break; 1318 break;
1273 } 1319 }
1274 1320
1275 if (i >= SCI_MAX_REMOTE_DEVICES) { 1321 if (i >= SCI_MAX_REMOTE_DEVICES) {
1276 dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__); 1322 dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
1277 return NULL; 1323 return NULL;
1278 } 1324 }
1279 1325
1280 if (WARN_ONCE(!list_empty(&idev->reqs_in_process), "found requests in process\n")) 1326 if (WARN_ONCE(!list_empty(&idev->reqs_in_process), "found requests in process\n"))
1281 return NULL; 1327 return NULL;
1282 1328
1283 if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n")) 1329 if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
1284 return NULL; 1330 return NULL;
1285 1331
1286 return idev; 1332 return idev;
1287 } 1333 }
1288 1334
1289 void isci_remote_device_release(struct kref *kref) 1335 void isci_remote_device_release(struct kref *kref)
1290 { 1336 {
1291 struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref); 1337 struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref);
1292 struct isci_host *ihost = idev->isci_port->isci_host; 1338 struct isci_host *ihost = idev->isci_port->isci_host;
1293 1339
1294 idev->domain_dev = NULL; 1340 idev->domain_dev = NULL;
1295 idev->isci_port = NULL; 1341 idev->isci_port = NULL;
1296 clear_bit(IDEV_START_PENDING, &idev->flags); 1342 clear_bit(IDEV_START_PENDING, &idev->flags);
1297 clear_bit(IDEV_STOP_PENDING, &idev->flags); 1343 clear_bit(IDEV_STOP_PENDING, &idev->flags);
1298 clear_bit(IDEV_IO_READY, &idev->flags); 1344 clear_bit(IDEV_IO_READY, &idev->flags);
1299 clear_bit(IDEV_GONE, &idev->flags); 1345 clear_bit(IDEV_GONE, &idev->flags);
1300 smp_mb__before_clear_bit(); 1346 smp_mb__before_clear_bit();
1301 clear_bit(IDEV_ALLOCATED, &idev->flags); 1347 clear_bit(IDEV_ALLOCATED, &idev->flags);
1302 wake_up(&ihost->eventq); 1348 wake_up(&ihost->eventq);
1303 } 1349 }
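isci_remote_device_alloc() and isci_remote_device_release() above implement a lock-free slot pool: test_and_set_bit(IDEV_ALLOCATED) claims a pre-allocated entry, and the release path clears the bit only after a barrier so the reset fields are visible before the slot can be re-claimed. The same pattern in miniature, with C11 atomics standing in for the kernel bit operations (the slot/pool names are hypothetical):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define POOL_SIZE 4

/* A slot plays the role of a pre-allocated isci_remote_device entry;
 * 'allocated' is the analogue of the IDEV_ALLOCATED flag bit. */
struct slot {
	atomic_bool allocated;
	int payload;
};

static struct slot pool[POOL_SIZE];	/* zero-initialized: all free */

static struct slot *slot_alloc(void)
{
	for (int i = 0; i < POOL_SIZE; i++)
		/* like test_and_set_bit(): only one caller wins a slot */
		if (!atomic_exchange(&pool[i].allocated, true))
			return &pool[i];
	return NULL;			/* pool exhausted */
}

static void slot_release(struct slot *s)
{
	s->payload = 0;
	/* Release ordering makes the payload reset visible before the
	 * slot can be re-claimed: the counterpart of the driver's
	 * smp_mb__before_clear_bit() + clear_bit() pairing. */
	atomic_store_explicit(&s->allocated, false, memory_order_release);
}

int main(void)
{
	struct slot *s = slot_alloc();

	if (!s)
		return 1;
	s->payload = 42;
	slot_release(s);
	printf("slot recycled\n");
	return 0;
}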
1304 1350
1305 /** 1351 /**
1306 * isci_remote_device_stop() - This function is called internally to stop the 1352 * isci_remote_device_stop() - This function is called internally to stop the
1307 * remote device. 1353 * remote device.
1308 * @isci_host: This parameter specifies the isci host object. 1354 * @isci_host: This parameter specifies the isci host object.
1309 * @isci_device: This parameter specifies the remote device. 1355 * @isci_device: This parameter specifies the remote device.
1310 * 1356 *
1311 * The status of the ihost request to stop. 1357 * The status of the ihost request to stop.
1312 */ 1358 */
1313 enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev) 1359 enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
1314 { 1360 {
1315 enum sci_status status; 1361 enum sci_status status;
1316 unsigned long flags; 1362 unsigned long flags;
1317 1363
1318 dev_dbg(&ihost->pdev->dev, 1364 dev_dbg(&ihost->pdev->dev,
1319 "%s: isci_device = %p\n", __func__, idev); 1365 "%s: isci_device = %p\n", __func__, idev);
1320 1366
1321 spin_lock_irqsave(&ihost->scic_lock, flags); 1367 spin_lock_irqsave(&ihost->scic_lock, flags);
1322 idev->domain_dev->lldd_dev = NULL; /* disable new lookups */ 1368 idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
1323 set_bit(IDEV_GONE, &idev->flags); 1369 set_bit(IDEV_GONE, &idev->flags);
1324 spin_unlock_irqrestore(&ihost->scic_lock, flags); 1370 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1325 1371
1326 /* Kill all outstanding requests. */ 1372 /* Kill all outstanding requests. */
1327 isci_remote_device_nuke_requests(ihost, idev); 1373 isci_remote_device_nuke_requests(ihost, idev);
1328 1374
1329 set_bit(IDEV_STOP_PENDING, &idev->flags); 1375 set_bit(IDEV_STOP_PENDING, &idev->flags);
1330 1376
1331 spin_lock_irqsave(&ihost->scic_lock, flags); 1377 spin_lock_irqsave(&ihost->scic_lock, flags);
1332 status = sci_remote_device_stop(idev, 50); 1378 status = sci_remote_device_stop(idev, 50);
1333 spin_unlock_irqrestore(&ihost->scic_lock, flags); 1379 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1334 1380
1335 /* Wait for the stop complete callback. */ 1381 /* Wait for the stop complete callback. */
1336 if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n")) 1382 if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n"))
1337 /* nothing to wait for */; 1383 /* nothing to wait for */;
1338 else 1384 else
1339 wait_for_device_stop(ihost, idev); 1385 wait_for_device_stop(ihost, idev);
1340 1386
1341 return status; 1387 return status;
1342 } 1388 }
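The stop path above is a flag-and-wait handshake: IDEV_STOP_PENDING is set, sci_remote_device_stop() is issued under the lock, and the caller blocks in wait_for_device_stop() until the stopped-state callback clears the flag and wakes ihost->eventq. Reduced to a pthread condition variable for illustration (the names below are hypothetical; the driver uses a wait queue):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t eventq = PTHREAD_COND_INITIALIZER;
static bool stop_pending;

/* Runs in "completion" context once the device reaches STOPPED. */
static void *device_stopped_callback(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&lock);
	stop_pending = false;	/* analogue of clearing IDEV_STOP_PENDING */
	pthread_cond_broadcast(&eventq);
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void wait_for_device_stop(void)
{
	pthread_mutex_lock(&lock);
	while (stop_pending)	/* re-check: wakeups can be spurious */
		pthread_cond_wait(&eventq, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	stop_pending = true;	/* set IDEV_STOP_PENDING, then stop */
	pthread_create(&t, NULL, device_stopped_callback, NULL);
	wait_for_device_stop();	/* returns once the callback fires */
	pthread_join(&t, NULL);
	printf("device stop complete\n");
	return 0;
}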
1343 1389
1344 /** 1390 /**
1345 * isci_remote_device_gone() - This function is called by libsas when a domain 1391 * isci_remote_device_gone() - This function is called by libsas when a domain
1346 * device is removed. 1392 * device is removed.
1347 * @domain_device: This parameter specifies the libsas domain device. 1393 * @domain_device: This parameter specifies the libsas domain device.
1348 * 1394 *
1349 */ 1395 */
1350 void isci_remote_device_gone(struct domain_device *dev) 1396 void isci_remote_device_gone(struct domain_device *dev)
1351 { 1397 {
1352 struct isci_host *ihost = dev_to_ihost(dev); 1398 struct isci_host *ihost = dev_to_ihost(dev);
1353 struct isci_remote_device *idev = dev->lldd_dev; 1399 struct isci_remote_device *idev = dev->lldd_dev;
1354 1400
1355 dev_dbg(&ihost->pdev->dev, 1401 dev_dbg(&ihost->pdev->dev,
1356 "%s: domain_device = %p, isci_device = %p, isci_port = %p\n", 1402 "%s: domain_device = %p, isci_device = %p, isci_port = %p\n",
1357 __func__, dev, idev, idev->isci_port); 1403 __func__, dev, idev, idev->isci_port);
1358 1404
1359 isci_remote_device_stop(ihost, idev); 1405 isci_remote_device_stop(ihost, idev);
1360 } 1406 }
1361 1407
1362 1408
1363 /** 1409 /**
1364 * isci_remote_device_found() - This function is called by libsas when a remote 1410 * isci_remote_device_found() - This function is called by libsas when a remote
1365 * device is discovered. A remote device object is created and started. The 1411 * device is discovered. A remote device object is created and started. The
1366 * function then sleeps until the sci core device started message is 1412 * function then sleeps until the sci core device started message is
1367 * received. 1413 * received.
1368 * @domain_device: This parameter specifies the libsas domain device. 1414 * @domain_device: This parameter specifies the libsas domain device.
1369 * 1415 *
1370 * status, zero indicates success. 1416 * status, zero indicates success.
1371 */ 1417 */
1372 int isci_remote_device_found(struct domain_device *dev) 1418 int isci_remote_device_found(struct domain_device *dev)
1373 { 1419 {
1374 struct isci_host *isci_host = dev_to_ihost(dev); 1420 struct isci_host *isci_host = dev_to_ihost(dev);
1375 struct isci_port *isci_port = dev->port->lldd_port; 1421 struct isci_port *isci_port = dev->port->lldd_port;
1376 struct isci_remote_device *isci_device; 1422 struct isci_remote_device *isci_device;
1377 enum sci_status status; 1423 enum sci_status status;
1378 1424
1379 dev_dbg(&isci_host->pdev->dev, 1425 dev_dbg(&isci_host->pdev->dev,
1380 "%s: domain_device = %p\n", __func__, dev); 1426 "%s: domain_device = %p\n", __func__, dev);
1381 1427
1382 if (!isci_port) 1428 if (!isci_port)
1383 return -ENODEV; 1429 return -ENODEV;
1384 1430
1385 isci_device = isci_remote_device_alloc(isci_host, isci_port); 1431 isci_device = isci_remote_device_alloc(isci_host, isci_port);
1386 if (!isci_device) 1432 if (!isci_device)
1387 return -ENODEV; 1433 return -ENODEV;
1388 1434
1389 kref_init(&isci_device->kref); 1435 kref_init(&isci_device->kref);
1390 INIT_LIST_HEAD(&isci_device->node); 1436 INIT_LIST_HEAD(&isci_device->node);
1391 1437
1392 spin_lock_irq(&isci_host->scic_lock); 1438 spin_lock_irq(&isci_host->scic_lock);
1393 isci_device->domain_dev = dev; 1439 isci_device->domain_dev = dev;
1394 isci_device->isci_port = isci_port; 1440 isci_device->isci_port = isci_port;
1395 list_add_tail(&isci_device->node, &isci_port->remote_dev_list); 1441 list_add_tail(&isci_device->node, &isci_port->remote_dev_list);
1396 1442
1397 set_bit(IDEV_START_PENDING, &isci_device->flags); 1443 set_bit(IDEV_START_PENDING, &isci_device->flags);
1398 status = isci_remote_device_construct(isci_port, isci_device); 1444 status = isci_remote_device_construct(isci_port, isci_device);
1399 1445
1400 dev_dbg(&isci_host->pdev->dev, 1446 dev_dbg(&isci_host->pdev->dev,
1401 "%s: isci_device = %p\n", 1447 "%s: isci_device = %p\n",
1402 __func__, isci_device); 1448 __func__, isci_device);
1403 1449
1404 if (status == SCI_SUCCESS) { 1450 if (status == SCI_SUCCESS) {
1405 /* device came up, advertise it to the world */ 1451 /* device came up, advertise it to the world */
1406 dev->lldd_dev = isci_device; 1452 dev->lldd_dev = isci_device;
1407 } else 1453 } else
1408 isci_put_device(isci_device); 1454 isci_put_device(isci_device);
1409 spin_unlock_irq(&isci_host->scic_lock); 1455 spin_unlock_irq(&isci_host->scic_lock);
1410 1456
1411 /* wait for the device ready callback. */ 1457 /* wait for the device ready callback. */
1412 wait_for_device_start(isci_host, isci_device); 1458 wait_for_device_start(isci_host, isci_device);
1413 1459
1414 return status == SCI_SUCCESS ? 0 : -ENODEV; 1460 return status == SCI_SUCCESS ? 0 : -ENODEV;
1415 } 1461 }
1462
1463 enum sci_status isci_remote_device_reset(
1464 struct isci_remote_device *idev)
1465 {
1466 struct isci_host *ihost = dev_to_ihost(idev->domain_dev);
1467 unsigned long flags;
1468 enum sci_status status;
1469
1470 /* Wait for the device suspend. */
1471 status = isci_remote_device_suspend(ihost, idev);
1472 if (status != SCI_SUCCESS) {
1473 dev_dbg(&ihost->pdev->dev,
1474 "%s: isci_remote_device_suspend(%p) returned %d!\n",
1475 __func__, idev, status);
1476 return status;
1477 }
1478 spin_lock_irqsave(&ihost->scic_lock, flags);
1479 status = sci_remote_device_reset(idev);
1480 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1481 if (status != SCI_SUCCESS) {
1482 dev_dbg(&ihost->pdev->dev,
1483 "%s: sci_remote_device_reset(%p) returned %d!\n",
1484 __func__, idev, status);
1485 }
1486 return status;
1487 }
1488
1489 int isci_remote_device_is_safe_to_abort(
1490 struct isci_remote_device *idev)
1491 {
1492 return sci_remote_node_context_is_safe_to_abort(&idev->rnc);
1493 }
drivers/scsi/isci/remote_device.h
1 /* 1 /*
2 * This file is provided under a dual BSD/GPLv2 license. When using or 2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license. 3 * redistributing this file, you may do so under either license.
4 * 4 *
5 * GPL LICENSE SUMMARY 5 * GPL LICENSE SUMMARY
6 * 6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as 10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 * 12 *
13 * This program is distributed in the hope that it will be useful, but 13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of 14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details. 16 * General Public License for more details.
17 * 17 *
18 * You should have received a copy of the GNU General Public License 18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software 19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL. 22 * in the file called LICENSE.GPL.
23 * 23 *
24 * BSD LICENSE 24 * BSD LICENSE
25 * 25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved. 27 * All rights reserved.
28 * 28 *
29 * Redistribution and use in source and binary forms, with or without 29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions 30 * modification, are permitted provided that the following conditions
31 * are met: 31 * are met:
32 * 32 *
33 * * Redistributions of source code must retain the above copyright 33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer. 34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright 35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in 36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the 37 * the documentation and/or other materials provided with the
38 * distribution. 38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its 39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived 40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission. 41 * from this software without specific prior written permission.
42 * 42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */ 54 */
55 55
56 #ifndef _ISCI_REMOTE_DEVICE_H_ 56 #ifndef _ISCI_REMOTE_DEVICE_H_
57 #define _ISCI_REMOTE_DEVICE_H_ 57 #define _ISCI_REMOTE_DEVICE_H_
58 #include <scsi/libsas.h> 58 #include <scsi/libsas.h>
59 #include <linux/kref.h> 59 #include <linux/kref.h>
60 #include "scu_remote_node_context.h" 60 #include "scu_remote_node_context.h"
61 #include "remote_node_context.h" 61 #include "remote_node_context.h"
62 #include "port.h" 62 #include "port.h"
63 63
64 enum sci_remote_device_not_ready_reason_code { 64 enum sci_remote_device_not_ready_reason_code {
65 SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED, 65 SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED,
66 SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED, 66 SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED,
67 SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED, 67 SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED,
68 SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED, 68 SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED,
69 SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED, 69 SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED,
70 SCIC_REMOTE_DEVICE_NOT_READY_REASON_CODE_MAX 70 SCIC_REMOTE_DEVICE_NOT_READY_REASON_CODE_MAX
71 }; 71 };
72 72
73 /** 73 /**
74 * isci_remote_device - isci representation of a sas expander / end point 74 * isci_remote_device - isci representation of a sas expander / end point
75 * @device_port_width: hw setting for number of simultaneous connections 75 * @device_port_width: hw setting for number of simultaneous connections
76 * @connection_rate: per-taskcontext connection rate for this device 76 * @connection_rate: per-taskcontext connection rate for this device
77 * @working_request: SATA requests have no tag; for unaccelerated 77 * @working_request: SATA requests have no tag; for unaccelerated
78 * protocols we need a method to associate unsolicited 78 * protocols we need a method to associate unsolicited
79 * frames with a pending request 79 * frames with a pending request
80 */ 80 */
81 struct isci_remote_device { 81 struct isci_remote_device {
82 #define IDEV_START_PENDING 0 82 #define IDEV_START_PENDING 0
83 #define IDEV_STOP_PENDING 1 83 #define IDEV_STOP_PENDING 1
84 #define IDEV_ALLOCATED 2 84 #define IDEV_ALLOCATED 2
85 #define IDEV_GONE 3 85 #define IDEV_GONE 3
86 #define IDEV_IO_READY 4 86 #define IDEV_IO_READY 4
87 #define IDEV_IO_NCQERROR 5 87 #define IDEV_IO_NCQERROR 5
88 #define IDEV_TXRX_SUSPENDED 6
88 unsigned long flags; 89 unsigned long flags;
89 struct kref kref; 90 struct kref kref;
90 struct isci_port *isci_port; 91 struct isci_port *isci_port;
91 struct domain_device *domain_dev; 92 struct domain_device *domain_dev;
92 struct list_head node; 93 struct list_head node;
93 struct list_head reqs_in_process; 94 struct list_head reqs_in_process;
94 struct sci_base_state_machine sm; 95 struct sci_base_state_machine sm;
95 u32 device_port_width; 96 u32 device_port_width;
96 enum sas_linkrate connection_rate; 97 enum sas_linkrate connection_rate;
97 struct isci_port *owning_port; 98 struct isci_port *owning_port;
98 struct sci_remote_node_context rnc; 99 struct sci_remote_node_context rnc;
99 /* XXX unify with device reference counting and delete */ 100 /* XXX unify with device reference counting and delete */
100 u32 started_request_count; 101 u32 started_request_count;
101 struct isci_request *working_request; 102 struct isci_request *working_request;
102 u32 not_ready_reason; 103 u32 not_ready_reason;
103 }; 104 };
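Note: the IDEV_* values above are bit numbers for the kernel's atomic bitops, not masks; the new IDEV_TXRX_SUSPENDED bit is how the rest of the driver observes that the RNC has reached the TX/RX-suspended state. A minimal sketch of the usual pattern (the helper name here is hypothetical, not part of the patch):

	static inline bool isci_dev_txrx_suspended(struct isci_remote_device *idev)
	{
		/* set_bit()/clear_bit()/test_bit() all take a bit number */
		return test_bit(IDEV_TXRX_SUSPENDED, &idev->flags);
	}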
104 105
105 #define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000 106 #define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000
106 107
107 /* device reference routines must be called under sci_lock */ 108 /* device reference routines must be called under sci_lock */
108 static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev) 109 static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev)
109 { 110 {
110 struct isci_remote_device *idev = dev->lldd_dev; 111 struct isci_remote_device *idev = dev->lldd_dev;
111 112
112 if (idev && !test_bit(IDEV_GONE, &idev->flags)) { 113 if (idev && !test_bit(IDEV_GONE, &idev->flags)) {
113 kref_get(&idev->kref); 114 kref_get(&idev->kref);
114 return idev; 115 return idev;
115 } 116 }
116 117
117 return NULL; 118 return NULL;
118 } 119 }
119 120
120 void isci_remote_device_release(struct kref *kref); 121 void isci_remote_device_release(struct kref *kref);
121 static inline void isci_put_device(struct isci_remote_device *idev) 122 static inline void isci_put_device(struct isci_remote_device *idev)
122 { 123 {
123 if (idev) 124 if (idev)
124 kref_put(&idev->kref, isci_remote_device_release); 125 kref_put(&idev->kref, isci_remote_device_release);
125 } 126 }
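isci_lookup_device() takes a kref under the host lock and isci_put_device() may drop the final reference, so every successful lookup must be paired with a put. A hypothetical caller sketch (assuming the host lock is ihost->scic_lock, per the comment above):

	unsigned long flags;
	struct isci_remote_device *idev;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	idev = isci_lookup_device(dev);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (idev) {
		/* ... use the device ... */
		isci_put_device(idev);	/* may call isci_remote_device_release() */
	}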
126 127
127 enum sci_status isci_remote_device_stop(struct isci_host *ihost, 128 enum sci_status isci_remote_device_stop(struct isci_host *ihost,
128 struct isci_remote_device *idev); 129 struct isci_remote_device *idev);
129 void isci_remote_device_nuke_requests(struct isci_host *ihost, 130 void isci_remote_device_nuke_requests(struct isci_host *ihost,
130 struct isci_remote_device *idev); 131 struct isci_remote_device *idev);
131 void isci_remote_device_gone(struct domain_device *domain_dev); 132 void isci_remote_device_gone(struct domain_device *domain_dev);
132 int isci_remote_device_found(struct domain_device *domain_dev); 133 int isci_remote_device_found(struct domain_device *domain_dev);
133 134
134 /** 135 /**
135 * sci_remote_device_stop() - This method will stop both transmission and 136 * sci_remote_device_stop() - This method will stop both transmission and
136 * reception of link activity for the supplied remote device. This method 137 * reception of link activity for the supplied remote device. This method
137 * disables normal IO requests from flowing through to the remote device. 138 * disables normal IO requests from flowing through to the remote device.
138 * @remote_device: This parameter specifies the device to be stopped. 139 * @remote_device: This parameter specifies the device to be stopped.
139 * @timeout: This parameter specifies the number of milliseconds in which the 140 * @timeout: This parameter specifies the number of milliseconds in which the
140 * stop operation should complete. 141 * stop operation should complete.
141 * 142 *
142 * An indication of whether the device was successfully stopped. SCI_SUCCESS 143 * An indication of whether the device was successfully stopped. SCI_SUCCESS
143 * This value is returned if transmission and reception for the device were 144 * This value is returned if transmission and reception for the device were
144 * successfully stopped. 145 * successfully stopped.
145 */ 146 */
146 enum sci_status sci_remote_device_stop( 147 enum sci_status sci_remote_device_stop(
147 struct isci_remote_device *idev, 148 struct isci_remote_device *idev,
148 u32 timeout); 149 u32 timeout);
149 150
150 /** 151 /**
151 * sci_remote_device_reset() - This method will reset the device making it 152 * sci_remote_device_reset() - This method will reset the device making it
152 * ready for operation. This method must be called anytime the device is 153 * ready for operation. This method must be called anytime the device is
153 * reset either through an SMP phy control or a port hard reset request. 154 * reset either through an SMP phy control or a port hard reset request.
154 * @remote_device: This parameter specifies the device to be reset. 155 * @remote_device: This parameter specifies the device to be reset.
155 * 156 *
156 * This method does not actually cause the device hardware to be reset. This 157 * This method does not actually cause the device hardware to be reset. This
157 * method resets the software object so that it will be operational after a 158 * method resets the software object so that it will be operational after a
158 * device hardware reset completes. An indication of whether the device reset 159 * device hardware reset completes. An indication of whether the device reset
159 * was accepted. SCI_SUCCESS This value is returned if the device reset is 160 * was accepted. SCI_SUCCESS This value is returned if the device reset is
160 * started. 161 * started.
161 */ 162 */
162 enum sci_status sci_remote_device_reset( 163 enum sci_status sci_remote_device_reset(
163 struct isci_remote_device *idev); 164 struct isci_remote_device *idev);
164 165
165 /** 166 /**
166 * sci_remote_device_reset_complete() - This method informs the device object 167 * sci_remote_device_reset_complete() - This method informs the device object
167 * that the reset operation is complete and the device can resume operation 168 * that the reset operation is complete and the device can resume operation
168 * again. 169 * again.
169 * @remote_device: This parameter specifies the device which is to be informed 170 * @remote_device: This parameter specifies the device which is to be informed
170 * of the reset complete operation. 171 * of the reset complete operation.
171 * 172 *
172 * An indication that the device is resuming operation. SCI_SUCCESS the device 173 * An indication that the device is resuming operation. SCI_SUCCESS the device
173 * is resuming operation. 174 * is resuming operation.
174 */ 175 */
175 enum sci_status sci_remote_device_reset_complete( 176 enum sci_status sci_remote_device_reset_complete(
176 struct isci_remote_device *idev); 177 struct isci_remote_device *idev);
177 178
178 /** 179 /**
179 * enum sci_remote_device_states - This enumeration depicts all the states 180 * enum sci_remote_device_states - This enumeration depicts all the states
180 * for the common remote device state machine. 181 * for the common remote device state machine.
181 * @SCI_DEV_INITIAL: Simply the initial state for the base remote device 182 * @SCI_DEV_INITIAL: Simply the initial state for the base remote device
182 * state machine. 183 * state machine.
183 * 184 *
184 * @SCI_DEV_STOPPED: This state indicates that the remote device has 185 * @SCI_DEV_STOPPED: This state indicates that the remote device has
185 * successfully been stopped. In this state no new IO operations are 186 * successfully been stopped. In this state no new IO operations are
186 * permitted. This state is entered from the INITIAL state. This state 187 * permitted. This state is entered from the INITIAL state. This state
187 * is entered from the STOPPING state. 188 * is entered from the STOPPING state.
188 * 189 *
189 * @SCI_DEV_STARTING: This state indicates that the remote device is in 190 * @SCI_DEV_STARTING: This state indicates that the remote device is in
190 * the process of becoming ready (i.e. starting). In this state no new 191 * the process of becoming ready (i.e. starting). In this state no new
191 * IO operations are permitted. This state is entered from the STOPPED 192 * IO operations are permitted. This state is entered from the STOPPED
192 * state. 193 * state.
193 * 194 *
194 * @SCI_DEV_READY: This state indicates the remote device is now ready. 195 * @SCI_DEV_READY: This state indicates the remote device is now ready.
195 * Thus, the user is able to perform IO operations on the remote device. 196 * Thus, the user is able to perform IO operations on the remote device.
196 * This state is entered from the STARTING state. 197 * This state is entered from the STARTING state.
197 * 198 *
198 * @SCI_STP_DEV_IDLE: This is the idle substate for the stp remote 199 * @SCI_STP_DEV_IDLE: This is the idle substate for the stp remote
199 * device. When there are no active IOs for the device it is in this 200 * device. When there are no active IOs for the device it is in this
200 * state. 201 * state.
201 * 202 *
202 * @SCI_STP_DEV_CMD: This is the command state for the STP remote 203 * @SCI_STP_DEV_CMD: This is the command state for the STP remote
203 * device. This state is entered when the device is processing a 204 * device. This state is entered when the device is processing a
204 * non-NCQ command. The device object will fail any new start IO 205 * non-NCQ command. The device object will fail any new start IO
205 * requests until this command is complete. 206 * requests until this command is complete.
206 * 207 *
207 * @SCI_STP_DEV_NCQ: This is the NCQ state for the STP remote device. 208 * @SCI_STP_DEV_NCQ: This is the NCQ state for the STP remote device.
208 * This state is entered when the device is processing an NCQ request. 209 * This state is entered when the device is processing an NCQ request.
209 * It will remain in this state so long as there is one or more NCQ 210 * It will remain in this state so long as there is one or more NCQ
210 * requests being processed. 211 * requests being processed.
211 * 212 *
212 * @SCI_STP_DEV_NCQ_ERROR: This is the NCQ error state for the STP 213 * @SCI_STP_DEV_NCQ_ERROR: This is the NCQ error state for the STP
213 * remote device. This state is entered when an SDB error FIS is 214 * remote device. This state is entered when an SDB error FIS is
214 * received by the device object while in the NCQ state. The device 215 * received by the device object while in the NCQ state. The device
215 * object will only accept a READ LOG command while in this state. 216 * object will only accept a READ LOG command while in this state.
216 * 217 *
217 * @SCI_STP_DEV_ATAPI_ERROR: This is the ATAPI error state for the STP 218 * @SCI_STP_DEV_ATAPI_ERROR: This is the ATAPI error state for the STP
218 * ATAPI remote device. This state is entered when the ATAPI device 219 * ATAPI remote device. This state is entered when the ATAPI device
219 * sends an error status FIS without data while the device object is in 220 * sends an error status FIS without data while the device object is in
220 * the CMD state. A suspension event is expected in this state. The device 221 * the CMD state. A suspension event is expected in this state. The device
221 * object will resume right away. 222 * object will resume right away.
222 * 223 *
223 * @SCI_STP_DEV_AWAIT_RESET: This READY substate indicates that the 224 * @SCI_STP_DEV_AWAIT_RESET: This READY substate indicates that the
224 * device is waiting for a RESET task in order to recover from a 225 * device is waiting for a RESET task in order to recover from a
225 * certain hardware-specific error. 226 * certain hardware-specific error.
226 * 227 *
227 * @SCI_SMP_DEV_IDLE: This is the ready operational substate for the 228 * @SCI_SMP_DEV_IDLE: This is the ready operational substate for the
228 * remote device. This is the normal operational state for a remote 229 * remote device. This is the normal operational state for a remote
229 * device. 230 * device.
230 * 231 *
231 * @SCI_SMP_DEV_CMD: This is the suspended state for the remote device. 232 * @SCI_SMP_DEV_CMD: This is the suspended state for the remote device.
232 * This is the state that the device is placed in when an RNC suspend is 233 * This is the state that the device is placed in when an RNC suspend is
233 * received by the SCU hardware. 234 * received by the SCU hardware.
234 * 235 *
235 * @SCI_DEV_STOPPING: This state indicates that the remote device is in 236 * @SCI_DEV_STOPPING: This state indicates that the remote device is in
236 * the process of stopping. In this state no new IO operations are 237 * the process of stopping. In this state no new IO operations are
237 * permitted, but existing IO operations are allowed to complete. This 238 * permitted, but existing IO operations are allowed to complete. This
238 * state is entered from the READY state. This state is entered from 239 * state is entered from the READY state. This state is entered from
239 * the FAILED state. 240 * the FAILED state.
240 * 241 *
241 * @SCI_DEV_FAILED: This state indicates that the remote device has 242 * @SCI_DEV_FAILED: This state indicates that the remote device has
242 * failed. In this state no new IO operations are permitted. This 243 * failed. In this state no new IO operations are permitted. This
243 * state is entered from the INITIALIZING state. This state is entered 244 * state is entered from the INITIALIZING state. This state is entered
244 * from the READY state. 245 * from the READY state.
245 * 246 *
246 * @SCI_DEV_RESETTING: This state indicates the device is being reset. 247 * @SCI_DEV_RESETTING: This state indicates the device is being reset.
247 * In this state no new IO operations are permitted. This state is 248 * In this state no new IO operations are permitted. This state is
248 * entered from the READY state. 249 * entered from the READY state.
249 * 250 *
250 * @SCI_DEV_FINAL: Simply the final state for the base remote device 251 * @SCI_DEV_FINAL: Simply the final state for the base remote device
251 * state machine. 252 * state machine.
252 */ 253 */
253 #define REMOTE_DEV_STATES {\ 254 #define REMOTE_DEV_STATES {\
254 C(DEV_INITIAL),\ 255 C(DEV_INITIAL),\
255 C(DEV_STOPPED),\ 256 C(DEV_STOPPED),\
256 C(DEV_STARTING),\ 257 C(DEV_STARTING),\
257 C(DEV_READY),\ 258 C(DEV_READY),\
258 C(STP_DEV_IDLE),\ 259 C(STP_DEV_IDLE),\
259 C(STP_DEV_CMD),\ 260 C(STP_DEV_CMD),\
260 C(STP_DEV_NCQ),\ 261 C(STP_DEV_NCQ),\
261 C(STP_DEV_NCQ_ERROR),\ 262 C(STP_DEV_NCQ_ERROR),\
262 C(STP_DEV_ATAPI_ERROR),\ 263 C(STP_DEV_ATAPI_ERROR),\
263 C(STP_DEV_AWAIT_RESET),\ 264 C(STP_DEV_AWAIT_RESET),\
264 C(SMP_DEV_IDLE),\ 265 C(SMP_DEV_IDLE),\
265 C(SMP_DEV_CMD),\ 266 C(SMP_DEV_CMD),\
266 C(DEV_STOPPING),\ 267 C(DEV_STOPPING),\
267 C(DEV_FAILED),\ 268 C(DEV_FAILED),\
268 C(DEV_RESETTING),\ 269 C(DEV_RESETTING),\
269 C(DEV_FINAL),\ 270 C(DEV_FINAL),\
270 } 271 }
271 #undef C 272 #undef C
272 #define C(a) SCI_##a 273 #define C(a) SCI_##a
273 enum sci_remote_device_states REMOTE_DEV_STATES; 274 enum sci_remote_device_states REMOTE_DEV_STATES;
274 #undef C 275 #undef C
275 const char *dev_state_name(enum sci_remote_device_states state); 276 const char *dev_state_name(enum sci_remote_device_states state);
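REMOTE_DEV_STATES is an x-macro list: redefining C() expands the same list once as enumerators (above) and once as strings, which is how dev_state_name() maps an enum value to its name. Illustration only; the array name below is hypothetical:

	#undef C
	#define C(a) (#a)
	static const char * const dev_state_strings[] = REMOTE_DEV_STATES;
	#undef C
	/* dev_state_strings[SCI_DEV_READY] == "DEV_READY" */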
276 277
277 static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc) 278 static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc)
278 { 279 {
279 struct isci_remote_device *idev; 280 struct isci_remote_device *idev;
280 281
281 idev = container_of(rnc, typeof(*idev), rnc); 282 idev = container_of(rnc, typeof(*idev), rnc);
282 283
283 return idev; 284 return idev;
284 } 285 }
285 286
286 static inline bool dev_is_expander(struct domain_device *dev) 287 static inline bool dev_is_expander(struct domain_device *dev)
287 { 288 {
288 return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV; 289 return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV;
289 } 290 }
290 291
291 static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev) 292 static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev)
292 { 293 {
293 /* XXX delete this voodoo when converting to the top-level device 294 /* XXX delete this voodoo when converting to the top-level device
294 * reference count 295 * reference count
295 */ 296 */
296 if (WARN_ONCE(idev->started_request_count == 0, 297 if (WARN_ONCE(idev->started_request_count == 0,
297 "%s: tried to decrement started_request_count past 0!?", 298 "%s: tried to decrement started_request_count past 0!?",
298 __func__)) 299 __func__))
299 /* pass */; 300 /* pass */;
300 else 301 else
301 idev->started_request_count--; 302 idev->started_request_count--;
302 } 303 }
303 304
304 static inline void isci_dev_set_hang_detection_timeout( 305 static inline void isci_dev_set_hang_detection_timeout(
305 struct isci_remote_device *idev, 306 struct isci_remote_device *idev,
306 u32 timeout) 307 u32 timeout)
307 { 308 {
308 sci_port_set_hang_detection_timeout(idev->owning_port, timeout); 309 sci_port_set_hang_detection_timeout(idev->owning_port, timeout);
309 } 310 }
310 311
311 enum sci_status sci_remote_device_frame_handler( 312 enum sci_status sci_remote_device_frame_handler(
312 struct isci_remote_device *idev, 313 struct isci_remote_device *idev,
313 u32 frame_index); 314 u32 frame_index);
314 315
315 enum sci_status sci_remote_device_event_handler( 316 enum sci_status sci_remote_device_event_handler(
316 struct isci_remote_device *idev, 317 struct isci_remote_device *idev,
317 u32 event_code); 318 u32 event_code);
318 319
319 enum sci_status sci_remote_device_start_io( 320 enum sci_status sci_remote_device_start_io(
320 struct isci_host *ihost, 321 struct isci_host *ihost,
321 struct isci_remote_device *idev, 322 struct isci_remote_device *idev,
322 struct isci_request *ireq); 323 struct isci_request *ireq);
323 324
324 enum sci_status sci_remote_device_start_task( 325 enum sci_status sci_remote_device_start_task(
325 struct isci_host *ihost, 326 struct isci_host *ihost,
326 struct isci_remote_device *idev, 327 struct isci_remote_device *idev,
327 struct isci_request *ireq); 328 struct isci_request *ireq);
328 329
329 enum sci_status sci_remote_device_complete_io( 330 enum sci_status sci_remote_device_complete_io(
330 struct isci_host *ihost, 331 struct isci_host *ihost,
331 struct isci_remote_device *idev, 332 struct isci_remote_device *idev,
332 struct isci_request *ireq); 333 struct isci_request *ireq);
333 334
334 void sci_remote_device_post_request( 335 void sci_remote_device_post_request(
335 struct isci_remote_device *idev, 336 struct isci_remote_device *idev,
336 u32 request); 337 u32 request);
337 338
339 enum sci_status sci_remote_device_terminate_requests(
340 struct isci_remote_device *idev);
341
342 int isci_remote_device_is_safe_to_abort(
343 struct isci_remote_device *idev);
344
345 enum sci_status
346 sci_remote_device_abort_requests_pending_abort(
347 struct isci_remote_device *idev);
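These three new exports let the task-management path honor the rule in the commit summary: task contexts may only be terminated once the RNC is suspended. A hypothetical abort-path sketch (not the driver's actual task code):

	enum sci_status status;

	if (isci_remote_device_is_safe_to_abort(idev))
		status = sci_remote_device_terminate_requests(idev);
	else
		status = SCI_FAILURE_INVALID_STATE;	/* wait for the suspension */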
338 #endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */ 348 #endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */
339 349
drivers/scsi/isci/remote_node_context.c
1 /* 1 /*
2 * This file is provided under a dual BSD/GPLv2 license. When using or 2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license. 3 * redistributing this file, you may do so under either license.
4 * 4 *
5 * GPL LICENSE SUMMARY 5 * GPL LICENSE SUMMARY
6 * 6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as 10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 * 12 *
13 * This program is distributed in the hope that it will be useful, but 13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of 14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details. 16 * General Public License for more details.
17 * 17 *
18 * You should have received a copy of the GNU General Public License 18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software 19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL. 22 * in the file called LICENSE.GPL.
23 * 23 *
24 * BSD LICENSE 24 * BSD LICENSE
25 * 25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved. 27 * All rights reserved.
28 * 28 *
29 * Redistribution and use in source and binary forms, with or without 29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions 30 * modification, are permitted provided that the following conditions
31 * are met: 31 * are met:
32 * 32 *
33 * * Redistributions of source code must retain the above copyright 33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer. 34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright 35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in 36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the 37 * the documentation and/or other materials provided with the
38 * distribution. 38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its 39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived 40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission. 41 * from this software without specific prior written permission.
42 * 42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */ 54 */
55 55
56 #include "host.h" 56 #include "host.h"
57 #include "isci.h" 57 #include "isci.h"
58 #include "remote_device.h" 58 #include "remote_device.h"
59 #include "remote_node_context.h" 59 #include "remote_node_context.h"
60 #include "scu_event_codes.h" 60 #include "scu_event_codes.h"
61 #include "scu_task_context.h" 61 #include "scu_task_context.h"
62 62
63 #undef C 63 #undef C
64 #define C(a) (#a) 64 #define C(a) (#a)
65 const char *rnc_state_name(enum scis_sds_remote_node_context_states state) 65 const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
66 { 66 {
67 static const char * const strings[] = RNC_STATES; 67 static const char * const strings[] = RNC_STATES;
68 68
69 return strings[state]; 69 return strings[state];
70 } 70 }
71 #undef C 71 #undef C
72 72
73 /** 73 /**
74 * 74 *
75 * @sci_rnc: The remote node context object to check. 75 * @sci_rnc: The remote node context object to check.
76 * 76 *
77 * This method will return true if the remote node context is in the READY 77 * This method will return true if the remote node context is in the READY
78 * state; it will return false if the remote node context is not in the 78 * state; it will return false if the remote node context is not in the
79 * READY state. 79 * READY state.
80 */ 80 */
81 bool sci_remote_node_context_is_ready( 81 bool sci_remote_node_context_is_ready(
82 struct sci_remote_node_context *sci_rnc) 82 struct sci_remote_node_context *sci_rnc)
83 { 83 {
84 u32 current_state = sci_rnc->sm.current_state_id; 84 u32 current_state = sci_rnc->sm.current_state_id;
85 85
86 if (current_state == SCI_RNC_READY) { 86 if (current_state == SCI_RNC_READY) {
87 return true; 87 return true;
88 } 88 }
89 89
90 return false; 90 return false;
91 } 91 }
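The predicate above is equivalent to a single comparison; shown only as an illustration of what the state check reduces to:

	return sci_rnc->sm.current_state_id == SCI_RNC_READY;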
92 92
93 static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id) 93 static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id)
94 { 94 {
95 if (id < ihost->remote_node_entries && 95 if (id < ihost->remote_node_entries &&
96 ihost->device_table[id]) 96 ihost->device_table[id])
97 return &ihost->remote_node_context_table[id]; 97 return &ihost->remote_node_context_table[id];
98 98
99 return NULL; 99 return NULL;
100 } 100 }
101 101
102 static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc) 102 static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc)
103 { 103 {
104 struct isci_remote_device *idev = rnc_to_dev(sci_rnc); 104 struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
105 struct domain_device *dev = idev->domain_dev; 105 struct domain_device *dev = idev->domain_dev;
106 int rni = sci_rnc->remote_node_index; 106 int rni = sci_rnc->remote_node_index;
107 union scu_remote_node_context *rnc; 107 union scu_remote_node_context *rnc;
108 struct isci_host *ihost; 108 struct isci_host *ihost;
109 __le64 sas_addr; 109 __le64 sas_addr;
110 110
111 ihost = idev->owning_port->owning_controller; 111 ihost = idev->owning_port->owning_controller;
112 rnc = sci_rnc_by_id(ihost, rni); 112 rnc = sci_rnc_by_id(ihost, rni);
113 113
114 memset(rnc, 0, sizeof(union scu_remote_node_context) 114 memset(rnc, 0, sizeof(union scu_remote_node_context)
115 * sci_remote_device_node_count(idev)); 115 * sci_remote_device_node_count(idev));
116 116
117 rnc->ssp.remote_node_index = rni; 117 rnc->ssp.remote_node_index = rni;
118 rnc->ssp.remote_node_port_width = idev->device_port_width; 118 rnc->ssp.remote_node_port_width = idev->device_port_width;
119 rnc->ssp.logical_port_index = idev->owning_port->physical_port_index; 119 rnc->ssp.logical_port_index = idev->owning_port->physical_port_index;
120 120
121 /* sas address is __be64, context ram format is __le64 */ 121 /* sas address is __be64, context ram format is __le64 */
122 sas_addr = cpu_to_le64(SAS_ADDR(dev->sas_addr)); 122 sas_addr = cpu_to_le64(SAS_ADDR(dev->sas_addr));
123 rnc->ssp.remote_sas_address_hi = upper_32_bits(sas_addr); 123 rnc->ssp.remote_sas_address_hi = upper_32_bits(sas_addr);
124 rnc->ssp.remote_sas_address_lo = lower_32_bits(sas_addr); 124 rnc->ssp.remote_sas_address_lo = lower_32_bits(sas_addr);
125 125
126 rnc->ssp.nexus_loss_timer_enable = true; 126 rnc->ssp.nexus_loss_timer_enable = true;
127 rnc->ssp.check_bit = false; 127 rnc->ssp.check_bit = false;
128 rnc->ssp.is_valid = false; 128 rnc->ssp.is_valid = false;
129 rnc->ssp.is_remote_node_context = true; 129 rnc->ssp.is_remote_node_context = true;
130 rnc->ssp.function_number = 0; 130 rnc->ssp.function_number = 0;
131 131
132 rnc->ssp.arbitration_wait_time = 0; 132 rnc->ssp.arbitration_wait_time = 0;
133 133
134 if (dev_is_sata(dev)) { 134 if (dev_is_sata(dev)) {
135 rnc->ssp.connection_occupancy_timeout = 135 rnc->ssp.connection_occupancy_timeout =
136 ihost->user_parameters.stp_max_occupancy_timeout; 136 ihost->user_parameters.stp_max_occupancy_timeout;
137 rnc->ssp.connection_inactivity_timeout = 137 rnc->ssp.connection_inactivity_timeout =
138 ihost->user_parameters.stp_inactivity_timeout; 138 ihost->user_parameters.stp_inactivity_timeout;
139 } else { 139 } else {
140 rnc->ssp.connection_occupancy_timeout = 140 rnc->ssp.connection_occupancy_timeout =
141 ihost->user_parameters.ssp_max_occupancy_timeout; 141 ihost->user_parameters.ssp_max_occupancy_timeout;
142 rnc->ssp.connection_inactivity_timeout = 142 rnc->ssp.connection_inactivity_timeout =
143 ihost->user_parameters.ssp_inactivity_timeout; 143 ihost->user_parameters.ssp_inactivity_timeout;
144 } 144 }
145 145
146 rnc->ssp.initial_arbitration_wait_time = 0; 146 rnc->ssp.initial_arbitration_wait_time = 0;
147 147
148 /* Open Address Frame Parameters */ 148 /* Open Address Frame Parameters */
149 rnc->ssp.oaf_connection_rate = idev->connection_rate; 149 rnc->ssp.oaf_connection_rate = idev->connection_rate;
150 rnc->ssp.oaf_features = 0; 150 rnc->ssp.oaf_features = 0;
151 rnc->ssp.oaf_source_zone_group = 0; 151 rnc->ssp.oaf_source_zone_group = 0;
152 rnc->ssp.oaf_more_compatibility_features = 0; 152 rnc->ssp.oaf_more_compatibility_features = 0;
153 } 153 }
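The SAS address handling above involves two representations: dev->sas_addr holds wire-order (__be64) bytes, SAS_ADDR() yields a CPU-order u64, and the context RAM wants __le64 split into two 32-bit words. A worked illustration on a little-endian host, with a made-up address:

	/* SAS_ADDR(dev->sas_addr)      == 0x5000c50012345678 (cpu order)
	 * sas_addr = cpu_to_le64(...)  -- a no-op on little-endian hosts
	 * upper_32_bits(sas_addr)      == 0x5000c500 -> remote_sas_address_hi
	 * lower_32_bits(sas_addr)      == 0x12345678 -> remote_sas_address_lo
	 */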
154 154
155 /** 155 /**
156 * 156 *
157 * @sci_rnc: 157 * @sci_rnc:
158 * @callback: 158 * @callback:
159 * @callback_parameter: 159 * @callback_parameter:
160 * 160 *
161 * This method will set up the remote node context object so it will transition 161 * This method will set up the remote node context object so it will transition
162 * to its ready state. If the remote node context is already set up to 162 * to its ready state. If the remote node context is already set up to
163 * transition to its final state then this function does nothing. 163 * transition to its final state then this function does nothing.
164 */ 164 */
165 static void sci_remote_node_context_setup_to_resume( 165 static void sci_remote_node_context_setup_to_resume(
166 struct sci_remote_node_context *sci_rnc, 166 struct sci_remote_node_context *sci_rnc,
167 scics_sds_remote_node_context_callback callback, 167 scics_sds_remote_node_context_callback callback,
168 void *callback_parameter) 168 void *callback_parameter)
169 { 169 {
170 if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) { 170 if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) {
171 sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY; 171 sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY;
172 sci_rnc->user_callback = callback; 172 sci_rnc->user_callback = callback;
173 sci_rnc->user_cookie = callback_parameter; 173 sci_rnc->user_cookie = callback_parameter;
174 } 174 }
175 } 175 }
176 176
177 static void sci_remote_node_context_setup_to_destory( 177 static void sci_remote_node_context_setup_to_destory(
178 struct sci_remote_node_context *sci_rnc, 178 struct sci_remote_node_context *sci_rnc,
179 scics_sds_remote_node_context_callback callback, 179 scics_sds_remote_node_context_callback callback,
180 void *callback_parameter) 180 void *callback_parameter)
181 { 181 {
182 sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL; 182 sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL;
183 sci_rnc->user_callback = callback; 183 sci_rnc->user_callback = callback;
184 sci_rnc->user_cookie = callback_parameter; 184 sci_rnc->user_cookie = callback_parameter;
185 } 185 }
186 186
187 /** 187 /**
188 * 188 *
189 * 189 *
190 * This method just calls the user callback function and then resets the 190 * This method just calls the user callback function and then resets the
191 * callback. 191 * callback.
192 */ 192 */
193 static void sci_remote_node_context_notify_user( 193 static void sci_remote_node_context_notify_user(
194 struct sci_remote_node_context *rnc) 194 struct sci_remote_node_context *rnc)
195 { 195 {
196 if (rnc->user_callback != NULL) { 196 if (rnc->user_callback != NULL) {
197 (*rnc->user_callback)(rnc->user_cookie); 197 (*rnc->user_callback)(rnc->user_cookie);
198 198
199 rnc->user_callback = NULL; 199 rnc->user_callback = NULL;
200 rnc->user_cookie = NULL; 200 rnc->user_cookie = NULL;
201 } 201 }
202 } 202 }
203 203
204 static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc) 204 static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc)
205 { 205 {
206 if (rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY) 206 if (rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
207 sci_remote_node_context_resume(rnc, rnc->user_callback, 207 sci_remote_node_context_resume(rnc, rnc->user_callback,
208 rnc->user_cookie); 208 rnc->user_cookie);
209 } 209 }
210 210
211 static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc) 211 static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
212 { 212 {
213 union scu_remote_node_context *rnc_buffer; 213 union scu_remote_node_context *rnc_buffer;
214 struct isci_remote_device *idev = rnc_to_dev(sci_rnc); 214 struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
215 struct domain_device *dev = idev->domain_dev; 215 struct domain_device *dev = idev->domain_dev;
216 struct isci_host *ihost = idev->owning_port->owning_controller; 216 struct isci_host *ihost = idev->owning_port->owning_controller;
217 217
218 rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index); 218 rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
219 219
220 rnc_buffer->ssp.is_valid = true; 220 rnc_buffer->ssp.is_valid = true;
221 221
222 if (dev_is_sata(dev) && dev->parent) { 222 if (dev_is_sata(dev) && dev->parent) {
223 sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96); 223 sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
224 } else { 224 } else {
225 sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32); 225 sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);
226 226
227 if (!dev->parent) 227 if (!dev->parent)
228 sci_port_setup_transports(idev->owning_port, 228 sci_port_setup_transports(idev->owning_port,
229 sci_rnc->remote_node_index); 229 sci_rnc->remote_node_index);
230 } 230 }
231 } 231 }
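The RNC_96 vs. RNC_32 split reflects context sizing: a remote node context is 32 bytes, and (an assumption here, based on sci_remote_device_node_count() rather than anything shown in this hunk) a SATA/STP device occupies three contiguous contexts. In sketch form:

	/* Assumption for illustration:
	 *   dev_is_sata(dev) && dev->parent -> POST_RNC_96 (3 contexts posted)
	 *   otherwise                       -> POST_RNC_32 (1 context posted),
	 *                                      plus sci_port_setup_transports()
	 *                                      for direct-attached devices
	 */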
232 232
233 static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc) 233 static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc)
234 { 234 {
235 union scu_remote_node_context *rnc_buffer; 235 union scu_remote_node_context *rnc_buffer;
236 struct isci_remote_device *idev = rnc_to_dev(sci_rnc); 236 struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
237 struct isci_host *ihost = idev->owning_port->owning_controller; 237 struct isci_host *ihost = idev->owning_port->owning_controller;
238 238
239 rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index); 239 rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
240 240
241 rnc_buffer->ssp.is_valid = false; 241 rnc_buffer->ssp.is_valid = false;
242 242
243 sci_remote_device_post_request(rnc_to_dev(sci_rnc), 243 sci_remote_device_post_request(rnc_to_dev(sci_rnc),
244 SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE); 244 SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE);
245 } 245 }
246 246
247 static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm) 247 static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
248 { 248 {
249 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); 249 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
250 250
251 /* Check to see if we have gotten back to the initial state because 251 /* Check to see if we have gotten back to the initial state because
252 * someone requested to destroy the remote node context object. 252 * someone requested to destroy the remote node context object.
253 */ 253 */
254 if (sm->previous_state_id == SCI_RNC_INVALIDATING) { 254 if (sm->previous_state_id == SCI_RNC_INVALIDATING) {
255 rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; 255 rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
256 sci_remote_node_context_notify_user(rnc); 256 sci_remote_node_context_notify_user(rnc);
257 } 257 }
258 } 258 }
259 259
260 static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm) 260 static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm)
261 { 261 {
262 struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm); 262 struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm);
263 263
264 sci_remote_node_context_validate_context_buffer(sci_rnc); 264 sci_remote_node_context_validate_context_buffer(sci_rnc);
265 } 265 }
266 266
267 static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm) 267 static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm)
268 { 268 {
269 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); 269 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
270 270
271 /* Terminate outstanding requests pending abort. */
272 sci_remote_device_abort_requests_pending_abort(rnc_to_dev(rnc));
271 sci_remote_node_context_invalidate_context_buffer(rnc); 273 sci_remote_node_context_invalidate_context_buffer(rnc);
272 } 274 }
273 275
274 static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm) 276 static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm)
275 { 277 {
276 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); 278 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
277 struct isci_remote_device *idev; 279 struct isci_remote_device *idev;
278 struct domain_device *dev; 280 struct domain_device *dev;
279 281
280 idev = rnc_to_dev(rnc); 282 idev = rnc_to_dev(rnc);
281 dev = idev->domain_dev; 283 dev = idev->domain_dev;
282 284
283 /* 285 /*
284 * For direct attached SATA devices we need to clear the TLCR 286 * For direct attached SATA devices we need to clear the TLCR
285 * NCQ to TCi tag mapping on the phy and in cases where we 287 * NCQ to TCi tag mapping on the phy and in cases where we
286 * resume because of a target reset we also need to update 288 * resume because of a target reset we also need to update
287 * the STPTLDARNI register with the RNi of the device 289 * the STPTLDARNI register with the RNi of the device
288 */ 290 */
289 if (dev_is_sata(dev) && !dev->parent) 291 if (dev_is_sata(dev) && !dev->parent)
290 sci_port_setup_transports(idev->owning_port, rnc->remote_node_index); 292 sci_port_setup_transports(idev->owning_port, rnc->remote_node_index);
291 293
292 sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME); 294 sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
293 } 295 }
294 296
295 static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm) 297 static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
296 { 298 {
297 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); 299 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
298 300
299 rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; 301 rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
300 302
301 if (rnc->user_callback) 303 if (rnc->user_callback)
302 sci_remote_node_context_notify_user(rnc); 304 sci_remote_node_context_notify_user(rnc);
303 } 305 }
304 306
305 static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm) 307 static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm)
306 { 308 {
307 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); 309 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
308 310
309 sci_remote_node_context_continue_state_transitions(rnc); 311 sci_remote_node_context_continue_state_transitions(rnc);
310 } 312 }
311 313
312 static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm) 314 static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
313 { 315 {
314 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); 316 struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
317 struct isci_remote_device *idev = rnc_to_dev(rnc);
318 struct isci_host *ihost = idev->owning_port->owning_controller;
315 319
320 set_bit(IDEV_TXRX_SUSPENDED, &idev->flags);
321
322 /* Terminate outstanding requests pending abort. */
323 sci_remote_device_abort_requests_pending_abort(idev);
324
325 wake_up(&ihost->eventq);
316 sci_remote_node_context_continue_state_transitions(rnc); 326 sci_remote_node_context_continue_state_transitions(rnc);
317 } 327 }
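This enter handler carries the core of the patch: once the hardware reports the TX/RX suspension, requests pending abort are terminated, the new flag is published, and eventq sleepers are woken. A hypothetical waiter that must not terminate TCs before the RNC is suspended could block like this (sketch only, not code from this patch):

	wait_event(ihost->eventq,
		   test_bit(IDEV_TXRX_SUSPENDED, &idev->flags));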
318 328
329 static void sci_remote_node_context_tx_rx_suspended_state_exit(
330 struct sci_base_state_machine *sm)
331 {
332 struct sci_remote_node_context *rnc
333 = container_of(sm, typeof(*rnc), sm);
334 struct isci_remote_device *idev = rnc_to_dev(rnc);
335
336 clear_bit(IDEV_TXRX_SUSPENDED, &idev->flags);
337 }
338
319 static void sci_remote_node_context_await_suspend_state_exit( 339 static void sci_remote_node_context_await_suspend_state_exit(
320 struct sci_base_state_machine *sm) 340 struct sci_base_state_machine *sm)
321 { 341 {
322 struct sci_remote_node_context *rnc 342 struct sci_remote_node_context *rnc
323 = container_of(sm, typeof(*rnc), sm); 343 = container_of(sm, typeof(*rnc), sm);
324 344
325 isci_dev_set_hang_detection_timeout(rnc_to_dev(rnc), 0); 345 isci_dev_set_hang_detection_timeout(rnc_to_dev(rnc), 0);
326 } 346 }
327 347
328 static const struct sci_base_state sci_remote_node_context_state_table[] = { 348 static const struct sci_base_state sci_remote_node_context_state_table[] = {
329 [SCI_RNC_INITIAL] = { 349 [SCI_RNC_INITIAL] = {
330 .enter_state = sci_remote_node_context_initial_state_enter, 350 .enter_state = sci_remote_node_context_initial_state_enter,
331 }, 351 },
332 [SCI_RNC_POSTING] = { 352 [SCI_RNC_POSTING] = {
333 .enter_state = sci_remote_node_context_posting_state_enter, 353 .enter_state = sci_remote_node_context_posting_state_enter,
334 }, 354 },
335 [SCI_RNC_INVALIDATING] = { 355 [SCI_RNC_INVALIDATING] = {
336 .enter_state = sci_remote_node_context_invalidating_state_enter, 356 .enter_state = sci_remote_node_context_invalidating_state_enter,
337 }, 357 },
338 [SCI_RNC_RESUMING] = { 358 [SCI_RNC_RESUMING] = {
339 .enter_state = sci_remote_node_context_resuming_state_enter, 359 .enter_state = sci_remote_node_context_resuming_state_enter,
340 }, 360 },
341 [SCI_RNC_READY] = { 361 [SCI_RNC_READY] = {
342 .enter_state = sci_remote_node_context_ready_state_enter, 362 .enter_state = sci_remote_node_context_ready_state_enter,
343 }, 363 },
344 [SCI_RNC_TX_SUSPENDED] = { 364 [SCI_RNC_TX_SUSPENDED] = {
345 .enter_state = sci_remote_node_context_tx_suspended_state_enter, 365 .enter_state = sci_remote_node_context_tx_suspended_state_enter,
346 }, 366 },
347 [SCI_RNC_TX_RX_SUSPENDED] = { 367 [SCI_RNC_TX_RX_SUSPENDED] = {
348 .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter, 368 .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter,
369 .exit_state
370 = sci_remote_node_context_tx_rx_suspended_state_exit,
349 }, 371 },
350 [SCI_RNC_AWAIT_SUSPENSION] = { 372 [SCI_RNC_AWAIT_SUSPENSION] = {
351 .exit_state = sci_remote_node_context_await_suspend_state_exit, 373 .exit_state = sci_remote_node_context_await_suspend_state_exit,
352 }, 374 },
353 }; 375 };
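The table binds per-state callbacks into the driver's generic state machine; as implemented by sci_base_state_machine, sci_change_state() runs the outgoing state's .exit_state, updates current_state_id, then runs the incoming state's .enter_state, so the IDEV_TXRX_SUSPENDED set/clear pair above is symmetric by construction. In sketch form:

	/* sci_change_state(sm, next), simplified:
	 *   old->exit_state(sm);           e.g. clear IDEV_TXRX_SUSPENDED
	 *   sm->current_state_id = next;
	 *   new->enter_state(sm);          e.g. set IDEV_TXRX_SUSPENDED, wake_up()
	 */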
354 376
355 void sci_remote_node_context_construct(struct sci_remote_node_context *rnc, 377 void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
356 u16 remote_node_index) 378 u16 remote_node_index)
357 { 379 {
358 memset(rnc, 0, sizeof(struct sci_remote_node_context)); 380 memset(rnc, 0, sizeof(struct sci_remote_node_context));
359 381
360 rnc->remote_node_index = remote_node_index; 382 rnc->remote_node_index = remote_node_index;
361 rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; 383 rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
362 384
363 sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL); 385 sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL);
364 } 386 }
365 387
366 enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc, 388 enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
367 u32 event_code) 389 u32 event_code)
368 { 390 {
369 enum scis_sds_remote_node_context_states state; 391 enum scis_sds_remote_node_context_states state;
370 u32 next_state; 392 u32 next_state;
371 393
372 state = sci_rnc->sm.current_state_id; 394 state = sci_rnc->sm.current_state_id;
373 switch (state) { 395 switch (state) {
374 case SCI_RNC_POSTING: 396 case SCI_RNC_POSTING:
375 switch (scu_get_event_code(event_code)) { 397 switch (scu_get_event_code(event_code)) {
376 case SCU_EVENT_POST_RNC_COMPLETE: 398 case SCU_EVENT_POST_RNC_COMPLETE:
377 sci_change_state(&sci_rnc->sm, SCI_RNC_READY); 399 sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
378 break; 400 break;
379 default: 401 default:
380 goto out; 402 goto out;
381 } 403 }
382 break; 404 break;
383 case SCI_RNC_INVALIDATING: 405 case SCI_RNC_INVALIDATING:
384 if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) { 406 if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) {
385 if (sci_rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) 407 if (sci_rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL)
386 state = SCI_RNC_INITIAL; 408 state = SCI_RNC_INITIAL;
387 else 409 else
388 state = SCI_RNC_POSTING; 410 state = SCI_RNC_POSTING;
389 sci_change_state(&sci_rnc->sm, state); 411 sci_change_state(&sci_rnc->sm, state);
390 } else { 412 } else {
391 switch (scu_get_event_type(event_code)) { 413 switch (scu_get_event_type(event_code)) {
392 case SCU_EVENT_TYPE_RNC_SUSPEND_TX: 414 case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
393 case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: 415 case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
394 /* We really don't care if the hardware is going to suspend 416 /* We really don't care if the hardware is going to suspend
395 * the device since it's being invalidated anyway */ 417 * the device since it's being invalidated anyway */
396 dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)), 418 dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
397 "%s: SCIC Remote Node Context 0x%p was " 419 "%s: SCIC Remote Node Context 0x%p was "
398 "suspeneded by hardware while being " 420 "suspeneded by hardware while being "
399 "invalidated.\n", __func__, sci_rnc); 421 "invalidated.\n", __func__, sci_rnc);
400 break; 422 break;
401 default: 423 default:
402 goto out; 424 goto out;
403 } 425 }
404 } 426 }
405 break; 427 break;
406 case SCI_RNC_RESUMING: 428 case SCI_RNC_RESUMING:
407 if (scu_get_event_code(event_code) == SCU_EVENT_POST_RCN_RELEASE) { 429 if (scu_get_event_code(event_code) == SCU_EVENT_POST_RCN_RELEASE) {
408 sci_change_state(&sci_rnc->sm, SCI_RNC_READY); 430 sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
409 } else { 431 } else {
410 switch (scu_get_event_type(event_code)) { 432 switch (scu_get_event_type(event_code)) {
411 case SCU_EVENT_TYPE_RNC_SUSPEND_TX: 433 case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
412 case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: 434 case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
413 /* We really don't care if the hardware is going to suspend 435 /* We really don't care if the hardware is going to suspend
414 * the device since it's being resumed anyway */ 436 * the device since it's being resumed anyway */
415 dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)), 437 dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
416 "%s: SCIC Remote Node Context 0x%p was " 438 "%s: SCIC Remote Node Context 0x%p was "
417 "suspeneded by hardware while being resumed.\n", 439 "suspeneded by hardware while being resumed.\n",
418 __func__, sci_rnc); 440 __func__, sci_rnc);
419 break; 441 break;
420 default: 442 default:
421 goto out; 443 goto out;
422 } 444 }
423 } 445 }
424 break; 446 break;
425 case SCI_RNC_READY: 447 case SCI_RNC_READY:
426 switch (scu_get_event_type(event_code)) { 448 switch (scu_get_event_type(event_code)) {
427 case SCU_EVENT_TL_RNC_SUSPEND_TX: 449 case SCU_EVENT_TL_RNC_SUSPEND_TX:
428 sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED); 450 sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
429 sci_rnc->suspend_type = scu_get_event_type(event_code); 451 sci_rnc->suspend_type = scu_get_event_type(event_code);
430 break; 452 break;
431 case SCU_EVENT_TL_RNC_SUSPEND_TX_RX: 453 case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
432 sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED); 454 sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
433 sci_rnc->suspend_type = scu_get_event_type(event_code); 455 sci_rnc->suspend_type = scu_get_event_type(event_code);
434 break; 456 break;
435 default: 457 default:
436 goto out; 458 goto out;
437 } 459 }
438 break; 460 break;
439 case SCI_RNC_AWAIT_SUSPENSION: 461 case SCI_RNC_AWAIT_SUSPENSION:
440 switch (scu_get_event_type(event_code)) { 462 switch (scu_get_event_type(event_code)) {
441 case SCU_EVENT_TL_RNC_SUSPEND_TX: 463 case SCU_EVENT_TL_RNC_SUSPEND_TX:
442 next_state = SCI_RNC_TX_SUSPENDED; 464 next_state = SCI_RNC_TX_SUSPENDED;
443 break; 465 break;
444 case SCU_EVENT_TL_RNC_SUSPEND_TX_RX: 466 case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
445 next_state = SCI_RNC_TX_RX_SUSPENDED; 467 next_state = SCI_RNC_TX_RX_SUSPENDED;
446 break; 468 break;
447 default: 469 default:
448 goto out; 470 goto out;
449 } 471 }
450 if (sci_rnc->suspend_type == scu_get_event_type(event_code)) 472 if (sci_rnc->suspend_type == scu_get_event_type(event_code))
451 sci_change_state(&sci_rnc->sm, next_state); 473 sci_change_state(&sci_rnc->sm, next_state);
452 break; 474 break;
453 default: 475 default:
454 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), 476 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
455 "%s: invalid state: %s\n", __func__, 477 "%s: invalid state: %s\n", __func__,
456 rnc_state_name(state)); 478 rnc_state_name(state));
457 return SCI_FAILURE_INVALID_STATE; 479 return SCI_FAILURE_INVALID_STATE;
458 } 480 }
459 return SCI_SUCCESS; 481 return SCI_SUCCESS;
460 482
461 out: 483 out:
462 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), 484 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
463 "%s: code: %#x state: %s\n", __func__, event_code, 485 "%s: code: %#x state: %s\n", __func__, event_code,
464 rnc_state_name(state)); 486 rnc_state_name(state));
465 return SCI_FAILURE; 487 return SCI_FAILURE;
466 488
467 } 489 }
468 490
469 enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc, 491 enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
470 scics_sds_remote_node_context_callback cb_fn, 492 scics_sds_remote_node_context_callback cb_fn,
471 void *cb_p) 493 void *cb_p)
472 { 494 {
473 enum scis_sds_remote_node_context_states state; 495 enum scis_sds_remote_node_context_states state;
474 496
475 state = sci_rnc->sm.current_state_id; 497 state = sci_rnc->sm.current_state_id;
476 switch (state) { 498 switch (state) {
477 case SCI_RNC_INVALIDATING: 499 case SCI_RNC_INVALIDATING:
478 sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p); 500 sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p);
479 return SCI_SUCCESS; 501 return SCI_SUCCESS;
480 case SCI_RNC_POSTING: 502 case SCI_RNC_POSTING:
481 case SCI_RNC_RESUMING: 503 case SCI_RNC_RESUMING:
482 case SCI_RNC_READY: 504 case SCI_RNC_READY:
483 case SCI_RNC_TX_SUSPENDED: 505 case SCI_RNC_TX_SUSPENDED:
484 case SCI_RNC_TX_RX_SUSPENDED: 506 case SCI_RNC_TX_RX_SUSPENDED:
485 case SCI_RNC_AWAIT_SUSPENSION: 507 case SCI_RNC_AWAIT_SUSPENSION:
486 sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p); 508 sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p);
487 sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING); 509 sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
488 return SCI_SUCCESS; 510 return SCI_SUCCESS;
489 case SCI_RNC_INITIAL: 511 case SCI_RNC_INITIAL:
490 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), 512 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
491 "%s: invalid state: %s\n", __func__, 513 "%s: invalid state: %s\n", __func__,
492 rnc_state_name(state)); 514 rnc_state_name(state));
493 /* We have decided that the destruct request on the remote node context 515 /* We have decided that the destruct request on the remote node context
494 * cannot fail since it is either in the initial/destroyed state or 516 * cannot fail since it is either in the initial/destroyed state or
495 * can be destroyed. 517 * can be destroyed.
496 */ 518 */
497 return SCI_SUCCESS; 519 return SCI_SUCCESS;
498 default: 520 default:
499 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), 521 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
500 "%s: invalid state %s\n", __func__, 522 "%s: invalid state %s\n", __func__,
501 rnc_state_name(state)); 523 rnc_state_name(state));
502 return SCI_FAILURE_INVALID_STATE; 524 return SCI_FAILURE_INVALID_STATE;
503 } 525 }
504 } 526 }
505 527
506 enum sci_status sci_remote_node_context_suspend( 528 enum sci_status sci_remote_node_context_suspend(
507 struct sci_remote_node_context *sci_rnc, 529 struct sci_remote_node_context *sci_rnc,
508 enum sci_remote_node_suspension_reasons suspend_reason, 530 enum sci_remote_node_suspension_reasons suspend_reason,
509 u32 suspend_type, 531 u32 suspend_type,
510 scics_sds_remote_node_context_callback cb_fn, 532 scics_sds_remote_node_context_callback cb_fn,
511 void *cb_p) 533 void *cb_p)
512 { 534 {
513 enum scis_sds_remote_node_context_states state 535 enum scis_sds_remote_node_context_states state
514 = sci_rnc->sm.current_state_id; 536 = sci_rnc->sm.current_state_id;
515 struct isci_remote_device *idev = rnc_to_dev(sci_rnc); 537 struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
516 enum sci_status status = SCI_FAILURE_INVALID_STATE; 538 enum sci_status status = SCI_FAILURE_INVALID_STATE;
517 539
540 dev_dbg(scirdev_to_dev(idev),
541 "%s: current state %d, current suspend_type %x dest state %d,"
542 " arg suspend_reason %d, arg suspend_type %x",
543 __func__, state, sci_rnc->suspend_type,
544 sci_rnc->destination_state, suspend_reason,
545 suspend_type);
546
518 /* Disable automatic state continuations if explicitly suspending. */ 547 /* Disable automatic state continuations if explicitly suspending. */
519 if (suspend_reason == SCI_SOFTWARE_SUSPENSION) 548 if (suspend_reason == SCI_SOFTWARE_SUSPENSION)
520 sci_rnc->destination_state 549 sci_rnc->destination_state
521 = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; 550 = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
522 switch (state) { 551 switch (state) {
523 case SCI_RNC_READY: 552 case SCI_RNC_READY:
524 break; 553 break;
525 case SCI_RNC_TX_SUSPENDED: 554 case SCI_RNC_TX_SUSPENDED:
526 if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX) 555 if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX)
527 status = SCI_SUCCESS; 556 status = SCI_SUCCESS;
528 break; 557 break;
529 case SCI_RNC_TX_RX_SUSPENDED: 558 case SCI_RNC_TX_RX_SUSPENDED:
530 if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX) 559 if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
531 status = SCI_SUCCESS; 560 status = SCI_SUCCESS;
532 break; 561 break;
533 case SCI_RNC_AWAIT_SUSPENSION: 562 case SCI_RNC_AWAIT_SUSPENSION:
534 if ((sci_rnc->suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX) 563 if ((sci_rnc->suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
535 || (suspend_type == sci_rnc->suspend_type)) 564 || (suspend_type == sci_rnc->suspend_type))
536 return SCI_SUCCESS; 565 return SCI_SUCCESS;
537 break; 566 break;
538 default: 567 default:
539 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), 568 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
540 "%s: invalid state %s\n", __func__, 569 "%s: invalid state %s\n", __func__,
541 rnc_state_name(state)); 570 rnc_state_name(state));
542 return SCI_FAILURE_INVALID_STATE; 571 return SCI_FAILURE_INVALID_STATE;
543 } 572 }
544 sci_rnc->user_callback = cb_fn; 573 sci_rnc->user_callback = cb_fn;
545 sci_rnc->user_cookie = cb_p; 574 sci_rnc->user_cookie = cb_p;
546 sci_rnc->suspend_type = suspend_type; 575 sci_rnc->suspend_type = suspend_type;
547 576
548 if (status == SCI_SUCCESS) { /* Already in the destination state? */ 577 if (status == SCI_SUCCESS) { /* Already in the destination state? */
578 struct isci_host *ihost = idev->owning_port->owning_controller;
579
549 sci_remote_node_context_notify_user(sci_rnc); 580 sci_remote_node_context_notify_user(sci_rnc);
581 wake_up_all(&ihost->eventq); /* Let observers look. */
550 return SCI_SUCCESS; 582 return SCI_SUCCESS;
551 } 583 }
552 if (suspend_reason == SCI_SOFTWARE_SUSPENSION) { 584 if (suspend_reason == SCI_SOFTWARE_SUSPENSION) {
553 isci_dev_set_hang_detection_timeout(idev, 0x00000001); 585 isci_dev_set_hang_detection_timeout(idev, 0x00000001);
554 sci_remote_device_post_request( 586 sci_remote_device_post_request(
555 idev, SCI_SOFTWARE_SUSPEND_CMD); 587 idev, SCI_SOFTWARE_SUSPEND_CMD);
556 } 588 }
557 if (state != SCI_RNC_AWAIT_SUSPENSION) 589 if (state != SCI_RNC_AWAIT_SUSPENSION)
558 sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION); 590 sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);
559 591
560 return SCI_SUCCESS; 592 return SCI_SUCCESS;
561 } 593 }
562 594
563 enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc, 595 enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
564 scics_sds_remote_node_context_callback cb_fn, 596 scics_sds_remote_node_context_callback cb_fn,
565 void *cb_p) 597 void *cb_p)
566 { 598 {
567 enum scis_sds_remote_node_context_states state; 599 enum scis_sds_remote_node_context_states state;
568 600
569 state = sci_rnc->sm.current_state_id; 601 state = sci_rnc->sm.current_state_id;
570 switch (state) { 602 switch (state) {
571 case SCI_RNC_INITIAL: 603 case SCI_RNC_INITIAL:
572 if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) 604 if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
573 return SCI_FAILURE_INVALID_STATE; 605 return SCI_FAILURE_INVALID_STATE;
574 606
575 sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); 607 sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
576 sci_remote_node_context_construct_buffer(sci_rnc); 608 sci_remote_node_context_construct_buffer(sci_rnc);
577 sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING); 609 sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
578 return SCI_SUCCESS; 610 return SCI_SUCCESS;
579 case SCI_RNC_POSTING: 611 case SCI_RNC_POSTING:
580 case SCI_RNC_INVALIDATING: 612 case SCI_RNC_INVALIDATING:
581 case SCI_RNC_RESUMING: 613 case SCI_RNC_RESUMING:
582 if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY) 614 if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
583 return SCI_FAILURE_INVALID_STATE; 615 return SCI_FAILURE_INVALID_STATE;
584 616
585 sci_rnc->user_callback = cb_fn; 617 sci_rnc->user_callback = cb_fn;
586 sci_rnc->user_cookie = cb_p; 618 sci_rnc->user_cookie = cb_p;
587 return SCI_SUCCESS; 619 return SCI_SUCCESS;
588 case SCI_RNC_TX_SUSPENDED: 620 case SCI_RNC_TX_SUSPENDED:
589 case SCI_RNC_TX_RX_SUSPENDED: { 621 case SCI_RNC_TX_RX_SUSPENDED: {
590 struct isci_remote_device *idev = rnc_to_dev(sci_rnc); 622 struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
591 struct domain_device *dev = idev->domain_dev; 623 struct domain_device *dev = idev->domain_dev;
592 624
593 /* If this is an expander attached SATA device we must 625 /* If this is an expander attached SATA device we must
594 * invalidate and repost the RNC since this is the only way 626 * invalidate and repost the RNC since this is the only way
595 * to clear the TCi to NCQ tag mapping table for the RNi. 627 * to clear the TCi to NCQ tag mapping table for the RNi.
596 * All other device types we can just resume. 628 * All other device types we can just resume.
597 */ 629 */
598 sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); 630 sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
599 631
600 if (dev_is_sata(dev) && dev->parent) 632 if (dev_is_sata(dev) && dev->parent)
601 sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING); 633 sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
602 else 634 else
603 sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); 635 sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
604 return SCI_SUCCESS; 636 return SCI_SUCCESS;
605 } 637 }
606 case SCI_RNC_AWAIT_SUSPENSION: 638 case SCI_RNC_AWAIT_SUSPENSION:
607 sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); 639 sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
608 return SCI_SUCCESS; 640 return SCI_SUCCESS;
609 default: 641 default:
610 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), 642 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
611 "%s: invalid state %s\n", __func__, 643 "%s: invalid state %s\n", __func__,
612 rnc_state_name(state)); 644 rnc_state_name(state));
613 return SCI_FAILURE_INVALID_STATE; 645 return SCI_FAILURE_INVALID_STATE;
614 } 646 }
615 } 647 }
616 648
617 enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc, 649 enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
618 struct isci_request *ireq) 650 struct isci_request *ireq)
619 { 651 {
620 enum scis_sds_remote_node_context_states state; 652 enum scis_sds_remote_node_context_states state;
621 653
622 state = sci_rnc->sm.current_state_id; 654 state = sci_rnc->sm.current_state_id;
623 655
624 switch (state) { 656 switch (state) {
625 case SCI_RNC_READY: 657 case SCI_RNC_READY:
626 return SCI_SUCCESS; 658 return SCI_SUCCESS;
627 case SCI_RNC_TX_SUSPENDED: 659 case SCI_RNC_TX_SUSPENDED:
628 case SCI_RNC_TX_RX_SUSPENDED: 660 case SCI_RNC_TX_RX_SUSPENDED:
629 case SCI_RNC_AWAIT_SUSPENSION: 661 case SCI_RNC_AWAIT_SUSPENSION:
630 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), 662 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
631 "%s: invalid state %s\n", __func__, 663 "%s: invalid state %s\n", __func__,
632 rnc_state_name(state)); 664 rnc_state_name(state));
633 return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; 665 return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
634 default: 666 default:
635 dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)), 667 dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
636 "%s: invalid state %s\n", __func__, 668 "%s: invalid state %s\n", __func__,
637 rnc_state_name(state)); 669 rnc_state_name(state));
638 return SCI_FAILURE_INVALID_STATE; 670 return SCI_FAILURE_INVALID_STATE;
639 } 671 }
640 } 672 }
641 673
642 enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc, 674 enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
643 struct isci_request *ireq) 675 struct isci_request *ireq)
644 { 676 {
645 enum scis_sds_remote_node_context_states state; 677 enum scis_sds_remote_node_context_states state;
646 678
647 state = sci_rnc->sm.current_state_id; 679 state = sci_rnc->sm.current_state_id;
648 switch (state) { 680 switch (state) {
649 case SCI_RNC_RESUMING: 681 case SCI_RNC_RESUMING:
650 case SCI_RNC_READY: 682 case SCI_RNC_READY:
651 case SCI_RNC_AWAIT_SUSPENSION: 683 case SCI_RNC_AWAIT_SUSPENSION:
652 return SCI_SUCCESS; 684 return SCI_SUCCESS;
653 case SCI_RNC_TX_SUSPENDED: 685 case SCI_RNC_TX_SUSPENDED:
654 case SCI_RNC_TX_RX_SUSPENDED: 686 case SCI_RNC_TX_RX_SUSPENDED:
655 sci_remote_node_context_resume(sci_rnc, NULL, NULL); 687 sci_remote_node_context_resume(sci_rnc, NULL, NULL);
656 return SCI_SUCCESS; 688 return SCI_SUCCESS;
657 default: 689 default:
658 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), 690 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
659 "%s: invalid state %s\n", __func__, 691 "%s: invalid state %s\n", __func__,
660 rnc_state_name(state)); 692 rnc_state_name(state));
661 return SCI_FAILURE_INVALID_STATE; 693 return SCI_FAILURE_INVALID_STATE;
694 }
695 }
696
697 int sci_remote_node_context_is_safe_to_abort(
698 struct sci_remote_node_context *sci_rnc)
699 {
700 enum scis_sds_remote_node_context_states state;
701
702 state = sci_rnc->sm.current_state_id;
703 switch (state) {
704 case SCI_RNC_INVALIDATING:
705 case SCI_RNC_TX_RX_SUSPENDED:
706 return 1;
707 case SCI_RNC_POSTING:
708 case SCI_RNC_RESUMING:
709 case SCI_RNC_READY:
710 case SCI_RNC_TX_SUSPENDED:
711 case SCI_RNC_AWAIT_SUSPENSION:
712 case SCI_RNC_INITIAL:
713 return 0;
714 default:
715 dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
716 "%s: invalid state %d\n", __func__, state);
717 return 0;
662 } 718 }
663 } 719 }
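
This predicate encodes the commit's premise that task contexts may only be terminated once the RNC has actually stopped: only the invalidating and full TX/RX-suspended states report safe. A minimal sketch of how a caller might gate TC termination on it (example_terminate_outstanding_reqs and the retry-from-callback flow are illustrative assumptions, not the driver's actual abort path):

    /* Hypothetical caller: only terminate outstanding TCs when the RNC
     * reports a state in which termination is safe; otherwise request a
     * TX/RX suspension and retry from the suspension callback.
     */
    static void example_terminate_outstanding_reqs(struct isci_remote_device *idev)
    {
        if (!sci_remote_node_context_is_safe_to_abort(&idev->rnc)) {
            sci_remote_node_context_suspend(&idev->rnc,
                            SCI_SOFTWARE_SUSPENSION,
                            SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT,
                            NULL, NULL);
            return;    /* wait for the suspension event */
        }
        /* ...safe to post terminate requests for each outstanding TC... */
    }
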
664 720
drivers/scsi/isci/remote_node_context.h
1 /* 1 /*
2 * This file is provided under a dual BSD/GPLv2 license. When using or 2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license. 3 * redistributing this file, you may do so under either license.
4 * 4 *
5 * GPL LICENSE SUMMARY 5 * GPL LICENSE SUMMARY
6 * 6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as 10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 * 12 *
13 * This program is distributed in the hope that it will be useful, but 13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of 14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details. 16 * General Public License for more details.
17 * 17 *
18 * You should have received a copy of the GNU General Public License 18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software 19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL. 22 * in the file called LICENSE.GPL.
23 * 23 *
24 * BSD LICENSE 24 * BSD LICENSE
25 * 25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved. 27 * All rights reserved.
28 * 28 *
29 * Redistribution and use in source and binary forms, with or without 29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions 30 * modification, are permitted provided that the following conditions
31 * are met: 31 * are met:
32 * 32 *
33 * * Redistributions of source code must retain the above copyright 33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer. 34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright 35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in 36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the 37 * the documentation and/or other materials provided with the
38 * distribution. 38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its 39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived 40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission. 41 * from this software without specific prior written permission.
42 * 42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */ 54 */
55 55
56 #ifndef _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ 56 #ifndef _SCIC_SDS_REMOTE_NODE_CONTEXT_H_
57 #define _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ 57 #define _SCIC_SDS_REMOTE_NODE_CONTEXT_H_
58 58
59 /** 59 /**
60 * This file contains the structures, constants, and prototypes associated with 60 * This file contains the structures, constants, and prototypes associated with
61 * the remote node context in the silicon, and exists to model and manage 61 * the remote node context in the silicon, and exists to model and manage
62 * that context. 62 * that context.
63 * 63 *
64 * 64 *
65 */ 65 */
66 66
67 #include "isci.h" 67 #include "isci.h"
68 68
69 /** 69 /**
70 * 70 *
71 * 71 *
72 * This constant represents an invalid remote device id, it is used to program 72 * This constant represents an invalid remote device id, it is used to program
73 * the STPDARNI register so the driver knows when it has received a SIGNATURE 73 * the STPDARNI register so the driver knows when it has received a SIGNATURE
74 * FIS from the SCU. 74 * FIS from the SCU.
75 */ 75 */
76 #define SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX 0x0FFF 76 #define SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX 0x0FFF
77 77
78 enum sci_remote_node_suspension_reasons { 78 enum sci_remote_node_suspension_reasons {
79 SCU_HARDWARE_SUSPENSION, 79 SCU_HARDWARE_SUSPENSION,
80 SCI_SOFTWARE_SUSPENSION 80 SCI_SOFTWARE_SUSPENSION
81 }; 81 };
82 #define SCI_SOFTWARE_SUSPEND_CMD SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX 82 #define SCI_SOFTWARE_SUSPEND_CMD SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX
83 #define SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT SCU_EVENT_TL_RNC_SUSPEND_TX_RX 83 #define SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT SCU_EVENT_TL_RNC_SUSPEND_TX_RX
84 84
85 struct isci_request; 85 struct isci_request;
86 struct isci_remote_device; 86 struct isci_remote_device;
87 struct sci_remote_node_context; 87 struct sci_remote_node_context;
88 88
89 typedef void (*scics_sds_remote_node_context_callback)(void *); 89 typedef void (*scics_sds_remote_node_context_callback)(void *);
90 90
91 /** 91 /**
92 * enum sci_remote_node_context_states 92 * enum sci_remote_node_context_states
93 * @SCI_RNC_INITIAL: initial state for a remote node context. On a resume 93 * @SCI_RNC_INITIAL: initial state for a remote node context. On a resume
94 * request the remote node context will transition to the posting state. 94 * request the remote node context will transition to the posting state.
95 * 95 *
96 * @SCI_RNC_POSTING: transition state that posts the RNi to the hardware. Once 96 * @SCI_RNC_POSTING: transition state that posts the RNi to the hardware. Once
97 * the RNC is posted the remote node context will be made ready. 97 * the RNC is posted the remote node context will be made ready.
98 * 98 *
99 * @SCI_RNC_INVALIDATING: transition state that will post an RNC invalidate to 99 * @SCI_RNC_INVALIDATING: transition state that will post an RNC invalidate to
100 * the hardware. Once the invalidate is complete the remote node context will 100 * the hardware. Once the invalidate is complete the remote node context will
101 * transition to the posting state. 101 * transition to the posting state.
102 * 102 *
103 * @SCI_RNC_RESUMING: transition state that will post an RNC resume to the 103 * @SCI_RNC_RESUMING: transition state that will post an RNC resume to the
104 * hardware. Once the event notification of resume complete is received the 104 * hardware. Once the event notification of resume complete is received the
105 * remote node context will transition to the ready state. 105 * remote node context will transition to the ready state.
106 * 106 *
107 * @SCI_RNC_READY: state that the remote node context must be in to accept io 107 * @SCI_RNC_READY: state that the remote node context must be in to accept io
108 * request operations. 108 * request operations.
109 * 109 *
110 * @SCI_RNC_TX_SUSPENDED: state that the remote node context transitions to when 110 * @SCI_RNC_TX_SUSPENDED: state that the remote node context transitions to when
111 * it gets a TX suspend notification from the hardware. 111 * it gets a TX suspend notification from the hardware.
112 * 112 *
113 * @SCI_RNC_TX_RX_SUSPENDED: state that the remote node context transitions to 113 * @SCI_RNC_TX_RX_SUSPENDED: state that the remote node context transitions to
114 * when it gets a TX RX suspend notification from the hardware. 114 * when it gets a TX RX suspend notification from the hardware.
115 * 115 *
116 * @SCI_RNC_AWAIT_SUSPENSION: wait state for the remote node context that waits 116 * @SCI_RNC_AWAIT_SUSPENSION: wait state for the remote node context that waits
117 * for a suspend notification from the hardware. This state is entered when 117 * for a suspend notification from the hardware. This state is entered when
118 * either there is a request to suspend the remote node context or when there is 118 * either there is a request to suspend the remote node context or when there is
119 * a TC completion where the remote node will be suspended by the hardware. 119 * a TC completion where the remote node will be suspended by the hardware.
120 */ 120 */
121 #define RNC_STATES {\ 121 #define RNC_STATES {\
122 C(RNC_INITIAL),\ 122 C(RNC_INITIAL),\
123 C(RNC_POSTING),\ 123 C(RNC_POSTING),\
124 C(RNC_INVALIDATING),\ 124 C(RNC_INVALIDATING),\
125 C(RNC_RESUMING),\ 125 C(RNC_RESUMING),\
126 C(RNC_READY),\ 126 C(RNC_READY),\
127 C(RNC_TX_SUSPENDED),\ 127 C(RNC_TX_SUSPENDED),\
128 C(RNC_TX_RX_SUSPENDED),\ 128 C(RNC_TX_RX_SUSPENDED),\
129 C(RNC_AWAIT_SUSPENSION),\ 129 C(RNC_AWAIT_SUSPENSION),\
130 } 130 }
131 #undef C 131 #undef C
132 #define C(a) SCI_##a 132 #define C(a) SCI_##a
133 enum scis_sds_remote_node_context_states RNC_STATES; 133 enum scis_sds_remote_node_context_states RNC_STATES;
134 #undef C 134 #undef C
135 const char *rnc_state_name(enum scis_sds_remote_node_context_states state); 135 const char *rnc_state_name(enum scis_sds_remote_node_context_states state);
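
The C() macro above is an X-macro: RNC_STATES lists the state names once, and each redefinition of C() re-expands the same list, here into enum constants (C(a) → SCI_##a) and, in remote_node_context.c, into the parallel string table behind rnc_state_name() (C(a) → #a). A self-contained sketch of the pattern with generic names:

    #include <stdio.h>

    #define COLORS { C(RED), C(GREEN), C(BLUE), }

    #define C(a) COLOR_##a
    enum color COLORS;                      /* COLOR_RED, COLOR_GREEN, COLOR_BLUE */
    #undef C

    #define C(a) (#a)
    static const char * const color_names[] = COLORS;  /* "RED", "GREEN", "BLUE" */
    #undef C

    int main(void)
    {
        printf("%s\n", color_names[COLOR_GREEN]);   /* prints GREEN */
        return 0;
    }

Generating both the list and its stringified names from one macro guarantees the two can never drift out of sync.
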
136 136
137 /** 137 /**
138 * 138 *
139 * 139 *
140 * This enumeration is used to define the end destination state for the remote 140 * This enumeration is used to define the end destination state for the remote
141 * node context. 141 * node context.
142 */ 142 */
143 enum sci_remote_node_context_destination_state { 143 enum sci_remote_node_context_destination_state {
144 SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED, 144 SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED,
145 SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY, 145 SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY,
146 SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL 146 SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL
147 }; 147 };
148 148
149 /** 149 /**
150 * struct sci_remote_node_context - This structure contains the data 150 * struct sci_remote_node_context - This structure contains the data
151 * associated with the remote node context object. The remote node context 151 * associated with the remote node context object. The remote node context
152 * (RNC) object models the remote device information necessary to manage 152 * (RNC) object models the remote device information necessary to manage
153 * the silicon RNC. 153 * the silicon RNC.
154 */ 154 */
155 struct sci_remote_node_context { 155 struct sci_remote_node_context {
156 /** 156 /**
157 * This field indicates the remote node index (RNI) associated with 157 * This field indicates the remote node index (RNI) associated with
158 * this RNC. 158 * this RNC.
159 */ 159 */
160 u16 remote_node_index; 160 u16 remote_node_index;
161 161
162 /** 162 /**
163 * This field is the recorded suspension type of the remote node 163 * This field is the recorded suspension type of the remote node
164 * context suspension. 164 * context suspension.
165 */ 165 */
166 u32 suspend_type; 166 u32 suspend_type;
167 167
168 /** 168 /**
169 * This field indicates the destination state for the remote node context; 169 * This field indicates the destination state for the remote node context;
170 * a READY destination can cause an automatic resume on receiving a 170 * a READY destination can cause an automatic resume on receiving a
171 * suspension notification. 171 * suspension notification.
172 */ 172 */
173 enum sci_remote_node_context_destination_state destination_state; 173 enum sci_remote_node_context_destination_state destination_state;
174 174
175 /** 175 /**
176 * This field contains the callback function that the user requested to be 176 * This field contains the callback function that the user requested to be
177 * called when the requested state transition is complete. 177 * called when the requested state transition is complete.
178 */ 178 */
179 scics_sds_remote_node_context_callback user_callback; 179 scics_sds_remote_node_context_callback user_callback;
180 180
181 /** 181 /**
182 * This field contains the parameter that is called when the user requested 182 * This field contains the parameter that is called when the user requested
183 * state transition is completed. 183 * state transition is completed.
184 */ 184 */
185 void *user_cookie; 185 void *user_cookie;
186 186
187 /** 187 /**
188 * This field contains the data for the object's state machine. 188 * This field contains the data for the object's state machine.
189 */ 189 */
190 struct sci_base_state_machine sm; 190 struct sci_base_state_machine sm;
191 }; 191 };
192 192
193 void sci_remote_node_context_construct(struct sci_remote_node_context *rnc, 193 void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
194 u16 remote_node_index); 194 u16 remote_node_index);
195 195
196 196
197 bool sci_remote_node_context_is_ready( 197 bool sci_remote_node_context_is_ready(
198 struct sci_remote_node_context *sci_rnc); 198 struct sci_remote_node_context *sci_rnc);
199 199
200 enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc, 200 enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
201 u32 event_code); 201 u32 event_code);
202 enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc, 202 enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
203 scics_sds_remote_node_context_callback callback, 203 scics_sds_remote_node_context_callback callback,
204 void *callback_parameter); 204 void *callback_parameter);
205 enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc, 205 enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
206 u32 suspend_type, 206 u32 suspend_type,
207 u32 suspension_code, 207 u32 suspension_code,
208 scics_sds_remote_node_context_callback cb_fn, 208 scics_sds_remote_node_context_callback cb_fn,
209 void *cb_p); 209 void *cb_p);
210 enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc, 210 enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
211 scics_sds_remote_node_context_callback cb_fn, 211 scics_sds_remote_node_context_callback cb_fn,
212 void *cb_p); 212 void *cb_p);
213 enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc, 213 enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
214 struct isci_request *ireq); 214 struct isci_request *ireq);
215 enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc, 215 enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
216 struct isci_request *ireq); 216 struct isci_request *ireq);
217 int sci_remote_node_context_is_safe_to_abort(
218 struct sci_remote_node_context *sci_rnc);
217 219
218 #endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */ 220 #endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */
219 221
drivers/scsi/isci/request.c
1 /* 1 /*
2 * This file is provided under a dual BSD/GPLv2 license. When using or 2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license. 3 * redistributing this file, you may do so under either license.
4 * 4 *
5 * GPL LICENSE SUMMARY 5 * GPL LICENSE SUMMARY
6 * 6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as 10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 * 12 *
13 * This program is distributed in the hope that it will be useful, but 13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of 14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details. 16 * General Public License for more details.
17 * 17 *
18 * You should have received a copy of the GNU General Public License 18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software 19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL. 22 * in the file called LICENSE.GPL.
23 * 23 *
24 * BSD LICENSE 24 * BSD LICENSE
25 * 25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved. 27 * All rights reserved.
28 * 28 *
29 * Redistribution and use in source and binary forms, with or without 29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions 30 * modification, are permitted provided that the following conditions
31 * are met: 31 * are met:
32 * 32 *
33 * * Redistributions of source code must retain the above copyright 33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer. 34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright 35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in 36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the 37 * the documentation and/or other materials provided with the
38 * distribution. 38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its 39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived 40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission. 41 * from this software without specific prior written permission.
42 * 42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */ 54 */
55 55
56 #include <scsi/scsi_cmnd.h> 56 #include <scsi/scsi_cmnd.h>
57 #include "isci.h" 57 #include "isci.h"
58 #include "task.h" 58 #include "task.h"
59 #include "request.h" 59 #include "request.h"
60 #include "scu_completion_codes.h" 60 #include "scu_completion_codes.h"
61 #include "scu_event_codes.h" 61 #include "scu_event_codes.h"
62 #include "sas.h" 62 #include "sas.h"
63 63
64 #undef C 64 #undef C
65 #define C(a) (#a) 65 #define C(a) (#a)
66 const char *req_state_name(enum sci_base_request_states state) 66 const char *req_state_name(enum sci_base_request_states state)
67 { 67 {
68 static const char * const strings[] = REQUEST_STATES; 68 static const char * const strings[] = REQUEST_STATES;
69 69
70 return strings[state]; 70 return strings[state];
71 } 71 }
72 #undef C 72 #undef C
73 73
74 static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq, 74 static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
75 int idx) 75 int idx)
76 { 76 {
77 if (idx == 0) 77 if (idx == 0)
78 return &ireq->tc->sgl_pair_ab; 78 return &ireq->tc->sgl_pair_ab;
79 else if (idx == 1) 79 else if (idx == 1)
80 return &ireq->tc->sgl_pair_cd; 80 return &ireq->tc->sgl_pair_cd;
81 else if (idx < 0) 81 else if (idx < 0)
82 return NULL; 82 return NULL;
83 else 83 else
84 return &ireq->sg_table[idx - 2]; 84 return &ireq->sg_table[idx - 2];
85 } 85 }
86 86
87 static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost, 87 static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
88 struct isci_request *ireq, u32 idx) 88 struct isci_request *ireq, u32 idx)
89 { 89 {
90 u32 offset; 90 u32 offset;
91 91
92 if (idx == 0) { 92 if (idx == 0) {
93 offset = (void *) &ireq->tc->sgl_pair_ab - 93 offset = (void *) &ireq->tc->sgl_pair_ab -
94 (void *) &ihost->task_context_table[0]; 94 (void *) &ihost->task_context_table[0];
95 return ihost->tc_dma + offset; 95 return ihost->tc_dma + offset;
96 } else if (idx == 1) { 96 } else if (idx == 1) {
97 offset = (void *) &ireq->tc->sgl_pair_cd - 97 offset = (void *) &ireq->tc->sgl_pair_cd -
98 (void *) &ihost->task_context_table[0]; 98 (void *) &ihost->task_context_table[0];
99 return ihost->tc_dma + offset; 99 return ihost->tc_dma + offset;
100 } 100 }
101 101
102 return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]); 102 return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
103 } 103 }
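
For pair indices 0 and 1 the function mirrors to_sgl_element_pair(): those pairs are embedded in the task context, so their bus address is the task-context table's base DMA address (tc_dma) plus the field's byte offset within the CPU-visible table. A standalone illustration of that offset arithmetic (generic names, assuming one buffer with both a CPU mapping and a device-visible base address):

    #include <stdint.h>
    #include <stdio.h>

    struct record { uint32_t hdr; uint32_t payload[4]; };

    /* The device-visible address of a field inside a coherently mapped
     * buffer is the buffer's bus base plus the field's CPU-side offset.
     */
    static uint64_t field_bus_addr(void *cpu_base, uint64_t bus_base, void *field)
    {
        return bus_base + (uint64_t)((char *)field - (char *)cpu_base);
    }

    int main(void)
    {
        struct record table[2];
        uint64_t bus_base = 0x100000;

        /* offset of table[1].payload[2] is 32 bytes -> prints 0x100020 */
        printf("0x%llx\n", (unsigned long long)
               field_bus_addr(table, bus_base, &table[1].payload[2]));
        return 0;
    }
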
104 104
105 static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg) 105 static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
106 { 106 {
107 e->length = sg_dma_len(sg); 107 e->length = sg_dma_len(sg);
108 e->address_upper = upper_32_bits(sg_dma_address(sg)); 108 e->address_upper = upper_32_bits(sg_dma_address(sg));
109 e->address_lower = lower_32_bits(sg_dma_address(sg)); 109 e->address_lower = lower_32_bits(sg_dma_address(sg));
110 e->address_modifier = 0; 110 e->address_modifier = 0;
111 } 111 }
112 112
113 static void sci_request_build_sgl(struct isci_request *ireq) 113 static void sci_request_build_sgl(struct isci_request *ireq)
114 { 114 {
115 struct isci_host *ihost = ireq->isci_host; 115 struct isci_host *ihost = ireq->isci_host;
116 struct sas_task *task = isci_request_access_task(ireq); 116 struct sas_task *task = isci_request_access_task(ireq);
117 struct scatterlist *sg = NULL; 117 struct scatterlist *sg = NULL;
118 dma_addr_t dma_addr; 118 dma_addr_t dma_addr;
119 u32 sg_idx = 0; 119 u32 sg_idx = 0;
120 struct scu_sgl_element_pair *scu_sg = NULL; 120 struct scu_sgl_element_pair *scu_sg = NULL;
121 struct scu_sgl_element_pair *prev_sg = NULL; 121 struct scu_sgl_element_pair *prev_sg = NULL;
122 122
123 if (task->num_scatter > 0) { 123 if (task->num_scatter > 0) {
124 sg = task->scatter; 124 sg = task->scatter;
125 125
126 while (sg) { 126 while (sg) {
127 scu_sg = to_sgl_element_pair(ireq, sg_idx); 127 scu_sg = to_sgl_element_pair(ireq, sg_idx);
128 init_sgl_element(&scu_sg->A, sg); 128 init_sgl_element(&scu_sg->A, sg);
129 sg = sg_next(sg); 129 sg = sg_next(sg);
130 if (sg) { 130 if (sg) {
131 init_sgl_element(&scu_sg->B, sg); 131 init_sgl_element(&scu_sg->B, sg);
132 sg = sg_next(sg); 132 sg = sg_next(sg);
133 } else 133 } else
134 memset(&scu_sg->B, 0, sizeof(scu_sg->B)); 134 memset(&scu_sg->B, 0, sizeof(scu_sg->B));
135 135
136 if (prev_sg) { 136 if (prev_sg) {
137 dma_addr = to_sgl_element_pair_dma(ihost, 137 dma_addr = to_sgl_element_pair_dma(ihost,
138 ireq, 138 ireq,
139 sg_idx); 139 sg_idx);
140 140
141 prev_sg->next_pair_upper = 141 prev_sg->next_pair_upper =
142 upper_32_bits(dma_addr); 142 upper_32_bits(dma_addr);
143 prev_sg->next_pair_lower = 143 prev_sg->next_pair_lower =
144 lower_32_bits(dma_addr); 144 lower_32_bits(dma_addr);
145 } 145 }
146 146
147 prev_sg = scu_sg; 147 prev_sg = scu_sg;
148 sg_idx++; 148 sg_idx++;
149 } 149 }
150 } else { /* handle when no sg */ 150 } else { /* handle when no sg */
151 scu_sg = to_sgl_element_pair(ireq, sg_idx); 151 scu_sg = to_sgl_element_pair(ireq, sg_idx);
152 152
153 dma_addr = dma_map_single(&ihost->pdev->dev, 153 dma_addr = dma_map_single(&ihost->pdev->dev,
154 task->scatter, 154 task->scatter,
155 task->total_xfer_len, 155 task->total_xfer_len,
156 task->data_dir); 156 task->data_dir);
157 157
158 ireq->zero_scatter_daddr = dma_addr; 158 ireq->zero_scatter_daddr = dma_addr;
159 159
160 scu_sg->A.length = task->total_xfer_len; 160 scu_sg->A.length = task->total_xfer_len;
161 scu_sg->A.address_upper = upper_32_bits(dma_addr); 161 scu_sg->A.address_upper = upper_32_bits(dma_addr);
162 scu_sg->A.address_lower = lower_32_bits(dma_addr); 162 scu_sg->A.address_lower = lower_32_bits(dma_addr);
163 } 163 }
164 164
165 if (scu_sg) { 165 if (scu_sg) {
166 scu_sg->next_pair_upper = 0; 166 scu_sg->next_pair_upper = 0;
167 scu_sg->next_pair_lower = 0; 167 scu_sg->next_pair_lower = 0;
168 } 168 }
169 } 169 }
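
The loop above produces a singly linked chain of SGL element pairs: two data elements (A and B) per pair, each pair's next_pair_{upper,lower} fields carrying the bus address of the next pair, and the final pair's link zeroed. A toy user-space model of the same chaining logic (plain pointers stand in for bus addresses; all names are illustrative):

    #include <stdio.h>
    #include <string.h>

    struct element { unsigned len; };
    struct pair { struct element A, B; struct pair *next; };

    /* Pack two lengths per pair and link each pair to its successor,
     * zeroing B when the element count is odd, as the driver does.
     */
    static void build_chain(struct pair *pairs, const unsigned *lens, int n)
    {
        struct pair *prev = NULL;
        int i = 0, idx = 0;

        while (i < n) {
            struct pair *p = &pairs[idx++];

            p->A.len = lens[i++];
            if (i < n)
                p->B.len = lens[i++];
            else
                memset(&p->B, 0, sizeof(p->B));

            if (prev)
                prev->next = p;     /* chain the previous pair forward */
            prev = p;
        }
        if (prev)
            prev->next = NULL;      /* terminate the chain */
    }

    int main(void)
    {
        struct pair pairs[3];
        const unsigned lens[5] = { 4096, 4096, 4096, 4096, 512 };

        build_chain(pairs, lens, 5);
        for (const struct pair *p = &pairs[0]; p; p = p->next)
            printf("A=%u B=%u\n", p->A.len, p->B.len);
        return 0;
    }
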
170 170
171 static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq) 171 static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
172 { 172 {
173 struct ssp_cmd_iu *cmd_iu; 173 struct ssp_cmd_iu *cmd_iu;
174 struct sas_task *task = isci_request_access_task(ireq); 174 struct sas_task *task = isci_request_access_task(ireq);
175 175
176 cmd_iu = &ireq->ssp.cmd; 176 cmd_iu = &ireq->ssp.cmd;
177 177
178 memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8); 178 memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
179 cmd_iu->add_cdb_len = 0; 179 cmd_iu->add_cdb_len = 0;
180 cmd_iu->_r_a = 0; 180 cmd_iu->_r_a = 0;
181 cmd_iu->_r_b = 0; 181 cmd_iu->_r_b = 0;
182 cmd_iu->en_fburst = 0; /* unsupported */ 182 cmd_iu->en_fburst = 0; /* unsupported */
183 cmd_iu->task_prio = task->ssp_task.task_prio; 183 cmd_iu->task_prio = task->ssp_task.task_prio;
184 cmd_iu->task_attr = task->ssp_task.task_attr; 184 cmd_iu->task_attr = task->ssp_task.task_attr;
185 cmd_iu->_r_c = 0; 185 cmd_iu->_r_c = 0;
186 186
187 sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb, 187 sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
188 sizeof(task->ssp_task.cdb) / sizeof(u32)); 188 sizeof(task->ssp_task.cdb) / sizeof(u32));
189 } 189 }
190 190
191 static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq) 191 static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
192 { 192 {
193 struct ssp_task_iu *task_iu; 193 struct ssp_task_iu *task_iu;
194 struct sas_task *task = isci_request_access_task(ireq); 194 struct sas_task *task = isci_request_access_task(ireq);
195 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); 195 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
196 196
197 task_iu = &ireq->ssp.tmf; 197 task_iu = &ireq->ssp.tmf;
198 198
199 memset(task_iu, 0, sizeof(struct ssp_task_iu)); 199 memset(task_iu, 0, sizeof(struct ssp_task_iu));
200 200
201 memcpy(task_iu->LUN, task->ssp_task.LUN, 8); 201 memcpy(task_iu->LUN, task->ssp_task.LUN, 8);
202 202
203 task_iu->task_func = isci_tmf->tmf_code; 203 task_iu->task_func = isci_tmf->tmf_code;
204 task_iu->task_tag = 204 task_iu->task_tag =
205 (test_bit(IREQ_TMF, &ireq->flags)) ? 205 (test_bit(IREQ_TMF, &ireq->flags)) ?
206 isci_tmf->io_tag : 206 isci_tmf->io_tag :
207 SCI_CONTROLLER_INVALID_IO_TAG; 207 SCI_CONTROLLER_INVALID_IO_TAG;
208 } 208 }
209 209
210 /** 210 /**
211 * This method will fill in the SCU Task Context for any type of SSP request. 211 * This method will fill in the SCU Task Context for any type of SSP request.
212 * @sci_req: 212 * @sci_req:
213 * @task_context: 213 * @task_context:
214 * 214 *
215 */ 215 */
216 static void scu_ssp_reqeust_construct_task_context( 216 static void scu_ssp_reqeust_construct_task_context(
217 struct isci_request *ireq, 217 struct isci_request *ireq,
218 struct scu_task_context *task_context) 218 struct scu_task_context *task_context)
219 { 219 {
220 dma_addr_t dma_addr; 220 dma_addr_t dma_addr;
221 struct isci_remote_device *idev; 221 struct isci_remote_device *idev;
222 struct isci_port *iport; 222 struct isci_port *iport;
223 223
224 idev = ireq->target_device; 224 idev = ireq->target_device;
225 iport = idev->owning_port; 225 iport = idev->owning_port;
226 226
227 /* Fill in the TC with its required data */ 227 /* Fill in the TC with its required data */
228 task_context->abort = 0; 228 task_context->abort = 0;
229 task_context->priority = 0; 229 task_context->priority = 0;
230 task_context->initiator_request = 1; 230 task_context->initiator_request = 1;
231 task_context->connection_rate = idev->connection_rate; 231 task_context->connection_rate = idev->connection_rate;
232 task_context->protocol_engine_index = ISCI_PEG; 232 task_context->protocol_engine_index = ISCI_PEG;
233 task_context->logical_port_index = iport->physical_port_index; 233 task_context->logical_port_index = iport->physical_port_index;
234 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP; 234 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
235 task_context->valid = SCU_TASK_CONTEXT_VALID; 235 task_context->valid = SCU_TASK_CONTEXT_VALID;
236 task_context->context_type = SCU_TASK_CONTEXT_TYPE; 236 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
237 237
238 task_context->remote_node_index = idev->rnc.remote_node_index; 238 task_context->remote_node_index = idev->rnc.remote_node_index;
239 task_context->command_code = 0; 239 task_context->command_code = 0;
240 240
241 task_context->link_layer_control = 0; 241 task_context->link_layer_control = 0;
242 task_context->do_not_dma_ssp_good_response = 1; 242 task_context->do_not_dma_ssp_good_response = 1;
243 task_context->strict_ordering = 0; 243 task_context->strict_ordering = 0;
244 task_context->control_frame = 0; 244 task_context->control_frame = 0;
245 task_context->timeout_enable = 0; 245 task_context->timeout_enable = 0;
246 task_context->block_guard_enable = 0; 246 task_context->block_guard_enable = 0;
247 247
248 task_context->address_modifier = 0; 248 task_context->address_modifier = 0;
249 249
250 /* task_context->type.ssp.tag = ireq->io_tag; */ 250 /* task_context->type.ssp.tag = ireq->io_tag; */
251 task_context->task_phase = 0x01; 251 task_context->task_phase = 0x01;
252 252
253 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 253 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
254 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | 254 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
255 (iport->physical_port_index << 255 (iport->physical_port_index <<
256 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 256 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
257 ISCI_TAG_TCI(ireq->io_tag)); 257 ISCI_TAG_TCI(ireq->io_tag));
258 258
259 /* 259 /*
260 * Copy the physical address for the command buffer to the 260 * Copy the physical address for the command buffer to the
261 * SCU Task Context 261 * SCU Task Context
262 */ 262 */
263 dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd); 263 dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);
264 264
265 task_context->command_iu_upper = upper_32_bits(dma_addr); 265 task_context->command_iu_upper = upper_32_bits(dma_addr);
266 task_context->command_iu_lower = lower_32_bits(dma_addr); 266 task_context->command_iu_lower = lower_32_bits(dma_addr);
267 267
268 /* 268 /*
269 * Copy the physical address for the response buffer to the 269 * Copy the physical address for the response buffer to the
270 * SCU Task Context 270 * SCU Task Context
271 */ 271 */
272 dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp); 272 dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);
273 273
274 task_context->response_iu_upper = upper_32_bits(dma_addr); 274 task_context->response_iu_upper = upper_32_bits(dma_addr);
275 task_context->response_iu_lower = lower_32_bits(dma_addr); 275 task_context->response_iu_lower = lower_32_bits(dma_addr);
276 } 276 }
277 277
278 static u8 scu_bg_blk_size(struct scsi_device *sdp) 278 static u8 scu_bg_blk_size(struct scsi_device *sdp)
279 { 279 {
280 switch (sdp->sector_size) { 280 switch (sdp->sector_size) {
281 case 512: 281 case 512:
282 return 0; 282 return 0;
283 case 1024: 283 case 1024:
284 return 1; 284 return 1;
285 case 4096: 285 case 4096:
286 return 3; 286 return 3;
287 default: 287 default:
288 return 0xff; 288 return 0xff;
289 } 289 }
290 } 290 }
291 291
292 static u32 scu_dif_bytes(u32 len, u32 sector_size) 292 static u32 scu_dif_bytes(u32 len, u32 sector_size)
293 { 293 {
294 return (len >> ilog2(sector_size)) * 8; 294 return (len >> ilog2(sector_size)) * 8;
295 } 295 }
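
The arithmetic is simply sectors times the 8-byte DIF tuple: len >> ilog2(sector_size) gives the sector count, and each sector carries an 8-byte protection-information field appended to its data. A quick user-space check of the formula (a sketch, not driver code; __builtin_ctz stands in for the kernel's ilog2 on power-of-two sizes):

    #include <assert.h>

    /* Same computation as scu_dif_bytes(): 8 bytes of protection
     * information per sector of payload.
     */
    static unsigned dif_bytes(unsigned len, unsigned sector_size)
    {
        return (len >> __builtin_ctz(sector_size)) * 8;
    }

    int main(void)
    {
        assert(dif_bytes(65536, 512) == 1024);  /* 128 sectors x 8 bytes */
        assert(dif_bytes(4096, 4096) == 8);     /* one 4 KiB sector */
        return 0;
    }
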
296 296
297 static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op) 297 static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op)
298 { 298 {
299 struct scu_task_context *tc = ireq->tc; 299 struct scu_task_context *tc = ireq->tc;
300 struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task; 300 struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
301 u8 blk_sz = scu_bg_blk_size(scmd->device); 301 u8 blk_sz = scu_bg_blk_size(scmd->device);
302 302
303 tc->block_guard_enable = 1; 303 tc->block_guard_enable = 1;
304 tc->blk_prot_en = 1; 304 tc->blk_prot_en = 1;
305 tc->blk_sz = blk_sz; 305 tc->blk_sz = blk_sz;
306 /* DIF write insert */ 306 /* DIF write insert */
307 tc->blk_prot_func = 0x2; 307 tc->blk_prot_func = 0x2;
308 308
309 tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes, 309 tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
310 scmd->device->sector_size); 310 scmd->device->sector_size);
311 311
312 /* always init to 0, used by hw */ 312 /* always init to 0, used by hw */
313 tc->interm_crc_val = 0; 313 tc->interm_crc_val = 0;
314 314
315 tc->init_crc_seed = 0; 315 tc->init_crc_seed = 0;
316 tc->app_tag_verify = 0; 316 tc->app_tag_verify = 0;
317 tc->app_tag_gen = 0; 317 tc->app_tag_gen = 0;
318 tc->ref_tag_seed_verify = 0; 318 tc->ref_tag_seed_verify = 0;
319 319
320 /* always init to same as bg_blk_sz */ 320 /* always init to same as bg_blk_sz */
321 tc->UD_bytes_immed_val = scmd->device->sector_size; 321 tc->UD_bytes_immed_val = scmd->device->sector_size;
322 322
323 tc->reserved_DC_0 = 0; 323 tc->reserved_DC_0 = 0;
324 324
325 /* always init to 8 */ 325 /* always init to 8 */
326 tc->DIF_bytes_immed_val = 8; 326 tc->DIF_bytes_immed_val = 8;
327 327
328 tc->reserved_DC_1 = 0; 328 tc->reserved_DC_1 = 0;
329 tc->bgc_blk_sz = scmd->device->sector_size; 329 tc->bgc_blk_sz = scmd->device->sector_size;
330 tc->reserved_E0_0 = 0; 330 tc->reserved_E0_0 = 0;
331 tc->app_tag_gen_mask = 0; 331 tc->app_tag_gen_mask = 0;
332 332
333 /** setup block guard control **/ 333 /** setup block guard control **/
334 tc->bgctl = 0; 334 tc->bgctl = 0;
335 335
336 /* DIF write insert */ 336 /* DIF write insert */
337 tc->bgctl_f.op = 0x2; 337 tc->bgctl_f.op = 0x2;
338 338
339 tc->app_tag_verify_mask = 0; 339 tc->app_tag_verify_mask = 0;
340 340
341 /* must init to 0 for hw */ 341 /* must init to 0 for hw */
342 tc->blk_guard_err = 0; 342 tc->blk_guard_err = 0;
343 343
344 tc->reserved_E8_0 = 0; 344 tc->reserved_E8_0 = 0;
345 345
346 if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) 346 if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
347 tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff; 347 tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff;
348 else if (type & SCSI_PROT_DIF_TYPE3) 348 else if (type & SCSI_PROT_DIF_TYPE3)
349 tc->ref_tag_seed_gen = 0; 349 tc->ref_tag_seed_gen = 0;
350 } 350 }
351 351
352 static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op) 352 static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op)
353 { 353 {
354 struct scu_task_context *tc = ireq->tc; 354 struct scu_task_context *tc = ireq->tc;
355 struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task; 355 struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
356 u8 blk_sz = scu_bg_blk_size(scmd->device); 356 u8 blk_sz = scu_bg_blk_size(scmd->device);
357 357
358 tc->block_guard_enable = 1; 358 tc->block_guard_enable = 1;
359 tc->blk_prot_en = 1; 359 tc->blk_prot_en = 1;
360 tc->blk_sz = blk_sz; 360 tc->blk_sz = blk_sz;
361 /* DIF read strip */ 361 /* DIF read strip */
362 tc->blk_prot_func = 0x1; 362 tc->blk_prot_func = 0x1;
363 363
364 tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes, 364 tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
365 scmd->device->sector_size); 365 scmd->device->sector_size);
366 366
367 /* always init to 0, used by hw */ 367 /* always init to 0, used by hw */
368 tc->interm_crc_val = 0; 368 tc->interm_crc_val = 0;
369 369
370 tc->init_crc_seed = 0; 370 tc->init_crc_seed = 0;
371 tc->app_tag_verify = 0; 371 tc->app_tag_verify = 0;
372 tc->app_tag_gen = 0; 372 tc->app_tag_gen = 0;
373 373
374 if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) 374 if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
375 tc->ref_tag_seed_verify = scsi_get_lba(scmd) & 0xffffffff; 375 tc->ref_tag_seed_verify = scsi_get_lba(scmd) & 0xffffffff;
376 else if (type & SCSI_PROT_DIF_TYPE3) 376 else if (type & SCSI_PROT_DIF_TYPE3)
377 tc->ref_tag_seed_verify = 0; 377 tc->ref_tag_seed_verify = 0;
378 378
379 /* always init to same as bg_blk_sz */ 379 /* always init to same as bg_blk_sz */
380 tc->UD_bytes_immed_val = scmd->device->sector_size; 380 tc->UD_bytes_immed_val = scmd->device->sector_size;
381 381
382 tc->reserved_DC_0 = 0; 382 tc->reserved_DC_0 = 0;
383 383
384 /* always init to 8 */ 384 /* always init to 8 */
385 tc->DIF_bytes_immed_val = 8; 385 tc->DIF_bytes_immed_val = 8;
386 386
387 tc->reserved_DC_1 = 0; 387 tc->reserved_DC_1 = 0;
388 tc->bgc_blk_sz = scmd->device->sector_size; 388 tc->bgc_blk_sz = scmd->device->sector_size;
389 tc->reserved_E0_0 = 0; 389 tc->reserved_E0_0 = 0;
390 tc->app_tag_gen_mask = 0; 390 tc->app_tag_gen_mask = 0;
391 391
392 /** setup block guard control **/ 392 /** setup block guard control **/
393 tc->bgctl = 0; 393 tc->bgctl = 0;
394 394
395 /* DIF read strip */ 395 /* DIF read strip */
396 tc->bgctl_f.crc_verify = 1; 396 tc->bgctl_f.crc_verify = 1;
397 tc->bgctl_f.op = 0x1; 397 tc->bgctl_f.op = 0x1;
398 if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) { 398 if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) {
399 tc->bgctl_f.ref_tag_chk = 1; 399 tc->bgctl_f.ref_tag_chk = 1;
400 tc->bgctl_f.app_f_detect = 1; 400 tc->bgctl_f.app_f_detect = 1;
401 } else if (type & SCSI_PROT_DIF_TYPE3) 401 } else if (type & SCSI_PROT_DIF_TYPE3)
402 tc->bgctl_f.app_ref_f_detect = 1; 402 tc->bgctl_f.app_ref_f_detect = 1;
403 403
404 tc->app_tag_verify_mask = 0; 404 tc->app_tag_verify_mask = 0;
405 405
406 /* must init to 0 for hw */ 406 /* must init to 0 for hw */
407 tc->blk_guard_err = 0; 407 tc->blk_guard_err = 0;
408 408
409 tc->reserved_E8_0 = 0; 409 tc->reserved_E8_0 = 0;
410 tc->ref_tag_seed_gen = 0; 410 tc->ref_tag_seed_gen = 0;
411 } 411 }
412 412
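On the strip side the hardware must also receive the protection-information bytes that trail every logical block, so the transfer length is grown by scu_dif_bytes(). Its definition is not part of this hunk; a plausible standalone sketch of the arithmetic, assuming the usual 8 PI bytes per sector (as the fixed DIF_bytes_immed_val of 8 suggests):

        #include <stdint.h>

        /* extra DIF bytes for a transfer: one 8-byte protection-information
         * tuple per logical block (a sketch, not the driver's helper) */
        static uint32_t dif_bytes_sketch(uint32_t xfer_len, uint32_t sector_size)
        {
                return (xfer_len / sector_size) * 8;
        }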
413 /** 413 /**
414 * This method will fill in the SCU Task Context for an SSP IO request. 414 * This method will fill in the SCU Task Context for an SSP IO request.
415 * @sci_req: 415 * @sci_req:
416 * 416 *
417 */ 417 */
418 static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq, 418 static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
419 enum dma_data_direction dir, 419 enum dma_data_direction dir,
420 u32 len) 420 u32 len)
421 { 421 {
422 struct scu_task_context *task_context = ireq->tc; 422 struct scu_task_context *task_context = ireq->tc;
423 struct sas_task *sas_task = ireq->ttype_ptr.io_task_ptr; 423 struct sas_task *sas_task = ireq->ttype_ptr.io_task_ptr;
424 struct scsi_cmnd *scmd = sas_task->uldd_task; 424 struct scsi_cmnd *scmd = sas_task->uldd_task;
425 u8 prot_type = scsi_get_prot_type(scmd); 425 u8 prot_type = scsi_get_prot_type(scmd);
426 u8 prot_op = scsi_get_prot_op(scmd); 426 u8 prot_op = scsi_get_prot_op(scmd);
427 427
428 scu_ssp_reqeust_construct_task_context(ireq, task_context); 428 scu_ssp_reqeust_construct_task_context(ireq, task_context);
429 429
430 task_context->ssp_command_iu_length = 430 task_context->ssp_command_iu_length =
431 sizeof(struct ssp_cmd_iu) / sizeof(u32); 431 sizeof(struct ssp_cmd_iu) / sizeof(u32);
432 task_context->type.ssp.frame_type = SSP_COMMAND; 432 task_context->type.ssp.frame_type = SSP_COMMAND;
433 433
434 switch (dir) { 434 switch (dir) {
435 case DMA_FROM_DEVICE: 435 case DMA_FROM_DEVICE:
436 case DMA_NONE: 436 case DMA_NONE:
437 default: 437 default:
438 task_context->task_type = SCU_TASK_TYPE_IOREAD; 438 task_context->task_type = SCU_TASK_TYPE_IOREAD;
439 break; 439 break;
440 case DMA_TO_DEVICE: 440 case DMA_TO_DEVICE:
441 task_context->task_type = SCU_TASK_TYPE_IOWRITE; 441 task_context->task_type = SCU_TASK_TYPE_IOWRITE;
442 break; 442 break;
443 } 443 }
444 444
445 task_context->transfer_length_bytes = len; 445 task_context->transfer_length_bytes = len;
446 446
447 if (task_context->transfer_length_bytes > 0) 447 if (task_context->transfer_length_bytes > 0)
448 sci_request_build_sgl(ireq); 448 sci_request_build_sgl(ireq);
449 449
450 if (prot_type != SCSI_PROT_DIF_TYPE0) { 450 if (prot_type != SCSI_PROT_DIF_TYPE0) {
451 if (prot_op == SCSI_PROT_READ_STRIP) 451 if (prot_op == SCSI_PROT_READ_STRIP)
452 scu_ssp_ireq_dif_strip(ireq, prot_type, prot_op); 452 scu_ssp_ireq_dif_strip(ireq, prot_type, prot_op);
453 else if (prot_op == SCSI_PROT_WRITE_INSERT) 453 else if (prot_op == SCSI_PROT_WRITE_INSERT)
454 scu_ssp_ireq_dif_insert(ireq, prot_type, prot_op); 454 scu_ssp_ireq_dif_insert(ireq, prot_type, prot_op);
455 } 455 }
456 } 456 }
457 457
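Note how the switch above folds DMA_FROM_DEVICE, DMA_NONE, and the default into one case: only an explicit DMA_TO_DEVICE yields an IOWRITE task. The decision reduces to a two-way mapping, sketched here with stand-in constants rather than the SCU_TASK_TYPE_* values:

        enum scu_type { TYPE_IOREAD, TYPE_IOWRITE };    /* stand-ins */
        enum xfer_dir { DIR_TO_DEVICE, DIR_FROM_DEVICE, DIR_NONE };

        static enum scu_type task_type_for(enum xfer_dir dir)
        {
                /* reads and no-data requests share the IOREAD task type */
                return (dir == DIR_TO_DEVICE) ? TYPE_IOWRITE : TYPE_IOREAD;
        }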
458 /** 458 /**
459 * This method will fill in the SCU Task Context for an SSP Task request. The 459 * This method will fill in the SCU Task Context for an SSP Task request. The
460 * following important settings are utilized: -# priority == 460 * following important settings are utilized: -# priority ==
461 * SCU_TASK_PRIORITY_HIGH. This ensures that the task request is issued 461 * SCU_TASK_PRIORITY_HIGH. This ensures that the task request is issued
462 * ahead of other tasks destined for the same Remote Node. -# task_type == 462 * ahead of other tasks destined for the same Remote Node. -# task_type ==
463 * SCU_TASK_TYPE_IOREAD. This simply indicates that a normal request type 463 * SCU_TASK_TYPE_IOREAD. This simply indicates that a normal request type
464 * (i.e. non-raw frame) is being utilized to perform task management. -# 464 * (i.e. non-raw frame) is being utilized to perform task management. -#
465 * control_frame == 1. This ensures that the proper endianness is set so 465 * control_frame == 1. This ensures that the proper endianness is set so
466 * that the bytes are transmitted in the right order for a task frame. 466 * that the bytes are transmitted in the right order for a task frame.
467 * @sci_req: This parameter specifies the task request object being 467 * @sci_req: This parameter specifies the task request object being
468 * constructed. 468 * constructed.
469 * 469 *
470 */ 470 */
471 static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq) 471 static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
472 { 472 {
473 struct scu_task_context *task_context = ireq->tc; 473 struct scu_task_context *task_context = ireq->tc;
474 474
475 scu_ssp_reqeust_construct_task_context(ireq, task_context); 475 scu_ssp_reqeust_construct_task_context(ireq, task_context);
476 476
477 task_context->control_frame = 1; 477 task_context->control_frame = 1;
478 task_context->priority = SCU_TASK_PRIORITY_HIGH; 478 task_context->priority = SCU_TASK_PRIORITY_HIGH;
479 task_context->task_type = SCU_TASK_TYPE_RAW_FRAME; 479 task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
480 task_context->transfer_length_bytes = 0; 480 task_context->transfer_length_bytes = 0;
481 task_context->type.ssp.frame_type = SSP_TASK; 481 task_context->type.ssp.frame_type = SSP_TASK;
482 task_context->ssp_command_iu_length = 482 task_context->ssp_command_iu_length =
483 sizeof(struct ssp_task_iu) / sizeof(u32); 483 sizeof(struct ssp_task_iu) / sizeof(u32);
484 } 484 }
485 485
486 /** 486 /**
487 * This method will fill in the SCU Task Context for any type of SATA 487 * This method will fill in the SCU Task Context for any type of SATA
488 * request. This is called from the various SATA constructors. 488 * request. This is called from the various SATA constructors.
489 * @sci_req: The general IO request object which is to be used in 489 * @sci_req: The general IO request object which is to be used in
490 * constructing the SCU task context. 490 * constructing the SCU task context.
491 * @task_context: The buffer pointer for the SCU task context which is being 491 * @task_context: The buffer pointer for the SCU task context which is being
492 * constructed. 492 * constructed.
493 * 493 *
494 * The general io request construction is complete. The buffer assignment for 494 * The general io request construction is complete. The buffer assignment for
495 * the command buffer is complete. Revisit task context construction to 495 * the command buffer is complete. Revisit task context construction to
496 * determine what is common for SSP/SMP/STP task context structures. 496 * determine what is common for SSP/SMP/STP task context structures.
497 */ 497 */
498 static void scu_sata_reqeust_construct_task_context( 498 static void scu_sata_reqeust_construct_task_context(
499 struct isci_request *ireq, 499 struct isci_request *ireq,
500 struct scu_task_context *task_context) 500 struct scu_task_context *task_context)
501 { 501 {
502 dma_addr_t dma_addr; 502 dma_addr_t dma_addr;
503 struct isci_remote_device *idev; 503 struct isci_remote_device *idev;
504 struct isci_port *iport; 504 struct isci_port *iport;
505 505
506 idev = ireq->target_device; 506 idev = ireq->target_device;
507 iport = idev->owning_port; 507 iport = idev->owning_port;
508 508
509 /* Fill in the TC with its required data */ 509 /* Fill in the TC with its required data */
510 task_context->abort = 0; 510 task_context->abort = 0;
511 task_context->priority = SCU_TASK_PRIORITY_NORMAL; 511 task_context->priority = SCU_TASK_PRIORITY_NORMAL;
512 task_context->initiator_request = 1; 512 task_context->initiator_request = 1;
513 task_context->connection_rate = idev->connection_rate; 513 task_context->connection_rate = idev->connection_rate;
514 task_context->protocol_engine_index = ISCI_PEG; 514 task_context->protocol_engine_index = ISCI_PEG;
515 task_context->logical_port_index = iport->physical_port_index; 515 task_context->logical_port_index = iport->physical_port_index;
516 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP; 516 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
517 task_context->valid = SCU_TASK_CONTEXT_VALID; 517 task_context->valid = SCU_TASK_CONTEXT_VALID;
518 task_context->context_type = SCU_TASK_CONTEXT_TYPE; 518 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
519 519
520 task_context->remote_node_index = idev->rnc.remote_node_index; 520 task_context->remote_node_index = idev->rnc.remote_node_index;
521 task_context->command_code = 0; 521 task_context->command_code = 0;
522 522
523 task_context->link_layer_control = 0; 523 task_context->link_layer_control = 0;
524 task_context->do_not_dma_ssp_good_response = 1; 524 task_context->do_not_dma_ssp_good_response = 1;
525 task_context->strict_ordering = 0; 525 task_context->strict_ordering = 0;
526 task_context->control_frame = 0; 526 task_context->control_frame = 0;
527 task_context->timeout_enable = 0; 527 task_context->timeout_enable = 0;
528 task_context->block_guard_enable = 0; 528 task_context->block_guard_enable = 0;
529 529
530 task_context->address_modifier = 0; 530 task_context->address_modifier = 0;
531 task_context->task_phase = 0x01; 531 task_context->task_phase = 0x01;
532 532
533 task_context->ssp_command_iu_length = 533 task_context->ssp_command_iu_length =
534 (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32); 534 (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);
535 535
536 /* Set the first word of the H2D REG FIS */ 536 /* Set the first word of the H2D REG FIS */
537 task_context->type.words[0] = *(u32 *)&ireq->stp.cmd; 537 task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;
538 538
539 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 539 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
540 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | 540 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
541 (iport->physical_port_index << 541 (iport->physical_port_index <<
542 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 542 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
543 ISCI_TAG_TCI(ireq->io_tag)); 543 ISCI_TAG_TCI(ireq->io_tag));
544 /* 544 /*
545 * Copy the physical address for the command buffer to the SCU Task 545 * Copy the physical address for the command buffer to the SCU Task
546 * Context. We must offset the command buffer by 4 bytes because the 546 * Context. We must offset the command buffer by 4 bytes because the
547 * first 4 bytes are transferred in the body of the TC. 547 * first 4 bytes are transferred in the body of the TC.
548 */ 548 */
549 dma_addr = sci_io_request_get_dma_addr(ireq, 549 dma_addr = sci_io_request_get_dma_addr(ireq,
550 ((char *) &ireq->stp.cmd) + 550 ((char *) &ireq->stp.cmd) +
551 sizeof(u32)); 551 sizeof(u32));
552 552
553 task_context->command_iu_upper = upper_32_bits(dma_addr); 553 task_context->command_iu_upper = upper_32_bits(dma_addr);
554 task_context->command_iu_lower = lower_32_bits(dma_addr); 554 task_context->command_iu_lower = lower_32_bits(dma_addr);
555 555
556 /* SATA Requests do not have a response buffer */ 556 /* SATA Requests do not have a response buffer */
557 task_context->response_iu_upper = 0; 557 task_context->response_iu_upper = 0;
558 task_context->response_iu_lower = 0; 558 task_context->response_iu_lower = 0;
559 } 559 }
560 560
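Because the first dword of the H2D register FIS travels inside the TC itself (type.words[0]), the DMA pointer handed to the hardware starts 4 bytes into the FIS buffer, and the 64-bit bus address is split across two 32-bit TC fields. A standalone sketch of that split, mirroring what upper_32_bits()/lower_32_bits() do:

        #include <stdint.h>

        /* split a 64-bit bus address into the TC's two 32-bit IU fields;
         * the caller passes fis_dma + 4 since dword 0 rides in the TC body */
        static void set_cmd_iu_addr(uint64_t dma, uint32_t *upper, uint32_t *lower)
        {
                *upper = (uint32_t)(dma >> 32);
                *lower = (uint32_t)(dma & 0xffffffff);
        }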
561 static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq) 561 static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
562 { 562 {
563 struct scu_task_context *task_context = ireq->tc; 563 struct scu_task_context *task_context = ireq->tc;
564 564
565 scu_sata_reqeust_construct_task_context(ireq, task_context); 565 scu_sata_reqeust_construct_task_context(ireq, task_context);
566 566
567 task_context->control_frame = 0; 567 task_context->control_frame = 0;
568 task_context->priority = SCU_TASK_PRIORITY_NORMAL; 568 task_context->priority = SCU_TASK_PRIORITY_NORMAL;
569 task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME; 569 task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
570 task_context->type.stp.fis_type = FIS_REGH2D; 570 task_context->type.stp.fis_type = FIS_REGH2D;
571 task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32); 571 task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
572 } 572 }
573 573
574 static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq, 574 static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
575 bool copy_rx_frame) 575 bool copy_rx_frame)
576 { 576 {
577 struct isci_stp_request *stp_req = &ireq->stp.req; 577 struct isci_stp_request *stp_req = &ireq->stp.req;
578 578
579 scu_stp_raw_request_construct_task_context(ireq); 579 scu_stp_raw_request_construct_task_context(ireq);
580 580
581 stp_req->status = 0; 581 stp_req->status = 0;
582 stp_req->sgl.offset = 0; 582 stp_req->sgl.offset = 0;
583 stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A; 583 stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;
584 584
585 if (copy_rx_frame) { 585 if (copy_rx_frame) {
586 sci_request_build_sgl(ireq); 586 sci_request_build_sgl(ireq);
587 stp_req->sgl.index = 0; 587 stp_req->sgl.index = 0;
588 } else { 588 } else {
589 /* The user does not want the data copied to the SGL buffer location */ 589 /* The user does not want the data copied to the SGL buffer location */
590 stp_req->sgl.index = -1; 590 stp_req->sgl.index = -1;
591 } 591 }
592 592
593 return SCI_SUCCESS; 593 return SCI_SUCCESS;
594 } 594 }
595 595
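The sgl.index field doubles as a flag here: 0 selects the first SGL element pair, while -1 marks "do not copy received data into the SGL buffer". A compact model of that convention (the structure is illustrative, not the driver's):

        struct pio_sgl_model {
                int index;              /* >= 0: current SGL pair; -1: no RX copy */
                unsigned int offset;
        };

        static void pio_sgl_init(struct pio_sgl_model *s, int copy_rx_frame)
        {
                s->offset = 0;
                s->index = copy_rx_frame ? 0 : -1;
        }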
596 /** 596 /**
597 * 597 *
598 * @sci_req: This parameter specifies the request to be constructed as an 598 * @sci_req: This parameter specifies the request to be constructed as an
599 * optimized request. 599 * optimized request.
600 * @optimized_task_type: This parameter specifies whether the request is to be 600 * @optimized_task_type: This parameter specifies whether the request is to be
601 * a UDMA request or an NCQ request. - A value of 0 indicates UDMA. - A 601 * a UDMA request or an NCQ request. - A value of 0 indicates UDMA. - A
602 * value of 1 indicates NCQ. 602 * value of 1 indicates NCQ.
603 * 603 *
604 * This method will perform request construction common to all types of STP 604 * This method will perform request construction common to all types of STP
605 * requests that are optimized by the silicon (i.e. UDMA, NCQ). The task 605 * requests that are optimized by the silicon (i.e. UDMA, NCQ). The task
606 * context is filled in place; this helper returns nothing. 606 * context is filled in place; this helper returns nothing.
607 */ 607 */
608 static void sci_stp_optimized_request_construct(struct isci_request *ireq, 608 static void sci_stp_optimized_request_construct(struct isci_request *ireq,
609 u8 optimized_task_type, 609 u8 optimized_task_type,
610 u32 len, 610 u32 len,
611 enum dma_data_direction dir) 611 enum dma_data_direction dir)
612 { 612 {
613 struct scu_task_context *task_context = ireq->tc; 613 struct scu_task_context *task_context = ireq->tc;
614 614
615 /* Build the STP task context structure */ 615 /* Build the STP task context structure */
616 scu_sata_reqeust_construct_task_context(ireq, task_context); 616 scu_sata_reqeust_construct_task_context(ireq, task_context);
617 617
618 /* Copy over the SGL elements */ 618 /* Copy over the SGL elements */
619 sci_request_build_sgl(ireq); 619 sci_request_build_sgl(ireq);
620 620
621 /* Copy over the number of bytes to be transferred */ 621 /* Copy over the number of bytes to be transferred */
622 task_context->transfer_length_bytes = len; 622 task_context->transfer_length_bytes = len;
623 623
624 if (dir == DMA_TO_DEVICE) { 624 if (dir == DMA_TO_DEVICE) {
625 /* 625 /*
626 * The difference between the DMA IN and DMA OUT request task type 626 * The difference between the DMA IN and DMA OUT request task type
627 * values are consistent with the difference between FPDMA READ 627 * values are consistent with the difference between FPDMA READ
628 * and FPDMA WRITE values. Add the supplied task type parameter 628 * and FPDMA WRITE values. Add the supplied task type parameter
629 * to this difference to set the task type properly for this 629 * to this difference to set the task type properly for this
630 * DATA OUT (WRITE) case. */ 630 * DATA OUT (WRITE) case. */
631 task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT 631 task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
632 - SCU_TASK_TYPE_DMA_IN); 632 - SCU_TASK_TYPE_DMA_IN);
633 } else { 633 } else {
634 /* 634 /*
635 * For the DATA IN (READ) case, simply save the supplied 635 * For the DATA IN (READ) case, simply save the supplied
636 * optimized task type. */ 636 * optimized task type. */
637 task_context->task_type = optimized_task_type; 637 task_context->task_type = optimized_task_type;
638 } 638 }
639 } 639 }
640 640
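The DATA OUT branch leans on a property of the task-type encoding: every DMA_IN-style value sits at the same fixed distance from its DMA_OUT counterpart, so adding (SCU_TASK_TYPE_DMA_OUT - SCU_TASK_TYPE_DMA_IN) flips an optimized type (UDMA or NCQ) to its write variant. A sketch with hypothetical values chosen only to preserve that spacing:

        /* hypothetical encodings with the IN->OUT spacing the code relies on */
        enum { T_DMA_IN = 4, T_FPDMAQ_READ = 6, T_DMA_OUT = 9, T_FPDMAQ_WRITE = 11 };

        static int write_variant(int optimized_in_type)
        {
                return optimized_in_type + (T_DMA_OUT - T_DMA_IN);
        }
        /* write_variant(T_FPDMAQ_READ) == T_FPDMAQ_WRITE */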
641 static void sci_atapi_construct(struct isci_request *ireq) 641 static void sci_atapi_construct(struct isci_request *ireq)
642 { 642 {
643 struct host_to_dev_fis *h2d_fis = &ireq->stp.cmd; 643 struct host_to_dev_fis *h2d_fis = &ireq->stp.cmd;
644 struct sas_task *task; 644 struct sas_task *task;
645 645
646 /* To simplify the implementation we take advantage of the 646 /* To simplify the implementation we take advantage of the
647 * silicon's partial acceleration of atapi protocol (dma data 647 * silicon's partial acceleration of atapi protocol (dma data
648 * transfers), so we promote all commands to dma protocol. This 648 * transfers), so we promote all commands to dma protocol. This
649 * breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives. 649 * breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives.
650 */ 650 */
651 h2d_fis->features |= ATAPI_PKT_DMA; 651 h2d_fis->features |= ATAPI_PKT_DMA;
652 652
653 scu_stp_raw_request_construct_task_context(ireq); 653 scu_stp_raw_request_construct_task_context(ireq);
654 654
655 task = isci_request_access_task(ireq); 655 task = isci_request_access_task(ireq);
656 if (task->data_dir == DMA_NONE) 656 if (task->data_dir == DMA_NONE)
657 task->total_xfer_len = 0; 657 task->total_xfer_len = 0;
658 658
659 /* clear the response so we can detect arrival of an 659 /* clear the response so we can detect arrival of an
660 * unsolicited h2d fis 660 * unsolicited h2d fis
661 */ 661 */
662 ireq->stp.rsp.fis_type = 0; 662 ireq->stp.rsp.fis_type = 0;
663 } 663 }
664 664
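Promoting every ATAPI command to the DMA protocol is a one-bit change in the H2D FIS features field; ATAPI_PKT_DMA is the PACKET command's DMA feature bit (bit 0 in <linux/ata.h>). A minimal standalone illustration, with the bit value restated as an assumption:

        #include <stdint.h>

        #define PKT_DMA_BIT 0x01        /* assumed ATAPI_PKT_DMA: PACKET DMA bit */

        static void promote_to_dma(uint8_t *fis_features)
        {
                *fis_features |= PKT_DMA_BIT;
        }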
665 static enum sci_status 665 static enum sci_status
666 sci_io_request_construct_sata(struct isci_request *ireq, 666 sci_io_request_construct_sata(struct isci_request *ireq,
667 u32 len, 667 u32 len,
668 enum dma_data_direction dir, 668 enum dma_data_direction dir,
669 bool copy) 669 bool copy)
670 { 670 {
671 enum sci_status status = SCI_SUCCESS; 671 enum sci_status status = SCI_SUCCESS;
672 struct sas_task *task = isci_request_access_task(ireq); 672 struct sas_task *task = isci_request_access_task(ireq);
673 struct domain_device *dev = ireq->target_device->domain_dev; 673 struct domain_device *dev = ireq->target_device->domain_dev;
674 674
675 /* check for management protocols */ 675 /* check for management protocols */
676 if (test_bit(IREQ_TMF, &ireq->flags)) { 676 if (test_bit(IREQ_TMF, &ireq->flags)) {
677 struct isci_tmf *tmf = isci_request_access_tmf(ireq); 677 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
678 678
679 dev_err(&ireq->owning_controller->pdev->dev, 679 dev_err(&ireq->owning_controller->pdev->dev,
680 "%s: Request 0x%p received un-handled SAT " 680 "%s: Request 0x%p received un-handled SAT "
681 "management protocol 0x%x.\n", 681 "management protocol 0x%x.\n",
682 __func__, ireq, tmf->tmf_code); 682 __func__, ireq, tmf->tmf_code);
683 683
684 return SCI_FAILURE; 684 return SCI_FAILURE;
685 } 685 }
686 686
687 if (!sas_protocol_ata(task->task_proto)) { 687 if (!sas_protocol_ata(task->task_proto)) {
688 dev_err(&ireq->owning_controller->pdev->dev, 688 dev_err(&ireq->owning_controller->pdev->dev,
689 "%s: Non-ATA protocol in SATA path: 0x%x\n", 689 "%s: Non-ATA protocol in SATA path: 0x%x\n",
690 __func__, 690 __func__,
691 task->task_proto); 691 task->task_proto);
692 return SCI_FAILURE; 692 return SCI_FAILURE;
693 693
694 } 694 }
695 695
696 /* ATAPI */ 696 /* ATAPI */
697 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET && 697 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET &&
698 task->ata_task.fis.command == ATA_CMD_PACKET) { 698 task->ata_task.fis.command == ATA_CMD_PACKET) {
699 sci_atapi_construct(ireq); 699 sci_atapi_construct(ireq);
700 return SCI_SUCCESS; 700 return SCI_SUCCESS;
701 } 701 }
702 702
703 /* non data */ 703 /* non data */
704 if (task->data_dir == DMA_NONE) { 704 if (task->data_dir == DMA_NONE) {
705 scu_stp_raw_request_construct_task_context(ireq); 705 scu_stp_raw_request_construct_task_context(ireq);
706 return SCI_SUCCESS; 706 return SCI_SUCCESS;
707 } 707 }
708 708
709 /* NCQ */ 709 /* NCQ */
710 if (task->ata_task.use_ncq) { 710 if (task->ata_task.use_ncq) {
711 sci_stp_optimized_request_construct(ireq, 711 sci_stp_optimized_request_construct(ireq,
712 SCU_TASK_TYPE_FPDMAQ_READ, 712 SCU_TASK_TYPE_FPDMAQ_READ,
713 len, dir); 713 len, dir);
714 return SCI_SUCCESS; 714 return SCI_SUCCESS;
715 } 715 }
716 716
717 /* DMA */ 717 /* DMA */
718 if (task->ata_task.dma_xfer) { 718 if (task->ata_task.dma_xfer) {
719 sci_stp_optimized_request_construct(ireq, 719 sci_stp_optimized_request_construct(ireq,
720 SCU_TASK_TYPE_DMA_IN, 720 SCU_TASK_TYPE_DMA_IN,
721 len, dir); 721 len, dir);
722 return SCI_SUCCESS; 722 return SCI_SUCCESS;
723 } else /* PIO */ 723 } else /* PIO */
724 return sci_stp_pio_request_construct(ireq, copy); 724 return sci_stp_pio_request_construct(ireq, copy);
725 725
726 return status; 726 return status;
727 } 727 }
728 728
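The SATA constructor above is a strict precedence chain; once a branch matches, the later ones are never consulted, and the trailing return is unreachable. A condensed model of the ordering (the predicates are stand-ins for the driver's tests):

        /* selection order for SATA request construction (model) */
        enum ctor { CTOR_FAIL, CTOR_ATAPI, CTOR_RAW, CTOR_NCQ, CTOR_DMA, CTOR_PIO };

        static enum ctor pick_ctor(int is_tmf, int is_ata, int is_atapi,
                                   int non_data, int use_ncq, int dma_xfer)
        {
                if (is_tmf)     return CTOR_FAIL;   /* unhandled SAT management */
                if (!is_ata)    return CTOR_FAIL;   /* wrong protocol in SATA path */
                if (is_atapi)   return CTOR_ATAPI;
                if (non_data)   return CTOR_RAW;    /* raw H2D register FIS */
                if (use_ncq)    return CTOR_NCQ;
                if (dma_xfer)   return CTOR_DMA;
                return CTOR_PIO;
        }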
729 static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq) 729 static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq)
730 { 730 {
731 struct sas_task *task = isci_request_access_task(ireq); 731 struct sas_task *task = isci_request_access_task(ireq);
732 732
733 ireq->protocol = SAS_PROTOCOL_SSP; 733 ireq->protocol = SAS_PROTOCOL_SSP;
734 734
735 scu_ssp_io_request_construct_task_context(ireq, 735 scu_ssp_io_request_construct_task_context(ireq,
736 task->data_dir, 736 task->data_dir,
737 task->total_xfer_len); 737 task->total_xfer_len);
738 738
739 sci_io_request_build_ssp_command_iu(ireq); 739 sci_io_request_build_ssp_command_iu(ireq);
740 740
741 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); 741 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
742 742
743 return SCI_SUCCESS; 743 return SCI_SUCCESS;
744 } 744 }
745 745
746 enum sci_status sci_task_request_construct_ssp( 746 enum sci_status sci_task_request_construct_ssp(
747 struct isci_request *ireq) 747 struct isci_request *ireq)
748 { 748 {
749 /* Construct the SSP Task SCU Task Context */ 749 /* Construct the SSP Task SCU Task Context */
750 scu_ssp_task_request_construct_task_context(ireq); 750 scu_ssp_task_request_construct_task_context(ireq);
751 751
752 /* Fill in the SSP Task IU */ 752 /* Fill in the SSP Task IU */
753 sci_task_request_build_ssp_task_iu(ireq); 753 sci_task_request_build_ssp_task_iu(ireq);
754 754
755 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); 755 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
756 756
757 return SCI_SUCCESS; 757 return SCI_SUCCESS;
758 } 758 }
759 759
760 static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq) 760 static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq)
761 { 761 {
762 enum sci_status status; 762 enum sci_status status;
763 bool copy = false; 763 bool copy = false;
764 struct sas_task *task = isci_request_access_task(ireq); 764 struct sas_task *task = isci_request_access_task(ireq);
765 765
766 ireq->protocol = SAS_PROTOCOL_STP; 766 ireq->protocol = SAS_PROTOCOL_STP;
767 767
768 copy = (task->data_dir == DMA_NONE) ? false : true; 768 copy = (task->data_dir == DMA_NONE) ? false : true;
769 769
770 status = sci_io_request_construct_sata(ireq, 770 status = sci_io_request_construct_sata(ireq,
771 task->total_xfer_len, 771 task->total_xfer_len,
772 task->data_dir, 772 task->data_dir,
773 copy); 773 copy);
774 774
775 if (status == SCI_SUCCESS) 775 if (status == SCI_SUCCESS)
776 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); 776 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
777 777
778 return status; 778 return status;
779 } 779 }
780 780
781 /** 781 /**
782 * sci_req_tx_bytes - bytes transferred when reply underruns request 782 * sci_req_tx_bytes - bytes transferred when reply underruns request
783 * @ireq: request that was terminated early 783 * @ireq: request that was terminated early
784 */ 784 */
785 #define SCU_TASK_CONTEXT_SRAM 0x200000 785 #define SCU_TASK_CONTEXT_SRAM 0x200000
786 static u32 sci_req_tx_bytes(struct isci_request *ireq) 786 static u32 sci_req_tx_bytes(struct isci_request *ireq)
787 { 787 {
788 struct isci_host *ihost = ireq->owning_controller; 788 struct isci_host *ihost = ireq->owning_controller;
789 u32 ret_val = 0; 789 u32 ret_val = 0;
790 790
791 if (readl(&ihost->smu_registers->address_modifier) == 0) { 791 if (readl(&ihost->smu_registers->address_modifier) == 0) {
792 void __iomem *scu_reg_base = ihost->scu_registers; 792 void __iomem *scu_reg_base = ihost->scu_registers;
793 793
794 /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where 794 /* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
795 * BAR1 is the scu_registers 795 * BAR1 is the scu_registers
796 * 0x20002C = 0x200000 + 0x2c 796 * 0x20002C = 0x200000 + 0x2c
797 * = start of task context SRAM + offset of (type.ssp.data_offset) 797 * = start of task context SRAM + offset of (type.ssp.data_offset)
798 * TCi is the io_tag of struct sci_request 798 * TCi is the io_tag of struct sci_request
799 */ 799 */
800 ret_val = readl(scu_reg_base + 800 ret_val = readl(scu_reg_base +
801 (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) + 801 (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
802 ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag))); 802 ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag)));
803 } 803 }
804 804
805 return ret_val; 805 return ret_val;
806 } 806 }
807 807
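The readl() address decomposes exactly as the comment spells out: task-context SRAM base, plus the field offset of type.ssp.data_offset (0x2C), plus one TC slot per tag index. A standalone rendering of that arithmetic, assuming sizeof(struct scu_task_context) == 256 as the comment's "256*TCi" implies:

        #include <stdint.h>

        #define TC_SRAM_BASE    0x200000u   /* SCU_TASK_CONTEXT_SRAM */
        #define TC_SLOT_SIZE    256u        /* assumed sizeof(struct scu_task_context) */
        #define DATA_OFF_FIELD  0x2cu       /* offsetof(..., type.ssp.data_offset) */

        static uint32_t tx_bytes_offset(uint32_t tci)
        {
                return TC_SRAM_BASE + DATA_OFF_FIELD + TC_SLOT_SIZE * tci;
        }
        /* tx_bytes_offset(0) == 0x20002c, the comment's worked example */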
808 enum sci_status sci_request_start(struct isci_request *ireq) 808 enum sci_status sci_request_start(struct isci_request *ireq)
809 { 809 {
810 enum sci_base_request_states state; 810 enum sci_base_request_states state;
811 struct scu_task_context *tc = ireq->tc; 811 struct scu_task_context *tc = ireq->tc;
812 struct isci_host *ihost = ireq->owning_controller; 812 struct isci_host *ihost = ireq->owning_controller;
813 813
814 state = ireq->sm.current_state_id; 814 state = ireq->sm.current_state_id;
815 if (state != SCI_REQ_CONSTRUCTED) { 815 if (state != SCI_REQ_CONSTRUCTED) {
816 dev_warn(&ihost->pdev->dev, 816 dev_warn(&ihost->pdev->dev,
817 "%s: SCIC IO Request requested to start while in wrong " 817 "%s: SCIC IO Request requested to start while in wrong "
818 "state %d\n", __func__, state); 818 "state %d\n", __func__, state);
819 return SCI_FAILURE_INVALID_STATE; 819 return SCI_FAILURE_INVALID_STATE;
820 } 820 }
821 821
822 tc->task_index = ISCI_TAG_TCI(ireq->io_tag); 822 tc->task_index = ISCI_TAG_TCI(ireq->io_tag);
823 823
824 switch (tc->protocol_type) { 824 switch (tc->protocol_type) {
825 case SCU_TASK_CONTEXT_PROTOCOL_SMP: 825 case SCU_TASK_CONTEXT_PROTOCOL_SMP:
826 case SCU_TASK_CONTEXT_PROTOCOL_SSP: 826 case SCU_TASK_CONTEXT_PROTOCOL_SSP:
827 /* SSP/SMP Frame */ 827 /* SSP/SMP Frame */
828 tc->type.ssp.tag = ireq->io_tag; 828 tc->type.ssp.tag = ireq->io_tag;
829 tc->type.ssp.target_port_transfer_tag = 0xFFFF; 829 tc->type.ssp.target_port_transfer_tag = 0xFFFF;
830 break; 830 break;
831 831
832 case SCU_TASK_CONTEXT_PROTOCOL_STP: 832 case SCU_TASK_CONTEXT_PROTOCOL_STP:
833 /* STP/SATA Frame 833 /* STP/SATA Frame
834 * tc->type.stp.ncq_tag = ireq->ncq_tag; 834 * tc->type.stp.ncq_tag = ireq->ncq_tag;
835 */ 835 */
836 break; 836 break;
837 837
838 case SCU_TASK_CONTEXT_PROTOCOL_NONE: 838 case SCU_TASK_CONTEXT_PROTOCOL_NONE:
839 /* TODO: When do we set no protocol type? */ 839 /* TODO: When do we set no protocol type? */
840 break; 840 break;
841 841
842 default: 842 default:
843 /* This should never happen since we build the IO 843 /* This should never happen since we build the IO
844 * requests */ 844 * requests */
845 break; 845 break;
846 } 846 }
847 847
848 /* Add to the post_context the io tag value */ 848 /* Add to the post_context the io tag value */
849 ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag); 849 ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag);
850 850
851 /* Everything is good, go ahead and change state */ 851 /* Everything is good, go ahead and change state */
852 sci_change_state(&ireq->sm, SCI_REQ_STARTED); 852 sci_change_state(&ireq->sm, SCI_REQ_STARTED);
853 853
854 return SCI_SUCCESS; 854 return SCI_SUCCESS;
855 } 855 }
856 856
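Starting a request only finishes the post_context word the constructors began: the TCI extracted from the io_tag is OR'ed into the low bits alongside the protocol-engine and port fields set earlier. A sketch with a hypothetical tag layout (the real ISCI_TAG_TCI macro lives in the driver's headers):

        #include <stdint.h>

        #define TCI_MASK 0x0fffu        /* hypothetical: TCI in the tag's low bits */

        static uint32_t finish_post_context(uint32_t post_base, uint16_t io_tag)
        {
                return post_base | (io_tag & TCI_MASK);
        }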
857 enum sci_status 857 enum sci_status
858 sci_io_request_terminate(struct isci_request *ireq) 858 sci_io_request_terminate(struct isci_request *ireq)
859 { 859 {
860 enum sci_base_request_states state; 860 enum sci_base_request_states state;
861 861
862 state = ireq->sm.current_state_id; 862 state = ireq->sm.current_state_id;
863 863
864 switch (state) { 864 switch (state) {
865 case SCI_REQ_CONSTRUCTED: 865 case SCI_REQ_CONSTRUCTED:
866 /* Set to make sure no HW terminate posting is done: */
867 set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags);
866 ireq->scu_status = SCU_TASK_DONE_TASK_ABORT; 868 ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
867 ireq->sci_status = SCI_FAILURE_IO_TERMINATED; 869 ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
868 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 870 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
869 return SCI_SUCCESS; 871 return SCI_SUCCESS;
870 case SCI_REQ_STARTED: 872 case SCI_REQ_STARTED:
871 case SCI_REQ_TASK_WAIT_TC_COMP: 873 case SCI_REQ_TASK_WAIT_TC_COMP:
872 case SCI_REQ_SMP_WAIT_RESP: 874 case SCI_REQ_SMP_WAIT_RESP:
873 case SCI_REQ_SMP_WAIT_TC_COMP: 875 case SCI_REQ_SMP_WAIT_TC_COMP:
874 case SCI_REQ_STP_UDMA_WAIT_TC_COMP: 876 case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
875 case SCI_REQ_STP_UDMA_WAIT_D2H: 877 case SCI_REQ_STP_UDMA_WAIT_D2H:
876 case SCI_REQ_STP_NON_DATA_WAIT_H2D: 878 case SCI_REQ_STP_NON_DATA_WAIT_H2D:
877 case SCI_REQ_STP_NON_DATA_WAIT_D2H: 879 case SCI_REQ_STP_NON_DATA_WAIT_D2H:
878 case SCI_REQ_STP_PIO_WAIT_H2D: 880 case SCI_REQ_STP_PIO_WAIT_H2D:
879 case SCI_REQ_STP_PIO_WAIT_FRAME: 881 case SCI_REQ_STP_PIO_WAIT_FRAME:
880 case SCI_REQ_STP_PIO_DATA_IN: 882 case SCI_REQ_STP_PIO_DATA_IN:
881 case SCI_REQ_STP_PIO_DATA_OUT: 883 case SCI_REQ_STP_PIO_DATA_OUT:
882 case SCI_REQ_ATAPI_WAIT_H2D: 884 case SCI_REQ_ATAPI_WAIT_H2D:
883 case SCI_REQ_ATAPI_WAIT_PIO_SETUP: 885 case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
884 case SCI_REQ_ATAPI_WAIT_D2H: 886 case SCI_REQ_ATAPI_WAIT_D2H:
885 case SCI_REQ_ATAPI_WAIT_TC_COMP: 887 case SCI_REQ_ATAPI_WAIT_TC_COMP:
886 sci_change_state(&ireq->sm, SCI_REQ_ABORTING); 888 /* Fall through and change state to ABORTING... */
887 return SCI_SUCCESS;
888 case SCI_REQ_TASK_WAIT_TC_RESP: 889 case SCI_REQ_TASK_WAIT_TC_RESP:
889 /* The task frame was already confirmed to have been 890 /* The task frame was already confirmed to have been
890 * sent by the SCU HW. Since the state machine is 891 * sent by the SCU HW. Since the state machine is
891 * now only waiting for the task response itself, 892 * now only waiting for the task response itself,
892 * abort the request and complete it immediately 893 * abort the request and complete it immediately
893 * and don't wait for the task response. 894 * and don't wait for the task response.
894 */ 895 */
895 sci_change_state(&ireq->sm, SCI_REQ_ABORTING); 896 sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
896 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 897 /* Fall through and handle like ABORTING... */
897 return SCI_SUCCESS;
898 case SCI_REQ_ABORTING: 898 case SCI_REQ_ABORTING:
899 /* If a request has a termination requested twice, return 899 if (!isci_remote_device_is_safe_to_abort(ireq->target_device))
900 * a failure indication, since HW confirmation of the first 900 set_bit(IREQ_PENDING_ABORT, &ireq->flags);
901 * abort is still outstanding. 901 else
902 clear_bit(IREQ_PENDING_ABORT, &ireq->flags);
903 /* If the request is only waiting on the remote device
904 * suspension, return SUCCESS so the caller will wait too.
902 */ 905 */
906 return SCI_SUCCESS;
903 case SCI_REQ_COMPLETED: 907 case SCI_REQ_COMPLETED:
904 default: 908 default:
905 dev_warn(&ireq->owning_controller->pdev->dev, 909 dev_warn(&ireq->owning_controller->pdev->dev,
906 "%s: SCIC IO Request requested to abort while in wrong " 910 "%s: SCIC IO Request requested to abort while in wrong "
907 "state %d\n", 911 "state %d\n", __func__, ireq->sm.current_state_id);
908 __func__,
909 ireq->sm.current_state_id);
910 break; 912 break;
911 } 913 }
912 914
913 return SCI_FAILURE_INVALID_STATE; 915 return SCI_FAILURE_INVALID_STATE;
914 } 916 }
915 917
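This rework is the heart of the commit: instead of failing a second terminate call, SCI_REQ_ABORTING now reports SUCCESS and, when the remote device is not yet safe to abort (its RNC not yet suspended), raises IREQ_PENDING_ABORT so the caller knows to wait for the suspension before the hardware terminate is posted. A self-contained model of just that decision (the types and names below are stand-ins, not the driver's):

        #include <stdbool.h>

        struct req_model {
                bool device_safe_to_abort;  /* models isci_remote_device_is_safe_to_abort() */
                bool pending_abort;         /* models IREQ_PENDING_ABORT */
        };

        /* models the new SCI_REQ_ABORTING handling: flag a pending abort while
         * the device's RNC is not yet suspended, and report success either way
         * so the caller waits instead of treating the re-terminate as an error */
        static int terminate_aborting(struct req_model *r)
        {
                r->pending_abort = !r->device_safe_to_abort;
                return 0;       /* SCI_SUCCESS */
        }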
916 enum sci_status sci_request_complete(struct isci_request *ireq) 918 enum sci_status sci_request_complete(struct isci_request *ireq)
917 { 919 {
918 enum sci_base_request_states state; 920 enum sci_base_request_states state;
919 struct isci_host *ihost = ireq->owning_controller; 921 struct isci_host *ihost = ireq->owning_controller;
920 922
921 state = ireq->sm.current_state_id; 923 state = ireq->sm.current_state_id;
922 if (WARN_ONCE(state != SCI_REQ_COMPLETED, 924 if (WARN_ONCE(state != SCI_REQ_COMPLETED,
923 "isci: request completion from wrong state (%s)\n", 925 "isci: request completion from wrong state (%s)\n",
924 req_state_name(state))) 926 req_state_name(state)))
925 return SCI_FAILURE_INVALID_STATE; 927 return SCI_FAILURE_INVALID_STATE;
926 928
927 if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) 929 if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
928 sci_controller_release_frame(ihost, 930 sci_controller_release_frame(ihost,
929 ireq->saved_rx_frame_index); 931 ireq->saved_rx_frame_index);
930 932
931 /* XXX can we just stop the machine and remove the 'final' state? */ 933 /* XXX can we just stop the machine and remove the 'final' state? */
932 sci_change_state(&ireq->sm, SCI_REQ_FINAL); 934 sci_change_state(&ireq->sm, SCI_REQ_FINAL);
933 return SCI_SUCCESS; 935 return SCI_SUCCESS;
934 } 936 }
935 937
936 enum sci_status sci_io_request_event_handler(struct isci_request *ireq, 938 enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
937 u32 event_code) 939 u32 event_code)
938 { 940 {
939 enum sci_base_request_states state; 941 enum sci_base_request_states state;
940 struct isci_host *ihost = ireq->owning_controller; 942 struct isci_host *ihost = ireq->owning_controller;
941 943
942 state = ireq->sm.current_state_id; 944 state = ireq->sm.current_state_id;
943 945
944 if (state != SCI_REQ_STP_PIO_DATA_IN) { 946 if (state != SCI_REQ_STP_PIO_DATA_IN) {
945 dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %s\n", 947 dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %s\n",
946 __func__, event_code, req_state_name(state)); 948 __func__, event_code, req_state_name(state));
947 949
948 return SCI_FAILURE_INVALID_STATE; 950 return SCI_FAILURE_INVALID_STATE;
949 } 951 }
950 952
951 switch (scu_get_event_specifier(event_code)) { 953 switch (scu_get_event_specifier(event_code)) {
952 case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT: 954 case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
953 /* We are waiting for data and the SCU has R_ERR the data frame. 955 /* We are waiting for data and the SCU has R_ERR the data frame.
954 * Go back to waiting for the D2H Register FIS 956 * Go back to waiting for the D2H Register FIS
955 */ 957 */
956 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 958 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
957 return SCI_SUCCESS; 959 return SCI_SUCCESS;
958 default: 960 default:
959 dev_err(&ihost->pdev->dev, 961 dev_err(&ihost->pdev->dev,
960 "%s: pio request unexpected event %#x\n", 962 "%s: pio request unexpected event %#x\n",
961 __func__, event_code); 963 __func__, event_code);
962 964
963 /* TODO Should we fail the PIO request when we get an 965 /* TODO Should we fail the PIO request when we get an
964 * unexpected event? 966 * unexpected event?
965 */ 967 */
966 return SCI_FAILURE; 968 return SCI_FAILURE;
967 } 969 }
968 } 970 }
969 971
970 /* 972 /*
971 * This function copies response data for requests returning response data 973 * This function copies response data for requests returning response data
972 * instead of sense data. 974 * instead of sense data.
973 * @sci_req: This parameter specifies the request object for which to copy 975 * @sci_req: This parameter specifies the request object for which to copy
974 * the response data. 976 * the response data.
975 */ 977 */
976 static void sci_io_request_copy_response(struct isci_request *ireq) 978 static void sci_io_request_copy_response(struct isci_request *ireq)
977 { 979 {
978 void *resp_buf; 980 void *resp_buf;
979 u32 len; 981 u32 len;
980 struct ssp_response_iu *ssp_response; 982 struct ssp_response_iu *ssp_response;
981 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); 983 struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
982 984
983 ssp_response = &ireq->ssp.rsp; 985 ssp_response = &ireq->ssp.rsp;
984 986
985 resp_buf = &isci_tmf->resp.resp_iu; 987 resp_buf = &isci_tmf->resp.resp_iu;
986 988
987 len = min_t(u32, 989 len = min_t(u32,
988 SSP_RESP_IU_MAX_SIZE, 990 SSP_RESP_IU_MAX_SIZE,
989 be32_to_cpu(ssp_response->response_data_len)); 991 be32_to_cpu(ssp_response->response_data_len));
990 992
991 memcpy(resp_buf, ssp_response->resp_data, len); 993 memcpy(resp_buf, ssp_response->resp_data, len);
992 } 994 }
993 995
994 static enum sci_status 996 static enum sci_status
995 request_started_state_tc_event(struct isci_request *ireq, 997 request_started_state_tc_event(struct isci_request *ireq,
996 u32 completion_code) 998 u32 completion_code)
997 { 999 {
998 struct ssp_response_iu *resp_iu; 1000 struct ssp_response_iu *resp_iu;
999 u8 datapres; 1001 u8 datapres;
1000 1002
1001 /* TODO: Any SDMA return code of other than 0 is bad decode 0x003C0000 1003 /* TODO: Any SDMA return code of other than 0 is bad decode 0x003C0000
1002 * to determine SDMA status 1004 * to determine SDMA status
1003 */ 1005 */
1004 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1006 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1005 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1007 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1006 ireq->scu_status = SCU_TASK_DONE_GOOD; 1008 ireq->scu_status = SCU_TASK_DONE_GOOD;
1007 ireq->sci_status = SCI_SUCCESS; 1009 ireq->sci_status = SCI_SUCCESS;
1008 break; 1010 break;
1009 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): { 1011 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
1010 /* There are times when the SCU hardware will return an early 1012 /* There are times when the SCU hardware will return an early
1011 * response because the io request specified more data than is 1013 * response because the io request specified more data than is
1012 * returned by the target device (mode pages, inquiry data, 1014 * returned by the target device (mode pages, inquiry data,
1013 * etc.). We must check the response stats to see if this is 1015 * etc.). We must check the response stats to see if this is
1014 * truly a failed request or a good request that just got 1016 * truly a failed request or a good request that just got
1015 * completed early. 1017 * completed early.
1016 */ 1018 */
1017 struct ssp_response_iu *resp = &ireq->ssp.rsp; 1019 struct ssp_response_iu *resp = &ireq->ssp.rsp;
1018 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); 1020 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
1019 1021
1020 sci_swab32_cpy(&ireq->ssp.rsp, 1022 sci_swab32_cpy(&ireq->ssp.rsp,
1021 &ireq->ssp.rsp, 1023 &ireq->ssp.rsp,
1022 word_cnt); 1024 word_cnt);
1023 1025
1024 if (resp->status == 0) { 1026 if (resp->status == 0) {
1025 ireq->scu_status = SCU_TASK_DONE_GOOD; 1027 ireq->scu_status = SCU_TASK_DONE_GOOD;
1026 ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY; 1028 ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
1027 } else { 1029 } else {
1028 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1030 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1029 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 1031 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1030 } 1032 }
1031 break; 1033 break;
1032 } 1034 }
1033 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): { 1035 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
1034 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); 1036 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
1035 1037
1036 sci_swab32_cpy(&ireq->ssp.rsp, 1038 sci_swab32_cpy(&ireq->ssp.rsp,
1037 &ireq->ssp.rsp, 1039 &ireq->ssp.rsp,
1038 word_cnt); 1040 word_cnt);
1039 1041
1040 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1042 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1041 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 1043 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1042 break; 1044 break;
1043 } 1045 }
1044 1046
1045 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR): 1047 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
1046 /* TODO With TASK_DONE_RESP_LEN_ERR is the response frame 1048 /* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
1047 * guaranteed to be received before this completion status is 1049 * guaranteed to be received before this completion status is
1048 * posted? 1050 * posted?
1049 */ 1051 */
1050 resp_iu = &ireq->ssp.rsp; 1052 resp_iu = &ireq->ssp.rsp;
1051 datapres = resp_iu->datapres; 1053 datapres = resp_iu->datapres;
1052 1054
1053 if (datapres == 1 || datapres == 2) { 1055 if (datapres == 1 || datapres == 2) {
1054 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1056 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1055 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 1057 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1056 } else { 1058 } else {
1057 ireq->scu_status = SCU_TASK_DONE_GOOD; 1059 ireq->scu_status = SCU_TASK_DONE_GOOD;
1058 ireq->sci_status = SCI_SUCCESS; 1060 ireq->sci_status = SCI_SUCCESS;
1059 } 1061 }
1060 break; 1062 break;
1061 /* only STP devices get suspended. */ 1063 /* only STP devices get suspended. */
1062 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): 1064 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
1063 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR): 1065 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
1064 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR): 1066 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
1065 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR): 1067 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
1066 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR): 1068 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
1067 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN): 1069 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
1068 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR): 1070 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
1069 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP): 1071 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
1070 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS): 1072 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
1071 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): 1073 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
1072 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): 1074 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
1073 if (ireq->protocol == SAS_PROTOCOL_STP) { 1075 if (ireq->protocol == SAS_PROTOCOL_STP) {
1074 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 1076 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
1075 SCU_COMPLETION_TL_STATUS_SHIFT; 1077 SCU_COMPLETION_TL_STATUS_SHIFT;
1076 ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; 1078 ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
1077 } else { 1079 } else {
1078 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 1080 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
1079 SCU_COMPLETION_TL_STATUS_SHIFT; 1081 SCU_COMPLETION_TL_STATUS_SHIFT;
1080 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1082 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1081 } 1083 }
1082 break; 1084 break;
1083 1085
1084 /* both STP and SSP devices get suspended */ 1086 /* both STP and SSP devices get suspended */
1085 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR): 1087 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
1086 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION): 1088 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
1087 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1): 1089 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
1088 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2): 1090 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
1089 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3): 1091 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
1090 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION): 1092 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
1091 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION): 1093 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
1092 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY): 1094 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
1093 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED): 1095 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
1094 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED): 1096 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
1095 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 1097 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
1096 SCU_COMPLETION_TL_STATUS_SHIFT; 1098 SCU_COMPLETION_TL_STATUS_SHIFT;
1097 ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; 1099 ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
1098 break; 1100 break;
1099 1101
1100 /* neither ssp nor stp gets suspended. */ 1102 /* neither ssp nor stp gets suspended. */
1101 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR): 1103 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
1102 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR): 1104 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
1103 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR): 1105 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
1104 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR): 1106 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
1105 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR): 1107 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
1106 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA): 1108 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
1107 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): 1109 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
1108 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR): 1110 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
1109 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR): 1111 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
1110 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR): 1112 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
1111 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA): 1113 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
1112 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL): 1114 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
1113 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV): 1115 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
1114 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV): 1116 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
1115 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND): 1117 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
1116 default: 1118 default:
1117 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> 1119 ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
1118 SCU_COMPLETION_TL_STATUS_SHIFT; 1120 SCU_COMPLETION_TL_STATUS_SHIFT;
1119 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1121 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1120 break; 1122 break;
1121 } 1123 }
1122 1124
1123 /* 1125 /*
1124 * TODO: This is probably wrong for ACK/NAK timeout conditions 1126 * TODO: This is probably wrong for ACK/NAK timeout conditions
1125 */ 1127 */
1126 1128
1127 /* In all cases we will treat this as the completion of the IO req. */ 1129 /* In all cases we will treat this as the completion of the IO req. */
1128 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1130 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1129 return SCI_SUCCESS; 1131 return SCI_SUCCESS;
1130 } 1132 }
1131 1133
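Every error arm of the switch above stores the transport-layer status back into scu_status by shifting the extracted field down. The pattern is mask-then-shift; a standalone sketch with a hypothetical shift value (the real SCU_COMPLETION_TL_STATUS_SHIFT and mask are defined in the SCU headers):

        #include <stdint.h>

        #define TL_STATUS_SHIFT 20u     /* hypothetical shift value */
        #define TL_STATUS_MASK  (0xffu << TL_STATUS_SHIFT)

        /* extract the TL status field, then normalize it to a small code */
        static uint8_t tl_status(uint32_t completion_code)
        {
                return (uint8_t)((completion_code & TL_STATUS_MASK) >> TL_STATUS_SHIFT);
        }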
1132 static enum sci_status 1134 static enum sci_status
1133 request_aborting_state_tc_event(struct isci_request *ireq, 1135 request_aborting_state_tc_event(struct isci_request *ireq,
1134 u32 completion_code) 1136 u32 completion_code)
1135 { 1137 {
1136 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1138 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1137 case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): 1139 case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
1138 case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT): 1140 case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
1139 ireq->scu_status = SCU_TASK_DONE_TASK_ABORT; 1141 ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
1140 ireq->sci_status = SCI_FAILURE_IO_TERMINATED; 1142 ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
1141 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1143 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1142 break; 1144 break;
1143 1145
1144 default: 1146 default:
1145 /* Unless we get some strange error, wait for the task abort to complete 1147 /* Unless we get some strange error, wait for the task abort to complete
1146 * TODO: Should there be a state change for this completion? 1148 * TODO: Should there be a state change for this completion?
1147 */ 1149 */
1148 break; 1150 break;
1149 } 1151 }
1150 1152
1151 return SCI_SUCCESS; 1153 return SCI_SUCCESS;
1152 } 1154 }
1153 1155
1154 static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq, 1156 static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
1155 u32 completion_code) 1157 u32 completion_code)
1156 { 1158 {
1157 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1159 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1158 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1160 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1159 ireq->scu_status = SCU_TASK_DONE_GOOD; 1161 ireq->scu_status = SCU_TASK_DONE_GOOD;
1160 ireq->sci_status = SCI_SUCCESS; 1162 ireq->sci_status = SCI_SUCCESS;
1161 sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP); 1163 sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
1162 break; 1164 break;
1163 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): 1165 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
1164 /* Currently, the decision is to simply allow the task request 1166 /* Currently, the decision is to simply allow the task request
1165 * to time out if the task IU wasn't received successfully. 1167 * to time out if the task IU wasn't received successfully.
1166 * There is a potential for receiving multiple task responses if 1168 * There is a potential for receiving multiple task responses if
1167 * we decide to send the task IU again. 1169 * we decide to send the task IU again.
1168 */ 1170 */
1169 dev_warn(&ireq->owning_controller->pdev->dev, 1171 dev_warn(&ireq->owning_controller->pdev->dev,
1170 "%s: TaskRequest:0x%p CompletionCode:%x - " 1172 "%s: TaskRequest:0x%p CompletionCode:%x - "
1171 "ACK/NAK timeout\n", __func__, ireq, 1173 "ACK/NAK timeout\n", __func__, ireq,
1172 completion_code); 1174 completion_code);
1173 1175
1174 sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP); 1176 sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
1175 break; 1177 break;
1176 default: 1178 default:
1177 /* 1179 /*
1178 * All other completion statuses cause the IO to complete. 1180 * All other completion statuses cause the IO to complete.
1179 * If a NAK was received, then it is up to the user to retry 1181 * If a NAK was received, then it is up to the user to retry
1180 * the request. 1182 * the request.
1181 */ 1183 */
1182 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1184 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1183 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1185 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1184 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1186 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1185 break; 1187 break;
1186 } 1188 }
1187 1189
1188 return SCI_SUCCESS; 1190 return SCI_SUCCESS;
1189 } 1191 }
1190 1192
1191 static enum sci_status 1193 static enum sci_status
1192 smp_request_await_response_tc_event(struct isci_request *ireq, 1194 smp_request_await_response_tc_event(struct isci_request *ireq,
1193 u32 completion_code) 1195 u32 completion_code)
1194 { 1196 {
1195 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1197 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1196 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1198 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1197 /* In the AWAIT RESPONSE state, any TC completion is 1199 /* In the AWAIT RESPONSE state, any TC completion is
1198 * unexpected, but if the TC has a success status, we 1200 * unexpected, but if the TC has a success status, we
1199 * complete the IO anyway. 1201 * complete the IO anyway.
1200 */ 1202 */
1201 ireq->scu_status = SCU_TASK_DONE_GOOD; 1203 ireq->scu_status = SCU_TASK_DONE_GOOD;
1202 ireq->sci_status = SCI_SUCCESS; 1204 ireq->sci_status = SCI_SUCCESS;
1203 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1205 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1204 break; 1206 break;
1205 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): 1207 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
1206 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR): 1208 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
1207 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR): 1209 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
1208 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR): 1210 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
1209 /* These statuses have been seen with a specific LSI 1211 /* These statuses have been seen with a specific LSI
1210 * expander, which sometimes is not able to send an SMP 1212 * expander, which sometimes is not able to send an SMP
1211 * response within 2 ms. This causes our hardware to break 1213 * response within 2 ms. This causes our hardware to break
1212 * the connection and set the TC completion to one of 1214 * the connection and set the TC completion to one of
1213 * these SMP_XXX_XX_ERR statuses. For these types of errors, 1215 * these SMP_XXX_XX_ERR statuses. For these types of errors,
1214 * we ask the ihost user to retry the request. 1216 * we ask the ihost user to retry the request.
1215 */ 1217 */
1216 ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR; 1218 ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR;
1217 ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED; 1219 ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED;
1218 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1220 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1219 break; 1221 break;
1220 default: 1222 default:
1221 /* All other completion statuses cause the IO to be complete. If a NAK 1223 /* All other completion statuses cause the IO to be complete. If a NAK
1222 * was received, then it is up to the user to retry the request. 1224 * was received, then it is up to the user to retry the request.
1223 */ 1225 */
1224 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1226 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1225 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1227 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1226 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1228 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1227 break; 1229 break;
1228 } 1230 }
1229 1231
1230 return SCI_SUCCESS; 1232 return SCI_SUCCESS;
1231 } 1233 }
1232 1234
1233 static enum sci_status 1235 static enum sci_status
1234 smp_request_await_tc_event(struct isci_request *ireq, 1236 smp_request_await_tc_event(struct isci_request *ireq,
1235 u32 completion_code) 1237 u32 completion_code)
1236 { 1238 {
1237 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1239 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1238 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1240 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1239 ireq->scu_status = SCU_TASK_DONE_GOOD; 1241 ireq->scu_status = SCU_TASK_DONE_GOOD;
1240 ireq->sci_status = SCI_SUCCESS; 1242 ireq->sci_status = SCI_SUCCESS;
1241 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1243 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1242 break; 1244 break;
1243 default: 1245 default:
1244 /* All other completion statuses cause the IO to be 1246 /* All other completion statuses cause the IO to be
1245 * complete. If a NAK was received, then it is up to 1247 * complete. If a NAK was received, then it is up to
1246 * the user to retry the request. 1248 * the user to retry the request.
1247 */ 1249 */
1248 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1250 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1249 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1251 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1250 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1252 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1251 break; 1253 break;
1252 } 1254 }
1253 1255
1254 return SCI_SUCCESS; 1256 return SCI_SUCCESS;
1255 } 1257 }
1256 1258
1257 static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req) 1259 static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req)
1258 { 1260 {
1259 struct scu_sgl_element *sgl; 1261 struct scu_sgl_element *sgl;
1260 struct scu_sgl_element_pair *sgl_pair; 1262 struct scu_sgl_element_pair *sgl_pair;
1261 struct isci_request *ireq = to_ireq(stp_req); 1263 struct isci_request *ireq = to_ireq(stp_req);
1262 struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl; 1264 struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl;
1263 1265
1264 sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index); 1266 sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
1265 if (!sgl_pair) 1267 if (!sgl_pair)
1266 sgl = NULL; 1268 sgl = NULL;
1267 else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) { 1269 else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) {
1268 if (sgl_pair->B.address_lower == 0 && 1270 if (sgl_pair->B.address_lower == 0 &&
1269 sgl_pair->B.address_upper == 0) { 1271 sgl_pair->B.address_upper == 0) {
1270 sgl = NULL; 1272 sgl = NULL;
1271 } else { 1273 } else {
1272 pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B; 1274 pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B;
1273 sgl = &sgl_pair->B; 1275 sgl = &sgl_pair->B;
1274 } 1276 }
1275 } else { 1277 } else {
1276 if (sgl_pair->next_pair_lower == 0 && 1278 if (sgl_pair->next_pair_lower == 0 &&
1277 sgl_pair->next_pair_upper == 0) { 1279 sgl_pair->next_pair_upper == 0) {
1278 sgl = NULL; 1280 sgl = NULL;
1279 } else { 1281 } else {
1280 pio_sgl->index++; 1282 pio_sgl->index++;
1281 pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A; 1283 pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A;
1282 sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index); 1284 sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
1283 sgl = &sgl_pair->A; 1285 sgl = &sgl_pair->A;
1284 } 1286 }
1285 } 1287 }
1286 1288
1287 return sgl; 1289 return sgl;
1288 } 1290 }
1289 1291
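
[Editorial note] pio_sgl_next() above alternates between the A and B halves of each SGL element pair, follows the next-pair link once B is exhausted, and treats an all-zero address as the list terminator. A minimal standalone sketch of the same walk, assuming simplified stand-in types (sgl_elem, sgl_pair, pio_state and sgl_next are hypothetical names, not the driver's structures):

	#include <stddef.h>
	#include <stdint.h>

	/* Hypothetical, simplified stand-ins for the driver's SGL types. */
	struct sgl_elem {
		uint32_t addr_lo;
		uint32_t addr_hi;
		uint32_t len;
	};

	struct sgl_pair {
		struct sgl_elem A;
		struct sgl_elem B;
		struct sgl_pair *next;	/* models next_pair_lower/upper */
	};

	enum sgl_set { SET_A, SET_B };

	struct pio_state {
		struct sgl_pair *pair;
		enum sgl_set set;
	};

	/* Advance to the next SGL element: A -> B within a pair, then follow
	 * the next-pair link and restart at A. A zero address/link ends the list. */
	static struct sgl_elem *sgl_next(struct pio_state *st)
	{
		if (!st->pair)
			return NULL;
		if (st->set == SET_A) {
			if (st->pair->B.addr_lo == 0 && st->pair->B.addr_hi == 0)
				return NULL;		/* B unused: end of list */
			st->set = SET_B;
			return &st->pair->B;
		}
		if (!st->pair->next)
			return NULL;			/* no next pair: end of list */
		st->pair = st->pair->next;
		st->set = SET_A;
		return &st->pair->A;
	}
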
1290 static enum sci_status 1292 static enum sci_status
1291 stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq, 1293 stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
1292 u32 completion_code) 1294 u32 completion_code)
1293 { 1295 {
1294 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1296 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1295 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1297 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1296 ireq->scu_status = SCU_TASK_DONE_GOOD; 1298 ireq->scu_status = SCU_TASK_DONE_GOOD;
1297 ireq->sci_status = SCI_SUCCESS; 1299 ireq->sci_status = SCI_SUCCESS;
1298 sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H); 1300 sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
1299 break; 1301 break;
1300 1302
1301 default: 1303 default:
1302 /* All other completion statuses cause the IO to be 1304 /* All other completion statuses cause the IO to be
1303 * complete. If a NAK was received, then it is up to 1305 * complete. If a NAK was received, then it is up to
1304 * the user to retry the request. 1306 * the user to retry the request.
1305 */ 1307 */
1306 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1308 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1307 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1309 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1308 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1310 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1309 break; 1311 break;
1310 } 1312 }
1311 1313
1312 return SCI_SUCCESS; 1314 return SCI_SUCCESS;
1313 } 1315 }
1314 1316
1315 #define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */ 1317 #define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */
1316 1318
1317 /* transmit a DATA_FIS from (current sgl + offset) for the input 1319 /* transmit a DATA_FIS from (current sgl + offset) for the input
1318 * parameter length. The current sgl and offset are already stored in the IO request 1320 * parameter length. The current sgl and offset are already stored in the IO request
1319 */ 1321 */
1320 static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame( 1322 static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame(
1321 struct isci_request *ireq, 1323 struct isci_request *ireq,
1322 u32 length) 1324 u32 length)
1323 { 1325 {
1324 struct isci_stp_request *stp_req = &ireq->stp.req; 1326 struct isci_stp_request *stp_req = &ireq->stp.req;
1325 struct scu_task_context *task_context = ireq->tc; 1327 struct scu_task_context *task_context = ireq->tc;
1326 struct scu_sgl_element_pair *sgl_pair; 1328 struct scu_sgl_element_pair *sgl_pair;
1327 struct scu_sgl_element *current_sgl; 1329 struct scu_sgl_element *current_sgl;
1328 1330
1329 /* Recycle the TC and reconstruct it to send out a DATA FIS containing 1331 /* Recycle the TC and reconstruct it to send out a DATA FIS containing
1330 * the data from current_sgl+offset for the input length 1332 * the data from current_sgl+offset for the input length
1331 */ 1333 */
1332 sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index); 1334 sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
1333 if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) 1335 if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A)
1334 current_sgl = &sgl_pair->A; 1336 current_sgl = &sgl_pair->A;
1335 else 1337 else
1336 current_sgl = &sgl_pair->B; 1338 current_sgl = &sgl_pair->B;
1337 1339
1338 /* update the TC */ 1340 /* update the TC */
1339 task_context->command_iu_upper = current_sgl->address_upper; 1341 task_context->command_iu_upper = current_sgl->address_upper;
1340 task_context->command_iu_lower = current_sgl->address_lower; 1342 task_context->command_iu_lower = current_sgl->address_lower;
1341 task_context->transfer_length_bytes = length; 1343 task_context->transfer_length_bytes = length;
1342 task_context->type.stp.fis_type = FIS_DATA; 1344 task_context->type.stp.fis_type = FIS_DATA;
1343 1345
1344 /* send the new TC out. */ 1346 /* send the new TC out. */
1345 return sci_controller_continue_io(ireq); 1347 return sci_controller_continue_io(ireq);
1346 } 1348 }
1347 1349
1348 static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq) 1350 static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
1349 { 1351 {
1350 struct isci_stp_request *stp_req = &ireq->stp.req; 1352 struct isci_stp_request *stp_req = &ireq->stp.req;
1351 struct scu_sgl_element_pair *sgl_pair; 1353 struct scu_sgl_element_pair *sgl_pair;
1352 enum sci_status status = SCI_SUCCESS; 1354 enum sci_status status = SCI_SUCCESS;
1353 struct scu_sgl_element *sgl; 1355 struct scu_sgl_element *sgl;
1354 u32 offset; 1356 u32 offset;
1355 u32 len = 0; 1357 u32 len = 0;
1356 1358
1357 offset = stp_req->sgl.offset; 1359 offset = stp_req->sgl.offset;
1358 sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index); 1360 sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
1359 if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__)) 1361 if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__))
1360 return SCI_FAILURE; 1362 return SCI_FAILURE;
1361 1363
1362 if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) { 1364 if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) {
1363 sgl = &sgl_pair->A; 1365 sgl = &sgl_pair->A;
1364 len = sgl_pair->A.length - offset; 1366 len = sgl_pair->A.length - offset;
1365 } else { 1367 } else {
1366 sgl = &sgl_pair->B; 1368 sgl = &sgl_pair->B;
1367 len = sgl_pair->B.length - offset; 1369 len = sgl_pair->B.length - offset;
1368 } 1370 }
1369 1371
1370 if (stp_req->pio_len == 0) 1372 if (stp_req->pio_len == 0)
1371 return SCI_SUCCESS; 1373 return SCI_SUCCESS;
1372 1374
1373 if (stp_req->pio_len >= len) { 1375 if (stp_req->pio_len >= len) {
1374 status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len); 1376 status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len);
1375 if (status != SCI_SUCCESS) 1377 if (status != SCI_SUCCESS)
1376 return status; 1378 return status;
1377 stp_req->pio_len -= len; 1379 stp_req->pio_len -= len;
1378 1380
1379 /* update the current sgl, offset and save for future */ 1381 /* update the current sgl, offset and save for future */
1380 sgl = pio_sgl_next(stp_req); 1382 sgl = pio_sgl_next(stp_req);
1381 offset = 0; 1383 offset = 0;
1382 } else if (stp_req->pio_len < len) { 1384 } else if (stp_req->pio_len < len) {
1383 sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len); 1385 sci_stp_request_pio_data_out_trasmit_data_frame(ireq, stp_req->pio_len);
1384 1386
1385 /* Sgl offset will be adjusted and saved for future */ 1387 /* Sgl offset will be adjusted and saved for future */
1386 offset += stp_req->pio_len; 1388 offset += stp_req->pio_len;
1387 sgl->address_lower += stp_req->pio_len; 1389 sgl->address_lower += stp_req->pio_len;
1388 stp_req->pio_len = 0; 1390 stp_req->pio_len = 0;
1389 } 1391 }
1390 1392
1391 stp_req->sgl.offset = offset; 1393 stp_req->sgl.offset = offset;
1392 1394
1393 return status; 1395 return status;
1394 } 1396 }
1395 1397
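
[Editorial note] sci_stp_request_pio_data_out_transmit_data() above splits the remaining PIO payload against the current SGL element: if at least the element's remainder is still to be sent, it emits that remainder and moves on with a fresh offset; otherwise it sends what is left and advances the offset within the element. A sketch of just that arithmetic, where next_fis_len() is a hypothetical helper:

	#include <stdint.h>
	#include <stdio.h>

	/* Given the bytes left in the PIO transfer (pio_len) and the size of
	 * the current SGL element (elem_len) with 'offset' bytes already sent,
	 * return how much the next DATA FIS carries and update the offset.
	 * Mirrors the branch structure of the function above. */
	static uint32_t next_fis_len(uint32_t pio_len, uint32_t elem_len,
				     uint32_t *offset)
	{
		uint32_t avail = elem_len - *offset;

		if (pio_len >= avail) {
			*offset = 0;	/* element consumed: next one starts fresh */
			return avail;
		}
		*offset += pio_len;	/* partial element: stay, bump the offset */
		return pio_len;
	}

	int main(void)
	{
		uint32_t offset = 0x100;
		/* 0x2000 bytes remain; the current 0x1000-byte element has
		 * 0x100 already sent, so the next DATA FIS carries 0xf00. */
		uint32_t fis = next_fis_len(0x2000, 0x1000, &offset);

		printf("fis len = 0x%x, new offset = 0x%x\n", fis, offset);
		return 0;
	}
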
1396 /** 1398 /**
1397 * 1399 *
1398 * @stp_request: The request that is used for the SGL processing. 1400 * @stp_request: The request that is used for the SGL processing.
1399 * @data_buffer: The buffer of data to be copied. 1401 * @data_buffer: The buffer of data to be copied.
1400 * @length: The length of the data transfer. 1402 * @length: The length of the data transfer.
1401 * 1403 *
1402 * Copy the data from the buffer for the specified length to the data region 1404 * Copy the data from the buffer for the specified length to the data region
1403 * specified by the IO request SGL. enum sci_status 1405 * specified by the IO request SGL. enum sci_status
1404 */ 1406 */
1405 static enum sci_status 1407 static enum sci_status
1406 sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req, 1408 sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
1407 u8 *data_buf, u32 len) 1409 u8 *data_buf, u32 len)
1408 { 1410 {
1409 struct isci_request *ireq; 1411 struct isci_request *ireq;
1410 u8 *src_addr; 1412 u8 *src_addr;
1411 int copy_len; 1413 int copy_len;
1412 struct sas_task *task; 1414 struct sas_task *task;
1413 struct scatterlist *sg; 1415 struct scatterlist *sg;
1414 void *kaddr; 1416 void *kaddr;
1415 int total_len = len; 1417 int total_len = len;
1416 1418
1417 ireq = to_ireq(stp_req); 1419 ireq = to_ireq(stp_req);
1418 task = isci_request_access_task(ireq); 1420 task = isci_request_access_task(ireq);
1419 src_addr = data_buf; 1421 src_addr = data_buf;
1420 1422
1421 if (task->num_scatter > 0) { 1423 if (task->num_scatter > 0) {
1422 sg = task->scatter; 1424 sg = task->scatter;
1423 1425
1424 while (total_len > 0) { 1426 while (total_len > 0) {
1425 struct page *page = sg_page(sg); 1427 struct page *page = sg_page(sg);
1426 1428
1427 copy_len = min_t(int, total_len, sg_dma_len(sg)); 1429 copy_len = min_t(int, total_len, sg_dma_len(sg));
1428 kaddr = kmap_atomic(page); 1430 kaddr = kmap_atomic(page);
1429 memcpy(kaddr + sg->offset, src_addr, copy_len); 1431 memcpy(kaddr + sg->offset, src_addr, copy_len);
1430 kunmap_atomic(kaddr); 1432 kunmap_atomic(kaddr);
1431 total_len -= copy_len; 1433 total_len -= copy_len;
1432 src_addr += copy_len; 1434 src_addr += copy_len;
1433 sg = sg_next(sg); 1435 sg = sg_next(sg);
1434 } 1436 }
1435 } else { 1437 } else {
1436 BUG_ON(task->total_xfer_len < total_len); 1438 BUG_ON(task->total_xfer_len < total_len);
1437 memcpy(task->scatter, src_addr, total_len); 1439 memcpy(task->scatter, src_addr, total_len);
1438 } 1440 }
1439 1441
1440 return SCI_SUCCESS; 1442 return SCI_SUCCESS;
1441 } 1443 }
1442 1444
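
[Editorial note] The copy above distributes a linear frame buffer across scatter/gather segments, clamping each memcpy to the segment length and advancing the source pointer. A user-space analogue of that loop, assuming a simplified seg type standing in for struct scatterlist (no kmap needed outside the kernel):

	#include <stdint.h>
	#include <string.h>

	/* Hypothetical stand-in for a scatterlist entry. */
	struct seg {
		uint8_t *buf;
		uint32_t len;
	};

	/* Copy 'len' bytes from 'src' across the segment list: the same
	 * clamp-and-advance pattern as the PIO data-in copy above. */
	static void copy_to_segs(struct seg *sg, size_t nsegs,
				 const uint8_t *src, uint32_t len)
	{
		for (size_t i = 0; i < nsegs && len > 0; i++) {
			uint32_t chunk = len < sg[i].len ? len : sg[i].len;

			memcpy(sg[i].buf, src, chunk);
			src += chunk;
			len -= chunk;
		}
	}

	int main(void)
	{
		uint8_t a[4], b[4];
		struct seg sg[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
		const uint8_t src[6] = { 1, 2, 3, 4, 5, 6 };

		copy_to_segs(sg, 2, src, sizeof(src));	/* a gets 1..4, b gets 5,6 */
		return 0;
	}
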
1443 /** 1445 /**
1444 * 1446 *
1445 * @sci_req: The PIO DATA IN request that is to receive the data. 1447 * @sci_req: The PIO DATA IN request that is to receive the data.
1446 * @data_buffer: The buffer to copy from. 1448 * @data_buffer: The buffer to copy from.
1447 * 1449 *
1448 * Copy the data buffer to the IO request data region. enum sci_status 1450 * Copy the data buffer to the IO request data region. enum sci_status
1449 */ 1451 */
1450 static enum sci_status sci_stp_request_pio_data_in_copy_data( 1452 static enum sci_status sci_stp_request_pio_data_in_copy_data(
1451 struct isci_stp_request *stp_req, 1453 struct isci_stp_request *stp_req,
1452 u8 *data_buffer) 1454 u8 *data_buffer)
1453 { 1455 {
1454 enum sci_status status; 1456 enum sci_status status;
1455 1457
1456 /* 1458 /*
1457 * If there is less than 1K remaining in the transfer request, 1459 * If there is less than 1K remaining in the transfer request,
1458 * copy just the data for the transfer */ 1460 * copy just the data for the transfer */
1459 if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) { 1461 if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
1460 status = sci_stp_request_pio_data_in_copy_data_buffer( 1462 status = sci_stp_request_pio_data_in_copy_data_buffer(
1461 stp_req, data_buffer, stp_req->pio_len); 1463 stp_req, data_buffer, stp_req->pio_len);
1462 1464
1463 if (status == SCI_SUCCESS) 1465 if (status == SCI_SUCCESS)
1464 stp_req->pio_len = 0; 1466 stp_req->pio_len = 0;
1465 } else { 1467 } else {
1466 /* We are transferring the whole frame, so copy the full frame size */ 1468 /* We are transferring the whole frame, so copy the full frame size */
1467 status = sci_stp_request_pio_data_in_copy_data_buffer( 1469 status = sci_stp_request_pio_data_in_copy_data_buffer(
1468 stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE); 1470 stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
1469 1471
1470 if (status == SCI_SUCCESS) 1472 if (status == SCI_SUCCESS)
1471 stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE; 1473 stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE;
1472 } 1474 }
1473 1475
1474 return status; 1476 return status;
1475 } 1477 }
1476 1478
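
[Editorial note] The branch in sci_stp_request_pio_data_in_copy_data() is a clamp against the 1K frame payload: whole frames are consumed until less than SCU_MAX_FRAME_BUFFER_SIZE remains, then the tail is copied. A sketch of that countdown with an illustrative transfer size:

	#include <stdint.h>
	#include <stdio.h>

	#define FRAME_PAYLOAD 0x400	/* 1K, as SCU_MAX_FRAME_BUFFER_SIZE above */

	int main(void)
	{
		uint32_t pio_len = 0x900;	/* illustrative transfer size */
		int frames = 0;

		/* Each received DATA frame carries at most 1K; the request's
		 * pio_len counts down until the transfer is drained. */
		while (pio_len > 0) {
			uint32_t chunk = pio_len < FRAME_PAYLOAD ? pio_len
								 : FRAME_PAYLOAD;
			pio_len -= chunk;
			frames++;
		}
		printf("consumed in %d frames\n", frames);	/* 3: 0x400+0x400+0x100 */
		return 0;
	}
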
1477 static enum sci_status 1479 static enum sci_status
1478 stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq, 1480 stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
1479 u32 completion_code) 1481 u32 completion_code)
1480 { 1482 {
1481 enum sci_status status = SCI_SUCCESS; 1483 enum sci_status status = SCI_SUCCESS;
1482 1484
1483 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1485 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1484 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1486 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1485 ireq->scu_status = SCU_TASK_DONE_GOOD; 1487 ireq->scu_status = SCU_TASK_DONE_GOOD;
1486 ireq->sci_status = SCI_SUCCESS; 1488 ireq->sci_status = SCI_SUCCESS;
1487 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 1489 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1488 break; 1490 break;
1489 1491
1490 default: 1492 default:
1491 /* All other completion statuses cause the IO to be 1493 /* All other completion statuses cause the IO to be
1492 * complete. If a NAK was received, then it is up to 1494 * complete. If a NAK was received, then it is up to
1493 * the user to retry the request. 1495 * the user to retry the request.
1494 */ 1496 */
1495 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1497 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1496 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1498 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1497 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1499 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1498 break; 1500 break;
1499 } 1501 }
1500 1502
1501 return status; 1503 return status;
1502 } 1504 }
1503 1505
1504 static enum sci_status 1506 static enum sci_status
1505 pio_data_out_tx_done_tc_event(struct isci_request *ireq, 1507 pio_data_out_tx_done_tc_event(struct isci_request *ireq,
1506 u32 completion_code) 1508 u32 completion_code)
1507 { 1509 {
1508 enum sci_status status = SCI_SUCCESS; 1510 enum sci_status status = SCI_SUCCESS;
1509 bool all_frames_transferred = false; 1511 bool all_frames_transferred = false;
1510 struct isci_stp_request *stp_req = &ireq->stp.req; 1512 struct isci_stp_request *stp_req = &ireq->stp.req;
1511 1513
1512 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 1514 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1513 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 1515 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1514 /* Transmit data */ 1516 /* Transmit data */
1515 if (stp_req->pio_len != 0) { 1517 if (stp_req->pio_len != 0) {
1516 status = sci_stp_request_pio_data_out_transmit_data(ireq); 1518 status = sci_stp_request_pio_data_out_transmit_data(ireq);
1517 if (status == SCI_SUCCESS) { 1519 if (status == SCI_SUCCESS) {
1518 if (stp_req->pio_len == 0) 1520 if (stp_req->pio_len == 0)
1519 all_frames_transferred = true; 1521 all_frames_transferred = true;
1520 } 1522 }
1521 } else if (stp_req->pio_len == 0) { 1523 } else if (stp_req->pio_len == 0) {
1522 /* 1524 /*
1523 * this will happen if all the data is written the 1525 * this will happen if all the data is written the
1524 * first time after the PIO setup FIS is received 1526 * first time after the PIO setup FIS is received
1525 */ 1527 */
1526 all_frames_transferred = true; 1528 all_frames_transferred = true;
1527 } 1529 }
1528 1530
1529 /* all data transferred. */ 1531 /* all data transferred. */
1530 if (all_frames_transferred) { 1532 if (all_frames_transferred) {
1531 /* 1533 /*
1532 * Change the state to SCI_REQ_STP_PIO_WAIT_FRAME 1534 * Change the state to SCI_REQ_STP_PIO_WAIT_FRAME
1533 * and wait for a PIO_SETUP or D2H Register FIS. */ 1535 * and wait for a PIO_SETUP or D2H Register FIS. */
1534 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 1536 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1535 } 1537 }
1536 break; 1538 break;
1537 1539
1538 default: 1540 default:
1539 /* 1541 /*
1540 * All other completion statuses cause the IO to be complete. 1542 * All other completion statuses cause the IO to be complete.
1541 * If a NAK was received, then it is up to the user to retry 1543 * If a NAK was received, then it is up to the user to retry
1542 * the request. 1544 * the request.
1543 */ 1545 */
1544 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 1546 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
1545 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1547 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1546 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1548 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1547 break; 1549 break;
1548 } 1550 }
1549 1551
1550 return status; 1552 return status;
1551 } 1553 }
1552 1554
1553 static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq, 1555 static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq,
1554 u32 frame_index) 1556 u32 frame_index)
1555 { 1557 {
1556 struct isci_host *ihost = ireq->owning_controller; 1558 struct isci_host *ihost = ireq->owning_controller;
1557 struct dev_to_host_fis *frame_header; 1559 struct dev_to_host_fis *frame_header;
1558 enum sci_status status; 1560 enum sci_status status;
1559 u32 *frame_buffer; 1561 u32 *frame_buffer;
1560 1562
1561 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1563 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1562 frame_index, 1564 frame_index,
1563 (void **)&frame_header); 1565 (void **)&frame_header);
1564 1566
1565 if ((status == SCI_SUCCESS) && 1567 if ((status == SCI_SUCCESS) &&
1566 (frame_header->fis_type == FIS_REGD2H)) { 1568 (frame_header->fis_type == FIS_REGD2H)) {
1567 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1569 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1568 frame_index, 1570 frame_index,
1569 (void **)&frame_buffer); 1571 (void **)&frame_buffer);
1570 1572
1571 sci_controller_copy_sata_response(&ireq->stp.rsp, 1573 sci_controller_copy_sata_response(&ireq->stp.rsp,
1572 frame_header, 1574 frame_header,
1573 frame_buffer); 1575 frame_buffer);
1574 } 1576 }
1575 1577
1576 sci_controller_release_frame(ihost, frame_index); 1578 sci_controller_release_frame(ihost, frame_index);
1577 1579
1578 return status; 1580 return status;
1579 } 1581 }
1580 1582
1581 static enum sci_status process_unsolicited_fis(struct isci_request *ireq, 1583 static enum sci_status process_unsolicited_fis(struct isci_request *ireq,
1582 u32 frame_index) 1584 u32 frame_index)
1583 { 1585 {
1584 struct isci_host *ihost = ireq->owning_controller; 1586 struct isci_host *ihost = ireq->owning_controller;
1585 enum sci_status status; 1587 enum sci_status status;
1586 struct dev_to_host_fis *frame_header; 1588 struct dev_to_host_fis *frame_header;
1587 u32 *frame_buffer; 1589 u32 *frame_buffer;
1588 1590
1589 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1591 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1590 frame_index, 1592 frame_index,
1591 (void **)&frame_header); 1593 (void **)&frame_header);
1592 1594
1593 if (status != SCI_SUCCESS) 1595 if (status != SCI_SUCCESS)
1594 return status; 1596 return status;
1595 1597
1596 if (frame_header->fis_type != FIS_REGD2H) { 1598 if (frame_header->fis_type != FIS_REGD2H) {
1597 dev_err(&ireq->isci_host->pdev->dev, 1599 dev_err(&ireq->isci_host->pdev->dev,
1598 "%s ERROR: invalid fis type 0x%X\n", 1600 "%s ERROR: invalid fis type 0x%X\n",
1599 __func__, frame_header->fis_type); 1601 __func__, frame_header->fis_type);
1600 return SCI_FAILURE; 1602 return SCI_FAILURE;
1601 } 1603 }
1602 1604
1603 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1605 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1604 frame_index, 1606 frame_index,
1605 (void **)&frame_buffer); 1607 (void **)&frame_buffer);
1606 1608
1607 sci_controller_copy_sata_response(&ireq->stp.rsp, 1609 sci_controller_copy_sata_response(&ireq->stp.rsp,
1608 (u32 *)frame_header, 1610 (u32 *)frame_header,
1609 frame_buffer); 1611 frame_buffer);
1610 1612
1611 /* Frame has been decoded; return it to the controller */ 1613 /* Frame has been decoded; return it to the controller */
1612 sci_controller_release_frame(ihost, frame_index); 1614 sci_controller_release_frame(ihost, frame_index);
1613 1615
1614 return status; 1616 return status;
1615 } 1617 }
1616 1618
1617 static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq, 1619 static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq,
1618 u32 frame_index) 1620 u32 frame_index)
1619 { 1621 {
1620 struct sas_task *task = isci_request_access_task(ireq); 1622 struct sas_task *task = isci_request_access_task(ireq);
1621 enum sci_status status; 1623 enum sci_status status;
1622 1624
1623 status = process_unsolicited_fis(ireq, frame_index); 1625 status = process_unsolicited_fis(ireq, frame_index);
1624 1626
1625 if (status == SCI_SUCCESS) { 1627 if (status == SCI_SUCCESS) {
1626 if (ireq->stp.rsp.status & ATA_ERR) 1628 if (ireq->stp.rsp.status & ATA_ERR)
1627 status = SCI_IO_FAILURE_RESPONSE_VALID; 1629 status = SCI_IO_FAILURE_RESPONSE_VALID;
1628 } else { 1630 } else {
1629 status = SCI_IO_FAILURE_RESPONSE_VALID; 1631 status = SCI_IO_FAILURE_RESPONSE_VALID;
1630 } 1632 }
1631 1633
1632 if (status != SCI_SUCCESS) { 1634 if (status != SCI_SUCCESS) {
1633 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1635 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1634 ireq->sci_status = status; 1636 ireq->sci_status = status;
1635 } else { 1637 } else {
1636 ireq->scu_status = SCU_TASK_DONE_GOOD; 1638 ireq->scu_status = SCU_TASK_DONE_GOOD;
1637 ireq->sci_status = SCI_SUCCESS; 1639 ireq->sci_status = SCI_SUCCESS;
1638 } 1640 }
1639 1641
1640 /* the D2H unsolicited frame marks the end of non-data commands */ 1642 /* the D2H unsolicited frame marks the end of non-data commands */
1641 if (task->data_dir == DMA_NONE) 1643 if (task->data_dir == DMA_NONE)
1642 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1644 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1643 1645
1644 return status; 1646 return status;
1645 } 1647 }
1646 1648
1647 static void scu_atapi_reconstruct_raw_frame_task_context(struct isci_request *ireq) 1649 static void scu_atapi_reconstruct_raw_frame_task_context(struct isci_request *ireq)
1648 { 1650 {
1649 struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev); 1651 struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
1650 void *atapi_cdb = ireq->ttype_ptr.io_task_ptr->ata_task.atapi_packet; 1652 void *atapi_cdb = ireq->ttype_ptr.io_task_ptr->ata_task.atapi_packet;
1651 struct scu_task_context *task_context = ireq->tc; 1653 struct scu_task_context *task_context = ireq->tc;
1652 1654
1653 /* fill in the SCU Task Context for a DATA FIS containing the CDB in Raw Frame 1655 /* fill in the SCU Task Context for a DATA FIS containing the CDB in Raw Frame
1654 * type. The TC for the previous Packet FIS is already there; we only need to 1656 * type. The TC for the previous Packet FIS is already there; we only need to
1655 * change the H2D FIS content. 1657 * change the H2D FIS content.
1656 */ 1658 */
1657 memset(&ireq->stp.cmd, 0, sizeof(struct host_to_dev_fis)); 1659 memset(&ireq->stp.cmd, 0, sizeof(struct host_to_dev_fis));
1658 memcpy(((u8 *)&ireq->stp.cmd + sizeof(u32)), atapi_cdb, ATAPI_CDB_LEN); 1660 memcpy(((u8 *)&ireq->stp.cmd + sizeof(u32)), atapi_cdb, ATAPI_CDB_LEN);
1659 memset(&(task_context->type.stp), 0, sizeof(struct stp_task_context)); 1661 memset(&(task_context->type.stp), 0, sizeof(struct stp_task_context));
1660 task_context->type.stp.fis_type = FIS_DATA; 1662 task_context->type.stp.fis_type = FIS_DATA;
1661 task_context->transfer_length_bytes = dev->cdb_len; 1663 task_context->transfer_length_bytes = dev->cdb_len;
1662 } 1664 }
1663 1665
1664 static void scu_atapi_construct_task_context(struct isci_request *ireq) 1666 static void scu_atapi_construct_task_context(struct isci_request *ireq)
1665 { 1667 {
1666 struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev); 1668 struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
1667 struct sas_task *task = isci_request_access_task(ireq); 1669 struct sas_task *task = isci_request_access_task(ireq);
1668 struct scu_task_context *task_context = ireq->tc; 1670 struct scu_task_context *task_context = ireq->tc;
1669 int cdb_len = dev->cdb_len; 1671 int cdb_len = dev->cdb_len;
1670 1672
1671 /* reference: SSTL 1.13.4.2 1673 /* reference: SSTL 1.13.4.2
1672 * task_type, sata_direction 1674 * task_type, sata_direction
1673 */ 1675 */
1674 if (task->data_dir == DMA_TO_DEVICE) { 1676 if (task->data_dir == DMA_TO_DEVICE) {
1675 task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_OUT; 1677 task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_OUT;
1676 task_context->sata_direction = 0; 1678 task_context->sata_direction = 0;
1677 } else { 1679 } else {
1678 /* todo: for a NO_DATA command, we need to send out a raw frame. */ 1680 /* todo: for a NO_DATA command, we need to send out a raw frame. */
1679 task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_IN; 1681 task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_IN;
1680 task_context->sata_direction = 1; 1682 task_context->sata_direction = 1;
1681 } 1683 }
1682 1684
1683 memset(&task_context->type.stp, 0, sizeof(task_context->type.stp)); 1685 memset(&task_context->type.stp, 0, sizeof(task_context->type.stp));
1684 task_context->type.stp.fis_type = FIS_DATA; 1686 task_context->type.stp.fis_type = FIS_DATA;
1685 1687
1686 memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); 1688 memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
1687 memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len); 1689 memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len);
1688 task_context->ssp_command_iu_length = cdb_len / sizeof(u32); 1690 task_context->ssp_command_iu_length = cdb_len / sizeof(u32);
1689 1691
1690 /* task phase is set to TX_CMD */ 1692 /* task phase is set to TX_CMD */
1691 task_context->task_phase = 0x1; 1693 task_context->task_phase = 0x1;
1692 1694
1693 /* retry counter */ 1695 /* retry counter */
1694 task_context->stp_retry_count = 0; 1696 task_context->stp_retry_count = 0;
1695 1697
1696 /* data transfer size. */ 1698 /* data transfer size. */
1697 task_context->transfer_length_bytes = task->total_xfer_len; 1699 task_context->transfer_length_bytes = task->total_xfer_len;
1698 1700
1699 /* setup sgl */ 1701 /* setup sgl */
1700 sci_request_build_sgl(ireq); 1702 sci_request_build_sgl(ireq);
1701 } 1703 }
1702 1704
1703 enum sci_status 1705 enum sci_status
1704 sci_io_request_frame_handler(struct isci_request *ireq, 1706 sci_io_request_frame_handler(struct isci_request *ireq,
1705 u32 frame_index) 1707 u32 frame_index)
1706 { 1708 {
1707 struct isci_host *ihost = ireq->owning_controller; 1709 struct isci_host *ihost = ireq->owning_controller;
1708 struct isci_stp_request *stp_req = &ireq->stp.req; 1710 struct isci_stp_request *stp_req = &ireq->stp.req;
1709 enum sci_base_request_states state; 1711 enum sci_base_request_states state;
1710 enum sci_status status; 1712 enum sci_status status;
1711 ssize_t word_cnt; 1713 ssize_t word_cnt;
1712 1714
1713 state = ireq->sm.current_state_id; 1715 state = ireq->sm.current_state_id;
1714 switch (state) { 1716 switch (state) {
1715 case SCI_REQ_STARTED: { 1717 case SCI_REQ_STARTED: {
1716 struct ssp_frame_hdr ssp_hdr; 1718 struct ssp_frame_hdr ssp_hdr;
1717 void *frame_header; 1719 void *frame_header;
1718 1720
1719 sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1721 sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1720 frame_index, 1722 frame_index,
1721 &frame_header); 1723 &frame_header);
1722 1724
1723 word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32); 1725 word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
1724 sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt); 1726 sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
1725 1727
1726 if (ssp_hdr.frame_type == SSP_RESPONSE) { 1728 if (ssp_hdr.frame_type == SSP_RESPONSE) {
1727 struct ssp_response_iu *resp_iu; 1729 struct ssp_response_iu *resp_iu;
1728 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); 1730 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
1729 1731
1730 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1732 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1731 frame_index, 1733 frame_index,
1732 (void **)&resp_iu); 1734 (void **)&resp_iu);
1733 1735
1734 sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt); 1736 sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt);
1735 1737
1736 resp_iu = &ireq->ssp.rsp; 1738 resp_iu = &ireq->ssp.rsp;
1737 1739
1738 if (resp_iu->datapres == 0x01 || 1740 if (resp_iu->datapres == 0x01 ||
1739 resp_iu->datapres == 0x02) { 1741 resp_iu->datapres == 0x02) {
1740 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1742 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1741 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1743 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1742 } else { 1744 } else {
1743 ireq->scu_status = SCU_TASK_DONE_GOOD; 1745 ireq->scu_status = SCU_TASK_DONE_GOOD;
1744 ireq->sci_status = SCI_SUCCESS; 1746 ireq->sci_status = SCI_SUCCESS;
1745 } 1747 }
1746 } else { 1748 } else {
1747 /* not a response frame; why did it get forwarded? */ 1749 /* not a response frame; why did it get forwarded? */
1748 dev_err(&ihost->pdev->dev, 1750 dev_err(&ihost->pdev->dev,
1749 "%s: SCIC IO Request 0x%p received unexpected " 1751 "%s: SCIC IO Request 0x%p received unexpected "
1750 "frame %d type 0x%02x\n", __func__, ireq, 1752 "frame %d type 0x%02x\n", __func__, ireq,
1751 frame_index, ssp_hdr.frame_type); 1753 frame_index, ssp_hdr.frame_type);
1752 } 1754 }
1753 1755
1754 /* 1756 /*
1755 * In any case we are done with this frame buffer; return it to 1757 * In any case we are done with this frame buffer; return it to
1756 * the controller 1758 * the controller
1757 */ 1759 */
1758 sci_controller_release_frame(ihost, frame_index); 1760 sci_controller_release_frame(ihost, frame_index);
1759 1761
1760 return SCI_SUCCESS; 1762 return SCI_SUCCESS;
1761 } 1763 }
1762 1764
1763 case SCI_REQ_TASK_WAIT_TC_RESP: 1765 case SCI_REQ_TASK_WAIT_TC_RESP:
1764 sci_io_request_copy_response(ireq); 1766 sci_io_request_copy_response(ireq);
1765 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1767 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1766 sci_controller_release_frame(ihost, frame_index); 1768 sci_controller_release_frame(ihost, frame_index);
1767 return SCI_SUCCESS; 1769 return SCI_SUCCESS;
1768 1770
1769 case SCI_REQ_SMP_WAIT_RESP: { 1771 case SCI_REQ_SMP_WAIT_RESP: {
1770 struct sas_task *task = isci_request_access_task(ireq); 1772 struct sas_task *task = isci_request_access_task(ireq);
1771 struct scatterlist *sg = &task->smp_task.smp_resp; 1773 struct scatterlist *sg = &task->smp_task.smp_resp;
1772 void *frame_header, *kaddr; 1774 void *frame_header, *kaddr;
1773 u8 *rsp; 1775 u8 *rsp;
1774 1776
1775 sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1777 sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1776 frame_index, 1778 frame_index,
1777 &frame_header); 1779 &frame_header);
1778 kaddr = kmap_atomic(sg_page(sg)); 1780 kaddr = kmap_atomic(sg_page(sg));
1779 rsp = kaddr + sg->offset; 1781 rsp = kaddr + sg->offset;
1780 sci_swab32_cpy(rsp, frame_header, 1); 1782 sci_swab32_cpy(rsp, frame_header, 1);
1781 1783
1782 if (rsp[0] == SMP_RESPONSE) { 1784 if (rsp[0] == SMP_RESPONSE) {
1783 void *smp_resp; 1785 void *smp_resp;
1784 1786
1785 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1787 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1786 frame_index, 1788 frame_index,
1787 &smp_resp); 1789 &smp_resp);
1788 1790
1789 word_cnt = (sg->length/4)-1; 1791 word_cnt = (sg->length/4)-1;
1790 if (word_cnt > 0) 1792 if (word_cnt > 0)
1791 word_cnt = min_t(unsigned int, word_cnt, 1793 word_cnt = min_t(unsigned int, word_cnt,
1792 SCU_UNSOLICITED_FRAME_BUFFER_SIZE/4); 1794 SCU_UNSOLICITED_FRAME_BUFFER_SIZE/4);
1793 sci_swab32_cpy(rsp + 4, smp_resp, word_cnt); 1795 sci_swab32_cpy(rsp + 4, smp_resp, word_cnt);
1794 1796
1795 ireq->scu_status = SCU_TASK_DONE_GOOD; 1797 ireq->scu_status = SCU_TASK_DONE_GOOD;
1796 ireq->sci_status = SCI_SUCCESS; 1798 ireq->sci_status = SCI_SUCCESS;
1797 sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP); 1799 sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
1798 } else { 1800 } else {
1799 /* 1801 /*
1800 * This was not a response frame; why did it get 1802 * This was not a response frame; why did it get
1801 * forwarded? 1803 * forwarded?
1802 */ 1804 */
1803 dev_err(&ihost->pdev->dev, 1805 dev_err(&ihost->pdev->dev,
1804 "%s: SCIC SMP Request 0x%p received unexpected " 1806 "%s: SCIC SMP Request 0x%p received unexpected "
1805 "frame %d type 0x%02x\n", 1807 "frame %d type 0x%02x\n",
1806 __func__, 1808 __func__,
1807 ireq, 1809 ireq,
1808 frame_index, 1810 frame_index,
1809 rsp[0]); 1811 rsp[0]);
1810 1812
1811 ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR; 1813 ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR;
1812 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1814 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1813 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1815 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1814 } 1816 }
1815 kunmap_atomic(kaddr); 1817 kunmap_atomic(kaddr);
1816 1818
1817 sci_controller_release_frame(ihost, frame_index); 1819 sci_controller_release_frame(ihost, frame_index);
1818 1820
1819 return SCI_SUCCESS; 1821 return SCI_SUCCESS;
1820 } 1822 }
1821 1823
1822 case SCI_REQ_STP_UDMA_WAIT_TC_COMP: 1824 case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
1823 return sci_stp_request_udma_general_frame_handler(ireq, 1825 return sci_stp_request_udma_general_frame_handler(ireq,
1824 frame_index); 1826 frame_index);
1825 1827
1826 case SCI_REQ_STP_UDMA_WAIT_D2H: 1828 case SCI_REQ_STP_UDMA_WAIT_D2H:
1827 /* Use the general frame handler to copy the response data */ 1829 /* Use the general frame handler to copy the response data */
1828 status = sci_stp_request_udma_general_frame_handler(ireq, frame_index); 1830 status = sci_stp_request_udma_general_frame_handler(ireq, frame_index);
1829 1831
1830 if (status != SCI_SUCCESS) 1832 if (status != SCI_SUCCESS)
1831 return status; 1833 return status;
1832 1834
1833 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1835 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1834 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 1836 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1835 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1837 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1836 return SCI_SUCCESS; 1838 return SCI_SUCCESS;
1837 1839
1838 case SCI_REQ_STP_NON_DATA_WAIT_D2H: { 1840 case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
1839 struct dev_to_host_fis *frame_header; 1841 struct dev_to_host_fis *frame_header;
1840 u32 *frame_buffer; 1842 u32 *frame_buffer;
1841 1843
1842 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1844 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1843 frame_index, 1845 frame_index,
1844 (void **)&frame_header); 1846 (void **)&frame_header);
1845 1847
1846 if (status != SCI_SUCCESS) { 1848 if (status != SCI_SUCCESS) {
1847 dev_err(&ihost->pdev->dev, 1849 dev_err(&ihost->pdev->dev,
1848 "%s: SCIC IO Request 0x%p could not get frame " 1850 "%s: SCIC IO Request 0x%p could not get frame "
1849 "header for frame index %d, status %x\n", 1851 "header for frame index %d, status %x\n",
1850 __func__, 1852 __func__,
1851 stp_req, 1853 stp_req,
1852 frame_index, 1854 frame_index,
1853 status); 1855 status);
1854 1856
1855 return status; 1857 return status;
1856 } 1858 }
1857 1859
1858 switch (frame_header->fis_type) { 1860 switch (frame_header->fis_type) {
1859 case FIS_REGD2H: 1861 case FIS_REGD2H:
1860 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1862 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1861 frame_index, 1863 frame_index,
1862 (void **)&frame_buffer); 1864 (void **)&frame_buffer);
1863 1865
1864 sci_controller_copy_sata_response(&ireq->stp.rsp, 1866 sci_controller_copy_sata_response(&ireq->stp.rsp,
1865 frame_header, 1867 frame_header,
1866 frame_buffer); 1868 frame_buffer);
1867 1869
1868 /* The command has completed with an error */ 1870 /* The command has completed with an error */
1869 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1871 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1870 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 1872 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1871 break; 1873 break;
1872 1874
1873 default: 1875 default:
1874 dev_warn(&ihost->pdev->dev, 1876 dev_warn(&ihost->pdev->dev,
1875 "%s: IO Request:0x%p Frame Id:%d protocol " 1877 "%s: IO Request:0x%p Frame Id:%d protocol "
1876 "violation occurred\n", __func__, stp_req, 1878 "violation occurred\n", __func__, stp_req,
1877 frame_index); 1879 frame_index);
1878 1880
1879 ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS; 1881 ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
1880 ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION; 1882 ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
1881 break; 1883 break;
1882 } 1884 }
1883 1885
1884 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1886 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1885 1887
1886 /* Frame has been decoded; return it to the controller */ 1888 /* Frame has been decoded; return it to the controller */
1887 sci_controller_release_frame(ihost, frame_index); 1889 sci_controller_release_frame(ihost, frame_index);
1888 1890
1889 return status; 1891 return status;
1890 } 1892 }
1891 1893
1892 case SCI_REQ_STP_PIO_WAIT_FRAME: { 1894 case SCI_REQ_STP_PIO_WAIT_FRAME: {
1893 struct sas_task *task = isci_request_access_task(ireq); 1895 struct sas_task *task = isci_request_access_task(ireq);
1894 struct dev_to_host_fis *frame_header; 1896 struct dev_to_host_fis *frame_header;
1895 u32 *frame_buffer; 1897 u32 *frame_buffer;
1896 1898
1897 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1899 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1898 frame_index, 1900 frame_index,
1899 (void **)&frame_header); 1901 (void **)&frame_header);
1900 1902
1901 if (status != SCI_SUCCESS) { 1903 if (status != SCI_SUCCESS) {
1902 dev_err(&ihost->pdev->dev, 1904 dev_err(&ihost->pdev->dev,
1903 "%s: SCIC IO Request 0x%p could not get frame " 1905 "%s: SCIC IO Request 0x%p could not get frame "
1904 "header for frame index %d, status %x\n", 1906 "header for frame index %d, status %x\n",
1905 __func__, stp_req, frame_index, status); 1907 __func__, stp_req, frame_index, status);
1906 return status; 1908 return status;
1907 } 1909 }
1908 1910
1909 switch (frame_header->fis_type) { 1911 switch (frame_header->fis_type) {
1910 case FIS_PIO_SETUP: 1912 case FIS_PIO_SETUP:
1911 /* Get the PIO Setup data from the frame buffer */ 1913 /* Get the PIO Setup data from the frame buffer */
1912 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1914 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1913 frame_index, 1915 frame_index,
1914 (void **)&frame_buffer); 1916 (void **)&frame_buffer);
1915 1917
1916 /* Get the data from the PIO Setup. The SCU hardware 1918 /* Get the data from the PIO Setup. The SCU hardware
1917 * returns the first word in the frame_header, and the rest 1919 * returns the first word in the frame_header, and the rest
1918 * of the data is in the frame buffer, so we need to 1920 * of the data is in the frame buffer, so we need to
1919 * back up one dword 1921 * back up one dword
1920 */ 1922 */
1921 1923
1922 /* transfer_count: first 16 bits of the 4th dword */ 1924 /* transfer_count: first 16 bits of the 4th dword */
1923 stp_req->pio_len = frame_buffer[3] & 0xffff; 1925 stp_req->pio_len = frame_buffer[3] & 0xffff;
1924 1926
1925 /* status: 4th byte in the 3rd dword */ 1927 /* status: 4th byte in the 3rd dword */
1926 stp_req->status = (frame_buffer[2] >> 24) & 0xff; 1928 stp_req->status = (frame_buffer[2] >> 24) & 0xff;
1927 1929
1928 sci_controller_copy_sata_response(&ireq->stp.rsp, 1930 sci_controller_copy_sata_response(&ireq->stp.rsp,
1929 frame_header, 1931 frame_header,
1930 frame_buffer); 1932 frame_buffer);
1931 1933
1932 ireq->stp.rsp.status = stp_req->status; 1934 ireq->stp.rsp.status = stp_req->status;
1933 1935
1934 /* The next state is dependent on whether the 1936 /* The next state is dependent on whether the
1935 * request was PIO Data-in or Data-out 1937 * request was PIO Data-in or Data-out
1936 */ 1938 */
1937 if (task->data_dir == DMA_FROM_DEVICE) { 1939 if (task->data_dir == DMA_FROM_DEVICE) {
1938 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN); 1940 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN);
1939 } else if (task->data_dir == DMA_TO_DEVICE) { 1941 } else if (task->data_dir == DMA_TO_DEVICE) {
1940 /* Transmit data */ 1942 /* Transmit data */
1941 status = sci_stp_request_pio_data_out_transmit_data(ireq); 1943 status = sci_stp_request_pio_data_out_transmit_data(ireq);
1942 if (status != SCI_SUCCESS) 1944 if (status != SCI_SUCCESS)
1943 break; 1945 break;
1944 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT); 1946 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT);
1945 } 1947 }
1946 break; 1948 break;
1947 1949
1948 case FIS_SETDEVBITS: 1950 case FIS_SETDEVBITS:
1949 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 1951 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1950 break; 1952 break;
1951 1953
1952 case FIS_REGD2H: 1954 case FIS_REGD2H:
1953 if (frame_header->status & ATA_BUSY) { 1955 if (frame_header->status & ATA_BUSY) {
1954 /* 1956 /*
1955 * Now why is the drive sending a D2H Register 1957 * Now why is the drive sending a D2H Register
1956 * FIS when it is still busy? Do nothing since 1958 * FIS when it is still busy? Do nothing since
1957 * we are still in the right state. 1959 * we are still in the right state.
1958 */ 1960 */
1959 dev_dbg(&ihost->pdev->dev, 1961 dev_dbg(&ihost->pdev->dev,
1960 "%s: SCIC PIO Request 0x%p received " 1962 "%s: SCIC PIO Request 0x%p received "
1961 "D2H Register FIS with BSY status " 1963 "D2H Register FIS with BSY status "
1962 "0x%x\n", 1964 "0x%x\n",
1963 __func__, 1965 __func__,
1964 stp_req, 1966 stp_req,
1965 frame_header->status); 1967 frame_header->status);
1966 break; 1968 break;
1967 } 1969 }
1968 1970
1969 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1971 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1970 frame_index, 1972 frame_index,
1971 (void **)&frame_buffer); 1973 (void **)&frame_buffer);
1972 1974
1973 sci_controller_copy_sata_response(&ireq->stp.req, 1975 sci_controller_copy_sata_response(&ireq->stp.req,
1974 frame_header, 1976 frame_header,
1975 frame_buffer); 1977 frame_buffer);
1976 1978
1977 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 1979 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1978 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 1980 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1979 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1981 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1980 break; 1982 break;
1981 1983
1982 default: 1984 default:
1983 /* FIXME: what do we do here? */ 1985 /* FIXME: what do we do here? */
1984 break; 1986 break;
1985 } 1987 }
1986 1988
1987 /* Frame is decoded; return it to the controller */ 1989 /* Frame is decoded; return it to the controller */
1988 sci_controller_release_frame(ihost, frame_index); 1990 sci_controller_release_frame(ihost, frame_index);
1989 1991
1990 return status; 1992 return status;
1991 } 1993 }
1992 1994
1993 case SCI_REQ_STP_PIO_DATA_IN: { 1995 case SCI_REQ_STP_PIO_DATA_IN: {
1994 struct dev_to_host_fis *frame_header; 1996 struct dev_to_host_fis *frame_header;
1995 struct sata_fis_data *frame_buffer; 1997 struct sata_fis_data *frame_buffer;
1996 1998
1997 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1999 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1998 frame_index, 2000 frame_index,
1999 (void **)&frame_header); 2001 (void **)&frame_header);
2000 2002
2001 if (status != SCI_SUCCESS) { 2003 if (status != SCI_SUCCESS) {
2002 dev_err(&ihost->pdev->dev, 2004 dev_err(&ihost->pdev->dev,
2003 "%s: SCIC IO Request 0x%p could not get frame " 2005 "%s: SCIC IO Request 0x%p could not get frame "
2004 "header for frame index %d, status %x\n", 2006 "header for frame index %d, status %x\n",
2005 __func__, 2007 __func__,
2006 stp_req, 2008 stp_req,
2007 frame_index, 2009 frame_index,
2008 status); 2010 status);
2009 return status; 2011 return status;
2010 } 2012 }
2011 2013
2012 if (frame_header->fis_type != FIS_DATA) { 2014 if (frame_header->fis_type != FIS_DATA) {
2013 dev_err(&ihost->pdev->dev, 2015 dev_err(&ihost->pdev->dev,
2014 "%s: SCIC PIO Request 0x%p received frame %d " 2016 "%s: SCIC PIO Request 0x%p received frame %d "
2015 "with fis type 0x%02x when expecting a data " 2017 "with fis type 0x%02x when expecting a data "
2016 "fis.\n", 2018 "fis.\n",
2017 __func__, 2019 __func__,
2018 stp_req, 2020 stp_req,
2019 frame_index, 2021 frame_index,
2020 frame_header->fis_type); 2022 frame_header->fis_type);
2021 2023
2022 ireq->scu_status = SCU_TASK_DONE_GOOD; 2024 ireq->scu_status = SCU_TASK_DONE_GOOD;
2023 ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT; 2025 ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT;
2024 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2026 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2025 2027
2026 /* Frame is decoded; return it to the controller */ 2028 /* Frame is decoded; return it to the controller */
2027 sci_controller_release_frame(ihost, frame_index); 2029 sci_controller_release_frame(ihost, frame_index);
2028 return status; 2030 return status;
2029 } 2031 }
2030 2032
2031 if (stp_req->sgl.index < 0) { 2033 if (stp_req->sgl.index < 0) {
2032 ireq->saved_rx_frame_index = frame_index; 2034 ireq->saved_rx_frame_index = frame_index;
2033 stp_req->pio_len = 0; 2035 stp_req->pio_len = 0;
2034 } else { 2036 } else {
2035 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 2037 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
2036 frame_index, 2038 frame_index,
2037 (void **)&frame_buffer); 2039 (void **)&frame_buffer);
2038 2040
2039 status = sci_stp_request_pio_data_in_copy_data(stp_req, 2041 status = sci_stp_request_pio_data_in_copy_data(stp_req,
2040 (u8 *)frame_buffer); 2042 (u8 *)frame_buffer);
2041 2043
2042 /* Frame is decoded; return it to the controller */ 2044 /* Frame is decoded; return it to the controller */
2043 sci_controller_release_frame(ihost, frame_index); 2045 sci_controller_release_frame(ihost, frame_index);
2044 } 2046 }
2045 2047
2046 /* Check for the end of the transfer: are there more 2048 /* Check for the end of the transfer: are there more
2047 * bytes remaining for this data transfer 2049 * bytes remaining for this data transfer
2048 */ 2050 */
2049 if (status != SCI_SUCCESS || stp_req->pio_len != 0) 2051 if (status != SCI_SUCCESS || stp_req->pio_len != 0)
2050 return status; 2052 return status;
2051 2053
2052 if ((stp_req->status & ATA_BUSY) == 0) { 2054 if ((stp_req->status & ATA_BUSY) == 0) {
2053 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 2055 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
2054 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 2056 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
2055 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2057 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2056 } else { 2058 } else {
2057 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); 2059 sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
2058 } 2060 }
2059 return status; 2061 return status;
2060 } 2062 }
2061 2063
2062 case SCI_REQ_ATAPI_WAIT_PIO_SETUP: { 2064 case SCI_REQ_ATAPI_WAIT_PIO_SETUP: {
2063 struct sas_task *task = isci_request_access_task(ireq); 2065 struct sas_task *task = isci_request_access_task(ireq);
2064 2066
2065 sci_controller_release_frame(ihost, frame_index); 2067 sci_controller_release_frame(ihost, frame_index);
2066 ireq->target_device->working_request = ireq; 2068 ireq->target_device->working_request = ireq;
2067 if (task->data_dir == DMA_NONE) { 2069 if (task->data_dir == DMA_NONE) {
2068 sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_TC_COMP); 2070 sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_TC_COMP);
2069 scu_atapi_reconstruct_raw_frame_task_context(ireq); 2071 scu_atapi_reconstruct_raw_frame_task_context(ireq);
2070 } else { 2072 } else {
2071 sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H); 2073 sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
2072 scu_atapi_construct_task_context(ireq); 2074 scu_atapi_construct_task_context(ireq);
2073 } 2075 }
2074 2076
2075 sci_controller_continue_io(ireq); 2077 sci_controller_continue_io(ireq);
2076 return SCI_SUCCESS; 2078 return SCI_SUCCESS;
2077 } 2079 }
2078 case SCI_REQ_ATAPI_WAIT_D2H: 2080 case SCI_REQ_ATAPI_WAIT_D2H:
2079 return atapi_d2h_reg_frame_handler(ireq, frame_index); 2081 return atapi_d2h_reg_frame_handler(ireq, frame_index);
2080 case SCI_REQ_ABORTING: 2082 case SCI_REQ_ABORTING:
2081 /* 2083 /*
2082 * TODO: Is it even possible to get an unsolicited frame in the 2084 * TODO: Is it even possible to get an unsolicited frame in the
2083 * aborting state? 2085 * aborting state?
2084 */ 2086 */
2085 sci_controller_release_frame(ihost, frame_index); 2087 sci_controller_release_frame(ihost, frame_index);
2086 return SCI_SUCCESS; 2088 return SCI_SUCCESS;
2087 2089
2088 default: 2090 default:
2089 dev_warn(&ihost->pdev->dev, 2091 dev_warn(&ihost->pdev->dev,
2090 "%s: SCIC IO Request given unexpected frame %x while " 2092 "%s: SCIC IO Request given unexpected frame %x while "
2091 "in state %d\n", 2093 "in state %d\n",
2092 __func__, 2094 __func__,
2093 frame_index, 2095 frame_index,
2094 state); 2096 state);
2095 2097
2096 sci_controller_release_frame(ihost, frame_index); 2098 sci_controller_release_frame(ihost, frame_index);
2097 return SCI_FAILURE_INVALID_STATE; 2099 return SCI_FAILURE_INVALID_STATE;
2098 } 2100 }
2099 } 2101 }
2100 2102
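
[Editorial note] For reference, the PIO Setup decode in the SCI_REQ_STP_PIO_WAIT_FRAME case above pulls the transfer count from the low 16 bits of frame_buffer[3] and the ending status from the top byte of frame_buffer[2]. A sketch of that extraction; the field positions mirror the code above, while the sample values (and the DRDY|DSC reading of 0x50) are illustrative assumptions:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Illustrative frame contents; the SCU keeps the FIS's first
		 * dword in the frame header, so indexing here starts one
		 * dword into the FIS, per the "back up one dword" comment. */
		uint32_t frame_buffer[4] = { 0, 0, 0x50000000, 0x00000200 };

		uint16_t transfer_count = frame_buffer[3] & 0xffff;	    /* 0x200 bytes */
		uint8_t ending_status = (frame_buffer[2] >> 24) & 0xff;	    /* 0x50 */

		printf("transfer_count=0x%x status=0x%x\n",
		       transfer_count, ending_status);
		return 0;
	}
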
2101 static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq, 2103 static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq,
2102 u32 completion_code) 2104 u32 completion_code)
2103 { 2105 {
2104 enum sci_status status = SCI_SUCCESS; 2106 enum sci_status status = SCI_SUCCESS;
2105 2107
2106 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { 2108 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2107 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): 2109 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2108 ireq->scu_status = SCU_TASK_DONE_GOOD; 2110 ireq->scu_status = SCU_TASK_DONE_GOOD;
2109 ireq->sci_status = SCI_SUCCESS; 2111 ireq->sci_status = SCI_SUCCESS;
2110 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2112 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2111 break; 2113 break;
2112 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS): 2114 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
2113 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): 2115 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
2114 /* We must check the response buffer to see if the D2H 2116 /* We must check the response buffer to see if the D2H
2115 * Register FIS was received before we got the TC 2117 * Register FIS was received before we got the TC
2116 * completion. 2118 * completion.
2117 */ 2119 */
2118 if (ireq->stp.rsp.fis_type == FIS_REGD2H) { 2120 if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
2119 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; 2121 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
2120 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; 2122 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
2121 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2123 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2122 } else { 2124 } else {
2123 /* If we have an error completion status for the 2125 /* If we have an error completion status for the
2124 * TC then we can expect a D2H register FIS from 2126 * TC then we can expect a D2H register FIS from
2125 * the device so we must change state to wait 2127 * the device so we must change state to wait
2126 * for it 2128 * for it
2127 */ 2129 */
2128 sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H); 2130 sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
2129 } 2131 }
2130 break; 2132 break;
2131 2133
2132 /* TODO Check to see if any of these completion status need to 2134 /* TODO Check to see if any of these completion status need to
2133 * wait for the device to host register fis. 2135 * wait for the device to host register fis.
2134 */ 2136 */
2135 /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR 2137 /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
2136 * - this comes only for B0 2138 * - this comes only for B0
2137 */ 2139 */
2138 default: 2140 default:
2139 /* All other completion status cause the IO to be complete. */ 2141 /* All other completion status cause the IO to be complete. */
2140 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 2142 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
2141 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 2143 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
2142 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 2144 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2143 break; 2145 break;
2144 } 2146 }
2145 2147
2146 return status; 2148 return status;
2147 } 2149 }
2148 2150
static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code,
					    enum sci_base_request_states next)
{
	enum sci_status status = SCI_SUCCESS;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, next);
		break;
	default:
		/* All other completion statuses cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;

		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return status;
}

static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ireq,
							u32 completion_code)
{
	struct isci_remote_device *idev = ireq->target_device;
	struct dev_to_host_fis *d2h = &ireq->stp.rsp;
	enum sci_status status = SCI_SUCCESS;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;

	case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): {
		u16 len = sci_req_tx_bytes(ireq);

		/* likely non-error data underrun, workaround missing
		 * d2h frame from the controller
		 */
		if (d2h->fis_type != FIS_REGD2H) {
			d2h->fis_type = FIS_REGD2H;
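			/* Assumed bit decodes, noted for clarity: flags
			 * bit 6 is the D2H FIS interrupt bit, and ATA
			 * status 0x50 is ATA_DRDY | ATA_DSC (device
			 * ready, seek complete, no error bits set).
			 */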
			d2h->flags = (1 << 6);
			d2h->status = 0x50;
			d2h->error = 0;
			d2h->lbal = 0;
			d2h->byte_count_low = len & 0xff;
			d2h->byte_count_high = len >> 8;
			d2h->device = 0xa0;
			d2h->lbal_exp = 0;
			d2h->lbam_exp = 0;
			d2h->lbah_exp = 0;
			d2h->_r_a = 0;
			d2h->sector_count = 0x3;
			d2h->sector_count_exp = 0;
			d2h->_r_b = 0;
			d2h->_r_c = 0;
			d2h->_r_d = 0;
		}

		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
		status = ireq->sci_status;

		/* the hw will have suspended the rnc, so complete the
		 * request upon pending resume
		 */
		sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
		break;
	}
	case (SCU_TASK_DONE_EXCESS_DATA << SCU_COMPLETION_TL_STATUS_SHIFT):
		/* In this case, there is no UF coming after, so
		 * complete the IO now.
		 */
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;

	default:
		if (d2h->fis_type == FIS_REGD2H) {
			/* UF received; change the device state to ATAPI_ERROR. */
			status = ireq->sci_status;
			sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
		} else {
			/* If receiving any non-success TC status with no UF
			 * received yet, then a UF for the status FIS
			 * is coming after (XXX: suspect this is
			 * actually a protocol error or a bug like the
			 * DONE_UNEXP_FIS case).
			 */
			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;

			sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
		}
		break;
	}

	return status;
}

static int sci_request_smp_completion_status_is_tx_suspend(
	unsigned int completion_status)
{
	switch (completion_status) {
	case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
	case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
	case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
		return 1;
	}
	return 0;
}

static int sci_request_smp_completion_status_is_tx_rx_suspend(
	unsigned int completion_status)
{
	return 0; /* There are no Tx/Rx SMP suspend conditions. */
}

static int sci_request_ssp_completion_status_is_tx_suspend(
	unsigned int completion_status)
{
	switch (completion_status) {
	case SCU_TASK_DONE_TX_RAW_CMD_ERR:
	case SCU_TASK_DONE_LF_ERR:
	case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
	case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
	case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
	case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
	case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
	case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
		return 1;
	}
	return 0;
}

static int sci_request_ssp_completion_status_is_tx_rx_suspend(
	unsigned int completion_status)
{
	return 0; /* There are no Tx/Rx SSP suspend conditions. */
}

static int sci_request_stpsata_completion_status_is_tx_suspend(
	unsigned int completion_status)
{
	switch (completion_status) {
	case SCU_TASK_DONE_TX_RAW_CMD_ERR:
	case SCU_TASK_DONE_LL_R_ERR:
	case SCU_TASK_DONE_LL_PERR:
	case SCU_TASK_DONE_REG_ERR:
	case SCU_TASK_DONE_SDB_ERR:
	case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
	case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
	case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
	case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
	case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
	case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
		return 1;
	}
	return 0;
}


static int sci_request_stpsata_completion_status_is_tx_rx_suspend(
	unsigned int completion_status)
{
	switch (completion_status) {
	case SCU_TASK_DONE_LF_ERR:
	case SCU_TASK_DONE_LL_SY_TERM:
	case SCU_TASK_DONE_LL_LF_TERM:
	case SCU_TASK_DONE_BREAK_RCVD:
	case SCU_TASK_DONE_INV_FIS_LEN:
	case SCU_TASK_DONE_UNEXP_FIS:
	case SCU_TASK_DONE_UNEXP_SDBFIS:
	case SCU_TASK_DONE_MAX_PLD_ERR:
		return 1;
	}
	return 0;
}

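/*
 * The six predicates above encode, per protocol, which transport-layer
 * completion statuses imply that the hardware has suspended the RNC,
 * and whether the suspension is Tx-only or Tx/Rx.  A hypothetical
 * table-driven rendering of the same decode, shown only to make the
 * status-to-suspension mapping explicit (the names and layout below are
 * illustrative, not part of the driver):
 */
#if 0
struct suspend_decode {
	unsigned int status;	/* bare SCU_TASK_* completion status */
	int tx_rx;		/* 1: Tx/Rx suspension; 0: Tx-only */
};

static const struct suspend_decode stpsata_suspend_table[] = {
	{ SCU_TASK_DONE_TX_RAW_CMD_ERR,	0 },
	{ SCU_TASK_DONE_REG_ERR,	0 },
	{ SCU_TASK_DONE_UNEXP_FIS,	1 },
	{ SCU_TASK_DONE_MAX_PLD_ERR,	1 },
	/* ...remaining entries mirror the switch statements above... */
};
#endif
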
static void sci_request_handle_suspending_completions(
	struct isci_request *ireq,
	u32 completion_code)
{
	int is_tx = 0;
	int is_tx_rx = 0;

	switch (ireq->protocol) {
	case SAS_PROTOCOL_SMP:
		is_tx = sci_request_smp_completion_status_is_tx_suspend(
			completion_code);
		is_tx_rx = sci_request_smp_completion_status_is_tx_rx_suspend(
			completion_code);
		break;
	case SAS_PROTOCOL_SSP:
		is_tx = sci_request_ssp_completion_status_is_tx_suspend(
			completion_code);
		is_tx_rx = sci_request_ssp_completion_status_is_tx_rx_suspend(
			completion_code);
		break;
	case SAS_PROTOCOL_STP:
		is_tx = sci_request_stpsata_completion_status_is_tx_suspend(
			completion_code);
		is_tx_rx =
			sci_request_stpsata_completion_status_is_tx_rx_suspend(
				completion_code);
		break;
	default:
		dev_warn(&ireq->isci_host->pdev->dev,
			 "%s: request %p has no valid protocol\n",
			 __func__, ireq);
		break;
	}
	if (is_tx || is_tx_rx) {
		BUG_ON(is_tx && is_tx_rx);

		sci_remote_node_context_suspend(
			&ireq->target_device->rnc,
			SCU_HARDWARE_SUSPENSION,
			(is_tx_rx) ? SCU_EVENT_TL_RNC_SUSPEND_TX_RX
				   : SCU_EVENT_TL_RNC_SUSPEND_TX,
			NULL, NULL);
	}
}

enum sci_status
sci_io_request_tc_completion(struct isci_request *ireq,
			     u32 completion_code)
{
	enum sci_base_request_states state;
	struct isci_host *ihost = ireq->owning_controller;

	state = ireq->sm.current_state_id;

	/* Decode those completions that signal upcoming suspension events. */
	sci_request_handle_suspending_completions(
		ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code));
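	/* The suspension must be recorded before the per-state dispatch
	 * below: outstanding TCs may only be terminated once the RNC is
	 * suspended, so this decode has to run no matter which request
	 * state ends up consuming the TC completion.
	 */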

	switch (state) {
	case SCI_REQ_STARTED:
		return request_started_state_tc_event(ireq, completion_code);

	case SCI_REQ_TASK_WAIT_TC_COMP:
		return ssp_task_request_await_tc_event(ireq,
						       completion_code);

	case SCI_REQ_SMP_WAIT_RESP:
		return smp_request_await_response_tc_event(ireq,
							   completion_code);

	case SCI_REQ_SMP_WAIT_TC_COMP:
		return smp_request_await_tc_event(ireq, completion_code);

	case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
		return stp_request_udma_await_tc_event(ireq,
						       completion_code);

	case SCI_REQ_STP_NON_DATA_WAIT_H2D:
		return stp_request_non_data_await_h2d_tc_event(ireq,
							       completion_code);

	case SCI_REQ_STP_PIO_WAIT_H2D:
		return stp_request_pio_await_h2d_completion_tc_event(ireq,
								     completion_code);

	case SCI_REQ_STP_PIO_DATA_OUT:
		return pio_data_out_tx_done_tc_event(ireq, completion_code);

	case SCI_REQ_ABORTING:
		return request_aborting_state_tc_event(ireq,
						       completion_code);

	case SCI_REQ_ATAPI_WAIT_H2D:
		return atapi_raw_completion(ireq, completion_code,
					    SCI_REQ_ATAPI_WAIT_PIO_SETUP);

	case SCI_REQ_ATAPI_WAIT_TC_COMP:
		return atapi_raw_completion(ireq, completion_code,
					    SCI_REQ_ATAPI_WAIT_D2H);

	case SCI_REQ_ATAPI_WAIT_D2H:
		return atapi_data_tc_completion_handler(ireq, completion_code);

	default:
		dev_warn(&ihost->pdev->dev, "%s: %x in wrong state %s\n",
			 __func__, completion_code, req_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

/**
 * isci_request_process_response_iu() - This function sets the status and
 *    response iu, in the task struct, from the request object for the upper
 *    layer driver.
 * @task: This parameter is the task struct from the upper layer driver.
 * @resp_iu: This parameter points to the response iu of the completed request.
 * @dev: This parameter specifies the linux device struct.
 *
 * none.
 */
static void isci_request_process_response_iu(
	struct sas_task *task,
	struct ssp_response_iu *resp_iu,
	struct device *dev)
{
	dev_dbg(dev,
		"%s: resp_iu = %p "
		"resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
		"resp_iu->response_data_len = %x, "
		"resp_iu->sense_data_len = %x\nresponse data: ",
		__func__,
		resp_iu,
		resp_iu->status,
		resp_iu->datapres,
		resp_iu->response_data_len,
		resp_iu->sense_data_len);

	task->task_status.stat = resp_iu->status;

	/* libsas updates the task status fields based on the response iu. */
	sas_ssp_task_response(dev, task, resp_iu);
}
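
/*
 * For reference: sas_ssp_task_response() is the libsas helper that
 * decodes resp_iu->datapres (no data, response data, or sense data)
 * and fills in task->task_status accordingly, including copying any
 * sense bytes.  (Behavioral summary assumed from the libsas API, noted
 * here for readers unfamiliar with it.)
 */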

/**
 * isci_request_set_open_reject_status() - This function prepares the I/O
 *    completion for OPEN_REJECT conditions.
 * @request: This parameter is the completed isci_request object.
 * @task: This parameter is the sas_task from the upper layer driver.
 * @response_ptr: This parameter specifies the service response for the I/O.
 * @status_ptr: This parameter specifies the exec status for the I/O.
 * @complete_to_host_ptr: This parameter specifies the action to be taken by
 *    the LLDD with respect to completing this request or forcing an abort
 *    condition on the I/O.
 * @open_rej_reason: This parameter specifies the encoded reason for the
 *    abandon-class reject.
 *
 * none.
 */
static void isci_request_set_open_reject_status(
	struct isci_request *request,
	struct sas_task *task,
	enum service_response *response_ptr,
	enum exec_status *status_ptr,
	enum isci_completion_selection *complete_to_host_ptr,
	enum sas_open_rej_reason open_rej_reason)
{
	/* Task in the target is done. */
	set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
	*response_ptr = SAS_TASK_UNDELIVERED;
	*status_ptr = SAS_OPEN_REJECT;
	*complete_to_host_ptr = isci_perform_normal_io_completion;
	task->task_status.open_rej_reason = open_rej_reason;
}

/**
 * isci_request_handle_controller_specific_errors() - This function decodes
 *    controller-specific I/O completion error conditions.
 * @idev: This parameter is the remote device associated with the request.
 * @request: This parameter is the completed isci_request object.
 * @task: This parameter is the sas_task from the upper layer driver.
 * @response_ptr: This parameter specifies the service response for the I/O.
 * @status_ptr: This parameter specifies the exec status for the I/O.
 * @complete_to_host_ptr: This parameter specifies the action to be taken by
 *    the LLDD with respect to completing this request or forcing an abort
 *    condition on the I/O.
 *
 * none.
 */
static void isci_request_handle_controller_specific_errors(
	struct isci_remote_device *idev,
	struct isci_request *request,
	struct sas_task *task,
	enum service_response *response_ptr,
	enum exec_status *status_ptr,
	enum isci_completion_selection *complete_to_host_ptr)
{
	unsigned int cstatus;

	cstatus = request->scu_status;

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
		"- controller status = 0x%x\n",
		__func__, request, cstatus);

	/* Decode the controller-specific errors; most
	 * important is to recognize those conditions in which
	 * the target may still have a task outstanding that
	 * must be aborted.
	 *
	 * Note that there are SCU completion codes being
	 * named in the decode below for which SCIC has already
	 * done work to handle them in a way other than as
	 * a controller-specific completion code; these are left
	 * in the decode below for completeness' sake.
	 */
	switch (cstatus) {
	case SCU_TASK_DONE_DMASETUP_DIRERR:
	/* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
	case SCU_TASK_DONE_XFERCNT_ERR:
		/* Also SCU_TASK_DONE_SMP_UFI_ERR: */
		if (task->task_proto == SAS_PROTOCOL_SMP) {
			/* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
			*response_ptr = SAS_TASK_COMPLETE;

			/* See if the device has been/is being stopped. Note
			 * that we ignore the quiesce state, since we are
			 * concerned about the actual device state.
			 */
			if (!idev)
				*status_ptr = SAS_DEVICE_UNKNOWN;
			else
				*status_ptr = SAS_ABORTED_TASK;

			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);

			*complete_to_host_ptr =
				isci_perform_normal_io_completion;
		} else {
			/* Task in the target is not done. */
			*response_ptr = SAS_TASK_UNDELIVERED;

			if (!idev)
				*status_ptr = SAS_DEVICE_UNKNOWN;
			else
				*status_ptr = SAM_STAT_TASK_ABORTED;

			clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);

			*complete_to_host_ptr =
				isci_perform_error_io_completion;
		}

		break;

	case SCU_TASK_DONE_CRC_ERR:
	case SCU_TASK_DONE_NAK_CMD_ERR:
	case SCU_TASK_DONE_EXCESS_DATA:
	case SCU_TASK_DONE_UNEXP_FIS:
	/* Also SCU_TASK_DONE_UNEXP_RESP: */
	case SCU_TASK_DONE_VIIT_ENTRY_NV:	/* TODO - conditions? */
	case SCU_TASK_DONE_IIT_ENTRY_NV:	/* TODO - conditions? */
	case SCU_TASK_DONE_RNCNV_OUTBOUND:	/* TODO - conditions? */
		/* These are conditions in which the target
		 * has completed the task, so that no cleanup
		 * is necessary.
		 */
		*response_ptr = SAS_TASK_COMPLETE;

		/* See if the device has been/is being stopped. Note
		 * that we ignore the quiesce state, since we are
		 * concerned about the actual device state.
		 */
		if (!idev)
			*status_ptr = SAS_DEVICE_UNKNOWN;
		else
			*status_ptr = SAS_ABORTED_TASK;

		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);

		*complete_to_host_ptr = isci_perform_normal_io_completion;
		break;


	/* Note that the only open reject completion codes seen here will be
	 * abandon-class codes; all others are automatically retried in the SCU.
	 */
	case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
		break;

	case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:

		/* Note - the return of AB0 will change when
		 * libsas implements detection of zone violations.
		 */
		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_RESV_AB0);
		break;

	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_RESV_AB1);
		break;

	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_RESV_AB2);
		break;

	case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_RESV_AB3);
		break;

	case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_BAD_DEST);
		break;

	case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_STP_NORES);
		break;

	case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_EPROTO);
		break;

	case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:

		isci_request_set_open_reject_status(
			request, task, response_ptr, status_ptr,
			complete_to_host_ptr, SAS_OREJ_CONN_RATE);
		break;

	case SCU_TASK_DONE_LL_R_ERR:
	/* Also SCU_TASK_DONE_ACK_NAK_TO: */
	case SCU_TASK_DONE_LL_PERR:
	case SCU_TASK_DONE_LL_SY_TERM:
	/* Also SCU_TASK_DONE_NAK_ERR: */
	case SCU_TASK_DONE_LL_LF_TERM:
	/* Also SCU_TASK_DONE_DATA_LEN_ERR: */
	case SCU_TASK_DONE_LL_ABORT_ERR:
	case SCU_TASK_DONE_SEQ_INV_TYPE:
	/* Also SCU_TASK_DONE_UNEXP_XR: */
	case SCU_TASK_DONE_XR_IU_LEN_ERR:
	case SCU_TASK_DONE_INV_FIS_LEN:
	/* Also SCU_TASK_DONE_XR_WD_LEN: */
	case SCU_TASK_DONE_SDMA_ERR:
	case SCU_TASK_DONE_OFFSET_ERR:
	case SCU_TASK_DONE_MAX_PLD_ERR:
	case SCU_TASK_DONE_LF_ERR:
	case SCU_TASK_DONE_SMP_RESP_TO_ERR:	/* Escalate to dev reset? */
	case SCU_TASK_DONE_SMP_LL_RX_ERR:
	case SCU_TASK_DONE_UNEXP_DATA:
	case SCU_TASK_DONE_UNEXP_SDBFIS:
	case SCU_TASK_DONE_REG_ERR:
	case SCU_TASK_DONE_SDB_ERR:
	case SCU_TASK_DONE_TASK_ABORT:
	default:
		/* Task in the target is not done. */
		*response_ptr = SAS_TASK_UNDELIVERED;
		*status_ptr = SAM_STAT_TASK_ABORTED;

		if (task->task_proto == SAS_PROTOCOL_SMP) {
			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);

			*complete_to_host_ptr = isci_perform_normal_io_completion;
		} else {
			clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);

			*complete_to_host_ptr = isci_perform_error_io_completion;
		}
		break;
	}
}

/**
 * isci_task_save_for_upper_layer_completion() - This function saves the
 *    request for later completion to the upper layer driver.
 * @host: This parameter is a pointer to the host on which the request
 *    should be queued (either as an error or success).
 * @request: This parameter is the completed request.
 * @response: This parameter is the response code for the completed task.
 * @status: This parameter is the status code for the completed task.
 *
 * none.
 */
static void isci_task_save_for_upper_layer_completion(
	struct isci_host *host,
	struct isci_request *request,
	enum service_response response,
	enum exec_status status,
	enum isci_completion_selection task_notification_selection)
{
	struct sas_task *task = isci_request_access_task(request);

	task_notification_selection
		= isci_task_set_completion_status(task, response, status,
						  task_notification_selection);

	/* Tasks aborted specifically by a call to the lldd_abort_task
	 * function should not be completed to the host in the regular path.
	 */
	switch (task_notification_selection) {

	case isci_perform_normal_io_completion:
		/* Normal notification (task_done) */

		/* Add to the completed list. */
		list_add(&request->completed_node,
			 &host->requests_to_complete);

		/* Take the request off the device's pending request list. */
		list_del_init(&request->dev_node);
		break;

	case isci_perform_aborted_io_completion:
		/* No notification to libsas because this request is
		 * already in the abort path.
		 */
		/* Wake up whatever process was waiting for this
		 * request to complete.
		 */
		WARN_ON(request->io_request_completion == NULL);

		if (request->io_request_completion != NULL) {

			/* Signal whoever is waiting that this
			 * request is complete.
			 */
			complete(request->io_request_completion);
		}
		break;

	case isci_perform_error_io_completion:
		/* Use sas_task_abort */
		/* Add to the aborted list. */
		list_add(&request->completed_node,
			 &host->requests_to_errorback);
		break;

	default:
		/* Add to the error to libsas list. */
		list_add(&request->completed_node,
			 &host->requests_to_errorback);
		break;
	}
	dev_dbg(&host->pdev->dev,
		"%s: %d - task = %p, response=%d (%d), status=%d (%d)\n",
		__func__, task_notification_selection, task,
		(task) ? task->task_status.resp : 0, response,
		(task) ? task->task_status.stat : 0, status);
}

static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (void *)&ts->buf[0];

	resp->frame_len = sizeof(*fis);
	memcpy(resp->ending_fis, fis, sizeof(*fis));
	ts->buf_valid_size = sizeof(*resp);

	/* If the device fault bit is set in the status register, then
	 * set the sense data and return.
	 */
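	/* Status bit decode (per <linux/ata.h>): ATA_DF is the device
	 * fault bit (1 << 5), ATA_ERR the error bit (1 << 0).
	 */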
	if (fis->status & ATA_DF)
		ts->stat = SAS_PROTO_RESPONSE;
	else if (fis->status & ATA_ERR)
		ts->stat = SAM_STAT_CHECK_CONDITION;
	else
		ts->stat = SAM_STAT_GOOD;

	ts->resp = SAS_TASK_COMPLETE;
}

static void isci_request_io_request_complete(struct isci_host *ihost,
					     struct isci_request *request,
					     enum sci_io_status completion_status)
{
	struct sas_task *task = isci_request_access_task(request);
	struct ssp_response_iu *resp_iu;
	unsigned long task_flags;
	struct isci_remote_device *idev = request->target_device;
	enum service_response response = SAS_TASK_UNDELIVERED;
	enum exec_status status = SAS_ABORTED_TASK;
	enum isci_request_status request_status;
	enum isci_completion_selection complete_to_host
		= isci_perform_normal_io_completion;

	dev_dbg(&ihost->pdev->dev,
		"%s: request = %p, task = %p,\n"
		"task->data_dir = %d completion_status = 0x%x\n",
		__func__,
		request,
		task,
		task->data_dir,
		completion_status);

	spin_lock(&request->state_lock);
	request_status = request->status;

	/* Decode the request status. Note that if the request has been
	 * aborted by a task management function, we don't care
	 * what the status is.
	 */
	switch (request_status) {

	case aborted:
		/* "aborted" indicates that the request was aborted by a task
		 * management function, since once a task management request is
		 * performed by the device, the request only completes because
		 * of the subsequent driver terminate.
		 *
		 * Aborted also means an external thread is explicitly managing
		 * this request, so that we do not complete it up the stack.
		 *
		 * The target is still there (since the TMF was successful).
		 */
		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
		response = SAS_TASK_COMPLETE;

		/* See if the device has been/is being stopped. Note
		 * that we ignore the quiesce state, since we are
		 * concerned about the actual device state.
		 */
		if (!idev)
			status = SAS_DEVICE_UNKNOWN;
		else
			status = SAS_ABORTED_TASK;

		complete_to_host = isci_perform_aborted_io_completion;
		/* This was an aborted request. */

		spin_unlock(&request->state_lock);
		break;

	case aborting:
		/* aborting means that the task management function tried and
		 * failed to abort the request. We need to note the request
		 * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
		 * target as down.
		 *
		 * Aborting also means an external thread is explicitly managing
		 * this request, so that we do not complete it up the stack.
		 */
		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
		response = SAS_TASK_UNDELIVERED;

		if (!idev)
			/* The device has been/is being stopped. Note that
			 * we ignore the quiesce state, since we are
			 * concerned about the actual device state.
			 */
			status = SAS_DEVICE_UNKNOWN;
		else
			status = SAS_PHY_DOWN;

		complete_to_host = isci_perform_aborted_io_completion;

		/* This was an aborted request. */

		spin_unlock(&request->state_lock);
		break;

	case terminating:

		/* This was a terminated request. This happens when
		 * the I/O is being terminated because of an action on
		 * the device (reset, tear down, etc.), and the I/O needs
		 * to be completed up the stack.
		 */
		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
		response = SAS_TASK_UNDELIVERED;

		/* See if the device has been/is being stopped. Note
		 * that we ignore the quiesce state, since we are
		 * concerned about the actual device state.
		 */
		if (!idev)
			status = SAS_DEVICE_UNKNOWN;
		else
			status = SAS_ABORTED_TASK;

		complete_to_host = isci_perform_aborted_io_completion;

		/* This was a terminated request. */

		spin_unlock(&request->state_lock);
		break;

	case dead:
		/* This was a terminated request that timed out during the
		 * termination process. There is no task to complete to
		 * libsas.
		 */
		complete_to_host = isci_perform_normal_io_completion;
		spin_unlock(&request->state_lock);
		break;

	default:

		/* The request is done from an SCU HW perspective. */
		request->status = completed;

		spin_unlock(&request->state_lock);

		/* This is an active request being completed from the core. */
		switch (completion_status) {

		case SCI_IO_FAILURE_RESPONSE_VALID:
			dev_dbg(&ihost->pdev->dev,
				"%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
				__func__,
				request,
				task);

			if (sas_protocol_ata(task->task_proto)) {
				isci_process_stp_response(task, &request->stp.rsp);
			} else if (SAS_PROTOCOL_SSP == task->task_proto) {

				/* crack the iu response buffer. */
				resp_iu = &request->ssp.rsp;
				isci_request_process_response_iu(task, resp_iu,
								 &ihost->pdev->dev);

			} else if (SAS_PROTOCOL_SMP == task->task_proto) {

				dev_err(&ihost->pdev->dev,
					"%s: SCI_IO_FAILURE_RESPONSE_VALID: "
					"SAS_PROTOCOL_SMP protocol\n",
					__func__);

			} else
				dev_err(&ihost->pdev->dev,
					"%s: unknown protocol\n", __func__);

			/* use the task status set in the task struct by the
			 * isci_request_process_response_iu call.
			 */
			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
			response = task->task_status.resp;
			status = task->task_status.stat;
			break;

		case SCI_IO_SUCCESS:
		case SCI_IO_SUCCESS_IO_DONE_EARLY:

			response = SAS_TASK_COMPLETE;
			status = SAM_STAT_GOOD;
			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);

			if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) {

				/* This was an SSP / STP / SATA transfer.
				 * There is a possibility that less data than
				 * the maximum was transferred.
				 */
				u32 transferred_length = sci_req_tx_bytes(request);

				task->task_status.residual
					= task->total_xfer_len - transferred_length;
3021 3023
3022 /* If there were residual bytes, call this an 3024 /* If there were residual bytes, call this an
3023 * underrun. 3025 * underrun.
3024 */ 3026 */
3025 if (task->task_status.residual != 0) 3027 if (task->task_status.residual != 0)
3026 status = SAS_DATA_UNDERRUN; 3028 status = SAS_DATA_UNDERRUN;
3027 3029
3028 dev_dbg(&ihost->pdev->dev, 3030 dev_dbg(&ihost->pdev->dev,
3029 "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n", 3031 "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
3030 __func__, 3032 __func__,
3031 status); 3033 status);
3032 3034
3033 } else 3035 } else
3034 dev_dbg(&ihost->pdev->dev, 3036 dev_dbg(&ihost->pdev->dev,
3035 "%s: SCI_IO_SUCCESS\n", 3037 "%s: SCI_IO_SUCCESS\n",
3036 __func__); 3038 __func__);
3037 3039
3038 break; 3040 break;
3039 3041
3040 case SCI_IO_FAILURE_TERMINATED: 3042 case SCI_IO_FAILURE_TERMINATED:
3041 dev_dbg(&ihost->pdev->dev, 3043 dev_dbg(&ihost->pdev->dev,
3042 "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n", 3044 "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
3043 __func__, 3045 __func__,
3044 request, 3046 request,
3045 task); 3047 task);
3046 3048
3047 /* The request was terminated explicitly. No handling 3049 /* The request was terminated explicitly. No handling
3048 * is needed in the SCSI error handler path. 3050 * is needed in the SCSI error handler path.
3049 */ 3051 */
3050 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 3052 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
3051 response = SAS_TASK_UNDELIVERED; 3053 response = SAS_TASK_UNDELIVERED;
3052 3054
3053 /* See if the device has been/is being stopped. Note 3055 /* See if the device has been/is being stopped. Note
3054 * that we ignore the quiesce state, since we are 3056 * that we ignore the quiesce state, since we are
3055 * concerned about the actual device state. 3057 * concerned about the actual device state.
3056 */ 3058 */
3057 if (!idev) 3059 if (!idev)
3058 status = SAS_DEVICE_UNKNOWN; 3060 status = SAS_DEVICE_UNKNOWN;
3059 else 3061 else
3060 status = SAS_ABORTED_TASK; 3062 status = SAS_ABORTED_TASK;
3061 3063
3062 complete_to_host = isci_perform_normal_io_completion; 3064 complete_to_host = isci_perform_normal_io_completion;
3063 break; 3065 break;
3064 3066
3065 case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR: 3067 case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
3066 3068
3067 isci_request_handle_controller_specific_errors( 3069 isci_request_handle_controller_specific_errors(
3068 idev, request, task, &response, &status, 3070 idev, request, task, &response, &status,
3069 &complete_to_host); 3071 &complete_to_host);
3070 3072
3071 break; 3073 break;
3072 3074
3073 case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED: 3075 case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
3074 /* This is a special case, in that the I/O completion 3076 /* This is a special case, in that the I/O completion
3075 * is telling us that the device needs a reset. 3077 * is telling us that the device needs a reset.
3076 * In order for the device reset condition to be 3078 * In order for the device reset condition to be
3077 * noticed, the I/O has to be handled in the error 3079 * noticed, the I/O has to be handled in the error
3078 * handler. Set the reset flag and cause the 3080 * handler. Set the reset flag and cause the
3079 * SCSI error thread to be scheduled. 3081 * SCSI error thread to be scheduled.
3080 */ 3082 */
3081 spin_lock_irqsave(&task->task_state_lock, task_flags); 3083 spin_lock_irqsave(&task->task_state_lock, task_flags);
3082 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; 3084 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
3083 spin_unlock_irqrestore(&task->task_state_lock, task_flags); 3085 spin_unlock_irqrestore(&task->task_state_lock, task_flags);
3084 3086
3085 /* Fail the I/O. */ 3087 /* Fail the I/O. */
3086 response = SAS_TASK_UNDELIVERED; 3088 response = SAS_TASK_UNDELIVERED;
3087 status = SAM_STAT_TASK_ABORTED; 3089 status = SAM_STAT_TASK_ABORTED;
3088 3090
3089 complete_to_host = isci_perform_error_io_completion; 3091 complete_to_host = isci_perform_error_io_completion;
3090 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 3092 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
3091 break; 3093 break;
3092 3094
3093 case SCI_FAILURE_RETRY_REQUIRED: 3095 case SCI_FAILURE_RETRY_REQUIRED:
3094 3096
3095 /* Fail the I/O so it can be retried. */ 3097 /* Fail the I/O so it can be retried. */
3096 response = SAS_TASK_UNDELIVERED; 3098 response = SAS_TASK_UNDELIVERED;
3097 if (!idev) 3099 if (!idev)
3098 status = SAS_DEVICE_UNKNOWN; 3100 status = SAS_DEVICE_UNKNOWN;
3099 else 3101 else
3100 status = SAS_ABORTED_TASK; 3102 status = SAS_ABORTED_TASK;
3101 3103
3102 complete_to_host = isci_perform_normal_io_completion; 3104 complete_to_host = isci_perform_normal_io_completion;
3103 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 3105 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
3104 break; 3106 break;
3105 3107
3106 3108
3107 default: 3109 default:
3108 /* Catch any otherwise unhandled error codes here. */ 3110 /* Catch any otherwise unhandled error codes here. */
3109 dev_dbg(&ihost->pdev->dev, 3111 dev_dbg(&ihost->pdev->dev,
3110 "%s: invalid completion code: 0x%x - " 3112 "%s: invalid completion code: 0x%x - "
3111 "isci_request = %p\n", 3113 "isci_request = %p\n",
3112 __func__, completion_status, request); 3114 __func__, completion_status, request);
3113 3115
3114 response = SAS_TASK_UNDELIVERED; 3116 response = SAS_TASK_UNDELIVERED;
3115 3117
3116 /* See if the device has been/is being stopped. Note 3118 /* See if the device has been/is being stopped. Note
3117 * that we ignore the quiesce state, since we are 3119 * that we ignore the quiesce state, since we are
3118 * concerned about the actual device state. 3120 * concerned about the actual device state.
3119 */ 3121 */
3120 if (!idev) 3122 if (!idev)
3121 status = SAS_DEVICE_UNKNOWN; 3123 status = SAS_DEVICE_UNKNOWN;
3122 else 3124 else
3123 status = SAS_ABORTED_TASK; 3125 status = SAS_ABORTED_TASK;
3124 3126
3125 if (SAS_PROTOCOL_SMP == task->task_proto) { 3127 if (SAS_PROTOCOL_SMP == task->task_proto) {
3126 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 3128 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
3127 complete_to_host = isci_perform_normal_io_completion; 3129 complete_to_host = isci_perform_normal_io_completion;
3128 } else { 3130 } else {
3129 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 3131 clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
3130 complete_to_host = isci_perform_error_io_completion; 3132 complete_to_host = isci_perform_error_io_completion;
3131 } 3133 }
3132 break; 3134 break;
3133 } 3135 }
3134 break; 3136 break;
3135 } 3137 }
3136 3138
3137 switch (task->task_proto) { 3139 switch (task->task_proto) {
3138 case SAS_PROTOCOL_SSP: 3140 case SAS_PROTOCOL_SSP:
3139 if (task->data_dir == DMA_NONE) 3141 if (task->data_dir == DMA_NONE)
3140 break; 3142 break;
3141 if (task->num_scatter == 0) 3143 if (task->num_scatter == 0)
3142 /* 0 indicates a single dma address */ 3144 /* 0 indicates a single dma address */
3143 dma_unmap_single(&ihost->pdev->dev, 3145 dma_unmap_single(&ihost->pdev->dev,
3144 request->zero_scatter_daddr, 3146 request->zero_scatter_daddr,
3145 task->total_xfer_len, task->data_dir); 3147 task->total_xfer_len, task->data_dir);
3146 else /* unmap the sgl dma addresses */ 3148 else /* unmap the sgl dma addresses */
3147 dma_unmap_sg(&ihost->pdev->dev, task->scatter, 3149 dma_unmap_sg(&ihost->pdev->dev, task->scatter,
3148 request->num_sg_entries, task->data_dir); 3150 request->num_sg_entries, task->data_dir);
3149 break; 3151 break;
3150 case SAS_PROTOCOL_SMP: { 3152 case SAS_PROTOCOL_SMP: {
3151 struct scatterlist *sg = &task->smp_task.smp_req; 3153 struct scatterlist *sg = &task->smp_task.smp_req;
3152 struct smp_req *smp_req; 3154 struct smp_req *smp_req;
3153 void *kaddr; 3155 void *kaddr;
3154 3156
3155 dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE); 3157 dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE);
3156 3158
3157 /* need to swab it back in case the command buffer is re-used */ 3159 /* need to swab it back in case the command buffer is re-used */
3158 kaddr = kmap_atomic(sg_page(sg)); 3160 kaddr = kmap_atomic(sg_page(sg));
3159 smp_req = kaddr + sg->offset; 3161 smp_req = kaddr + sg->offset;
3160 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); 3162 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
3161 kunmap_atomic(kaddr); 3163 kunmap_atomic(kaddr);
3162 break; 3164 break;
3163 } 3165 }
3164 default: 3166 default:
3165 break; 3167 break;
3166 } 3168 }
3167 3169
3168 /* Put the completed request on the correct list */ 3170 /* Put the completed request on the correct list */
3169 isci_task_save_for_upper_layer_completion(ihost, request, response, 3171 isci_task_save_for_upper_layer_completion(ihost, request, response,
3170 status, complete_to_host 3172 status, complete_to_host
3171 ); 3173 );
3172 3174
3173 /* complete the io request to the core. */ 3175 /* complete the io request to the core. */
3174 sci_controller_complete_io(ihost, request->target_device, request); 3176 sci_controller_complete_io(ihost, request->target_device, request);
3175 3177
3176 /* Set the terminated flag so the request cannot be completed or 3178 /* Set the terminated flag so the request cannot be completed or
3177 * terminated again, and so any calls into abort task 3179 * terminated again, and so any calls into abort task
3178 * recognize the already-completed case. 3180 * recognize the already-completed case.
3179 */ 3181 */
3180 set_bit(IREQ_TERMINATED, &request->flags); 3182 set_bit(IREQ_TERMINATED, &request->flags);
3181 } 3183 }
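
The switch above boils every SCI completion code down to a (response, status, complete_to_host) triple before the request is passed back up the stack; e.g. in the done-early case, a total_xfer_len of 4096 with 3584 bytes transferred leaves a residual of 512 and turns SAM_STAT_GOOD into SAS_DATA_UNDERRUN. A condensed sketch of that reduction with a hypothetical helper name, covering only two representative cases and assuming the same libsas types the code above uses:

	/* Sketch only: hypothetical condensation of the disposition logic
	 * above; the real switch handles several more cases.
	 */
	static void sketch_map_disposition(enum sci_status completion_status,
					   struct isci_remote_device *idev,
					   enum service_response *response,
					   enum exec_status *status)
	{
		switch (completion_status) {
		case SCI_IO_SUCCESS:
		case SCI_IO_SUCCESS_IO_DONE_EARLY:
			*response = SAS_TASK_COMPLETE;
			*status = SAM_STAT_GOOD;	/* underrun may override */
			break;
		default:
			*response = SAS_TASK_UNDELIVERED;
			*status = idev ? SAS_ABORTED_TASK : SAS_DEVICE_UNKNOWN;
			break;
		}
	}
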
3182 3184
3183 static void sci_request_started_state_enter(struct sci_base_state_machine *sm) 3185 static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
3184 { 3186 {
3185 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 3187 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3186 struct domain_device *dev = ireq->target_device->domain_dev; 3188 struct domain_device *dev = ireq->target_device->domain_dev;
3187 enum sci_base_request_states state; 3189 enum sci_base_request_states state;
3188 struct sas_task *task; 3190 struct sas_task *task;
3189 3191
3190 /* XXX as hch said always creating an internal sas_task for tmf 3192 /* XXX as hch said always creating an internal sas_task for tmf
3191 * requests would simplify the driver 3193 * requests would simplify the driver
3192 */ 3194 */
3193 task = (test_bit(IREQ_TMF, &ireq->flags)) ? NULL : isci_request_access_task(ireq); 3195 task = (test_bit(IREQ_TMF, &ireq->flags)) ? NULL : isci_request_access_task(ireq);
3194 3196
3195 /* all unaccelerated request types (anything other than ssp or ncq) 3197 /* all unaccelerated request types (anything other than ssp or ncq)
3196 * are handled with substates 3198 * are handled with substates
3197 */ 3199 */
3198 if (!task && dev->dev_type == SAS_END_DEV) { 3200 if (!task && dev->dev_type == SAS_END_DEV) {
3199 state = SCI_REQ_TASK_WAIT_TC_COMP; 3201 state = SCI_REQ_TASK_WAIT_TC_COMP;
3200 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { 3202 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
3201 state = SCI_REQ_SMP_WAIT_RESP; 3203 state = SCI_REQ_SMP_WAIT_RESP;
3202 } else if (task && sas_protocol_ata(task->task_proto) && 3204 } else if (task && sas_protocol_ata(task->task_proto) &&
3203 !task->ata_task.use_ncq) { 3205 !task->ata_task.use_ncq) {
3204 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET && 3206 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET &&
3205 task->ata_task.fis.command == ATA_CMD_PACKET) { 3207 task->ata_task.fis.command == ATA_CMD_PACKET) {
3206 state = SCI_REQ_ATAPI_WAIT_H2D; 3208 state = SCI_REQ_ATAPI_WAIT_H2D;
3207 } else if (task->data_dir == DMA_NONE) { 3209 } else if (task->data_dir == DMA_NONE) {
3208 state = SCI_REQ_STP_NON_DATA_WAIT_H2D; 3210 state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
3209 } else if (task->ata_task.dma_xfer) { 3211 } else if (task->ata_task.dma_xfer) {
3210 state = SCI_REQ_STP_UDMA_WAIT_TC_COMP; 3212 state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
3211 } else /* PIO */ { 3213 } else /* PIO */ {
3212 state = SCI_REQ_STP_PIO_WAIT_H2D; 3214 state = SCI_REQ_STP_PIO_WAIT_H2D;
3213 } 3215 }
3214 } else { 3216 } else {
3215 /* SSP or NCQ are fully accelerated, no substates */ 3217 /* SSP or NCQ are fully accelerated, no substates */
3216 return; 3218 return;
3217 } 3219 }
3218 sci_change_state(sm, state); 3220 sci_change_state(sm, state);
3219 } 3221 }
3220 3222
3221 static void sci_request_completed_state_enter(struct sci_base_state_machine *sm) 3223 static void sci_request_completed_state_enter(struct sci_base_state_machine *sm)
3222 { 3224 {
3223 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 3225 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3224 struct isci_host *ihost = ireq->owning_controller; 3226 struct isci_host *ihost = ireq->owning_controller;
3225 3227
3226 /* Tell the SCI_USER that the IO request is complete */ 3228 /* Tell the SCI_USER that the IO request is complete */
3227 if (!test_bit(IREQ_TMF, &ireq->flags)) 3229 if (!test_bit(IREQ_TMF, &ireq->flags))
3228 isci_request_io_request_complete(ihost, ireq, 3230 isci_request_io_request_complete(ihost, ireq,
3229 ireq->sci_status); 3231 ireq->sci_status);
3230 else 3232 else
3231 isci_task_request_complete(ihost, ireq, ireq->sci_status); 3233 isci_task_request_complete(ihost, ireq, ireq->sci_status);
3232 } 3234 }
3233 3235
3234 static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm) 3236 static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm)
3235 { 3237 {
3236 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 3238 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3237 3239
3238 /* Setting the abort bit in the Task Context is required by the silicon. */ 3240 /* Setting the abort bit in the Task Context is required by the silicon. */
3239 ireq->tc->abort = 1; 3241 ireq->tc->abort = 1;
3240 } 3242 }
3241 3243
3242 static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm) 3244 static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
3243 { 3245 {
3244 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 3246 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3245 3247
3246 ireq->target_device->working_request = ireq; 3248 ireq->target_device->working_request = ireq;
3247 } 3249 }
3248 3250
3249 static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm) 3251 static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
3250 { 3252 {
3251 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 3253 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3252 3254
3253 ireq->target_device->working_request = ireq; 3255 ireq->target_device->working_request = ireq;
3254 } 3256 }
3255 3257
3256 static const struct sci_base_state sci_request_state_table[] = { 3258 static const struct sci_base_state sci_request_state_table[] = {
3257 [SCI_REQ_INIT] = { }, 3259 [SCI_REQ_INIT] = { },
3258 [SCI_REQ_CONSTRUCTED] = { }, 3260 [SCI_REQ_CONSTRUCTED] = { },
3259 [SCI_REQ_STARTED] = { 3261 [SCI_REQ_STARTED] = {
3260 .enter_state = sci_request_started_state_enter, 3262 .enter_state = sci_request_started_state_enter,
3261 }, 3263 },
3262 [SCI_REQ_STP_NON_DATA_WAIT_H2D] = { 3264 [SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
3263 .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter, 3265 .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter,
3264 }, 3266 },
3265 [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { }, 3267 [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
3266 [SCI_REQ_STP_PIO_WAIT_H2D] = { 3268 [SCI_REQ_STP_PIO_WAIT_H2D] = {
3267 .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter, 3269 .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter,
3268 }, 3270 },
3269 [SCI_REQ_STP_PIO_WAIT_FRAME] = { }, 3271 [SCI_REQ_STP_PIO_WAIT_FRAME] = { },
3270 [SCI_REQ_STP_PIO_DATA_IN] = { }, 3272 [SCI_REQ_STP_PIO_DATA_IN] = { },
3271 [SCI_REQ_STP_PIO_DATA_OUT] = { }, 3273 [SCI_REQ_STP_PIO_DATA_OUT] = { },
3272 [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { }, 3274 [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
3273 [SCI_REQ_STP_UDMA_WAIT_D2H] = { }, 3275 [SCI_REQ_STP_UDMA_WAIT_D2H] = { },
3274 [SCI_REQ_TASK_WAIT_TC_COMP] = { }, 3276 [SCI_REQ_TASK_WAIT_TC_COMP] = { },
3275 [SCI_REQ_TASK_WAIT_TC_RESP] = { }, 3277 [SCI_REQ_TASK_WAIT_TC_RESP] = { },
3276 [SCI_REQ_SMP_WAIT_RESP] = { }, 3278 [SCI_REQ_SMP_WAIT_RESP] = { },
3277 [SCI_REQ_SMP_WAIT_TC_COMP] = { }, 3279 [SCI_REQ_SMP_WAIT_TC_COMP] = { },
3278 [SCI_REQ_ATAPI_WAIT_H2D] = { }, 3280 [SCI_REQ_ATAPI_WAIT_H2D] = { },
3279 [SCI_REQ_ATAPI_WAIT_PIO_SETUP] = { }, 3281 [SCI_REQ_ATAPI_WAIT_PIO_SETUP] = { },
3280 [SCI_REQ_ATAPI_WAIT_D2H] = { }, 3282 [SCI_REQ_ATAPI_WAIT_D2H] = { },
3281 [SCI_REQ_ATAPI_WAIT_TC_COMP] = { }, 3283 [SCI_REQ_ATAPI_WAIT_TC_COMP] = { },
3282 [SCI_REQ_COMPLETED] = { 3284 [SCI_REQ_COMPLETED] = {
3283 .enter_state = sci_request_completed_state_enter, 3285 .enter_state = sci_request_completed_state_enter,
3284 }, 3286 },
3285 [SCI_REQ_ABORTING] = { 3287 [SCI_REQ_ABORTING] = {
3286 .enter_state = sci_request_aborting_state_enter, 3288 .enter_state = sci_request_aborting_state_enter,
3287 }, 3289 },
3288 [SCI_REQ_FINAL] = { }, 3290 [SCI_REQ_FINAL] = { },
3289 }; 3291 };
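
The table above registers enter_state hooks only for the states that need setup work; sci_change_state() is expected to look up and invoke the hook of the state being entered. A minimal sketch of that dispatch, assuming the base machine simply keeps the table and a current-state index (all names here are hypothetical):

	struct sketch_sm;

	struct sketch_state {
		void (*enter_state)(struct sketch_sm *sm);
	};

	struct sketch_sm {
		const struct sketch_state *table;
		unsigned int current_state;
	};

	static void sketch_change_state(struct sketch_sm *sm, unsigned int next)
	{
		sm->current_state = next;
		if (sm->table[next].enter_state)
			sm->table[next].enter_state(sm);
	}
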
3290 3292
3291 static void 3293 static void
3292 sci_general_request_construct(struct isci_host *ihost, 3294 sci_general_request_construct(struct isci_host *ihost,
3293 struct isci_remote_device *idev, 3295 struct isci_remote_device *idev,
3294 struct isci_request *ireq) 3296 struct isci_request *ireq)
3295 { 3297 {
3296 sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT); 3298 sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);
3297 3299
3298 ireq->target_device = idev; 3300 ireq->target_device = idev;
3299 ireq->protocol = SAS_PROTOCOL_NONE; 3301 ireq->protocol = SAS_PROTOCOL_NONE;
3300 ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; 3302 ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
3301 3303
3302 ireq->sci_status = SCI_SUCCESS; 3304 ireq->sci_status = SCI_SUCCESS;
3303 ireq->scu_status = 0; 3305 ireq->scu_status = 0;
3304 ireq->post_context = 0xFFFFFFFF; 3306 ireq->post_context = 0xFFFFFFFF;
3305 } 3307 }
3306 3308
3307 static enum sci_status 3309 static enum sci_status
3308 sci_io_request_construct(struct isci_host *ihost, 3310 sci_io_request_construct(struct isci_host *ihost,
3309 struct isci_remote_device *idev, 3311 struct isci_remote_device *idev,
3310 struct isci_request *ireq) 3312 struct isci_request *ireq)
3311 { 3313 {
3312 struct domain_device *dev = idev->domain_dev; 3314 struct domain_device *dev = idev->domain_dev;
3313 enum sci_status status = SCI_SUCCESS; 3315 enum sci_status status = SCI_SUCCESS;
3314 3316
3315 /* Build the common part of the request */ 3317 /* Build the common part of the request */
3316 sci_general_request_construct(ihost, idev, ireq); 3318 sci_general_request_construct(ihost, idev, ireq);
3317 3319
3318 if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) 3320 if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
3319 return SCI_FAILURE_INVALID_REMOTE_DEVICE; 3321 return SCI_FAILURE_INVALID_REMOTE_DEVICE;
3320 3322
3321 if (dev->dev_type == SAS_END_DEV) 3323 if (dev->dev_type == SAS_END_DEV)
3322 /* pass */; 3324 /* pass */;
3323 else if (dev_is_sata(dev)) 3325 else if (dev_is_sata(dev))
3324 memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); 3326 memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
3325 else if (dev_is_expander(dev)) 3327 else if (dev_is_expander(dev))
3326 /* pass */; 3328 /* pass */;
3327 else 3329 else
3328 return SCI_FAILURE_UNSUPPORTED_PROTOCOL; 3330 return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3329 3331
3330 memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab)); 3332 memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab));
3331 3333
3332 return status; 3334 return status;
3333 } 3335 }
3334 3336
3335 enum sci_status sci_task_request_construct(struct isci_host *ihost, 3337 enum sci_status sci_task_request_construct(struct isci_host *ihost,
3336 struct isci_remote_device *idev, 3338 struct isci_remote_device *idev,
3337 u16 io_tag, struct isci_request *ireq) 3339 u16 io_tag, struct isci_request *ireq)
3338 { 3340 {
3339 struct domain_device *dev = idev->domain_dev; 3341 struct domain_device *dev = idev->domain_dev;
3340 enum sci_status status = SCI_SUCCESS; 3342 enum sci_status status = SCI_SUCCESS;
3341 3343
3342 /* Build the common part of the request */ 3344 /* Build the common part of the request */
3343 sci_general_request_construct(ihost, idev, ireq); 3345 sci_general_request_construct(ihost, idev, ireq);
3344 3346
3345 if (dev->dev_type == SAS_END_DEV || dev_is_sata(dev)) { 3347 if (dev->dev_type == SAS_END_DEV || dev_is_sata(dev)) {
3346 set_bit(IREQ_TMF, &ireq->flags); 3348 set_bit(IREQ_TMF, &ireq->flags);
3347 memset(ireq->tc, 0, sizeof(struct scu_task_context)); 3349 memset(ireq->tc, 0, sizeof(struct scu_task_context));
3348 } else 3350 } else
3349 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; 3351 status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3350 3352
3351 return status; 3353 return status;
3352 } 3354 }
3353 3355
3354 static enum sci_status isci_request_ssp_request_construct( 3356 static enum sci_status isci_request_ssp_request_construct(
3355 struct isci_request *request) 3357 struct isci_request *request)
3356 { 3358 {
3357 enum sci_status status; 3359 enum sci_status status;
3358 3360
3359 dev_dbg(&request->isci_host->pdev->dev, 3361 dev_dbg(&request->isci_host->pdev->dev,
3360 "%s: request = %p\n", 3362 "%s: request = %p\n",
3361 __func__, 3363 __func__,
3362 request); 3364 request);
3363 status = sci_io_request_construct_basic_ssp(request); 3365 status = sci_io_request_construct_basic_ssp(request);
3364 return status; 3366 return status;
3365 } 3367 }
3366 3368
3367 static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq) 3369 static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq)
3368 { 3370 {
3369 struct sas_task *task = isci_request_access_task(ireq); 3371 struct sas_task *task = isci_request_access_task(ireq);
3370 struct host_to_dev_fis *fis = &ireq->stp.cmd; 3372 struct host_to_dev_fis *fis = &ireq->stp.cmd;
3371 struct ata_queued_cmd *qc = task->uldd_task; 3373 struct ata_queued_cmd *qc = task->uldd_task;
3372 enum sci_status status; 3374 enum sci_status status;
3373 3375
3374 dev_dbg(&ireq->isci_host->pdev->dev, 3376 dev_dbg(&ireq->isci_host->pdev->dev,
3375 "%s: ireq = %p\n", 3377 "%s: ireq = %p\n",
3376 __func__, 3378 __func__,
3377 ireq); 3379 ireq);
3378 3380
3379 memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); 3381 memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
3380 if (!task->ata_task.device_control_reg_update) 3382 if (!task->ata_task.device_control_reg_update)
3381 fis->flags |= 0x80; 3383 fis->flags |= 0x80;
3382 fis->flags &= 0xF0; 3384 fis->flags &= 0xF0;
3383 3385
3384 status = sci_io_request_construct_basic_sata(ireq); 3386 status = sci_io_request_construct_basic_sata(ireq);
3385 3387
3386 if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE || 3388 if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
3387 qc->tf.command == ATA_CMD_FPDMA_READ)) { 3389 qc->tf.command == ATA_CMD_FPDMA_READ)) {
3388 fis->sector_count = qc->tag << 3; 3390 fis->sector_count = qc->tag << 3;
3389 ireq->tc->type.stp.ncq_tag = qc->tag; 3391 ireq->tc->type.stp.ncq_tag = qc->tag;
3390 } 3392 }
3391 3393
3392 return status; 3394 return status;
3393 } 3395 }
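
In the FPDMA (NCQ) case above the queue tag is folded into the FIS sector-count field: per the SATA NCQ command layout the 5-bit tag occupies bits 7:3, which is what `qc->tag << 3` expresses. A small sketch of the encoding:

	/* Sketch: NCQ tag placement used above for FPDMA READ/WRITE. */
	static inline unsigned char sketch_ncq_sector_count(unsigned char tag)
	{
		return (tag & 0x1f) << 3;	/* e.g. tag 7 -> 0x38 */
	}
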
3394 3396
3395 static enum sci_status 3397 static enum sci_status
3396 sci_io_request_construct_smp(struct device *dev, 3398 sci_io_request_construct_smp(struct device *dev,
3397 struct isci_request *ireq, 3399 struct isci_request *ireq,
3398 struct sas_task *task) 3400 struct sas_task *task)
3399 { 3401 {
3400 struct scatterlist *sg = &task->smp_task.smp_req; 3402 struct scatterlist *sg = &task->smp_task.smp_req;
3401 struct isci_remote_device *idev; 3403 struct isci_remote_device *idev;
3402 struct scu_task_context *task_context; 3404 struct scu_task_context *task_context;
3403 struct isci_port *iport; 3405 struct isci_port *iport;
3404 struct smp_req *smp_req; 3406 struct smp_req *smp_req;
3405 void *kaddr; 3407 void *kaddr;
3406 u8 req_len; 3408 u8 req_len;
3407 u32 cmd; 3409 u32 cmd;
3408 3410
3409 kaddr = kmap_atomic(sg_page(sg)); 3411 kaddr = kmap_atomic(sg_page(sg));
3410 smp_req = kaddr + sg->offset; 3412 smp_req = kaddr + sg->offset;
3411 /* 3413 /*
3412 * Look at the SMP request's header fields; for certain SAS 1.x SMP 3414 * Look at the SMP request's header fields; for certain SAS 1.x SMP
3413 * functions under SAS 2.0, a zero request length really indicates 3415 * functions under SAS 2.0, a zero request length really indicates
3414 * a non-zero default length. 3416 * a non-zero default length.
3415 */ 3417 */
3416 if (smp_req->req_len == 0) { 3418 if (smp_req->req_len == 0) {
3417 switch (smp_req->func) { 3419 switch (smp_req->func) {
3418 case SMP_DISCOVER: 3420 case SMP_DISCOVER:
3419 case SMP_REPORT_PHY_ERR_LOG: 3421 case SMP_REPORT_PHY_ERR_LOG:
3420 case SMP_REPORT_PHY_SATA: 3422 case SMP_REPORT_PHY_SATA:
3421 case SMP_REPORT_ROUTE_INFO: 3423 case SMP_REPORT_ROUTE_INFO:
3422 smp_req->req_len = 2; 3424 smp_req->req_len = 2;
3423 break; 3425 break;
3424 case SMP_CONF_ROUTE_INFO: 3426 case SMP_CONF_ROUTE_INFO:
3425 case SMP_PHY_CONTROL: 3427 case SMP_PHY_CONTROL:
3426 case SMP_PHY_TEST_FUNCTION: 3428 case SMP_PHY_TEST_FUNCTION:
3427 smp_req->req_len = 9; 3429 smp_req->req_len = 9;
3428 break; 3430 break;
3429 /* Default - zero is a valid default for 2.0. */ 3431 /* Default - zero is a valid default for 2.0. */
3430 } 3432 }
3431 } 3433 }
3432 req_len = smp_req->req_len; 3434 req_len = smp_req->req_len;
3433 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); 3435 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
3434 cmd = *(u32 *) smp_req; 3436 cmd = *(u32 *) smp_req;
3435 kunmap_atomic(kaddr); 3437 kunmap_atomic(kaddr);
3436 3438
3437 if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE)) 3439 if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
3438 return SCI_FAILURE; 3440 return SCI_FAILURE;
3439 3441
3440 ireq->protocol = SAS_PROTOCOL_SMP; 3442 ireq->protocol = SAS_PROTOCOL_SMP;
3441 3443
3442 /* the smp request was byte swapped above. */ 3444 /* the smp request was byte swapped above. */
3443 3445
3444 task_context = ireq->tc; 3446 task_context = ireq->tc;
3445 3447
3446 idev = ireq->target_device; 3448 idev = ireq->target_device;
3447 iport = idev->owning_port; 3449 iport = idev->owning_port;
3448 3450
3449 /* 3451 /*
3450 * Fill in the TC with its required data 3452 * Fill in the TC with its required data
3451 * 00h 3453 * 00h
3452 */ 3454 */
3453 task_context->priority = 0; 3455 task_context->priority = 0;
3454 task_context->initiator_request = 1; 3456 task_context->initiator_request = 1;
3455 task_context->connection_rate = idev->connection_rate; 3457 task_context->connection_rate = idev->connection_rate;
3456 task_context->protocol_engine_index = ISCI_PEG; 3458 task_context->protocol_engine_index = ISCI_PEG;
3457 task_context->logical_port_index = iport->physical_port_index; 3459 task_context->logical_port_index = iport->physical_port_index;
3458 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP; 3460 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
3459 task_context->abort = 0; 3461 task_context->abort = 0;
3460 task_context->valid = SCU_TASK_CONTEXT_VALID; 3462 task_context->valid = SCU_TASK_CONTEXT_VALID;
3461 task_context->context_type = SCU_TASK_CONTEXT_TYPE; 3463 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
3462 3464
3463 /* 04h */ 3465 /* 04h */
3464 task_context->remote_node_index = idev->rnc.remote_node_index; 3466 task_context->remote_node_index = idev->rnc.remote_node_index;
3465 task_context->command_code = 0; 3467 task_context->command_code = 0;
3466 task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST; 3468 task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;
3467 3469
3468 /* 08h */ 3470 /* 08h */
3469 task_context->link_layer_control = 0; 3471 task_context->link_layer_control = 0;
3470 task_context->do_not_dma_ssp_good_response = 1; 3472 task_context->do_not_dma_ssp_good_response = 1;
3471 task_context->strict_ordering = 0; 3473 task_context->strict_ordering = 0;
3472 task_context->control_frame = 1; 3474 task_context->control_frame = 1;
3473 task_context->timeout_enable = 0; 3475 task_context->timeout_enable = 0;
3474 task_context->block_guard_enable = 0; 3476 task_context->block_guard_enable = 0;
3475 3477
3476 /* 0ch */ 3478 /* 0ch */
3477 task_context->address_modifier = 0; 3479 task_context->address_modifier = 0;
3478 3480
3479 /* 10h */ 3481 /* 10h */
3480 task_context->ssp_command_iu_length = req_len; 3482 task_context->ssp_command_iu_length = req_len;
3481 3483
3482 /* 14h */ 3484 /* 14h */
3483 task_context->transfer_length_bytes = 0; 3485 task_context->transfer_length_bytes = 0;
3484 3486
3485 /* 3487 /*
3486 * 18h ~ 30h, protocol specific 3488 * 18h ~ 30h, protocol specific
3487 * since the command IU has been built by the framework at this point, 3489 * since the command IU has been built by the framework at this point,
3488 * we just copy the first DWord from the command IU to this location. */ 3490 * we just copy the first DWord from the command IU to this location. */
3489 memcpy(&task_context->type.smp, &cmd, sizeof(u32)); 3491 memcpy(&task_context->type.smp, &cmd, sizeof(u32));
3490 3492
3491 /* 3493 /*
3492 * 40h 3494 * 40h
3493 * "For SMP you could program it to zero. We would prefer that way 3495 * "For SMP you could program it to zero. We would prefer that way
3494 * so that done code will be consistent." - Venki 3496 * so that done code will be consistent." - Venki
3495 */ 3497 */
3496 task_context->task_phase = 0; 3498 task_context->task_phase = 0;
3497 3499
3498 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | 3500 ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
3499 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | 3501 (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
3500 (iport->physical_port_index << 3502 (iport->physical_port_index <<
3501 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | 3503 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
3502 ISCI_TAG_TCI(ireq->io_tag)); 3504 ISCI_TAG_TCI(ireq->io_tag));
3503 /* 3505 /*
3504 * Copy the physical address of the command buffer to the SCU Task 3506 * Copy the physical address of the command buffer to the SCU Task
3505 * Context; the command buffer should not contain the command header. 3507 * Context; the command buffer should not contain the command header.
3506 */ 3508 */
3507 task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg)); 3509 task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg));
3508 task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32)); 3510 task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32));
3509 3511
3510 /* SMP response comes as UF, so no need to set response IU address. */ 3512 /* SMP response comes as UF, so no need to set response IU address. */
3511 task_context->response_iu_upper = 0; 3513 task_context->response_iu_upper = 0;
3512 task_context->response_iu_lower = 0; 3514 task_context->response_iu_lower = 0;
3513 3515
3514 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); 3516 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
3515 3517
3516 return SCI_SUCCESS; 3518 return SCI_SUCCESS;
3517 } 3519 }
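
sci_swab32_cpy() above swaps the SMP request into the byte order the SCU consumes, and the completion path swabs it back so the command buffer can be re-used. A plausible sketch of such a helper, on the assumption (from its usage) that it is a dword-wise byte swap that is safe in place:

	#include <linux/swab.h>
	#include <linux/types.h>

	/* Sketch (assumption from usage): per-u32 byte swap, dst may equal src. */
	static void sketch_swab32_cpy(u32 *dst, const u32 *src, size_t dwords)
	{
		size_t i;

		for (i = 0; i < dwords; i++)
			dst[i] = swab32(src[i]);	/* 0x11223344 -> 0x44332211 */
	}
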
3518 3520
3519 /* 3521 /*
3520 * isci_smp_request_build() - This function builds the smp request. 3522 * isci_smp_request_build() - This function builds the smp request.
3521 * @ireq: This parameter points to the isci_request allocated in the 3523 * @ireq: This parameter points to the isci_request allocated in the
3522 * request construct function. 3524 * request construct function.
3523 * 3525 *
3524 * SCI_SUCCESS on successful completion, or a specific failure code. 3526 * SCI_SUCCESS on successful completion, or a specific failure code.
3525 */ 3527 */
3526 static enum sci_status isci_smp_request_build(struct isci_request *ireq) 3528 static enum sci_status isci_smp_request_build(struct isci_request *ireq)
3527 { 3529 {
3528 struct sas_task *task = isci_request_access_task(ireq); 3530 struct sas_task *task = isci_request_access_task(ireq);
3529 struct device *dev = &ireq->isci_host->pdev->dev; 3531 struct device *dev = &ireq->isci_host->pdev->dev;
3530 enum sci_status status = SCI_FAILURE; 3532 enum sci_status status = SCI_FAILURE;
3531 3533
3532 status = sci_io_request_construct_smp(dev, ireq, task); 3534 status = sci_io_request_construct_smp(dev, ireq, task);
3533 if (status != SCI_SUCCESS) 3535 if (status != SCI_SUCCESS)
3534 dev_dbg(&ireq->isci_host->pdev->dev, 3536 dev_dbg(&ireq->isci_host->pdev->dev,
3535 "%s: failed with status = %d\n", 3537 "%s: failed with status = %d\n",
3536 __func__, 3538 __func__,
3537 status); 3539 status);
3538 3540
3539 return status; 3541 return status;
3540 } 3542 }
3541 3543
3542 /** 3544 /**
3543 * isci_io_request_build() - This function builds the io request object. 3545 * isci_io_request_build() - This function builds the io request object.
3544 * @ihost: This parameter specifies the ISCI host object 3546 * @ihost: This parameter specifies the ISCI host object
3545 * @request: This parameter points to the isci_request object allocated in the 3547 * @request: This parameter points to the isci_request object allocated in the
3546 * request construct function. 3548 * request construct function.
3547 * @sci_device: This parameter is the handle for the sci core's remote device 3549 * @sci_device: This parameter is the handle for the sci core's remote device
3548 * object that is the destination for this request. 3550 * object that is the destination for this request.
3549 * 3551 *
3550 * SCI_SUCCESS on successful completion, or a specific failure code. 3552 * SCI_SUCCESS on successful completion, or a specific failure code.
3551 */ 3553 */
3552 static enum sci_status isci_io_request_build(struct isci_host *ihost, 3554 static enum sci_status isci_io_request_build(struct isci_host *ihost,
3553 struct isci_request *request, 3555 struct isci_request *request,
3554 struct isci_remote_device *idev) 3556 struct isci_remote_device *idev)
3555 { 3557 {
3556 enum sci_status status = SCI_SUCCESS; 3558 enum sci_status status = SCI_SUCCESS;
3557 struct sas_task *task = isci_request_access_task(request); 3559 struct sas_task *task = isci_request_access_task(request);
3558 3560
3559 dev_dbg(&ihost->pdev->dev, 3561 dev_dbg(&ihost->pdev->dev,
3560 "%s: idev = 0x%p; request = %p, " 3562 "%s: idev = 0x%p; request = %p, "
3561 "num_scatter = %d\n", 3563 "num_scatter = %d\n",
3562 __func__, 3564 __func__,
3563 idev, 3565 idev,
3564 request, 3566 request,
3565 task->num_scatter); 3567 task->num_scatter);
3566 3568
3567 /* map the sgl addresses, if present. 3569 /* map the sgl addresses, if present.
3568 * libata does the mapping for sata devices 3570 * libata does the mapping for sata devices
3569 * before we get the request. 3571 * before we get the request.
3570 */ 3572 */
3571 if (task->num_scatter && 3573 if (task->num_scatter &&
3572 !sas_protocol_ata(task->task_proto) && 3574 !sas_protocol_ata(task->task_proto) &&
3573 !(SAS_PROTOCOL_SMP & task->task_proto)) { 3575 !(SAS_PROTOCOL_SMP & task->task_proto)) {
3574 3576
3575 request->num_sg_entries = dma_map_sg( 3577 request->num_sg_entries = dma_map_sg(
3576 &ihost->pdev->dev, 3578 &ihost->pdev->dev,
3577 task->scatter, 3579 task->scatter,
3578 task->num_scatter, 3580 task->num_scatter,
3579 task->data_dir 3581 task->data_dir
3580 ); 3582 );
3581 3583
3582 if (request->num_sg_entries == 0) 3584 if (request->num_sg_entries == 0)
3583 return SCI_FAILURE_INSUFFICIENT_RESOURCES; 3585 return SCI_FAILURE_INSUFFICIENT_RESOURCES;
3584 } 3586 }
3585 3587
3586 status = sci_io_request_construct(ihost, idev, request); 3588 status = sci_io_request_construct(ihost, idev, request);
3587 3589
3588 if (status != SCI_SUCCESS) { 3590 if (status != SCI_SUCCESS) {
3589 dev_dbg(&ihost->pdev->dev, 3591 dev_dbg(&ihost->pdev->dev,
3590 "%s: failed request construct\n", 3592 "%s: failed request construct\n",
3591 __func__); 3593 __func__);
3592 return SCI_FAILURE; 3594 return SCI_FAILURE;
3593 } 3595 }
3594 3596
3595 switch (task->task_proto) { 3597 switch (task->task_proto) {
3596 case SAS_PROTOCOL_SMP: 3598 case SAS_PROTOCOL_SMP:
3597 status = isci_smp_request_build(request); 3599 status = isci_smp_request_build(request);
3598 break; 3600 break;
3599 case SAS_PROTOCOL_SSP: 3601 case SAS_PROTOCOL_SSP:
3600 status = isci_request_ssp_request_construct(request); 3602 status = isci_request_ssp_request_construct(request);
3601 break; 3603 break;
3602 case SAS_PROTOCOL_SATA: 3604 case SAS_PROTOCOL_SATA:
3603 case SAS_PROTOCOL_STP: 3605 case SAS_PROTOCOL_STP:
3604 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 3606 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
3605 status = isci_request_stp_request_construct(request); 3607 status = isci_request_stp_request_construct(request);
3606 break; 3608 break;
3607 default: 3609 default:
3608 dev_dbg(&ihost->pdev->dev, 3610 dev_dbg(&ihost->pdev->dev,
3609 "%s: unknown protocol\n", __func__); 3611 "%s: unknown protocol\n", __func__);
3610 return SCI_FAILURE; 3612 return SCI_FAILURE;
3611 } 3613 }
3612 3614
3613 return SCI_SUCCESS; 3615 return SCI_SUCCESS;
3614 } 3616 }
3615 3617
3616 static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag) 3618 static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
3617 { 3619 {
3618 struct isci_request *ireq; 3620 struct isci_request *ireq;
3619 3621
3620 ireq = ihost->reqs[ISCI_TAG_TCI(tag)]; 3622 ireq = ihost->reqs[ISCI_TAG_TCI(tag)];
3621 ireq->io_tag = tag; 3623 ireq->io_tag = tag;
3622 ireq->io_request_completion = NULL; 3624 ireq->io_request_completion = NULL;
3623 ireq->flags = 0; 3625 ireq->flags = 0;
3624 ireq->num_sg_entries = 0; 3626 ireq->num_sg_entries = 0;
3625 INIT_LIST_HEAD(&ireq->completed_node); 3627 INIT_LIST_HEAD(&ireq->completed_node);
3626 INIT_LIST_HEAD(&ireq->dev_node); 3628 INIT_LIST_HEAD(&ireq->dev_node);
3627 isci_request_change_state(ireq, allocated); 3629 isci_request_change_state(ireq, allocated);
3628 3630
3629 return ireq; 3631 return ireq;
3630 } 3632 }
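
isci_request_from_tag() indexes the preallocated ihost->reqs[] array with the task context index (TCI) that ISCI_TAG_TCI() extracts from the tag. A sketch of a tag layout consistent with that usage; the exact split (reuse sequence in the high bits, TCI in the low bits) is an assumption here:

	#include <linux/types.h>

	#define SKETCH_TAG_TCI_MASK	0x0fff	/* low bits: task context index */
	#define SKETCH_TAG_SEQ_SHIFT	12	/* high bits: reuse sequence */

	static inline u16 sketch_tag_tci(u16 tag)
	{
		return tag & SKETCH_TAG_TCI_MASK;
	}

	static inline u16 sketch_make_tag(u16 seq, u16 tci)
	{
		return (seq << SKETCH_TAG_SEQ_SHIFT) | (tci & SKETCH_TAG_TCI_MASK);
	}
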
3631 3633
3632 static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost, 3634 static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
3633 struct sas_task *task, 3635 struct sas_task *task,
3634 u16 tag) 3636 u16 tag)
3635 { 3637 {
3636 struct isci_request *ireq; 3638 struct isci_request *ireq;
3637 3639
3638 ireq = isci_request_from_tag(ihost, tag); 3640 ireq = isci_request_from_tag(ihost, tag);
3639 ireq->ttype_ptr.io_task_ptr = task; 3641 ireq->ttype_ptr.io_task_ptr = task;
3640 clear_bit(IREQ_TMF, &ireq->flags); 3642 clear_bit(IREQ_TMF, &ireq->flags);
3641 task->lldd_task = ireq; 3643 task->lldd_task = ireq;
3642 3644
3643 return ireq; 3645 return ireq;
3644 } 3646 }
3645 3647
3646 struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost, 3648 struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
3647 struct isci_tmf *isci_tmf, 3649 struct isci_tmf *isci_tmf,
3648 u16 tag) 3650 u16 tag)
3649 { 3651 {
3650 struct isci_request *ireq; 3652 struct isci_request *ireq;
3651 3653
3652 ireq = isci_request_from_tag(ihost, tag); 3654 ireq = isci_request_from_tag(ihost, tag);
3653 ireq->ttype_ptr.tmf_task_ptr = isci_tmf; 3655 ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
3654 set_bit(IREQ_TMF, &ireq->flags); 3656 set_bit(IREQ_TMF, &ireq->flags);
3655 3657
3656 return ireq; 3658 return ireq;
3657 } 3659 }
3658 3660
3659 int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, 3661 int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
3660 struct sas_task *task, u16 tag) 3662 struct sas_task *task, u16 tag)
3661 { 3663 {
3662 enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; 3664 enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
3663 struct isci_request *ireq; 3665 struct isci_request *ireq;
3664 unsigned long flags; 3666 unsigned long flags;
3665 int ret = 0; 3667 int ret = 0;
3666 3668
3667 /* do common allocation and init of request object. */ 3669 /* do common allocation and init of request object. */
3668 ireq = isci_io_request_from_tag(ihost, task, tag); 3670 ireq = isci_io_request_from_tag(ihost, task, tag);
3669 3671
3670 status = isci_io_request_build(ihost, ireq, idev); 3672 status = isci_io_request_build(ihost, ireq, idev);
3671 if (status != SCI_SUCCESS) { 3673 if (status != SCI_SUCCESS) {
3672 dev_dbg(&ihost->pdev->dev, 3674 dev_dbg(&ihost->pdev->dev,
3673 "%s: request_construct failed - status = 0x%x\n", 3675 "%s: request_construct failed - status = 0x%x\n",
3674 __func__, 3676 __func__,
3675 status); 3677 status);
3676 return status; 3678 return status;
3677 } 3679 }
3678 3680
3679 spin_lock_irqsave(&ihost->scic_lock, flags); 3681 spin_lock_irqsave(&ihost->scic_lock, flags);
3680 3682
3681 if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) { 3683 if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) {
3682 3684
3683 if (isci_task_is_ncq_recovery(task)) { 3685 if (isci_task_is_ncq_recovery(task)) {
3684 3686
3685 /* The device is in an NCQ recovery state. Issue the 3687 /* The device is in an NCQ recovery state. Issue the
3686 * request on the task side. Note that it will 3688 * request on the task side. Note that it will
3687 * complete on the I/O request side because the 3689 * complete on the I/O request side because the
3688 * request was built that way (i.e. 3690 * request was built that way (i.e.
3689 * ireq->is_task_management_request is false). 3691 * ireq->is_task_management_request is false).
3690 */ 3692 */
3691 status = sci_controller_start_task(ihost, 3693 status = sci_controller_start_task(ihost,
3692 idev, 3694 idev,
3693 ireq); 3695 ireq);
3694 } else { 3696 } else {
3695 status = SCI_FAILURE; 3697 status = SCI_FAILURE;
3696 } 3698 }
3697 } else { 3699 } else {
3698 /* send the request, let the core assign the IO TAG. */ 3700 /* send the request, let the core assign the IO TAG. */
3699 status = sci_controller_start_io(ihost, idev, 3701 status = sci_controller_start_io(ihost, idev,
3700 ireq); 3702 ireq);
3701 } 3703 }
3702 3704
3703 if (status != SCI_SUCCESS && 3705 if (status != SCI_SUCCESS &&
3704 status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { 3706 status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
3705 dev_dbg(&ihost->pdev->dev, 3707 dev_dbg(&ihost->pdev->dev,
3706 "%s: failed request start (0x%x)\n", 3708 "%s: failed request start (0x%x)\n",
3707 __func__, status); 3709 __func__, status);
3708 spin_unlock_irqrestore(&ihost->scic_lock, flags); 3710 spin_unlock_irqrestore(&ihost->scic_lock, flags);
3709 return status; 3711 return status;
3710 } 3712 }
3711 3713
3712 /* Either I/O started OK, or the core has signaled that 3714 /* Either I/O started OK, or the core has signaled that
3713 * the device needs a target reset. 3715 * the device needs a target reset.
3714 * 3716 *
3715 * In either case, hold onto the I/O for later. 3717 * In either case, hold onto the I/O for later.
3716 * 3718 *
3717 * Update its status and add it to the list in the 3719 * Update its status and add it to the list in the
3718 * remote device object. 3720 * remote device object.
3719 */ 3721 */
3720 list_add(&ireq->dev_node, &idev->reqs_in_process); 3722 list_add(&ireq->dev_node, &idev->reqs_in_process);
3721 3723
3722 if (status == SCI_SUCCESS) { 3724 if (status == SCI_SUCCESS) {
3723 isci_request_change_state(ireq, started); 3725 isci_request_change_state(ireq, started);
3724 } else { 3726 } else {
3725 /* The request did not really start in the 3727 /* The request did not really start in the
3726 * hardware, so clear the request handle 3728 * hardware, so clear the request handle
3727 * here so no terminations will be done. 3729 * here so no terminations will be done.
3728 */ 3730 */
3729 set_bit(IREQ_TERMINATED, &ireq->flags); 3731 set_bit(IREQ_TERMINATED, &ireq->flags);
3730 isci_request_change_state(ireq, completed); 3732 isci_request_change_state(ireq, completed);
3731 } 3733 }
3732 spin_unlock_irqrestore(&ihost->scic_lock, flags); 3734 spin_unlock_irqrestore(&ihost->scic_lock, flags);
3733 3735
3734 if (status == 3736 if (status ==
3735 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) { 3737 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
3736 /* Signal libsas that we need the SCSI error 3738 /* Signal libsas that we need the SCSI error
3737 * handler thread to work on this I/O and that 3739 * handler thread to work on this I/O and that
3738 * we want a device reset. 3740 * we want a device reset.
3739 */ 3741 */
3740 spin_lock_irqsave(&task->task_state_lock, flags); 3742 spin_lock_irqsave(&task->task_state_lock, flags);
3741 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; 3743 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
3742 spin_unlock_irqrestore(&task->task_state_lock, flags); 3744 spin_unlock_irqrestore(&task->task_state_lock, flags);
3743 3745
3744 /* Cause this task to be scheduled in the SCSI error 3746 /* Cause this task to be scheduled in the SCSI error
3745 * handler thread. 3747 * handler thread.
3746 */ 3748 */
3747 sas_task_abort(task); 3749 sas_task_abort(task);
3748 3750
3749 /* Change the status, since we are holding 3751 /* Change the status, since we are holding
3750 * the I/O until it is managed by the SCSI 3752 * the I/O until it is managed by the SCSI
3751 * error handler. 3753 * error handler.
3752 */ 3754 */
3753 status = SCI_SUCCESS; 3755 status = SCI_SUCCESS;
3754 } 3756 }
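
The flag-then-abort sequence just above (take task_state_lock, set SAS_TASK_NEED_DEV_RESET, call sas_task_abort()) also appears in the SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED completion case earlier in this file. A factored-out sketch of the shared pattern, assuming the same libsas context as this file (the helper name is hypothetical):

	/* Sketch: defer an I/O to the SCSI error handler with a reset request. */
	static void sketch_defer_to_eh(struct sas_task *task)
	{
		unsigned long flags;

		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		sas_task_abort(task);	/* schedules the SCSI EH on this task */
	}
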
drivers/scsi/isci/request.h
1 /* 1 /*
2 * This file is provided under a dual BSD/GPLv2 license. When using or 2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license. 3 * redistributing this file, you may do so under either license.
4 * 4 *
5 * GPL LICENSE SUMMARY 5 * GPL LICENSE SUMMARY
6 * 6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as 10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 * 12 *
13 * This program is distributed in the hope that it will be useful, but 13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of 14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details. 16 * General Public License for more details.
17 * 17 *
18 * You should have received a copy of the GNU General Public License 18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software 19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL. 22 * in the file called LICENSE.GPL.
23 * 23 *
24 * BSD LICENSE 24 * BSD LICENSE
25 * 25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved. 27 * All rights reserved.
28 * 28 *
29 * Redistribution and use in source and binary forms, with or without 29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions 30 * modification, are permitted provided that the following conditions
31 * are met: 31 * are met:
32 * 32 *
33 * * Redistributions of source code must retain the above copyright 33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer. 34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright 35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in 36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the 37 * the documentation and/or other materials provided with the
38 * distribution. 38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its 39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived 40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission. 41 * from this software without specific prior written permission.
42 * 42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */ 54 */
55 55
56 #ifndef _ISCI_REQUEST_H_ 56 #ifndef _ISCI_REQUEST_H_
57 #define _ISCI_REQUEST_H_ 57 #define _ISCI_REQUEST_H_
58 58
59 #include "isci.h" 59 #include "isci.h"
60 #include "host.h" 60 #include "host.h"
61 #include "scu_task_context.h" 61 #include "scu_task_context.h"
62 62
63 /** 63 /**
64 * enum isci_request_status - This enum defines the possible states of an I/O 64 * enum isci_request_status - This enum defines the possible states of an I/O
65 * request. 65 * request.
66 * 66 *
67 * 67 *
68 */ 68 */
69 enum isci_request_status { 69 enum isci_request_status {
70 unallocated = 0x00, 70 unallocated = 0x00,
71 allocated = 0x01, 71 allocated = 0x01,
72 started = 0x02, 72 started = 0x02,
73 completed = 0x03, 73 completed = 0x03,
74 aborting = 0x04, 74 aborting = 0x04,
75 aborted = 0x05, 75 aborted = 0x05,
76 terminating = 0x06, 76 terminating = 0x06,
77 dead = 0x07 77 dead = 0x07
78 }; 78 };
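
The host.c code above moves requests through these states with isci_request_change_state(): allocated when the tag is claimed, started on a successful sci_controller_start_io(), and completed/aborted/dead on the various termination paths. A small illustrative predicate over the enum; which states count as terminal is an assumption for illustration:

	/* Sketch: illustrative only; the terminal set here is an assumption. */
	static inline bool sketch_request_is_terminal(enum isci_request_status s)
	{
		return s == completed || s == aborted || s == dead;
	}
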
79 79
80 /** 80 /**
81 * isci_stp_request - extra request infrastructure to handle pio/atapi protocol 81 * isci_stp_request - extra request infrastructure to handle pio/atapi protocol
82 * @pio_len - number of bytes requested at PIO setup 82 * @pio_len - number of bytes requested at PIO setup
83 * @status - pio setup ending status value to tell us if we need 83 * @status - pio setup ending status value to tell us if we need
84 * to wait for another fis or if the transfer is complete. Upon 84 * to wait for another fis or if the transfer is complete. Upon
85 * receipt of a d2h fis this will be the status field of that fis. 85 * receipt of a d2h fis this will be the status field of that fis.
86 * @sgl - track pio transfer progress as we iterate through the sgl 86 * @sgl - track pio transfer progress as we iterate through the sgl
87 */ 87 */
88 struct isci_stp_request { 88 struct isci_stp_request {
89 u32 pio_len; 89 u32 pio_len;
90 u8 status; 90 u8 status;
91 91
92 struct isci_stp_pio_sgl { 92 struct isci_stp_pio_sgl {
93 int index; 93 int index;
94 u8 set; 94 u8 set;
95 u32 offset; 95 u32 offset;
96 } sgl; 96 } sgl;
97 }; 97 };

struct isci_request {
	enum isci_request_status status;
	#define IREQ_COMPLETE_IN_TARGET 0
	#define IREQ_TERMINATED 1
	#define IREQ_TMF 2
	#define IREQ_ACTIVE 3
	#define IREQ_PENDING_ABORT 4 /* Set == device was not suspended yet */
	#define IREQ_TC_ABORT_POSTED 5
	unsigned long flags;
	/* XXX kill ttype and ttype_ptr, allocate full sas_task */
	union ttype_ptr_union {
		struct sas_task *io_task_ptr;	/* When ttype==io_task */
		struct isci_tmf *tmf_task_ptr;	/* When ttype==tmf_task */
	} ttype_ptr;
	struct isci_host *isci_host;
	/* For use in the requests_to_{complete|abort} lists: */
	struct list_head completed_node;
	/* For use in the reqs_in_process list: */
	struct list_head dev_node;
	spinlock_t state_lock;
	dma_addr_t request_daddr;
	dma_addr_t zero_scatter_daddr;
	unsigned int num_sg_entries;
	/* Note: "io_request_completion" is completed in two different ways
	 * depending on whether this is a TMF or regular request.
	 * - TMF requests are completed in the thread that started them;
	 * - regular requests are completed in the request completion callback
	 *   function.
	 * This difference in operation allows the aborter of a TMF request
	 * to be sure that once the TMF request completes, the I/O that the
	 * TMF was aborting is guaranteed to have completed.
	 *
	 * XXX kill io_request_completion
	 */
	struct completion *io_request_completion;
	struct sci_base_state_machine sm;
	struct isci_host *owning_controller;
	struct isci_remote_device *target_device;
	u16 io_tag;
	enum sas_protocol protocol;
	u32 scu_status;		/* hardware result */
	u32 sci_status;		/* upper layer disposition */
	u32 post_context;
	struct scu_task_context *tc;
	/* could be larger with sg chaining */
	#define SCU_SGL_SIZE ((SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2)
	struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32)));
	/* This field is a pointer to the stored rx frame data.  It is used in
	 * STP internal requests and SMP response frames.  If this field is
	 * non-NULL the saved frame must be released on IO request completion.
	 */
	u32 saved_rx_frame_index;

	union {
		struct {
			union {
				struct ssp_cmd_iu cmd;
				struct ssp_task_iu tmf;
			};
			union {
				struct ssp_response_iu rsp;
				u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
			};
		} ssp;
		struct {
			struct isci_stp_request req;
			struct host_to_dev_fis cmd;
			struct dev_to_host_fis rsp;
		} stp;
	};
};

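/*
 * Illustrative sketch, not part of the original header: the IREQ_* defines
 * above are bit numbers operated on with the kernel's atomic bitops against
 * ireq->flags.  The helper names below are hypothetical; they only show how
 * the newly added abort-tracking bits would plausibly be driven.
 */
static inline void sketch_mark_abort_pending(struct isci_request *ireq)
{
	/* The device RNC is not yet suspended; the TC terminate must wait. */
	set_bit(IREQ_PENDING_ABORT, &ireq->flags);
}

static inline bool sketch_tc_abort_posted(struct isci_request *ireq)
{
	/* True once the abort/terminate TC has been posted to the SCU. */
	return test_bit(IREQ_TC_ABORT_POSTED, &ireq->flags);
}
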
static inline struct isci_request *to_ireq(struct isci_stp_request *stp_req)
{
	struct isci_request *ireq;

	ireq = container_of(stp_req, typeof(*ireq), stp.req);
	return ireq;
}

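/*
 * Usage sketch (hypothetical call site): the STP/PIO handlers are handed the
 * embedded isci_stp_request, and container_of() recovers the owning request:
 *
 *	struct isci_request *ireq = to_ireq(stp_req);
 */
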
/**
 * enum sci_base_request_states - request state machine states
 *
 * @SCI_REQ_INIT: Simply the initial state for the base request state machine.
 *
 * @SCI_REQ_CONSTRUCTED: This state indicates that the request has been
 * constructed.  This state is entered from the INITIAL state.
 *
 * @SCI_REQ_STARTED: This state indicates that the request has been started.
 * This state is entered from the CONSTRUCTED state.
 *
 * @SCI_REQ_STP_UDMA_WAIT_TC_COMP:
 * @SCI_REQ_STP_UDMA_WAIT_D2H:
 * @SCI_REQ_STP_NON_DATA_WAIT_H2D:
 * @SCI_REQ_STP_NON_DATA_WAIT_D2H:
 *
 * @SCI_REQ_STP_PIO_WAIT_H2D: While in this state the IO request object is
 * waiting for the TC completion notification for the H2D Register FIS.
 *
 * @SCI_REQ_STP_PIO_WAIT_FRAME: While in this state the IO request object is
 * waiting for either a PIO Setup FIS or a D2H Register FIS.  The type of frame
 * received is based on the result of the prior frame and line conditions.
 *
 * @SCI_REQ_STP_PIO_DATA_IN: While in this state the IO request object is
 * waiting for a DATA frame from the device.
 *
 * @SCI_REQ_STP_PIO_DATA_OUT: While in this state the IO request object is
 * waiting to transmit the next data frame to the device.
 *
 * @SCI_REQ_ATAPI_WAIT_H2D: While in this state the IO request object is
 * waiting for the TC completion notification for the H2D Register FIS.
 *
 * @SCI_REQ_ATAPI_WAIT_PIO_SETUP: While in this state the IO request object is
 * waiting for a PIO Setup FIS.
 *
 * @SCI_REQ_ATAPI_WAIT_D2H: Non-data IO transitions to this state after
 * receiving a TC completion.  While in this state the IO request object is
 * waiting for the D2H status frame, delivered as an unsolicited frame (UF).
 *
 * @SCI_REQ_ATAPI_WAIT_TC_COMP: When transmitting raw frames the hardware
 * reports a task context completion after every frame submission, so in the
 * non-accelerated case we need to expect the completion for the "cdb" frame.
 *
 * @SCI_REQ_TASK_WAIT_TC_COMP: The AWAIT_TC_COMPLETION sub-state indicates that
 * the started raw task management request is waiting for the transmission of
 * the initial frame (i.e. command, task, etc.).
 *
 * @SCI_REQ_TASK_WAIT_TC_RESP: This sub-state indicates that the started task
 * management request is waiting for the reception of an unsolicited frame
 * (i.e. response IU).
 *
 * @SCI_REQ_SMP_WAIT_RESP: This sub-state indicates that the started SMP
 * request is waiting for the reception of an unsolicited frame
 * (i.e. response IU).
 *
 * @SCI_REQ_SMP_WAIT_TC_COMP: The AWAIT_TC_COMPLETION sub-state indicates that
 * the started SMP request is waiting for the transmission of the initial
 * frame (i.e. command, task, etc.).
 *
 * @SCI_REQ_COMPLETED: This state indicates that the request has completed.
 * This state is entered from the STARTED state and from the ABORTING state.
 *
 * @SCI_REQ_ABORTING: This state indicates that the request is in the process
 * of being terminated/aborted.  This state is entered from the CONSTRUCTED
 * state and from the STARTED state.
 *
 * @SCI_REQ_FINAL: Simply the final state for the base request state machine.
 */
#define REQUEST_STATES {\
	C(REQ_INIT),\
	C(REQ_CONSTRUCTED),\
	C(REQ_STARTED),\
	C(REQ_STP_UDMA_WAIT_TC_COMP),\
	C(REQ_STP_UDMA_WAIT_D2H),\
	C(REQ_STP_NON_DATA_WAIT_H2D),\
	C(REQ_STP_NON_DATA_WAIT_D2H),\
	C(REQ_STP_PIO_WAIT_H2D),\
	C(REQ_STP_PIO_WAIT_FRAME),\
	C(REQ_STP_PIO_DATA_IN),\
	C(REQ_STP_PIO_DATA_OUT),\
	C(REQ_ATAPI_WAIT_H2D),\
	C(REQ_ATAPI_WAIT_PIO_SETUP),\
	C(REQ_ATAPI_WAIT_D2H),\
	C(REQ_ATAPI_WAIT_TC_COMP),\
	C(REQ_TASK_WAIT_TC_COMP),\
	C(REQ_TASK_WAIT_TC_RESP),\
	C(REQ_SMP_WAIT_RESP),\
	C(REQ_SMP_WAIT_TC_COMP),\
	C(REQ_COMPLETED),\
	C(REQ_ABORTING),\
	C(REQ_FINAL),\
	}
#undef C
#define C(a) SCI_##a
enum sci_base_request_states REQUEST_STATES;
#undef C
const char *req_state_name(enum sci_base_request_states state);

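/*
 * Sketch, assuming the same X-macro idiom the isci driver uses for its other
 * *_state_name() helpers: request.c can expand REQUEST_STATES a second time
 * with C() redefined as a stringifier to build the name table:
 *
 *	#undef C
 *	#define C(a) (#a)
 *	static const char * const strings[] = REQUEST_STATES;
 *	#undef C
 *
 *	const char *req_state_name(enum sci_base_request_states state)
 *	{
 *		return strings[state];
 *	}
 */
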
enum sci_status sci_request_start(struct isci_request *ireq);
enum sci_status sci_io_request_terminate(struct isci_request *ireq);
enum sci_status
sci_io_request_event_handler(struct isci_request *ireq,
			     u32 event_code);
enum sci_status
sci_io_request_frame_handler(struct isci_request *ireq,
			     u32 frame_index);
enum sci_status
sci_task_request_terminate(struct isci_request *ireq);
extern enum sci_status
sci_request_complete(struct isci_request *ireq);
extern enum sci_status
sci_io_request_tc_completion(struct isci_request *ireq, u32 code);

/* XXX open code in caller */
static inline dma_addr_t
sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr)
{
	char *requested_addr = (char *)virt_addr;
	char *base_addr = (char *)ireq;

	BUG_ON(requested_addr < base_addr);
	BUG_ON((requested_addr - base_addr) >= sizeof(*ireq));

	return ireq->request_daddr + (requested_addr - base_addr);
}

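/*
 * Usage sketch (hypothetical call site): the whole isci_request lives in one
 * DMA-coherent allocation whose bus address is request_daddr, so the bus
 * address of any embedded member is the base plus its byte offset, e.g. for
 * the SSP command IU:
 *
 *	dma_addr_t cmd_daddr =
 *		sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);
 */
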
/**
 * isci_request_change_state() - This function sets the status of the request
 *	object.
 * @isci_request: This parameter points to the isci_request object
 * @status: This parameter is the new status of the object
 *
 * Return: the state previous to any change.
 */
static inline enum isci_request_status
isci_request_change_state(struct isci_request *isci_request,
			  enum isci_request_status status)
{
	enum isci_request_status old_state;
	unsigned long flags;

	BUG_ON(isci_request == NULL);

	dev_dbg(&isci_request->isci_host->pdev->dev,
		"%s: isci_request = %p, state = 0x%x\n",
		__func__,
		isci_request,
		status);

	spin_lock_irqsave(&isci_request->state_lock, flags);
	old_state = isci_request->status;
	isci_request->status = status;
	spin_unlock_irqrestore(&isci_request->state_lock, flags);

	return old_state;
}

/**
 * isci_request_change_started_to_newstate() - This function sets the status
 *	of the request object, but only if it is currently "started" or
 *	"aborting".
 * @isci_request: This parameter points to the isci_request object
 * @completion_ptr: This parameter is saved as the kernel completion structure
 *	signalled when the request completes.
 * @newstate: This parameter is the new status of the object
 *
 * Return: the state previous to any change.
 */
static inline enum isci_request_status
isci_request_change_started_to_newstate(struct isci_request *isci_request,
					struct completion *completion_ptr,
					enum isci_request_status newstate)
{
	enum isci_request_status old_state;
	unsigned long flags;

	spin_lock_irqsave(&isci_request->state_lock, flags);

	old_state = isci_request->status;

	if (old_state == started || old_state == aborting) {
		BUG_ON(isci_request->io_request_completion != NULL);

		isci_request->io_request_completion = completion_ptr;
		isci_request->status = newstate;
	}

	spin_unlock_irqrestore(&isci_request->state_lock, flags);

	dev_dbg(&isci_request->isci_host->pdev->dev,
		"%s: isci_request = %p, old_state = 0x%x\n",
		__func__,
		isci_request,
		old_state);

	return old_state;
}

/**
 * isci_request_change_started_to_aborted() - This function sets the status of
 *	the request object to "aborted", but only if it is currently "started"
 *	or "aborting".
 * @isci_request: This parameter points to the isci_request object
 * @completion_ptr: This parameter is saved as the kernel completion structure
 *	signalled when the old request completes.
 *
 * Return: the state previous to any change.
 */
static inline enum isci_request_status
isci_request_change_started_to_aborted(struct isci_request *isci_request,
				       struct completion *completion_ptr)
{
	return isci_request_change_started_to_newstate(isci_request,
						       completion_ptr,
						       aborted);
}

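/*
 * Caller sketch (hypothetical): the transition only takes effect while the
 * request is still "started" (or already "aborting"), so an aborter checks
 * the returned state to learn whether its completion was accepted:
 *
 *	struct completion done;
 *	enum isci_request_status old;
 *
 *	init_completion(&done);
 *	old = isci_request_change_started_to_aborted(ireq, &done);
 *	if (old == started || old == aborting)
 *		wait_for_completion(&done);
 */
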
#define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr)

#define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr)

struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
					       struct isci_tmf *isci_tmf,
					       u16 tag);
int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
			 struct sas_task *task, u16 tag);
void isci_terminate_pending_requests(struct isci_host *ihost,
				     struct isci_remote_device *idev);
enum sci_status
sci_task_request_construct(struct isci_host *ihost,
			   struct isci_remote_device *idev,
			   u16 io_tag,
			   struct isci_request *ireq);
enum sci_status sci_task_request_construct_ssp(struct isci_request *ireq);
void sci_smp_request_copy_response(struct isci_request *ireq);

static inline int isci_task_is_ncq_recovery(struct sas_task *task)
{
	return (sas_protocol_ata(task->task_proto) &&
		task->ata_task.fis.command == ATA_CMD_READ_LOG_EXT &&
		task->ata_task.fis.lbal == ATA_LOG_SATA_NCQ);
}
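
/*
 * Background note (an assumption from the SATA spec, not from this header):
 * after an NCQ error the device aborts all queued commands, and recovery
 * reads the NCQ command error log via READ LOG EXT page 10h
 * (ATA_LOG_SATA_NCQ) to learn which tag failed.  This predicate lets the
 * driver special-case that recovery command rather than queue it as
 * ordinary I/O.
 */
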
#endif /* !defined(_ISCI_REQUEST_H_) */