Commit 397497dd61948b0d59d1d21812b93c97b0eeb2dd

Authored by Jeff Skirvin
Committed by Dan Williams
1 parent 87805162b6

isci: Check IDEV_GONE before performing abort path operations.

In the link fail path, set IDEV_GONE for every device on the domain
when the last link in the port fails.

In abort path functions such as isci_reset_device, check IDEV_GONE
before performing any kind of hard reset, SMP phy control, or TMF
operation, to make sure that a domain failure has not already been
detected for the device.

The check for IDEV_GONE makes sure that the device in the abort path
really has control of the port with which it is associated.  This
prevents starting hard resets at incorrect times and scheduling
unnecessary LUN resets for SATA devices.

Signed-off-by: Jeff Skirvin <jeffrey.d.skirvin@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>

Showing 2 changed files with 57 additions and 21 deletions
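
Only the port.c half of the change appears in this excerpt; the task.c
half, which adds the abort path check itself, is not shown. Below is a
minimal sketch of the guard the commit message describes. The function
name, signature, and local names are illustrative assumptions, not the
verbatim patch:

/* Sketch only: bail out of the abort path when a domain failure has
 * already been detected for this device, before any hard reset, SMP
 * phy control, or TMF is started.
 */
static int isci_reset_device_sketch(struct isci_host *ihost,
				    struct isci_remote_device *idev)
{
	unsigned long flags;
	int ret = TMF_RESP_FUNC_COMPLETE;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	if (test_bit(IDEV_GONE, &idev->flags)) {
		/* The device no longer controls its port; skip the
		 * reset entirely and report the task as complete.
		 */
		spin_unlock_irqrestore(&ihost->scic_lock, flags);
		return ret;
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	/* ...safe to proceed with the hard reset / TMF path... */
	return ret;
}

The IDEV_GONE flag tested here is the one set in the port.c hunk below,
in the link-down path, for every remote device on the domain when the
last phy in the port fails; the locking shown is part of the sketch.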

drivers/scsi/isci/port.c
1 /* 1 /*
2 * This file is provided under a dual BSD/GPLv2 license. When using or 2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license. 3 * redistributing this file, you may do so under either license.
4 * 4 *
5 * GPL LICENSE SUMMARY 5 * GPL LICENSE SUMMARY
6 * 6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as 10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 * 12 *
13 * This program is distributed in the hope that it will be useful, but 13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of 14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details. 16 * General Public License for more details.
17 * 17 *
18 * You should have received a copy of the GNU General Public License 18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software 19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL. 22 * in the file called LICENSE.GPL.
23 * 23 *
24 * BSD LICENSE 24 * BSD LICENSE
25 * 25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved. 27 * All rights reserved.
28 * 28 *
29 * Redistribution and use in source and binary forms, with or without 29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions 30 * modification, are permitted provided that the following conditions
31 * are met: 31 * are met:
32 * 32 *
33 * * Redistributions of source code must retain the above copyright 33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer. 34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright 35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in 36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the 37 * the documentation and/or other materials provided with the
38 * distribution. 38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its 39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived 40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission. 41 * from this software without specific prior written permission.
42 * 42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */ 54 */
55 55
56 #include "isci.h" 56 #include "isci.h"
57 #include "port.h" 57 #include "port.h"
58 #include "request.h" 58 #include "request.h"
59 59
60 #define SCIC_SDS_PORT_HARD_RESET_TIMEOUT (1000) 60 #define SCIC_SDS_PORT_HARD_RESET_TIMEOUT (1000)
61 #define SCU_DUMMY_INDEX (0xFFFF) 61 #define SCU_DUMMY_INDEX (0xFFFF)
62 62
63 #undef C 63 #undef C
64 #define C(a) (#a) 64 #define C(a) (#a)
65 const char *port_state_name(enum sci_port_states state) 65 const char *port_state_name(enum sci_port_states state)
66 { 66 {
67 static const char * const strings[] = PORT_STATES; 67 static const char * const strings[] = PORT_STATES;
68 68
69 return strings[state]; 69 return strings[state];
70 } 70 }
71 #undef C 71 #undef C
72 72
73 static struct device *sciport_to_dev(struct isci_port *iport) 73 static struct device *sciport_to_dev(struct isci_port *iport)
74 { 74 {
75 int i = iport->physical_port_index; 75 int i = iport->physical_port_index;
76 struct isci_port *table; 76 struct isci_port *table;
77 struct isci_host *ihost; 77 struct isci_host *ihost;
78 78
79 if (i == SCIC_SDS_DUMMY_PORT) 79 if (i == SCIC_SDS_DUMMY_PORT)
80 i = SCI_MAX_PORTS+1; 80 i = SCI_MAX_PORTS+1;
81 81
82 table = iport - i; 82 table = iport - i;
83 ihost = container_of(table, typeof(*ihost), ports[0]); 83 ihost = container_of(table, typeof(*ihost), ports[0]);
84 84
85 return &ihost->pdev->dev; 85 return &ihost->pdev->dev;
86 } 86 }
87 87
88 static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto) 88 static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto)
89 { 89 {
90 u8 index; 90 u8 index;
91 91
92 proto->all = 0; 92 proto->all = 0;
93 for (index = 0; index < SCI_MAX_PHYS; index++) { 93 for (index = 0; index < SCI_MAX_PHYS; index++) {
94 struct isci_phy *iphy = iport->phy_table[index]; 94 struct isci_phy *iphy = iport->phy_table[index];
95 95
96 if (!iphy) 96 if (!iphy)
97 continue; 97 continue;
98 sci_phy_get_protocols(iphy, proto); 98 sci_phy_get_protocols(iphy, proto);
99 } 99 }
100 } 100 }
101 101
102 static u32 sci_port_get_phys(struct isci_port *iport) 102 static u32 sci_port_get_phys(struct isci_port *iport)
103 { 103 {
104 u32 index; 104 u32 index;
105 u32 mask; 105 u32 mask;
106 106
107 mask = 0; 107 mask = 0;
108 for (index = 0; index < SCI_MAX_PHYS; index++) 108 for (index = 0; index < SCI_MAX_PHYS; index++)
109 if (iport->phy_table[index]) 109 if (iport->phy_table[index])
110 mask |= (1 << index); 110 mask |= (1 << index);
111 111
112 return mask; 112 return mask;
113 } 113 }
114 114
115 /** 115 /**
116 * sci_port_get_properties() - This method simply returns the properties 116 * sci_port_get_properties() - This method simply returns the properties
117 * regarding the port, such as: physical index, protocols, sas address, etc. 117 * regarding the port, such as: physical index, protocols, sas address, etc.
118 * @port: this parameter specifies the port for which to retrieve the physical 118 * @port: this parameter specifies the port for which to retrieve the physical
119 * index. 119 * index.
120 * @properties: This parameter specifies the properties structure into which to 120 * @properties: This parameter specifies the properties structure into which to
121 * copy the requested information. 121 * copy the requested information.
122 * 122 *
123 * Indicate if the user specified a valid port. SCI_SUCCESS This value is 123 * Indicate if the user specified a valid port. SCI_SUCCESS This value is
124 * returned if the specified port was valid. SCI_FAILURE_INVALID_PORT This 124 * returned if the specified port was valid. SCI_FAILURE_INVALID_PORT This
125 * value is returned if the specified port is not valid. When this value is 125 * value is returned if the specified port is not valid. When this value is
126 * returned, no data is copied to the properties output parameter. 126 * returned, no data is copied to the properties output parameter.
127 */ 127 */
128 enum sci_status sci_port_get_properties(struct isci_port *iport, 128 enum sci_status sci_port_get_properties(struct isci_port *iport,
129 struct sci_port_properties *prop) 129 struct sci_port_properties *prop)
130 { 130 {
131 if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT) 131 if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
132 return SCI_FAILURE_INVALID_PORT; 132 return SCI_FAILURE_INVALID_PORT;
133 133
134 prop->index = iport->logical_port_index; 134 prop->index = iport->logical_port_index;
135 prop->phy_mask = sci_port_get_phys(iport); 135 prop->phy_mask = sci_port_get_phys(iport);
136 sci_port_get_sas_address(iport, &prop->local.sas_address); 136 sci_port_get_sas_address(iport, &prop->local.sas_address);
137 sci_port_get_protocols(iport, &prop->local.protocols); 137 sci_port_get_protocols(iport, &prop->local.protocols);
138 sci_port_get_attached_sas_address(iport, &prop->remote.sas_address); 138 sci_port_get_attached_sas_address(iport, &prop->remote.sas_address);
139 139
140 return SCI_SUCCESS; 140 return SCI_SUCCESS;
141 } 141 }
142 142
143 static void sci_port_bcn_enable(struct isci_port *iport) 143 static void sci_port_bcn_enable(struct isci_port *iport)
144 { 144 {
145 struct isci_phy *iphy; 145 struct isci_phy *iphy;
146 u32 val; 146 u32 val;
147 int i; 147 int i;
148 148
149 for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) { 149 for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
150 iphy = iport->phy_table[i]; 150 iphy = iport->phy_table[i];
151 if (!iphy) 151 if (!iphy)
152 continue; 152 continue;
153 val = readl(&iphy->link_layer_registers->link_layer_control); 153 val = readl(&iphy->link_layer_registers->link_layer_control);
154 /* clear the bit by writing 1. */ 154 /* clear the bit by writing 1. */
155 writel(val, &iphy->link_layer_registers->link_layer_control); 155 writel(val, &iphy->link_layer_registers->link_layer_control);
156 } 156 }
157 } 157 }
158 158
159 static void isci_port_bc_change_received(struct isci_host *ihost, 159 static void isci_port_bc_change_received(struct isci_host *ihost,
160 struct isci_port *iport, 160 struct isci_port *iport,
161 struct isci_phy *iphy) 161 struct isci_phy *iphy)
162 { 162 {
163 dev_dbg(&ihost->pdev->dev, 163 dev_dbg(&ihost->pdev->dev,
164 "%s: isci_phy = %p, sas_phy = %p\n", 164 "%s: isci_phy = %p, sas_phy = %p\n",
165 __func__, iphy, &iphy->sas_phy); 165 __func__, iphy, &iphy->sas_phy);
166 166
167 ihost->sas_ha.notify_port_event(&iphy->sas_phy, PORTE_BROADCAST_RCVD); 167 ihost->sas_ha.notify_port_event(&iphy->sas_phy, PORTE_BROADCAST_RCVD);
168 sci_port_bcn_enable(iport); 168 sci_port_bcn_enable(iport);
169 } 169 }
170 170
171 static void isci_port_link_up(struct isci_host *isci_host, 171 static void isci_port_link_up(struct isci_host *isci_host,
172 struct isci_port *iport, 172 struct isci_port *iport,
173 struct isci_phy *iphy) 173 struct isci_phy *iphy)
174 { 174 {
175 unsigned long flags; 175 unsigned long flags;
176 struct sci_port_properties properties; 176 struct sci_port_properties properties;
177 unsigned long success = true; 177 unsigned long success = true;
178 178
179 dev_dbg(&isci_host->pdev->dev, 179 dev_dbg(&isci_host->pdev->dev,
180 "%s: isci_port = %p\n", 180 "%s: isci_port = %p\n",
181 __func__, iport); 181 __func__, iport);
182 182
183 spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags); 183 spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
184 184
185 sci_port_get_properties(iport, &properties); 185 sci_port_get_properties(iport, &properties);
186 186
187 if (iphy->protocol == SAS_PROTOCOL_SATA) { 187 if (iphy->protocol == SAS_PROTOCOL_SATA) {
188 u64 attached_sas_address; 188 u64 attached_sas_address;
189 189
190 iphy->sas_phy.oob_mode = SATA_OOB_MODE; 190 iphy->sas_phy.oob_mode = SATA_OOB_MODE;
191 iphy->sas_phy.frame_rcvd_size = sizeof(struct dev_to_host_fis); 191 iphy->sas_phy.frame_rcvd_size = sizeof(struct dev_to_host_fis);
192 192
193 /* 193 /*
194 * For direct-attached SATA devices, the SCI core will 194 * For direct-attached SATA devices, the SCI core will
195 * automagically assign a SAS address to the end device 195 * automagically assign a SAS address to the end device
196 * for the purpose of creating a port. This SAS address 196 * for the purpose of creating a port. This SAS address
197 * will not be the same as assigned to the PHY and needs 197 * will not be the same as assigned to the PHY and needs
198 * to be obtained from struct sci_port_properties properties. 198 * to be obtained from struct sci_port_properties properties.
199 */ 199 */
200 attached_sas_address = properties.remote.sas_address.high; 200 attached_sas_address = properties.remote.sas_address.high;
201 attached_sas_address <<= 32; 201 attached_sas_address <<= 32;
202 attached_sas_address |= properties.remote.sas_address.low; 202 attached_sas_address |= properties.remote.sas_address.low;
203 swab64s(&attached_sas_address); 203 swab64s(&attached_sas_address);
204 204
205 memcpy(&iphy->sas_phy.attached_sas_addr, 205 memcpy(&iphy->sas_phy.attached_sas_addr,
206 &attached_sas_address, sizeof(attached_sas_address)); 206 &attached_sas_address, sizeof(attached_sas_address));
207 } else if (iphy->protocol == SAS_PROTOCOL_SSP) { 207 } else if (iphy->protocol == SAS_PROTOCOL_SSP) {
208 iphy->sas_phy.oob_mode = SAS_OOB_MODE; 208 iphy->sas_phy.oob_mode = SAS_OOB_MODE;
209 iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame); 209 iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame);
210 210
211 /* Copy the attached SAS address from the IAF */ 211 /* Copy the attached SAS address from the IAF */
212 memcpy(iphy->sas_phy.attached_sas_addr, 212 memcpy(iphy->sas_phy.attached_sas_addr,
213 iphy->frame_rcvd.iaf.sas_addr, SAS_ADDR_SIZE); 213 iphy->frame_rcvd.iaf.sas_addr, SAS_ADDR_SIZE);
214 } else { 214 } else {
215 dev_err(&isci_host->pdev->dev, "%s: unkown target\n", __func__); 215 dev_err(&isci_host->pdev->dev, "%s: unkown target\n", __func__);
216 success = false; 216 success = false;
217 } 217 }
218 218
219 iphy->sas_phy.phy->negotiated_linkrate = sci_phy_linkrate(iphy); 219 iphy->sas_phy.phy->negotiated_linkrate = sci_phy_linkrate(iphy);
220 220
221 spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags); 221 spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
222 222
223 /* Notify libsas that we have an address frame, if indeed 223 /* Notify libsas that we have an address frame, if indeed
224 * we've found an SSP, SMP, or STP target */ 224 * we've found an SSP, SMP, or STP target */
225 if (success) 225 if (success)
226 isci_host->sas_ha.notify_port_event(&iphy->sas_phy, 226 isci_host->sas_ha.notify_port_event(&iphy->sas_phy,
227 PORTE_BYTES_DMAED); 227 PORTE_BYTES_DMAED);
228 } 228 }
229 229
230 230
231 /** 231 /**
232 * isci_port_link_down() - This function is called by the sci core when a link 232 * isci_port_link_down() - This function is called by the sci core when a link
233 * becomes inactive. 233 * becomes inactive.
234 * @isci_host: This parameter specifies the isci host object. 234 * @isci_host: This parameter specifies the isci host object.
235 * @phy: This parameter specifies the isci phy with the active link. 235 * @phy: This parameter specifies the isci phy with the active link.
236 * @port: This parameter specifies the isci port with the active link. 236 * @port: This parameter specifies the isci port with the active link.
237 * 237 *
238 */ 238 */
239 static void isci_port_link_down(struct isci_host *isci_host, 239 static void isci_port_link_down(struct isci_host *isci_host,
240 struct isci_phy *isci_phy, 240 struct isci_phy *isci_phy,
241 struct isci_port *isci_port) 241 struct isci_port *isci_port)
242 { 242 {
243 struct isci_remote_device *isci_device;
244
243 dev_dbg(&isci_host->pdev->dev, 245 dev_dbg(&isci_host->pdev->dev,
244 "%s: isci_port = %p\n", __func__, isci_port); 246 "%s: isci_port = %p\n", __func__, isci_port);
247
248 if (isci_port) {
249
250 /* check to see if this is the last phy on this port. */
251 if (isci_phy->sas_phy.port &&
252 isci_phy->sas_phy.port->num_phys == 1) {
253 /* change the state for all devices on this port. The
254 * next task sent to this device will be returned as
255 * SAS_TASK_UNDELIVERED, and the scsi mid layer will
256 * remove the target
257 */
258 list_for_each_entry(isci_device,
259 &isci_port->remote_dev_list,
260 node) {
261 dev_dbg(&isci_host->pdev->dev,
262 "%s: isci_device = %p\n",
263 __func__, isci_device);
264 set_bit(IDEV_GONE, &isci_device->flags);
265 }
266 }
267 }
245 268
246 /* Notify libsas of the borken link, this will trigger calls to our 269 /* Notify libsas of the borken link, this will trigger calls to our
247 * isci_port_deformed and isci_dev_gone functions. 270 * isci_port_deformed and isci_dev_gone functions.
248 */ 271 */
249 sas_phy_disconnected(&isci_phy->sas_phy); 272 sas_phy_disconnected(&isci_phy->sas_phy);
250 isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy, 273 isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy,
251 PHYE_LOSS_OF_SIGNAL); 274 PHYE_LOSS_OF_SIGNAL);
252 275
253 dev_dbg(&isci_host->pdev->dev, 276 dev_dbg(&isci_host->pdev->dev,
254 "%s: isci_port = %p - Done\n", __func__, isci_port); 277 "%s: isci_port = %p - Done\n", __func__, isci_port);
255 } 278 }
256 279
257 static bool is_port_ready_state(enum sci_port_states state) 280 static bool is_port_ready_state(enum sci_port_states state)
258 { 281 {
259 switch (state) { 282 switch (state) {
260 case SCI_PORT_READY: 283 case SCI_PORT_READY:
261 case SCI_PORT_SUB_WAITING: 284 case SCI_PORT_SUB_WAITING:
262 case SCI_PORT_SUB_OPERATIONAL: 285 case SCI_PORT_SUB_OPERATIONAL:
263 case SCI_PORT_SUB_CONFIGURING: 286 case SCI_PORT_SUB_CONFIGURING:
264 return true; 287 return true;
265 default: 288 default:
266 return false; 289 return false;
267 } 290 }
268 } 291 }
269 292
270 /* flag dummy rnc hanling when exiting a ready state */ 293 /* flag dummy rnc hanling when exiting a ready state */
271 static void port_state_machine_change(struct isci_port *iport, 294 static void port_state_machine_change(struct isci_port *iport,
272 enum sci_port_states state) 295 enum sci_port_states state)
273 { 296 {
274 struct sci_base_state_machine *sm = &iport->sm; 297 struct sci_base_state_machine *sm = &iport->sm;
275 enum sci_port_states old_state = sm->current_state_id; 298 enum sci_port_states old_state = sm->current_state_id;
276 299
277 if (is_port_ready_state(old_state) && !is_port_ready_state(state)) 300 if (is_port_ready_state(old_state) && !is_port_ready_state(state))
278 iport->ready_exit = true; 301 iport->ready_exit = true;
279 302
280 sci_change_state(sm, state); 303 sci_change_state(sm, state);
281 iport->ready_exit = false; 304 iport->ready_exit = false;
282 } 305 }
283 306
284 /** 307 /**
285 * isci_port_hard_reset_complete() - This function is called by the sci core 308 * isci_port_hard_reset_complete() - This function is called by the sci core
286 * when the hard reset complete notification has been received. 309 * when the hard reset complete notification has been received.
287 * @port: This parameter specifies the sci port with the active link. 310 * @port: This parameter specifies the sci port with the active link.
288 * @completion_status: This parameter specifies the core status for the reset 311 * @completion_status: This parameter specifies the core status for the reset
289 * process. 312 * process.
290 * 313 *
291 */ 314 */
292 static void isci_port_hard_reset_complete(struct isci_port *isci_port, 315 static void isci_port_hard_reset_complete(struct isci_port *isci_port,
293 enum sci_status completion_status) 316 enum sci_status completion_status)
294 { 317 {
295 struct isci_host *ihost = isci_port->owning_controller; 318 struct isci_host *ihost = isci_port->owning_controller;
296 319
297 dev_dbg(&ihost->pdev->dev, 320 dev_dbg(&ihost->pdev->dev,
298 "%s: isci_port = %p, completion_status=%x\n", 321 "%s: isci_port = %p, completion_status=%x\n",
299 __func__, isci_port, completion_status); 322 __func__, isci_port, completion_status);
300 323
301 /* Save the status of the hard reset from the port. */ 324 /* Save the status of the hard reset from the port. */
302 isci_port->hard_reset_status = completion_status; 325 isci_port->hard_reset_status = completion_status;
303 326
304 if (completion_status != SCI_SUCCESS) { 327 if (completion_status != SCI_SUCCESS) {
305 328
306 /* The reset failed. The port state is now SCI_PORT_FAILED. */ 329 /* The reset failed. The port state is now SCI_PORT_FAILED. */
307 if (isci_port->active_phy_mask == 0) { 330 if (isci_port->active_phy_mask == 0) {
308 int phy_idx = isci_port->last_active_phy; 331 int phy_idx = isci_port->last_active_phy;
309 struct isci_phy *iphy = &ihost->phys[phy_idx]; 332 struct isci_phy *iphy = &ihost->phys[phy_idx];
310 333
311 /* Generate the link down now to the host, since it 334 /* Generate the link down now to the host, since it
312 * was intercepted by the hard reset state machine when 335 * was intercepted by the hard reset state machine when
313 * it really happened. 336 * it really happened.
314 */ 337 */
315 isci_port_link_down(ihost, iphy, isci_port); 338 isci_port_link_down(ihost, iphy, isci_port);
316 } 339 }
317 /* Advance the port state so that link state changes will be 340 /* Advance the port state so that link state changes will be
318 * noticed. 341 * noticed.
319 */ 342 */
320 port_state_machine_change(isci_port, SCI_PORT_SUB_WAITING); 343 port_state_machine_change(isci_port, SCI_PORT_SUB_WAITING);
321 344
322 } 345 }
323 clear_bit(IPORT_RESET_PENDING, &isci_port->state); 346 clear_bit(IPORT_RESET_PENDING, &isci_port->state);
324 wake_up(&ihost->eventq); 347 wake_up(&ihost->eventq);
325 348
326 } 349 }
327 350
328 /* This method will return a true value if the specified phy can be assigned to 351 /* This method will return a true value if the specified phy can be assigned to
329 * this port The following is a list of phys for each port that are allowed: - 352 * this port The following is a list of phys for each port that are allowed: -
330 * Port 0 - 3 2 1 0 - Port 1 - 1 - Port 2 - 3 2 - Port 3 - 3 This method 353 * Port 0 - 3 2 1 0 - Port 1 - 1 - Port 2 - 3 2 - Port 3 - 3 This method
331 * doesn't preclude all configurations. It merely ensures that a phy is part 354 * doesn't preclude all configurations. It merely ensures that a phy is part
332 * of the allowable set of phy identifiers for that port. For example, one 355 * of the allowable set of phy identifiers for that port. For example, one
333 * could assign phy 3 to port 0 and no other phys. Please refer to 356 * could assign phy 3 to port 0 and no other phys. Please refer to
334 * sci_port_is_phy_mask_valid() for information regarding whether the 357 * sci_port_is_phy_mask_valid() for information regarding whether the
335 * phy_mask for a port can be supported. bool true if this is a valid phy 358 * phy_mask for a port can be supported. bool true if this is a valid phy
336 * assignment for the port false if this is not a valid phy assignment for the 359 * assignment for the port false if this is not a valid phy assignment for the
337 * port 360 * port
338 */ 361 */
339 bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index) 362 bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index)
340 { 363 {
341 struct isci_host *ihost = iport->owning_controller; 364 struct isci_host *ihost = iport->owning_controller;
342 struct sci_user_parameters *user = &ihost->user_parameters; 365 struct sci_user_parameters *user = &ihost->user_parameters;
343 366
344 /* Initialize to invalid value. */ 367 /* Initialize to invalid value. */
345 u32 existing_phy_index = SCI_MAX_PHYS; 368 u32 existing_phy_index = SCI_MAX_PHYS;
346 u32 index; 369 u32 index;
347 370
348 if ((iport->physical_port_index == 1) && (phy_index != 1)) 371 if ((iport->physical_port_index == 1) && (phy_index != 1))
349 return false; 372 return false;
350 373
351 if (iport->physical_port_index == 3 && phy_index != 3) 374 if (iport->physical_port_index == 3 && phy_index != 3)
352 return false; 375 return false;
353 376
354 if (iport->physical_port_index == 2 && 377 if (iport->physical_port_index == 2 &&
355 (phy_index == 0 || phy_index == 1)) 378 (phy_index == 0 || phy_index == 1))
356 return false; 379 return false;
357 380
358 for (index = 0; index < SCI_MAX_PHYS; index++) 381 for (index = 0; index < SCI_MAX_PHYS; index++)
359 if (iport->phy_table[index] && index != phy_index) 382 if (iport->phy_table[index] && index != phy_index)
360 existing_phy_index = index; 383 existing_phy_index = index;
361 384
362 /* Ensure that all of the phys in the port are capable of 385 /* Ensure that all of the phys in the port are capable of
363 * operating at the same maximum link rate. 386 * operating at the same maximum link rate.
364 */ 387 */
365 if (existing_phy_index < SCI_MAX_PHYS && 388 if (existing_phy_index < SCI_MAX_PHYS &&
366 user->phys[phy_index].max_speed_generation != 389 user->phys[phy_index].max_speed_generation !=
367 user->phys[existing_phy_index].max_speed_generation) 390 user->phys[existing_phy_index].max_speed_generation)
368 return false; 391 return false;
369 392
370 return true; 393 return true;
371 } 394 }
372 395
373 /** 396 /**
374 * 397 *
375 * @sci_port: This is the port object for which to determine if the phy mask 398 * @sci_port: This is the port object for which to determine if the phy mask
376 * can be supported. 399 * can be supported.
377 * 400 *
378 * This method will return a true value if the port's phy mask can be supported 401 * This method will return a true value if the port's phy mask can be supported
379 * by the SCU. The following is a list of valid PHY mask configurations for 402 * by the SCU. The following is a list of valid PHY mask configurations for
380 * each port: - Port 0 - [[3 2] 1] 0 - Port 1 - [1] - Port 2 - [[3] 2] 403 * each port: - Port 0 - [[3 2] 1] 0 - Port 1 - [1] - Port 2 - [[3] 2]
381 * - Port 3 - [3] This method returns a boolean indication specifying if the 404 * - Port 3 - [3] This method returns a boolean indication specifying if the
382 * phy mask can be supported. true if this is a valid phy assignment for the 405 * phy mask can be supported. true if this is a valid phy assignment for the
383 * port false if this is not a valid phy assignment for the port 406 * port false if this is not a valid phy assignment for the port
384 */ 407 */
385 static bool sci_port_is_phy_mask_valid( 408 static bool sci_port_is_phy_mask_valid(
386 struct isci_port *iport, 409 struct isci_port *iport,
387 u32 phy_mask) 410 u32 phy_mask)
388 { 411 {
389 if (iport->physical_port_index == 0) { 412 if (iport->physical_port_index == 0) {
390 if (((phy_mask & 0x0F) == 0x0F) 413 if (((phy_mask & 0x0F) == 0x0F)
391 || ((phy_mask & 0x03) == 0x03) 414 || ((phy_mask & 0x03) == 0x03)
392 || ((phy_mask & 0x01) == 0x01) 415 || ((phy_mask & 0x01) == 0x01)
393 || (phy_mask == 0)) 416 || (phy_mask == 0))
394 return true; 417 return true;
395 } else if (iport->physical_port_index == 1) { 418 } else if (iport->physical_port_index == 1) {
396 if (((phy_mask & 0x02) == 0x02) 419 if (((phy_mask & 0x02) == 0x02)
397 || (phy_mask == 0)) 420 || (phy_mask == 0))
398 return true; 421 return true;
399 } else if (iport->physical_port_index == 2) { 422 } else if (iport->physical_port_index == 2) {
400 if (((phy_mask & 0x0C) == 0x0C) 423 if (((phy_mask & 0x0C) == 0x0C)
401 || ((phy_mask & 0x04) == 0x04) 424 || ((phy_mask & 0x04) == 0x04)
402 || (phy_mask == 0)) 425 || (phy_mask == 0))
403 return true; 426 return true;
404 } else if (iport->physical_port_index == 3) { 427 } else if (iport->physical_port_index == 3) {
405 if (((phy_mask & 0x08) == 0x08) 428 if (((phy_mask & 0x08) == 0x08)
406 || (phy_mask == 0)) 429 || (phy_mask == 0))
407 return true; 430 return true;
408 } 431 }
409 432
410 return false; 433 return false;
411 } 434 }
412 435
413 /* 436 /*
414 * This method retrieves a currently active (i.e. connected) phy contained in 437 * This method retrieves a currently active (i.e. connected) phy contained in
415 * the port. Currently, the lowest order phy that is connected is returned. 438 * the port. Currently, the lowest order phy that is connected is returned.
416 * This method returns a pointer to a SCIS_SDS_PHY object. NULL This value is 439 * This method returns a pointer to a SCIS_SDS_PHY object. NULL This value is
417 * returned if there are no currently active (i.e. connected to a remote end 440 * returned if there are no currently active (i.e. connected to a remote end
418 * point) phys contained in the port. All other values specify a struct sci_phy 441 * point) phys contained in the port. All other values specify a struct sci_phy
419 * object that is active in the port. 442 * object that is active in the port.
420 */ 443 */
421 static struct isci_phy *sci_port_get_a_connected_phy(struct isci_port *iport) 444 static struct isci_phy *sci_port_get_a_connected_phy(struct isci_port *iport)
422 { 445 {
423 u32 index; 446 u32 index;
424 struct isci_phy *iphy; 447 struct isci_phy *iphy;
425 448
426 for (index = 0; index < SCI_MAX_PHYS; index++) { 449 for (index = 0; index < SCI_MAX_PHYS; index++) {
427 /* Ensure that the phy is both part of the port and currently 450 /* Ensure that the phy is both part of the port and currently
428 * connected to the remote end-point. 451 * connected to the remote end-point.
429 */ 452 */
430 iphy = iport->phy_table[index]; 453 iphy = iport->phy_table[index];
431 if (iphy && sci_port_active_phy(iport, iphy)) 454 if (iphy && sci_port_active_phy(iport, iphy))
432 return iphy; 455 return iphy;
433 } 456 }
434 457
435 return NULL; 458 return NULL;
436 } 459 }
437 460
438 static enum sci_status sci_port_set_phy(struct isci_port *iport, struct isci_phy *iphy) 461 static enum sci_status sci_port_set_phy(struct isci_port *iport, struct isci_phy *iphy)
439 { 462 {
440 /* Check to see if we can add this phy to a port 463 /* Check to see if we can add this phy to a port
441 * that means that the phy is not part of a port and that the port does 464 * that means that the phy is not part of a port and that the port does
442 * not already have a phy assinged to the phy index. 465 * not already have a phy assinged to the phy index.
443 */ 466 */
444 if (!iport->phy_table[iphy->phy_index] && 467 if (!iport->phy_table[iphy->phy_index] &&
445 !phy_get_non_dummy_port(iphy) && 468 !phy_get_non_dummy_port(iphy) &&
446 sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) { 469 sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
447 /* Phy is being added in the stopped state so we are in MPC mode 470 /* Phy is being added in the stopped state so we are in MPC mode
448 * make logical port index = physical port index 471 * make logical port index = physical port index
449 */ 472 */
450 iport->logical_port_index = iport->physical_port_index; 473 iport->logical_port_index = iport->physical_port_index;
451 iport->phy_table[iphy->phy_index] = iphy; 474 iport->phy_table[iphy->phy_index] = iphy;
452 sci_phy_set_port(iphy, iport); 475 sci_phy_set_port(iphy, iport);
453 476
454 return SCI_SUCCESS; 477 return SCI_SUCCESS;
455 } 478 }
456 479
457 return SCI_FAILURE; 480 return SCI_FAILURE;
458 } 481 }
459 482
460 static enum sci_status sci_port_clear_phy(struct isci_port *iport, struct isci_phy *iphy) 483 static enum sci_status sci_port_clear_phy(struct isci_port *iport, struct isci_phy *iphy)
461 { 484 {
462 /* Make sure that this phy is part of this port */ 485 /* Make sure that this phy is part of this port */
463 if (iport->phy_table[iphy->phy_index] == iphy && 486 if (iport->phy_table[iphy->phy_index] == iphy &&
464 phy_get_non_dummy_port(iphy) == iport) { 487 phy_get_non_dummy_port(iphy) == iport) {
465 struct isci_host *ihost = iport->owning_controller; 488 struct isci_host *ihost = iport->owning_controller;
466 489
467 /* Yep it is assigned to this port so remove it */ 490 /* Yep it is assigned to this port so remove it */
468 sci_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]); 491 sci_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]);
469 iport->phy_table[iphy->phy_index] = NULL; 492 iport->phy_table[iphy->phy_index] = NULL;
470 return SCI_SUCCESS; 493 return SCI_SUCCESS;
471 } 494 }
472 495
473 return SCI_FAILURE; 496 return SCI_FAILURE;
474 } 497 }
475 498
476 void sci_port_get_sas_address(struct isci_port *iport, struct sci_sas_address *sas) 499 void sci_port_get_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
477 { 500 {
478 u32 index; 501 u32 index;
479 502
480 sas->high = 0; 503 sas->high = 0;
481 sas->low = 0; 504 sas->low = 0;
482 for (index = 0; index < SCI_MAX_PHYS; index++) 505 for (index = 0; index < SCI_MAX_PHYS; index++)
483 if (iport->phy_table[index]) 506 if (iport->phy_table[index])
484 sci_phy_get_sas_address(iport->phy_table[index], sas); 507 sci_phy_get_sas_address(iport->phy_table[index], sas);
485 } 508 }
486 509
487 void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_address *sas) 510 void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
488 { 511 {
489 struct isci_phy *iphy; 512 struct isci_phy *iphy;
490 513
491 /* 514 /*
492 * Ensure that the phy is both part of the port and currently 515 * Ensure that the phy is both part of the port and currently
493 * connected to the remote end-point. 516 * connected to the remote end-point.
494 */ 517 */
495 iphy = sci_port_get_a_connected_phy(iport); 518 iphy = sci_port_get_a_connected_phy(iport);
496 if (iphy) { 519 if (iphy) {
497 if (iphy->protocol != SAS_PROTOCOL_SATA) { 520 if (iphy->protocol != SAS_PROTOCOL_SATA) {
498 sci_phy_get_attached_sas_address(iphy, sas); 521 sci_phy_get_attached_sas_address(iphy, sas);
499 } else { 522 } else {
500 sci_phy_get_sas_address(iphy, sas); 523 sci_phy_get_sas_address(iphy, sas);
501 sas->low += iphy->phy_index; 524 sas->low += iphy->phy_index;
502 } 525 }
503 } else { 526 } else {
504 sas->high = 0; 527 sas->high = 0;
505 sas->low = 0; 528 sas->low = 0;
506 } 529 }
507 } 530 }
508 531
509 /** 532 /**
510 * sci_port_construct_dummy_rnc() - create dummy rnc for si workaround 533 * sci_port_construct_dummy_rnc() - create dummy rnc for si workaround
511 * 534 *
512 * @sci_port: logical port on which we need to create the remote node context 535 * @sci_port: logical port on which we need to create the remote node context
513 * @rni: remote node index for this remote node context. 536 * @rni: remote node index for this remote node context.
514 * 537 *
515 * This routine will construct a dummy remote node context data structure 538 * This routine will construct a dummy remote node context data structure
516 * This structure will be posted to the hardware to work around a scheduler 539 * This structure will be posted to the hardware to work around a scheduler
517 * error in the hardware. 540 * error in the hardware.
518 */ 541 */
519 static void sci_port_construct_dummy_rnc(struct isci_port *iport, u16 rni) 542 static void sci_port_construct_dummy_rnc(struct isci_port *iport, u16 rni)
520 { 543 {
521 union scu_remote_node_context *rnc; 544 union scu_remote_node_context *rnc;
522 545
523 rnc = &iport->owning_controller->remote_node_context_table[rni]; 546 rnc = &iport->owning_controller->remote_node_context_table[rni];
524 547
525 memset(rnc, 0, sizeof(union scu_remote_node_context)); 548 memset(rnc, 0, sizeof(union scu_remote_node_context));
526 549
527 rnc->ssp.remote_sas_address_hi = 0; 550 rnc->ssp.remote_sas_address_hi = 0;
528 rnc->ssp.remote_sas_address_lo = 0; 551 rnc->ssp.remote_sas_address_lo = 0;
529 552
530 rnc->ssp.remote_node_index = rni; 553 rnc->ssp.remote_node_index = rni;
531 rnc->ssp.remote_node_port_width = 1; 554 rnc->ssp.remote_node_port_width = 1;
532 rnc->ssp.logical_port_index = iport->physical_port_index; 555 rnc->ssp.logical_port_index = iport->physical_port_index;
533 556
534 rnc->ssp.nexus_loss_timer_enable = false; 557 rnc->ssp.nexus_loss_timer_enable = false;
535 rnc->ssp.check_bit = false; 558 rnc->ssp.check_bit = false;
536 rnc->ssp.is_valid = true; 559 rnc->ssp.is_valid = true;
537 rnc->ssp.is_remote_node_context = true; 560 rnc->ssp.is_remote_node_context = true;
538 rnc->ssp.function_number = 0; 561 rnc->ssp.function_number = 0;
539 rnc->ssp.arbitration_wait_time = 0; 562 rnc->ssp.arbitration_wait_time = 0;
540 } 563 }
541 564
542 /* 565 /*
543 * construct a dummy task context data structure. This 566 * construct a dummy task context data structure. This
544 * structure will be posted to the hardwre to work around a scheduler error 567 * structure will be posted to the hardwre to work around a scheduler error
545 * in the hardware. 568 * in the hardware.
546 */ 569 */
547 static void sci_port_construct_dummy_task(struct isci_port *iport, u16 tag) 570 static void sci_port_construct_dummy_task(struct isci_port *iport, u16 tag)
548 { 571 {
549 struct isci_host *ihost = iport->owning_controller; 572 struct isci_host *ihost = iport->owning_controller;
550 struct scu_task_context *task_context; 573 struct scu_task_context *task_context;
551 574
552 task_context = &ihost->task_context_table[ISCI_TAG_TCI(tag)]; 575 task_context = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
553 memset(task_context, 0, sizeof(struct scu_task_context)); 576 memset(task_context, 0, sizeof(struct scu_task_context));
554 577
555 task_context->initiator_request = 1; 578 task_context->initiator_request = 1;
556 task_context->connection_rate = 1; 579 task_context->connection_rate = 1;
557 task_context->logical_port_index = iport->physical_port_index; 580 task_context->logical_port_index = iport->physical_port_index;
558 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP; 581 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
559 task_context->task_index = ISCI_TAG_TCI(tag); 582 task_context->task_index = ISCI_TAG_TCI(tag);
560 task_context->valid = SCU_TASK_CONTEXT_VALID; 583 task_context->valid = SCU_TASK_CONTEXT_VALID;
561 task_context->context_type = SCU_TASK_CONTEXT_TYPE; 584 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
562 task_context->remote_node_index = iport->reserved_rni; 585 task_context->remote_node_index = iport->reserved_rni;
563 task_context->do_not_dma_ssp_good_response = 1; 586 task_context->do_not_dma_ssp_good_response = 1;
564 task_context->task_phase = 0x01; 587 task_context->task_phase = 0x01;
565 } 588 }
566 589
567 static void sci_port_destroy_dummy_resources(struct isci_port *iport) 590 static void sci_port_destroy_dummy_resources(struct isci_port *iport)
568 { 591 {
569 struct isci_host *ihost = iport->owning_controller; 592 struct isci_host *ihost = iport->owning_controller;
570 593
571 if (iport->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG) 594 if (iport->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG)
572 isci_free_tag(ihost, iport->reserved_tag); 595 isci_free_tag(ihost, iport->reserved_tag);
573 596
574 if (iport->reserved_rni != SCU_DUMMY_INDEX) 597 if (iport->reserved_rni != SCU_DUMMY_INDEX)
575 sci_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes, 598 sci_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes,
576 1, iport->reserved_rni); 599 1, iport->reserved_rni);
577 600
578 iport->reserved_rni = SCU_DUMMY_INDEX; 601 iport->reserved_rni = SCU_DUMMY_INDEX;
579 iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG; 602 iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
580 } 603 }
581 604
582 void sci_port_setup_transports(struct isci_port *iport, u32 device_id) 605 void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
583 { 606 {
584 u8 index; 607 u8 index;
585 608
586 for (index = 0; index < SCI_MAX_PHYS; index++) { 609 for (index = 0; index < SCI_MAX_PHYS; index++) {
587 if (iport->active_phy_mask & (1 << index)) 610 if (iport->active_phy_mask & (1 << index))
588 sci_phy_setup_transport(iport->phy_table[index], device_id); 611 sci_phy_setup_transport(iport->phy_table[index], device_id);
589 } 612 }
590 } 613 }
591 614
592 static void sci_port_resume_phy(struct isci_port *iport, struct isci_phy *iphy) 615 static void sci_port_resume_phy(struct isci_port *iport, struct isci_phy *iphy)
593 { 616 {
594 sci_phy_resume(iphy); 617 sci_phy_resume(iphy);
595 iport->enabled_phy_mask |= 1 << iphy->phy_index; 618 iport->enabled_phy_mask |= 1 << iphy->phy_index;
596 } 619 }
597 620
598 static void sci_port_activate_phy(struct isci_port *iport, 621 static void sci_port_activate_phy(struct isci_port *iport,
599 struct isci_phy *iphy, 622 struct isci_phy *iphy,
600 u8 flags) 623 u8 flags)
601 { 624 {
602 struct isci_host *ihost = iport->owning_controller; 625 struct isci_host *ihost = iport->owning_controller;
603 626
604 if (iphy->protocol != SAS_PROTOCOL_SATA && (flags & PF_RESUME)) 627 if (iphy->protocol != SAS_PROTOCOL_SATA && (flags & PF_RESUME))
605 sci_phy_resume(iphy); 628 sci_phy_resume(iphy);
606 629
607 iport->active_phy_mask |= 1 << iphy->phy_index; 630 iport->active_phy_mask |= 1 << iphy->phy_index;
608 631
609 sci_controller_clear_invalid_phy(ihost, iphy); 632 sci_controller_clear_invalid_phy(ihost, iphy);
610 633
611 if (flags & PF_NOTIFY) 634 if (flags & PF_NOTIFY)
612 isci_port_link_up(ihost, iport, iphy); 635 isci_port_link_up(ihost, iport, iphy);
613 } 636 }
614 637
615 void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy, 638 void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
616 bool do_notify_user) 639 bool do_notify_user)
617 { 640 {
618 struct isci_host *ihost = iport->owning_controller; 641 struct isci_host *ihost = iport->owning_controller;
619 642
620 iport->active_phy_mask &= ~(1 << iphy->phy_index); 643 iport->active_phy_mask &= ~(1 << iphy->phy_index);
621 iport->enabled_phy_mask &= ~(1 << iphy->phy_index); 644 iport->enabled_phy_mask &= ~(1 << iphy->phy_index);
622 if (!iport->active_phy_mask) 645 if (!iport->active_phy_mask)
623 iport->last_active_phy = iphy->phy_index; 646 iport->last_active_phy = iphy->phy_index;
624 647
625 iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN; 648 iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
626 649
627 /* Re-assign the phy back to the LP as if it were a narrow port for APC 650 /* Re-assign the phy back to the LP as if it were a narrow port for APC
628 * mode. For MPC mode, the phy will remain in the port. 651 * mode. For MPC mode, the phy will remain in the port.
629 */ 652 */
630 if (iport->owning_controller->oem_parameters.controller.mode_type == 653 if (iport->owning_controller->oem_parameters.controller.mode_type ==
631 SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) 654 SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE)
632 writel(iphy->phy_index, 655 writel(iphy->phy_index,
633 &iport->port_pe_configuration_register[iphy->phy_index]); 656 &iport->port_pe_configuration_register[iphy->phy_index]);
634 657
635 if (do_notify_user == true) 658 if (do_notify_user == true)
636 isci_port_link_down(ihost, iphy, iport); 659 isci_port_link_down(ihost, iphy, iport);
637 } 660 }
638 661
639 static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy) 662 static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy)
640 { 663 {
641 struct isci_host *ihost = iport->owning_controller; 664 struct isci_host *ihost = iport->owning_controller;
642 665
643 /* 666 /*
644 * Check to see if we have alreay reported this link as bad and if 667 * Check to see if we have alreay reported this link as bad and if
645 * not go ahead and tell the SCI_USER that we have discovered an 668 * not go ahead and tell the SCI_USER that we have discovered an
646 * invalid link. 669 * invalid link.
647 */ 670 */
648 if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) { 671 if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) {
649 ihost->invalid_phy_mask |= 1 << iphy->phy_index; 672 ihost->invalid_phy_mask |= 1 << iphy->phy_index;
650 dev_warn(&ihost->pdev->dev, "Invalid link up!\n"); 673 dev_warn(&ihost->pdev->dev, "Invalid link up!\n");
651 } 674 }
652 } 675 }
653 676
654 /** 677 /**
655 * sci_port_general_link_up_handler - phy can be assigned to port? 678 * sci_port_general_link_up_handler - phy can be assigned to port?
656 * @sci_port: sci_port object for which has a phy that has gone link up. 679 * @sci_port: sci_port object for which has a phy that has gone link up.
657 * @sci_phy: This is the struct isci_phy object that has gone link up. 680 * @sci_phy: This is the struct isci_phy object that has gone link up.
658 * @flags: PF_RESUME, PF_NOTIFY to sci_port_activate_phy 681 * @flags: PF_RESUME, PF_NOTIFY to sci_port_activate_phy
659 * 682 *
660 * Determine if this phy can be assigned to this port . If the phy is 683 * Determine if this phy can be assigned to this port . If the phy is
661 * not a valid PHY for this port then the function will notify the user. 684 * not a valid PHY for this port then the function will notify the user.
662 * A PHY can only be part of a port if it's attached SAS ADDRESS is the 685 * A PHY can only be part of a port if it's attached SAS ADDRESS is the
663 * same as all other PHYs in the same port. 686 * same as all other PHYs in the same port.
664 */ 687 */
665 static void sci_port_general_link_up_handler(struct isci_port *iport, 688 static void sci_port_general_link_up_handler(struct isci_port *iport,
666 struct isci_phy *iphy, 689 struct isci_phy *iphy,
667 u8 flags) 690 u8 flags)
668 { 691 {
669 struct sci_sas_address port_sas_address; 692 struct sci_sas_address port_sas_address;
670 struct sci_sas_address phy_sas_address; 693 struct sci_sas_address phy_sas_address;
671 694
672 sci_port_get_attached_sas_address(iport, &port_sas_address); 695 sci_port_get_attached_sas_address(iport, &port_sas_address);
673 sci_phy_get_attached_sas_address(iphy, &phy_sas_address); 696 sci_phy_get_attached_sas_address(iphy, &phy_sas_address);
674 697
675 /* If the SAS address of the new phy matches the SAS address of 698 /* If the SAS address of the new phy matches the SAS address of
676 * other phys in the port OR this is the first phy in the port, 699 * other phys in the port OR this is the first phy in the port,
677 * then activate the phy and allow it to be used for operations 700 * then activate the phy and allow it to be used for operations
678 * in this port. 701 * in this port.
679 */ 702 */
680 if ((phy_sas_address.high == port_sas_address.high && 703 if ((phy_sas_address.high == port_sas_address.high &&
681 phy_sas_address.low == port_sas_address.low) || 704 phy_sas_address.low == port_sas_address.low) ||
682 iport->active_phy_mask == 0) { 705 iport->active_phy_mask == 0) {
683 struct sci_base_state_machine *sm = &iport->sm; 706 struct sci_base_state_machine *sm = &iport->sm;
684 707
685 sci_port_activate_phy(iport, iphy, flags); 708 sci_port_activate_phy(iport, iphy, flags);
686 if (sm->current_state_id == SCI_PORT_RESETTING) 709 if (sm->current_state_id == SCI_PORT_RESETTING)
687 port_state_machine_change(iport, SCI_PORT_READY); 710 port_state_machine_change(iport, SCI_PORT_READY);
688 } else 711 } else
689 sci_port_invalid_link_up(iport, iphy); 712 sci_port_invalid_link_up(iport, iphy);
690 } 713 }
691 714
692 715
693 716
694 /** 717 /**
695 * This method returns false if the port only has a single phy object assigned. 718 * This method returns false if the port only has a single phy object assigned.
696 * If there are no phys or more than one phy then the method will return 719 * If there are no phys or more than one phy then the method will return
697 * true. 720 * true.
698 * @sci_port: The port for which the wide port condition is to be checked. 721 * @sci_port: The port for which the wide port condition is to be checked.
699 * 722 *
700 * bool true Is returned if this is a wide ported port. false Is returned if 723 * bool true Is returned if this is a wide ported port. false Is returned if
701 * this is a narrow port. 724 * this is a narrow port.
702 */ 725 */
703 static bool sci_port_is_wide(struct isci_port *iport) 726 static bool sci_port_is_wide(struct isci_port *iport)
704 { 727 {
705 u32 index; 728 u32 index;
706 u32 phy_count = 0; 729 u32 phy_count = 0;
707 730
708 for (index = 0; index < SCI_MAX_PHYS; index++) { 731 for (index = 0; index < SCI_MAX_PHYS; index++) {
709 if (iport->phy_table[index] != NULL) { 732 if (iport->phy_table[index] != NULL) {
710 phy_count++; 733 phy_count++;
711 } 734 }
712 } 735 }
713 736
714 return phy_count != 1; 737 return phy_count != 1;
715 } 738 }
716 739
717 /** 740 /**
718 * This method is called by the PHY object when the link is detected. if the 741 * This method is called by the PHY object when the link is detected. if the
719 * port wants the PHY to continue on to the link up state then the port 742 * port wants the PHY to continue on to the link up state then the port
720 * layer must return true. If the port object returns false the phy object 743 * layer must return true. If the port object returns false the phy object
721 * must halt its attempt to go link up. 744 * must halt its attempt to go link up.
722 * @sci_port: The port associated with the phy object. 745 * @sci_port: The port associated with the phy object.
723 * @sci_phy: The phy object that is trying to go link up. 746 * @sci_phy: The phy object that is trying to go link up.
724 * 747 *
725 * true if the phy object can continue to the link up condition. true Is 748 * true if the phy object can continue to the link up condition. true Is
726 * returned if this phy can continue to the ready state. false Is returned if 749 * returned if this phy can continue to the ready state. false Is returned if
727 * can not continue on to the ready state. This notification is in place for 750 * can not continue on to the ready state. This notification is in place for
728 * wide ports and direct attached phys. Since there are no wide ported SATA 751 * wide ports and direct attached phys. Since there are no wide ported SATA
729 * devices this could become an invalid port configuration. 752 * devices this could become an invalid port configuration.
730 */ 753 */
731 bool sci_port_link_detected(struct isci_port *iport, struct isci_phy *iphy) 754 bool sci_port_link_detected(struct isci_port *iport, struct isci_phy *iphy)
732 { 755 {
733 if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) && 756 if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
734 (iphy->protocol == SAS_PROTOCOL_SATA)) { 757 (iphy->protocol == SAS_PROTOCOL_SATA)) {
735 if (sci_port_is_wide(iport)) { 758 if (sci_port_is_wide(iport)) {
736 sci_port_invalid_link_up(iport, iphy); 759 sci_port_invalid_link_up(iport, iphy);
737 return false; 760 return false;
738 } else { 761 } else {
739 struct isci_host *ihost = iport->owning_controller; 762 struct isci_host *ihost = iport->owning_controller;
740 struct isci_port *dst_port = &(ihost->ports[iphy->phy_index]); 763 struct isci_port *dst_port = &(ihost->ports[iphy->phy_index]);
741 writel(iphy->phy_index, 764 writel(iphy->phy_index,
742 &dst_port->port_pe_configuration_register[iphy->phy_index]); 765 &dst_port->port_pe_configuration_register[iphy->phy_index]);
743 } 766 }
744 } 767 }
745 768
746 return true; 769 return true;
747 } 770 }
748 771
749 static void port_timeout(unsigned long data) 772 static void port_timeout(unsigned long data)
750 { 773 {
751 struct sci_timer *tmr = (struct sci_timer *)data; 774 struct sci_timer *tmr = (struct sci_timer *)data;
752 struct isci_port *iport = container_of(tmr, typeof(*iport), timer); 775 struct isci_port *iport = container_of(tmr, typeof(*iport), timer);
753 struct isci_host *ihost = iport->owning_controller; 776 struct isci_host *ihost = iport->owning_controller;
754 unsigned long flags; 777 unsigned long flags;
755 u32 current_state; 778 u32 current_state;
756 779
757 spin_lock_irqsave(&ihost->scic_lock, flags); 780 spin_lock_irqsave(&ihost->scic_lock, flags);
758 781
759 if (tmr->cancel) 782 if (tmr->cancel)
760 goto done; 783 goto done;
761 784
762 current_state = iport->sm.current_state_id; 785 current_state = iport->sm.current_state_id;
763 786
764 if (current_state == SCI_PORT_RESETTING) { 787 if (current_state == SCI_PORT_RESETTING) {
765 /* if the port is still in the resetting state then the timeout 788 /* if the port is still in the resetting state then the timeout
766 * fired before the reset completed. 789 * fired before the reset completed.
767 */ 790 */
768 port_state_machine_change(iport, SCI_PORT_FAILED); 791 port_state_machine_change(iport, SCI_PORT_FAILED);
769 } else if (current_state == SCI_PORT_STOPPED) { 792 } else if (current_state == SCI_PORT_STOPPED) {
770 /* if the port is stopped then the start request failed In this 793 /* if the port is stopped then the start request failed In this
771 * case stay in the stopped state. 794 * case stay in the stopped state.
772 */ 795 */
773 dev_err(sciport_to_dev(iport), 796 dev_err(sciport_to_dev(iport),
774 "%s: SCIC Port 0x%p failed to stop before tiemout.\n", 797 "%s: SCIC Port 0x%p failed to stop before tiemout.\n",
775 __func__, 798 __func__,
776 iport); 799 iport);
777 } else if (current_state == SCI_PORT_STOPPING) { 800 } else if (current_state == SCI_PORT_STOPPING) {
778 dev_dbg(sciport_to_dev(iport), 801 dev_dbg(sciport_to_dev(iport),
779 "%s: port%d: stop complete timeout\n", 802 "%s: port%d: stop complete timeout\n",
780 __func__, iport->physical_port_index); 803 __func__, iport->physical_port_index);
781 } else { 804 } else {
782 /* The port is in the ready state and we have a timer 805 /* The port is in the ready state and we have a timer
783 * reporting a timeout this should not happen. 806 * reporting a timeout this should not happen.
784 */ 807 */
785 dev_err(sciport_to_dev(iport), 808 dev_err(sciport_to_dev(iport),
786 "%s: SCIC Port 0x%p is processing a timeout operation " 809 "%s: SCIC Port 0x%p is processing a timeout operation "
787 "in state %d.\n", __func__, iport, current_state); 810 "in state %d.\n", __func__, iport, current_state);
788 } 811 }
789 812
790 done: 813 done:
791 spin_unlock_irqrestore(&ihost->scic_lock, flags); 814 spin_unlock_irqrestore(&ihost->scic_lock, flags);
792 } 815 }
793 816
794 /* --------------------------------------------------------------------------- */ 817 /* --------------------------------------------------------------------------- */
795 818
/**
 * This function updates the hardware's VIIT entry for this port.
 */
static void sci_port_update_viit_entry(struct isci_port *iport)
{
	struct sci_sas_address sas_address;

	sci_port_get_sas_address(iport, &sas_address);

	writel(sas_address.high,
	       &iport->viit_registers->initiator_sas_address_hi);
	writel(sas_address.low,
	       &iport->viit_registers->initiator_sas_address_lo);

	/* This value gets cleared just in case it's not already cleared */
	writel(0, &iport->viit_registers->reserved);

	/* We are required to update the status register last */
	writel(SCU_VIIT_ENTRY_ID_VIIT |
	       SCU_VIIT_IPPT_INITIATOR |
	       ((1 << iport->physical_port_index) << SCU_VIIT_ENTRY_LPVIE_SHIFT) |
	       SCU_VIIT_STATUS_ALL_VALID,
	       &iport->viit_registers->status);
}

enum sas_linkrate sci_port_get_max_allowed_speed(struct isci_port *iport)
{
	u16 index;
	struct isci_phy *iphy;
	enum sas_linkrate max_allowed_speed = SAS_LINK_RATE_6_0_GBPS;

	/*
	 * Loop through all of the phys in this port and find the phy with the
	 * lowest maximum link rate.
	 */
	for (index = 0; index < SCI_MAX_PHYS; index++) {
		iphy = iport->phy_table[index];
		if (iphy && sci_port_active_phy(iport, iphy) &&
		    iphy->max_negotiated_speed < max_allowed_speed)
			max_allowed_speed = iphy->max_negotiated_speed;
	}

	return max_allowed_speed;
}

static void sci_port_suspend_port_task_scheduler(struct isci_port *iport)
{
	u32 pts_control_value;

	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
	pts_control_value |= SCU_PTSxCR_GEN_BIT(SUSPEND);
	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
}

/**
 * sci_port_post_dummy_request() - post dummy/workaround request
 * @sci_port: port to post task
 *
 * Prevent the hardware scheduler from posting new requests to the front
 * of the scheduler queue causing a starvation problem for currently
 * ongoing requests.
 */
static void sci_port_post_dummy_request(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	u16 tag = iport->reserved_tag;
	struct scu_task_context *tc;
	u32 command;

	tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
	tc->abort = 0;

	command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
		  iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
		  ISCI_TAG_TCI(tag);

	sci_controller_post_request(ihost, command);
}

/**
 * This routine will abort the dummy request. This will allow the hardware to
 * power down parts of the silicon to save power.
 *
 * @sci_port: The port on which the task must be aborted.
 */
static void sci_port_abort_dummy_request(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	u16 tag = iport->reserved_tag;
	struct scu_task_context *tc;
	u32 command;

	tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
	tc->abort = 1;

	command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT |
		  iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
		  ISCI_TAG_TCI(tag);

	sci_controller_post_request(ihost, command);
}

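/*
 * Note: the dummy request above is posted when the port enters the
 * READY/OPERATIONAL substate and aborted again on substate exit; see
 * sci_port_ready_substate_operational_enter()/_exit() below.
 */
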
/**
 * @sci_port: This is the struct isci_port object to resume.
 *
 * This method will resume the port task scheduler for this port object.
 */
static void
sci_port_resume_port_task_scheduler(struct isci_port *iport)
{
	u32 pts_control_value;

	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
	pts_control_value &= ~SCU_PTSxCR_GEN_BIT(SUSPEND);
	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
}

static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	sci_port_suspend_port_task_scheduler(iport);

	iport->not_ready_reason = SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS;

	if (iport->active_phy_mask != 0) {
		/* At least one of the phys on the port is ready */
		port_state_machine_change(iport,
					  SCI_PORT_SUB_OPERATIONAL);
	}
}

static void scic_sds_port_ready_substate_waiting_exit(
	struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	sci_port_resume_port_task_scheduler(iport);
}

static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
{
	u32 index;
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	struct isci_host *ihost = iport->owning_controller;

	dev_dbg(&ihost->pdev->dev, "%s: port%d ready\n",
		__func__, iport->physical_port_index);

	for (index = 0; index < SCI_MAX_PHYS; index++) {
		if (iport->phy_table[index]) {
			writel(iport->physical_port_index,
			       &iport->port_pe_configuration_register[
					iport->phy_table[index]->phy_index]);
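			/*
			 * Resume any phy whose enabled and active bits
			 * disagree, i.e. one that is enabled in the port
			 * but still suspended.
			 */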
			if (((iport->active_phy_mask ^ iport->enabled_phy_mask) & (1 << index)) != 0)
				sci_port_resume_phy(iport, iport->phy_table[index]);
		}
	}

	sci_port_update_viit_entry(iport);

	/*
	 * Post the dummy task for the port so the hardware can schedule
	 * io correctly
	 */
	sci_port_post_dummy_request(iport);
}

static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	u8 phys_index = iport->physical_port_index;
	union scu_remote_node_context *rnc;
	u16 rni = iport->reserved_rni;
	u32 command;

	rnc = &ihost->remote_node_context_table[rni];

	rnc->ssp.is_valid = false;

	/* ensure the preceding tc abort request has reached the
	 * controller and give it ample time to act before posting the rnc
	 * invalidate
	 */
	readl(&ihost->smu_registers->interrupt_status);	/* flush */
	udelay(10);

	command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE |
		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;

	sci_controller_post_request(ihost, command);
}

/**
 * @object: This is the object which is cast to a struct isci_port object.
 *
 * This method will perform the actions required by the struct isci_port on
 * exiting the SCI_PORT_SUB_OPERATIONAL state. This function reports the
 * port not ready and suspends the port task scheduler.
 */
static void sci_port_ready_substate_operational_exit(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	struct isci_host *ihost = iport->owning_controller;

	/*
	 * Kill the dummy task for this port if it has not yet posted; the
	 * hardware will treat this as a NOP and just return abort
	 * complete.
	 */
	sci_port_abort_dummy_request(iport);

	dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
		__func__, iport->physical_port_index);

	if (iport->ready_exit)
		sci_port_invalidate_dummy_remote_node(iport);
}

static void sci_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	struct isci_host *ihost = iport->owning_controller;

	if (iport->active_phy_mask == 0) {
		dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
			__func__, iport->physical_port_index);

		port_state_machine_change(iport, SCI_PORT_SUB_WAITING);
	} else
		port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL);
}

enum sci_status sci_port_start(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	enum sci_status status = SCI_SUCCESS;
	enum sci_port_states state;
	u32 phy_mask;

	state = iport->sm.current_state_id;
	if (state != SCI_PORT_STOPPED) {
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}

	if (iport->assigned_device_count > 0) {
		/* TODO This is a start failure operation because
		 * there are still devices assigned to this port.
		 * There must be no devices assigned to a port on a
		 * start operation.
		 */
		return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
	}

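	/*
	 * Reserve the dummy remote node context and task tag used by the
	 * scheduler starvation workaround (see sci_port_post_dummy_request()
	 * above); if either cannot be allocated the port start fails with
	 * SCI_FAILURE_INSUFFICIENT_RESOURCES.
	 */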
	if (iport->reserved_rni == SCU_DUMMY_INDEX) {
		u16 rni = sci_remote_node_table_allocate_remote_node(
				&ihost->available_remote_nodes, 1);

		if (rni != SCU_DUMMY_INDEX)
			sci_port_construct_dummy_rnc(iport, rni);
		else
			status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
		iport->reserved_rni = rni;
	}

	if (iport->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
		u16 tag;

		tag = isci_alloc_tag(ihost);
		if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
			status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
		else
			sci_port_construct_dummy_task(iport, tag);
		iport->reserved_tag = tag;
	}

	if (status == SCI_SUCCESS) {
		phy_mask = sci_port_get_phys(iport);

		/*
		 * There are one or more phys assigned to this port. Make sure
		 * the port's phy mask is in fact legal and supported by the
		 * silicon.
		 */
		if (sci_port_is_phy_mask_valid(iport, phy_mask) == true) {
			port_state_machine_change(iport,
						  SCI_PORT_READY);

			return SCI_SUCCESS;
		}
		status = SCI_FAILURE;
	}

	if (status != SCI_SUCCESS)
		sci_port_destroy_dummy_resources(iport);

	return status;
}

enum sci_status sci_port_stop(struct isci_port *iport)
{
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_STOPPED:
		return SCI_SUCCESS;
	case SCI_PORT_SUB_WAITING:
	case SCI_PORT_SUB_OPERATIONAL:
	case SCI_PORT_SUB_CONFIGURING:
	case SCI_PORT_RESETTING:
		port_state_machine_change(iport,
					  SCI_PORT_STOPPING);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
{
	enum sci_status status = SCI_FAILURE_INVALID_PHY;
	struct isci_phy *iphy = NULL;
	enum sci_port_states state;
	u32 phy_index;

	state = iport->sm.current_state_id;
	if (state != SCI_PORT_SUB_OPERATIONAL) {
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}

	/* Select a phy on which we can send the hard reset request. */
	for (phy_index = 0; phy_index < SCI_MAX_PHYS && !iphy; phy_index++) {
		iphy = iport->phy_table[phy_index];
		if (iphy && !sci_port_active_phy(iport, iphy)) {
			/*
			 * We found a phy but it is not ready; select a
			 * different phy.
			 */
			iphy = NULL;
		}
	}

	/* If no active phy was found, fail the reset request; otherwise
	 * go ahead and start the reset procedure.
	 */
	if (!iphy)
		return status;
	status = sci_phy_reset(iphy);

	if (status != SCI_SUCCESS)
		return status;

	sci_mod_timer(&iport->timer, timeout);
	iport->not_ready_reason = SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED;

	port_state_machine_change(iport, SCI_PORT_RESETTING);
	return SCI_SUCCESS;
}

/**
 * sci_port_add_phy() -
 * @sci_port: This parameter specifies the port in which the phy will be added.
 * @sci_phy: This parameter is the phy which is to be added to the port.
 *
 * This method will add a PHY to the selected port. This method returns an
 * enum sci_status. SCI_SUCCESS indicates the phy has been added to the port;
 * any other status is a failure to add the phy to the port.
 */
enum sci_status sci_port_add_phy(struct isci_port *iport,
				 struct isci_phy *iphy)
{
	enum sci_status status;
	enum sci_port_states state;

	sci_port_bcn_enable(iport);

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_STOPPED: {
		struct sci_sas_address port_sas_address;

		/* Read the port assigned SAS Address if there is one */
		sci_port_get_sas_address(iport, &port_sas_address);

		if (port_sas_address.high != 0 && port_sas_address.low != 0) {
			struct sci_sas_address phy_sas_address;

			/* Make sure that the PHY SAS Address matches the SAS Address
			 * for this port
			 */
			sci_phy_get_sas_address(iphy, &phy_sas_address);

			if (port_sas_address.high != phy_sas_address.high ||
			    port_sas_address.low != phy_sas_address.low)
				return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
		}
		return sci_port_set_phy(iport, iphy);
	}
	case SCI_PORT_SUB_WAITING:
	case SCI_PORT_SUB_OPERATIONAL:
		status = sci_port_set_phy(iport, iphy);

		if (status != SCI_SUCCESS)
			return status;

		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
		iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
		port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);

		return status;
	case SCI_PORT_SUB_CONFIGURING:
		status = sci_port_set_phy(iport, iphy);

		if (status != SCI_SUCCESS)
			return status;
		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY);

		/* Re-enter the configuring state since this may be the last phy in
		 * the port.
		 */
		port_state_machine_change(iport,
					  SCI_PORT_SUB_CONFIGURING);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

/**
 * sci_port_remove_phy() -
 * @sci_port: This parameter specifies the port from which the phy will be
 *    removed.
 * @sci_phy: This parameter is the phy which is to be removed from the port.
 *
 * This method will remove the PHY from the selected PORT. This method returns
 * an enum sci_status. SCI_SUCCESS indicates the phy has been removed from the
 * port; any other status is a failure to remove the phy from the port.
 */
enum sci_status sci_port_remove_phy(struct isci_port *iport,
				    struct isci_phy *iphy)
{
	enum sci_status status;
	enum sci_port_states state;

	state = iport->sm.current_state_id;

	switch (state) {
	case SCI_PORT_STOPPED:
		return sci_port_clear_phy(iport, iphy);
	case SCI_PORT_SUB_OPERATIONAL:
		status = sci_port_clear_phy(iport, iphy);
		if (status != SCI_SUCCESS)
			return status;

		sci_port_deactivate_phy(iport, iphy, true);
		iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
		port_state_machine_change(iport,
					  SCI_PORT_SUB_CONFIGURING);
		return SCI_SUCCESS;
	case SCI_PORT_SUB_CONFIGURING:
		status = sci_port_clear_phy(iport, iphy);

		if (status != SCI_SUCCESS)
			return status;
		sci_port_deactivate_phy(iport, iphy, true);

		/* Re-enter the configuring state since this may be the last phy in
		 * the port
		 */
		port_state_machine_change(iport,
					  SCI_PORT_SUB_CONFIGURING);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

enum sci_status sci_port_link_up(struct isci_port *iport,
				 struct isci_phy *iphy)
{
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_SUB_WAITING:
		/* Since this is the first phy going link up for the port, we
		 * can just enable it and continue.
		 */
		sci_port_activate_phy(iport, iphy, PF_NOTIFY|PF_RESUME);

		port_state_machine_change(iport,
					  SCI_PORT_SUB_OPERATIONAL);
		return SCI_SUCCESS;
	case SCI_PORT_SUB_OPERATIONAL:
		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
		return SCI_SUCCESS;
	case SCI_PORT_RESETTING:
		/* TODO We should make sure that the phy that has gone
		 * link up is the same one on which we sent the reset. It is
		 * possible that the phy on which we sent the reset is not the
		 * one that has gone link up, and we want to make sure that
		 * the phy being reset comes back. Consider the case where a
		 * reset is sent but, before the hardware processes the reset,
		 * it gets a link up on the port because of a hot plug event.
		 * Because of the reset request this phy will go link down
		 * almost immediately.
		 */

		/* In the resetting state we don't notify the user regarding
		 * link up and link down notifications.
		 */
		sci_port_general_link_up_handler(iport, iphy, PF_RESUME);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

enum sci_status sci_port_link_down(struct isci_port *iport,
				   struct isci_phy *iphy)
{
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_SUB_OPERATIONAL:
		sci_port_deactivate_phy(iport, iphy, true);

		/* If there are no active phys left in the port, then
		 * transition the port to the WAITING state until such time
		 * as a phy goes link up
		 */
		if (iport->active_phy_mask == 0)
			port_state_machine_change(iport,
						  SCI_PORT_SUB_WAITING);
		return SCI_SUCCESS;
	case SCI_PORT_RESETTING:
		/* In the resetting state we don't notify the user regarding
		 * link up and link down notifications.
		 */
		sci_port_deactivate_phy(iport, iphy, false);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

enum sci_status sci_port_start_io(struct isci_port *iport,
				  struct isci_remote_device *idev,
				  struct isci_request *ireq)
{
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_SUB_WAITING:
		return SCI_FAILURE_INVALID_STATE;
	case SCI_PORT_SUB_OPERATIONAL:
		iport->started_request_count++;
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}

enum sci_status sci_port_complete_io(struct isci_port *iport,
				     struct isci_remote_device *idev,
				     struct isci_request *ireq)
{
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_STOPPED:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	case SCI_PORT_STOPPING:
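		/*
		 * While stopping, the port drains its started requests and
		 * only transitions to the stopped state once the count
		 * reaches zero.
		 */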
		sci_port_decrement_request_count(iport);

		if (iport->started_request_count == 0)
			port_state_machine_change(iport,
						  SCI_PORT_STOPPED);
		break;
	case SCI_PORT_READY:
	case SCI_PORT_RESETTING:
	case SCI_PORT_FAILED:
	case SCI_PORT_SUB_WAITING:
	case SCI_PORT_SUB_OPERATIONAL:
		sci_port_decrement_request_count(iport);
		break;
	case SCI_PORT_SUB_CONFIGURING:
		sci_port_decrement_request_count(iport);
		if (iport->started_request_count == 0) {
			port_state_machine_change(iport,
						  SCI_PORT_SUB_OPERATIONAL);
		}
		break;
	}
	return SCI_SUCCESS;
}

static void sci_port_enable_port_task_scheduler(struct isci_port *iport)
{
	u32 pts_control_value;

	/* enable the port task scheduler in a suspended state */
	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
	pts_control_value |= SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND);
	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
}

static void sci_port_disable_port_task_scheduler(struct isci_port *iport)
{
	u32 pts_control_value;

	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
	pts_control_value &=
		~(SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND));
	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
}

static void sci_port_post_dummy_remote_node(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	u8 phys_index = iport->physical_port_index;
	union scu_remote_node_context *rnc;
	u16 rni = iport->reserved_rni;
	u32 command;

	rnc = &ihost->remote_node_context_table[rni];
	rnc->ssp.is_valid = true;

	command = SCU_CONTEXT_COMMAND_POST_RNC_32 |
		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;

	sci_controller_post_request(ihost, command);

	/* ensure hardware has seen the post rnc command and give it
	 * ample time to act before sending the suspend
	 */
	readl(&ihost->smu_registers->interrupt_status);	/* flush */
	udelay(10);

	command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX |
		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;

	sci_controller_post_request(ihost, command);
}

static void sci_port_stopped_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	if (iport->sm.previous_state_id == SCI_PORT_STOPPING) {
		/*
		 * If we enter this state because of a request to stop
		 * the port, then we want to disable the hardware's port
		 * task scheduler.
		 */
		sci_port_disable_port_task_scheduler(iport);
	}
}

static void sci_port_stopped_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	/* Enable and suspend the port task scheduler */
	sci_port_enable_port_task_scheduler(iport);
}

static void sci_port_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	struct isci_host *ihost = iport->owning_controller;
	u32 prev_state;

	prev_state = iport->sm.previous_state_id;
	if (prev_state == SCI_PORT_RESETTING)
		isci_port_hard_reset_complete(iport, SCI_SUCCESS);
	else
		dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
			__func__, iport->physical_port_index);

	/* Post and suspend the dummy remote node context for this port. */
	sci_port_post_dummy_remote_node(iport);

	/* Start the ready substate machine */
	port_state_machine_change(iport,
				  SCI_PORT_SUB_WAITING);
}

static void sci_port_resetting_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	sci_del_timer(&iport->timer);
}

static void sci_port_stopping_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	sci_del_timer(&iport->timer);

	sci_port_destroy_dummy_resources(iport);
}

static void sci_port_failed_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);

	isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT);
}

void sci_port_set_hang_detection_timeout(struct isci_port *iport, u32 timeout)
{
	int phy_index;
	u32 phy_mask = iport->active_phy_mask;

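	/*
	 * hang_detect_users is a reference count: a non-zero timeout takes
	 * a reference, a zero timeout drops one. The hardware register is
	 * only written when enabling, or when the last user disables.
	 */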
	if (timeout)
		++iport->hang_detect_users;
	else if (iport->hang_detect_users > 1)
		--iport->hang_detect_users;
	else
		iport->hang_detect_users = 0;

	if (timeout || (iport->hang_detect_users == 0)) {
		for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) {
			if ((phy_mask >> phy_index) & 1) {
				writel(timeout,
				       &iport->phy_table[phy_index]
						->link_layer_registers
						->link_layer_hang_detection_timeout);
			}
		}
	}
}
/* --------------------------------------------------------------------------- */

static const struct sci_base_state sci_port_state_table[] = {
	[SCI_PORT_STOPPED] = {
		.enter_state = sci_port_stopped_state_enter,
		.exit_state  = sci_port_stopped_state_exit
	},
	[SCI_PORT_STOPPING] = {
		.exit_state  = sci_port_stopping_state_exit
	},
	[SCI_PORT_READY] = {
		.enter_state = sci_port_ready_state_enter,
	},
	[SCI_PORT_SUB_WAITING] = {
		.enter_state = sci_port_ready_substate_waiting_enter,
		.exit_state  = scic_sds_port_ready_substate_waiting_exit,
	},
	[SCI_PORT_SUB_OPERATIONAL] = {
		.enter_state = sci_port_ready_substate_operational_enter,
		.exit_state  = sci_port_ready_substate_operational_exit
	},
	[SCI_PORT_SUB_CONFIGURING] = {
		.enter_state = sci_port_ready_substate_configuring_enter
	},
	[SCI_PORT_RESETTING] = {
		.exit_state  = sci_port_resetting_state_exit
	},
	[SCI_PORT_FAILED] = {
		.enter_state = sci_port_failed_state_enter,
	}
};

void sci_port_construct(struct isci_port *iport, u8 index,
			struct isci_host *ihost)
{
	sci_init_sm(&iport->sm, sci_port_state_table, SCI_PORT_STOPPED);

	iport->logical_port_index = SCIC_SDS_DUMMY_PORT;
	iport->physical_port_index = index;
	iport->active_phy_mask = 0;
	iport->enabled_phy_mask = 0;
	iport->last_active_phy = 0;
	iport->ready_exit = false;

	iport->owning_controller = ihost;

	iport->started_request_count = 0;
	iport->assigned_device_count = 0;
	iport->hang_detect_users = 0;

	iport->reserved_rni = SCU_DUMMY_INDEX;
	iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;

	sci_init_timer(&iport->timer, port_timeout);

	iport->port_task_scheduler_registers = NULL;

	for (index = 0; index < SCI_MAX_PHYS; index++)
		iport->phy_table[index] = NULL;
}

void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
{
	struct isci_host *ihost = iport->owning_controller;

	/* notify the user. */
	isci_port_bc_change_received(ihost, iport, iphy);
}

static void wait_port_reset(struct isci_host *ihost, struct isci_port *iport)
{
	wait_event(ihost->eventq, !test_bit(IPORT_RESET_PENDING, &iport->state));
}

int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
				 struct isci_phy *iphy)
{
	unsigned long flags;
	enum sci_status status;
	int ret = TMF_RESP_FUNC_COMPLETE;

	dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n",
		__func__, iport);

	spin_lock_irqsave(&ihost->scic_lock, flags);
	set_bit(IPORT_RESET_PENDING, &iport->state);

	#define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT
	status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT);

	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (status == SCI_SUCCESS) {
		wait_port_reset(ihost, iport);

		dev_dbg(&ihost->pdev->dev,
			"%s: iport = %p; hard reset completion\n",
			__func__, iport);

		if (iport->hard_reset_status != SCI_SUCCESS) {
			ret = TMF_RESP_FUNC_FAILED;

			dev_err(&ihost->pdev->dev,
				"%s: iport = %p; hard reset failed (0x%x)\n",
				__func__, iport, iport->hard_reset_status);
		}
	} else {
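		/*
		 * The reset request failed outright, so no completion will
		 * clear the pending flag for us; clear it here and wake any
		 * waiters before reporting the failure.
		 */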
		clear_bit(IPORT_RESET_PENDING, &iport->state);
		wake_up(&ihost->eventq);
		ret = TMF_RESP_FUNC_FAILED;

		dev_err(&ihost->pdev->dev,
			"%s: iport = %p; sci_port_hard_reset call"
			" failed 0x%x\n",
			__func__, iport, status);
	}
	return ret;
}

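/*
 * Report ATA link readiness back to libsas (presumably via the
 * lldd_ata_check_ready hook): the link is considered ready only when the
 * device is still known to the driver, no port hard reset is pending, and
 * the port has at least one active phy.
 */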
int isci_ata_check_ready(struct domain_device *dev)
{
	struct isci_port *iport = dev->port->lldd_port;
	struct isci_host *ihost = dev_to_ihost(dev);
	struct isci_remote_device *idev;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	idev = isci_lookup_device(dev);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (!idev)
		goto out;

	if (test_bit(IPORT_RESET_PENDING, &iport->state))
		goto out;

	rc = !!iport->active_phy_mask;
out:
	isci_put_device(idev);

	return rc;
}

1696 void isci_port_deformed(struct asd_sas_phy *phy) 1719 void isci_port_deformed(struct asd_sas_phy *phy)
1697 { 1720 {
1698 struct isci_host *ihost = phy->ha->lldd_ha; 1721 struct isci_host *ihost = phy->ha->lldd_ha;
1699 struct isci_port *iport = phy->port->lldd_port; 1722 struct isci_port *iport = phy->port->lldd_port;
1700 unsigned long flags; 1723 unsigned long flags;
1701 int i; 1724 int i;
1702 1725
1703 /* we got a port notification on a port that was subsequently 1726 /* we got a port notification on a port that was subsequently
1704 * torn down and libsas is just now catching up 1727 * torn down and libsas is just now catching up
1705 */ 1728 */
1706 if (!iport) 1729 if (!iport)
1707 return; 1730 return;
1708 1731
1709 spin_lock_irqsave(&ihost->scic_lock, flags); 1732 spin_lock_irqsave(&ihost->scic_lock, flags);
1710 for (i = 0; i < SCI_MAX_PHYS; i++) { 1733 for (i = 0; i < SCI_MAX_PHYS; i++) {
1711 if (iport->active_phy_mask & 1 << i) 1734 if (iport->active_phy_mask & 1 << i)
1712 break; 1735 break;
1713 } 1736 }
1714 spin_unlock_irqrestore(&ihost->scic_lock, flags); 1737 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1715 1738
1716 if (i >= SCI_MAX_PHYS) 1739 if (i >= SCI_MAX_PHYS)
1717 dev_dbg(&ihost->pdev->dev, "%s: port: %ld\n", 1740 dev_dbg(&ihost->pdev->dev, "%s: port: %ld\n",
1718 __func__, (long) (iport - &ihost->ports[0])); 1741 __func__, (long) (iport - &ihost->ports[0]));
1719 } 1742 }
1720 1743
1721 void isci_port_formed(struct asd_sas_phy *phy) 1744 void isci_port_formed(struct asd_sas_phy *phy)
1722 { 1745 {
1723 struct isci_host *ihost = phy->ha->lldd_ha; 1746 struct isci_host *ihost = phy->ha->lldd_ha;
1724 struct isci_phy *iphy = to_iphy(phy); 1747 struct isci_phy *iphy = to_iphy(phy);
1725 struct asd_sas_port *port = phy->port; 1748 struct asd_sas_port *port = phy->port;
1726 struct isci_port *iport = NULL; 1749 struct isci_port *iport = NULL;
1727 unsigned long flags; 1750 unsigned long flags;
1728 int i; 1751 int i;
1729 1752
1730 /* initial ports are formed as the driver is still initializing, 1753 /* initial ports are formed as the driver is still initializing,
1731 * wait for that process to complete 1754 * wait for that process to complete
1732 */ 1755 */
1733 wait_for_start(ihost); 1756 wait_for_start(ihost);
1734 1757
1735 spin_lock_irqsave(&ihost->scic_lock, flags); 1758 spin_lock_irqsave(&ihost->scic_lock, flags);
1736 for (i = 0; i < SCI_MAX_PORTS; i++) { 1759 for (i = 0; i < SCI_MAX_PORTS; i++) {
1737 iport = &ihost->ports[i]; 1760 iport = &ihost->ports[i];
1738 if (iport->active_phy_mask & 1 << iphy->phy_index) 1761 if (iport->active_phy_mask & 1 << iphy->phy_index)
1739 break; 1762 break;
1740 } 1763 }
1741 spin_unlock_irqrestore(&ihost->scic_lock, flags); 1764 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1742 1765
1743 if (i >= SCI_MAX_PORTS) 1766 if (i >= SCI_MAX_PORTS)
1744 iport = NULL; 1767 iport = NULL;
1745 1768
1746 port->lldd_port = iport; 1769 port->lldd_port = iport;
1747 } 1770 }
1748 1771
drivers/scsi/isci/task.c
1 /* 1 /*
2 * This file is provided under a dual BSD/GPLv2 license. When using or 2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license. 3 * redistributing this file, you may do so under either license.
4 * 4 *
5 * GPL LICENSE SUMMARY 5 * GPL LICENSE SUMMARY
6 * 6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as 10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 * 12 *
13 * This program is distributed in the hope that it will be useful, but 13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of 14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details. 16 * General Public License for more details.
17 * 17 *
18 * You should have received a copy of the GNU General Public License 18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software 19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL. 22 * in the file called LICENSE.GPL.
23 * 23 *
24 * BSD LICENSE 24 * BSD LICENSE
25 * 25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved. 27 * All rights reserved.
28 * 28 *
29 * Redistribution and use in source and binary forms, with or without 29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions 30 * modification, are permitted provided that the following conditions
31 * are met: 31 * are met:
32 * 32 *
33 * * Redistributions of source code must retain the above copyright 33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer. 34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright 35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in 36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the 37 * the documentation and/or other materials provided with the
38 * distribution. 38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its 39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived 40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission. 41 * from this software without specific prior written permission.
42 * 42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */ 54 */
55 55
56 #include <linux/completion.h> 56 #include <linux/completion.h>
57 #include <linux/irqflags.h> 57 #include <linux/irqflags.h>
58 #include "sas.h" 58 #include "sas.h"
59 #include <scsi/libsas.h> 59 #include <scsi/libsas.h>
60 #include "remote_device.h" 60 #include "remote_device.h"
61 #include "remote_node_context.h" 61 #include "remote_node_context.h"
62 #include "isci.h" 62 #include "isci.h"
63 #include "request.h" 63 #include "request.h"
64 #include "task.h" 64 #include "task.h"
65 #include "host.h" 65 #include "host.h"
66 66
67 /** 67 /**
68 * isci_task_refuse() - complete the request to the upper layer driver in 68 * isci_task_refuse() - complete the request to the upper layer driver in
69 * the case where an I/O needs to be completed back in the submit path. 69 * the case where an I/O needs to be completed back in the submit path.
70 * @ihost: host on which the request was queued 70 * @ihost: host on which the request was queued
71 * @task: request to complete 71 * @task: request to complete
72 * @response: response code for the completed task. 72 * @response: response code for the completed task.
73 * @status: status code for the completed task. 73 * @status: status code for the completed task.
74 * 74 *
75 */ 75 */
76 static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task, 76 static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
77 enum service_response response, 77 enum service_response response,
78 enum exec_status status) 78 enum exec_status status)
79 79
80 { 80 {
81 unsigned long flags; 81 unsigned long flags;
82 82
83 /* Normal notification (task_done) */ 83 /* Normal notification (task_done) */
84 dev_dbg(&ihost->pdev->dev, "%s: task = %p, response=%d, status=%d\n", 84 dev_dbg(&ihost->pdev->dev, "%s: task = %p, response=%d, status=%d\n",
85 __func__, task, response, status); 85 __func__, task, response, status);
86 86
87 spin_lock_irqsave(&task->task_state_lock, flags); 87 spin_lock_irqsave(&task->task_state_lock, flags);
88 88
89 task->task_status.resp = response; 89 task->task_status.resp = response;
90 task->task_status.stat = status; 90 task->task_status.stat = status;
91 91
92 /* Normal notification (task_done) */ 92 /* Normal notification (task_done) */
93 task->task_state_flags |= SAS_TASK_STATE_DONE; 93 task->task_state_flags |= SAS_TASK_STATE_DONE;
94 task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | 94 task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
95 SAS_TASK_STATE_PENDING); 95 SAS_TASK_STATE_PENDING);
96 task->lldd_task = NULL; 96 task->lldd_task = NULL;
97 spin_unlock_irqrestore(&task->task_state_lock, flags); 97 spin_unlock_irqrestore(&task->task_state_lock, flags);
98 98
99 task->task_done(task); 99 task->task_done(task);
100 } 100 }
101 101
102 #define for_each_sas_task(num, task) \ 102 #define for_each_sas_task(num, task) \
103 for (; num > 0; num--,\ 103 for (; num > 0; num--,\
104 task = list_entry(task->list.next, struct sas_task, list)) 104 task = list_entry(task->list.next, struct sas_task, list))
105 105
106 106
107 static inline int isci_device_io_ready(struct isci_remote_device *idev, 107 static inline int isci_device_io_ready(struct isci_remote_device *idev,
108 struct sas_task *task) 108 struct sas_task *task)
109 { 109 {
110 return idev ? test_bit(IDEV_IO_READY, &idev->flags) || 110 return idev ? test_bit(IDEV_IO_READY, &idev->flags) ||
111 (test_bit(IDEV_IO_NCQERROR, &idev->flags) && 111 (test_bit(IDEV_IO_NCQERROR, &idev->flags) &&
112 isci_task_is_ncq_recovery(task)) 112 isci_task_is_ncq_recovery(task))
113 : 0; 113 : 0;
114 } 114 }
115 /** 115 /**
116 * isci_task_execute_task() - This function is one of the SAS Domain Template 116 * isci_task_execute_task() - This function is one of the SAS Domain Template
117 * functions. This function is called by libsas to send a task down to 117 * functions. This function is called by libsas to send a task down to
118 * hardware. 118 * hardware.
119 * @task: This parameter specifies the SAS task to send. 119 * @task: This parameter specifies the SAS task to send.
120 * @num: This parameter specifies the number of tasks to queue. 120 * @num: This parameter specifies the number of tasks to queue.
121 * @gfp_flags: This parameter specifies the context of this call. 121 * @gfp_flags: This parameter specifies the context of this call.
122 * 122 *
123 * status, zero indicates success. 123 * status, zero indicates success.
124 */ 124 */
125 int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) 125 int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
126 { 126 {
127 struct isci_host *ihost = dev_to_ihost(task->dev); 127 struct isci_host *ihost = dev_to_ihost(task->dev);
128 struct isci_remote_device *idev; 128 struct isci_remote_device *idev;
129 unsigned long flags; 129 unsigned long flags;
130 bool io_ready; 130 bool io_ready;
131 u16 tag; 131 u16 tag;
132 132
133 dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num); 133 dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num);
134 134
135 for_each_sas_task(num, task) { 135 for_each_sas_task(num, task) {
136 enum sci_status status = SCI_FAILURE; 136 enum sci_status status = SCI_FAILURE;
137 137
138 spin_lock_irqsave(&ihost->scic_lock, flags); 138 spin_lock_irqsave(&ihost->scic_lock, flags);
139 idev = isci_lookup_device(task->dev); 139 idev = isci_lookup_device(task->dev);
140 io_ready = isci_device_io_ready(idev, task); 140 io_ready = isci_device_io_ready(idev, task);
141 tag = isci_alloc_tag(ihost); 141 tag = isci_alloc_tag(ihost);
142 spin_unlock_irqrestore(&ihost->scic_lock, flags); 142 spin_unlock_irqrestore(&ihost->scic_lock, flags);
143 143
144 dev_dbg(&ihost->pdev->dev, 144 dev_dbg(&ihost->pdev->dev,
145 "task: %p, num: %d dev: %p idev: %p:%#lx cmd = %p\n", 145 "task: %p, num: %d dev: %p idev: %p:%#lx cmd = %p\n",
146 task, num, task->dev, idev, idev ? idev->flags : 0, 146 task, num, task->dev, idev, idev ? idev->flags : 0,
147 task->uldd_task); 147 task->uldd_task);
148 148
149 if (!idev) { 149 if (!idev) {
150 isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED, 150 isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED,
151 SAS_DEVICE_UNKNOWN); 151 SAS_DEVICE_UNKNOWN);
152 } else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) { 152 } else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) {
153 /* Indicate QUEUE_FULL so that the scsi midlayer 153 /* Indicate QUEUE_FULL so that the scsi midlayer
154 * retries. 154 * retries.
155 */ 155 */
156 isci_task_refuse(ihost, task, SAS_TASK_COMPLETE, 156 isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
157 SAS_QUEUE_FULL); 157 SAS_QUEUE_FULL);
158 } else { 158 } else {
159 /* There is a device and it's ready for I/O. */ 159 /* There is a device and it's ready for I/O. */
160 spin_lock_irqsave(&task->task_state_lock, flags); 160 spin_lock_irqsave(&task->task_state_lock, flags);
161 161
162 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { 162 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
163 /* The I/O was aborted. */ 163 /* The I/O was aborted. */
164 spin_unlock_irqrestore(&task->task_state_lock, 164 spin_unlock_irqrestore(&task->task_state_lock,
165 flags); 165 flags);
166 166
167 isci_task_refuse(ihost, task, 167 isci_task_refuse(ihost, task,
168 SAS_TASK_UNDELIVERED, 168 SAS_TASK_UNDELIVERED,
169 SAM_STAT_TASK_ABORTED); 169 SAM_STAT_TASK_ABORTED);
170 } else { 170 } else {
171 task->task_state_flags |= SAS_TASK_AT_INITIATOR; 171 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
172 spin_unlock_irqrestore(&task->task_state_lock, flags); 172 spin_unlock_irqrestore(&task->task_state_lock, flags);
173 173
174 /* build and send the request. */ 174 /* build and send the request. */
175 status = isci_request_execute(ihost, idev, task, tag); 175 status = isci_request_execute(ihost, idev, task, tag);
176 176
177 if (status != SCI_SUCCESS) { 177 if (status != SCI_SUCCESS) {
178 178
179 spin_lock_irqsave(&task->task_state_lock, flags); 179 spin_lock_irqsave(&task->task_state_lock, flags);
180 /* Did not really start this command. */ 180 /* Did not really start this command. */
181 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; 181 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
182 spin_unlock_irqrestore(&task->task_state_lock, flags); 182 spin_unlock_irqrestore(&task->task_state_lock, flags);
183 183
184 if (test_bit(IDEV_GONE, &idev->flags)) { 184 if (test_bit(IDEV_GONE, &idev->flags)) {
185 185
186 /* Indicate that the device 186 /* Indicate that the device
187 * is gone. 187 * is gone.
188 */ 188 */
189 isci_task_refuse(ihost, task, 189 isci_task_refuse(ihost, task,
190 SAS_TASK_UNDELIVERED, 190 SAS_TASK_UNDELIVERED,
191 SAS_DEVICE_UNKNOWN); 191 SAS_DEVICE_UNKNOWN);
192 } else { 192 } else {
193 /* Indicate QUEUE_FULL so that 193 /* Indicate QUEUE_FULL so that
194 * the scsi midlayer retries. 194 * the scsi midlayer retries.
195 * If the request failed for 195 * If the request failed for
196 * remote device reasons, it 196 * remote device reasons, it
197 * gets returned as 197 * gets returned as
198 * SAS_TASK_UNDELIVERED next 198 * SAS_TASK_UNDELIVERED next
199 * time through. 199 * time through.
200 */ 200 */
201 isci_task_refuse(ihost, task, 201 isci_task_refuse(ihost, task,
202 SAS_TASK_COMPLETE, 202 SAS_TASK_COMPLETE,
203 SAS_QUEUE_FULL); 203 SAS_QUEUE_FULL);
204 } 204 }
205 } 205 }
206 } 206 }
207 } 207 }
208 if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) { 208 if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) {
209 spin_lock_irqsave(&ihost->scic_lock, flags); 209 spin_lock_irqsave(&ihost->scic_lock, flags);
210 /* command never hit the device, so just free 210 /* command never hit the device, so just free
211 * the tci and skip the sequence increment 211 * the tci and skip the sequence increment
212 */ 212 */
213 isci_tci_free(ihost, ISCI_TAG_TCI(tag)); 213 isci_tci_free(ihost, ISCI_TAG_TCI(tag));
214 spin_unlock_irqrestore(&ihost->scic_lock, flags); 214 spin_unlock_irqrestore(&ihost->scic_lock, flags);
215 } 215 }
216 isci_put_device(idev); 216 isci_put_device(idev);
217 } 217 }
218 return 0; 218 return 0;
219 } 219 }
220 220
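isci_task_execute_task() resolves each queued task to one of three outcomes: no remote device, refuse as SAS_TASK_UNDELIVERED/SAS_DEVICE_UNKNOWN; device not ready or no free tag, refuse as SAS_QUEUE_FULL so the SCSI midlayer retries; otherwise start the I/O, and on a start failure refuse with SAS_DEVICE_UNKNOWN when IDEV_GONE is set or SAS_QUEUE_FULL otherwise. A condensed sketch of that decision tree follows; lookup_device(), io_ready(), alloc_tag(), start_io() and idev_gone() are hypothetical stand-ins, not the driver's calls.

#include <stdbool.h>
#include <stdio.h>

enum verdict { STARTED, DEVICE_UNKNOWN, QUEUE_FULL };

/* Hypothetical stand-ins for isci_lookup_device(), isci_device_io_ready(),
 * isci_alloc_tag(), isci_request_execute() and the IDEV_GONE test. */
static bool lookup_device(int dev)	{ return dev >= 0; }
static bool io_ready(int dev)		{ return dev > 0; }
static int alloc_tag(void)		{ return 1; }	/* < 0 means no free tag */
static bool start_io(int dev, int tag)	{ return dev != 1; }
static bool idev_gone(int dev)		{ return dev == 1; }

static enum verdict submit(int dev)
{
	int tag;

	if (!lookup_device(dev))
		return DEVICE_UNKNOWN;	/* SAS_TASK_UNDELIVERED */

	tag = alloc_tag();
	if (!io_ready(dev) || tag < 0)
		return QUEUE_FULL;	/* SAS_QUEUE_FULL: midlayer retries */

	if (!start_io(dev, tag)) {
		/* the tag is freed here; the command never hit the device */
		return idev_gone(dev) ? DEVICE_UNKNOWN : QUEUE_FULL;
	}
	return STARTED;
}

int main(void)
{
	printf("%d %d %d %d\n", submit(-1), submit(0), submit(1), submit(2));
	return 0;
}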
221 static struct isci_request *isci_task_request_build(struct isci_host *ihost, 221 static struct isci_request *isci_task_request_build(struct isci_host *ihost,
222 struct isci_remote_device *idev, 222 struct isci_remote_device *idev,
223 u16 tag, struct isci_tmf *isci_tmf) 223 u16 tag, struct isci_tmf *isci_tmf)
224 { 224 {
225 enum sci_status status = SCI_FAILURE; 225 enum sci_status status = SCI_FAILURE;
226 struct isci_request *ireq = NULL; 226 struct isci_request *ireq = NULL;
227 struct domain_device *dev; 227 struct domain_device *dev;
228 228
229 dev_dbg(&ihost->pdev->dev, 229 dev_dbg(&ihost->pdev->dev,
230 "%s: isci_tmf = %p\n", __func__, isci_tmf); 230 "%s: isci_tmf = %p\n", __func__, isci_tmf);
231 231
232 dev = idev->domain_dev; 232 dev = idev->domain_dev;
233 233
234 /* do common allocation and init of request object. */ 234 /* do common allocation and init of request object. */
235 ireq = isci_tmf_request_from_tag(ihost, isci_tmf, tag); 235 ireq = isci_tmf_request_from_tag(ihost, isci_tmf, tag);
236 if (!ireq) 236 if (!ireq)
237 return NULL; 237 return NULL;
238 238
239 /* let the core do its construct. */ 239 /* let the core do its construct. */
240 status = sci_task_request_construct(ihost, idev, tag, 240 status = sci_task_request_construct(ihost, idev, tag,
241 ireq); 241 ireq);
242 242
243 if (status != SCI_SUCCESS) { 243 if (status != SCI_SUCCESS) {
244 dev_warn(&ihost->pdev->dev, 244 dev_warn(&ihost->pdev->dev,
245 "%s: sci_task_request_construct failed - " 245 "%s: sci_task_request_construct failed - "
246 "status = 0x%x\n", 246 "status = 0x%x\n",
247 __func__, 247 __func__,
248 status); 248 status);
249 return NULL; 249 return NULL;
250 } 250 }
251 251
252 /* XXX convert to get this from task->tproto like other drivers */ 252 /* XXX convert to get this from task->tproto like other drivers */
253 if (dev->dev_type == SAS_END_DEV) { 253 if (dev->dev_type == SAS_END_DEV) {
254 isci_tmf->proto = SAS_PROTOCOL_SSP; 254 isci_tmf->proto = SAS_PROTOCOL_SSP;
255 status = sci_task_request_construct_ssp(ireq); 255 status = sci_task_request_construct_ssp(ireq);
256 if (status != SCI_SUCCESS) 256 if (status != SCI_SUCCESS)
257 return NULL; 257 return NULL;
258 } 258 }
259 259
260 return ireq; 260 return ireq;
261 } 261 }
262 262
263 static int isci_task_execute_tmf(struct isci_host *ihost, 263 static int isci_task_execute_tmf(struct isci_host *ihost,
264 struct isci_remote_device *idev, 264 struct isci_remote_device *idev,
265 struct isci_tmf *tmf, unsigned long timeout_ms) 265 struct isci_tmf *tmf, unsigned long timeout_ms)
266 { 266 {
267 DECLARE_COMPLETION_ONSTACK(completion); 267 DECLARE_COMPLETION_ONSTACK(completion);
268 enum sci_task_status status = SCI_TASK_FAILURE; 268 enum sci_task_status status = SCI_TASK_FAILURE;
269 struct isci_request *ireq; 269 struct isci_request *ireq;
270 int ret = TMF_RESP_FUNC_FAILED; 270 int ret = TMF_RESP_FUNC_FAILED;
271 unsigned long flags; 271 unsigned long flags;
272 unsigned long timeleft; 272 unsigned long timeleft;
273 u16 tag; 273 u16 tag;
274 274
275 spin_lock_irqsave(&ihost->scic_lock, flags); 275 spin_lock_irqsave(&ihost->scic_lock, flags);
276 tag = isci_alloc_tag(ihost); 276 tag = isci_alloc_tag(ihost);
277 spin_unlock_irqrestore(&ihost->scic_lock, flags); 277 spin_unlock_irqrestore(&ihost->scic_lock, flags);
278 278
279 if (tag == SCI_CONTROLLER_INVALID_IO_TAG) 279 if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
280 return ret; 280 return ret;
281 281
282 /* sanity check, return TMF_RESP_FUNC_FAILED 282 /* sanity check, return TMF_RESP_FUNC_FAILED
283 * if the device is not there and ready. 283 * if the device is not there and ready.
284 */ 284 */
285 if (!idev || 285 if (!idev ||
286 (!test_bit(IDEV_IO_READY, &idev->flags) && 286 (!test_bit(IDEV_IO_READY, &idev->flags) &&
287 !test_bit(IDEV_IO_NCQERROR, &idev->flags))) { 287 !test_bit(IDEV_IO_NCQERROR, &idev->flags))) {
288 dev_dbg(&ihost->pdev->dev, 288 dev_dbg(&ihost->pdev->dev,
289 "%s: idev = %p not ready (%#lx)\n", 289 "%s: idev = %p not ready (%#lx)\n",
290 __func__, 290 __func__,
291 idev, idev ? idev->flags : 0); 291 idev, idev ? idev->flags : 0);
292 goto err_tci; 292 goto err_tci;
293 } else 293 } else
294 dev_dbg(&ihost->pdev->dev, 294 dev_dbg(&ihost->pdev->dev,
295 "%s: idev = %p\n", 295 "%s: idev = %p\n",
296 __func__, idev); 296 __func__, idev);
297 297
298 /* Assign the pointer to the TMF's completion kernel wait structure. */ 298 /* Assign the pointer to the TMF's completion kernel wait structure. */
299 tmf->complete = &completion; 299 tmf->complete = &completion;
300 tmf->status = SCI_FAILURE_TIMEOUT; 300 tmf->status = SCI_FAILURE_TIMEOUT;
301 301
302 ireq = isci_task_request_build(ihost, idev, tag, tmf); 302 ireq = isci_task_request_build(ihost, idev, tag, tmf);
303 if (!ireq) 303 if (!ireq)
304 goto err_tci; 304 goto err_tci;
305 305
306 spin_lock_irqsave(&ihost->scic_lock, flags); 306 spin_lock_irqsave(&ihost->scic_lock, flags);
307 307
308 /* start the TMF io. */ 308 /* start the TMF io. */
309 status = sci_controller_start_task(ihost, idev, ireq); 309 status = sci_controller_start_task(ihost, idev, ireq);
310 310
311 if (status != SCI_TASK_SUCCESS) { 311 if (status != SCI_TASK_SUCCESS) {
312 dev_dbg(&ihost->pdev->dev, 312 dev_dbg(&ihost->pdev->dev,
313 "%s: start_io failed - status = 0x%x, request = %p\n", 313 "%s: start_io failed - status = 0x%x, request = %p\n",
314 __func__, 314 __func__,
315 status, 315 status,
316 ireq); 316 ireq);
317 spin_unlock_irqrestore(&ihost->scic_lock, flags); 317 spin_unlock_irqrestore(&ihost->scic_lock, flags);
318 goto err_tci; 318 goto err_tci;
319 } 319 }
320 spin_unlock_irqrestore(&ihost->scic_lock, flags); 320 spin_unlock_irqrestore(&ihost->scic_lock, flags);
321 321
322 /* The RNC must be unsuspended before the TMF can get a response. */ 322 /* The RNC must be unsuspended before the TMF can get a response. */
323 isci_remote_device_resume_from_abort(ihost, idev); 323 isci_remote_device_resume_from_abort(ihost, idev);
324 324
325 /* Wait for the TMF to complete, or a timeout. */ 325 /* Wait for the TMF to complete, or a timeout. */
326 timeleft = wait_for_completion_timeout(&completion, 326 timeleft = wait_for_completion_timeout(&completion,
327 msecs_to_jiffies(timeout_ms)); 327 msecs_to_jiffies(timeout_ms));
328 328
329 if (timeleft == 0) { 329 if (timeleft == 0) {
330 /* The TMF did not complete - this could be because 330 /* The TMF did not complete - this could be because
331 * of an unplug. Terminate the TMF request now. 331 * of an unplug. Terminate the TMF request now.
332 */ 332 */
333 isci_remote_device_suspend_terminate(ihost, idev, ireq); 333 isci_remote_device_suspend_terminate(ihost, idev, ireq);
334 } 334 }
335 335
336 isci_print_tmf(ihost, tmf); 336 isci_print_tmf(ihost, tmf);
337 337
338 if (tmf->status == SCI_SUCCESS) 338 if (tmf->status == SCI_SUCCESS)
339 ret = TMF_RESP_FUNC_COMPLETE; 339 ret = TMF_RESP_FUNC_COMPLETE;
340 else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) { 340 else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) {
341 dev_dbg(&ihost->pdev->dev, 341 dev_dbg(&ihost->pdev->dev,
342 "%s: tmf.status == " 342 "%s: tmf.status == "
343 "SCI_FAILURE_IO_RESPONSE_VALID\n", 343 "SCI_FAILURE_IO_RESPONSE_VALID\n",
344 __func__); 344 __func__);
345 ret = TMF_RESP_FUNC_COMPLETE; 345 ret = TMF_RESP_FUNC_COMPLETE;
346 } 346 }
347 /* Else - leave the default "failed" status alone. */ 347 /* Else - leave the default "failed" status alone. */
348 348
349 dev_dbg(&ihost->pdev->dev, 349 dev_dbg(&ihost->pdev->dev,
350 "%s: completed request = %p\n", 350 "%s: completed request = %p\n",
351 __func__, 351 __func__,
352 ireq); 352 ireq);
353 353
354 return ret; 354 return ret;
355 355
356 err_tci: 356 err_tci:
357 spin_lock_irqsave(&ihost->scic_lock, flags); 357 spin_lock_irqsave(&ihost->scic_lock, flags);
358 isci_tci_free(ihost, ISCI_TAG_TCI(tag)); 358 isci_tci_free(ihost, ISCI_TAG_TCI(tag));
359 spin_unlock_irqrestore(&ihost->scic_lock, flags); 359 spin_unlock_irqrestore(&ihost->scic_lock, flags);
360 360
361 return ret; 361 return ret;
362 } 362 }
363 363
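The TMF above is completion-with-timeout: the request is built and started, the RNC resumed, and then the caller sleeps on a stack-allocated completion; if the wait times out (e.g. an unplug), the request is explicitly suspended and terminated before the function returns, so nothing can later complete into a dead stack frame. A userspace model of just the wait step, using pthread_cond_timedwait(); done/done_cv stand in for the struct completion and are not driver names.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done_cv = PTHREAD_COND_INITIALIZER;
static bool done;		/* models the on-stack struct completion */

/* Models wait_for_completion_timeout(): true if completed, false on timeout. */
static bool wait_done(long timeout_ms)
{
	struct timespec ts;
	bool completed;
	int rc = 0;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_ms / 1000;
	ts.tv_nsec += (timeout_ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&lock);
	while (!done && rc == 0)
		rc = pthread_cond_timedwait(&done_cv, &lock, &ts);
	completed = done;
	pthread_mutex_unlock(&lock);
	return completed;
}

int main(void)
{
	if (!wait_done(50))	/* nothing signals done_cv: the unplug case */
		puts("timeout: suspend and terminate the TMF request");
	return 0;
}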
364 static void isci_task_build_tmf(struct isci_tmf *tmf, 364 static void isci_task_build_tmf(struct isci_tmf *tmf,
365 enum isci_tmf_function_codes code) 365 enum isci_tmf_function_codes code)
366 { 366 {
367 memset(tmf, 0, sizeof(*tmf)); 367 memset(tmf, 0, sizeof(*tmf));
368 tmf->tmf_code = code; 368 tmf->tmf_code = code;
369 } 369 }
370 370
371 static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf, 371 static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf,
372 enum isci_tmf_function_codes code, 372 enum isci_tmf_function_codes code,
373 struct isci_request *old_request) 373 struct isci_request *old_request)
374 { 374 {
375 isci_task_build_tmf(tmf, code); 375 isci_task_build_tmf(tmf, code);
376 tmf->io_tag = old_request->io_tag; 376 tmf->io_tag = old_request->io_tag;
377 } 377 }
378 378
379 /** 379 /**
380 * isci_task_send_lu_reset_sas() - This function is called by one of the SAS Domain 380 * isci_task_send_lu_reset_sas() - This function is called by one of the SAS Domain
381 * Template functions. 381 * Template functions.
382 * @lun: This parameter specifies the lun to be reset. 382 * @lun: This parameter specifies the lun to be reset.
383 * 383 *
384 * status, zero indicates success. 384 * status, zero indicates success.
385 */ 385 */
386 static int isci_task_send_lu_reset_sas( 386 static int isci_task_send_lu_reset_sas(
387 struct isci_host *isci_host, 387 struct isci_host *isci_host,
388 struct isci_remote_device *isci_device, 388 struct isci_remote_device *isci_device,
389 u8 *lun) 389 u8 *lun)
390 { 390 {
391 struct isci_tmf tmf; 391 struct isci_tmf tmf;
392 int ret = TMF_RESP_FUNC_FAILED; 392 int ret = TMF_RESP_FUNC_FAILED;
393 393
394 dev_dbg(&isci_host->pdev->dev, 394 dev_dbg(&isci_host->pdev->dev,
395 "%s: isci_host = %p, isci_device = %p\n", 395 "%s: isci_host = %p, isci_device = %p\n",
396 __func__, isci_host, isci_device); 396 __func__, isci_host, isci_device);
397 /* Send the LUN reset to the target. By the time the call returns, 397 /* Send the LUN reset to the target. By the time the call returns,
398 * the TMF has fully executed in the target (in which case the return 398 * the TMF has fully executed in the target (in which case the return
399 * value is "TMF_RESP_FUNC_COMPLETE"), or the request timed out or 399 * value is "TMF_RESP_FUNC_COMPLETE"), or the request timed out or
400 * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED"). 400 * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED").
401 */ 401 */
402 isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset); 402 isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset);
403 403
404 #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */ 404 #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
405 ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS); 405 ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
406 406
407 if (ret == TMF_RESP_FUNC_COMPLETE) 407 if (ret == TMF_RESP_FUNC_COMPLETE)
408 dev_dbg(&isci_host->pdev->dev, 408 dev_dbg(&isci_host->pdev->dev,
409 "%s: %p: TMF_LU_RESET passed\n", 409 "%s: %p: TMF_LU_RESET passed\n",
410 __func__, isci_device); 410 __func__, isci_device);
411 else 411 else
412 dev_dbg(&isci_host->pdev->dev, 412 dev_dbg(&isci_host->pdev->dev,
413 "%s: %p: TMF_LU_RESET failed (%x)\n", 413 "%s: %p: TMF_LU_RESET failed (%x)\n",
414 __func__, isci_device, ret); 414 __func__, isci_device, ret);
415 415
416 return ret; 416 return ret;
417 } 417 }
418 418
419 int isci_task_lu_reset(struct domain_device *dev, u8 *lun) 419 int isci_task_lu_reset(struct domain_device *dev, u8 *lun)
420 { 420 {
421 struct isci_host *ihost = dev_to_ihost(dev); 421 struct isci_host *ihost = dev_to_ihost(dev);
422 struct isci_remote_device *idev; 422 struct isci_remote_device *idev;
423 unsigned long flags; 423 unsigned long flags;
424 int ret; 424 int ret = TMF_RESP_FUNC_COMPLETE;
425 425
426 spin_lock_irqsave(&ihost->scic_lock, flags); 426 spin_lock_irqsave(&ihost->scic_lock, flags);
427 idev = isci_get_device(dev->lldd_dev); 427 idev = isci_get_device(dev->lldd_dev);
428 spin_unlock_irqrestore(&ihost->scic_lock, flags); 428 spin_unlock_irqrestore(&ihost->scic_lock, flags);
429 429
430 dev_dbg(&ihost->pdev->dev, 430 dev_dbg(&ihost->pdev->dev,
431 "%s: domain_device=%p, isci_host=%p; isci_device=%p\n", 431 "%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
432 __func__, dev, ihost, idev); 432 __func__, dev, ihost, idev);
433 433
434 if (!idev) { 434 if (!idev) {
435 /* If the device is gone, escalate to I_T_Nexus_Reset. */ 435 /* If the device is gone, escalate to I_T_Nexus_Reset. */
436 dev_dbg(&ihost->pdev->dev, "%s: No dev\n", __func__); 436 dev_dbg(&ihost->pdev->dev, "%s: No dev\n", __func__);
437 437
438 ret = TMF_RESP_FUNC_FAILED; 438 ret = TMF_RESP_FUNC_FAILED;
439 goto out; 439 goto out;
440 } 440 }
441 441
442 /* Suspend the RNC, kill all TCs */ 442 /* Suspend the RNC, kill all TCs */
443 if (isci_remote_device_suspend_terminate(ihost, idev, NULL) 443 if (isci_remote_device_suspend_terminate(ihost, idev, NULL)
444 != SCI_SUCCESS) { 444 != SCI_SUCCESS) {
445 /* The suspend/terminate only fails if isci_get_device fails */ 445 /* The suspend/terminate only fails if isci_get_device fails */
446 ret = TMF_RESP_FUNC_FAILED; 446 ret = TMF_RESP_FUNC_FAILED;
447 goto out; 447 goto out;
448 } 448 }
449 /* All pending I/Os have been terminated and cleaned up. */ 449 /* All pending I/Os have been terminated and cleaned up. */
450 if (dev_is_sata(dev)) { 450 if (!test_bit(IDEV_GONE, &idev->flags)) {
451 sas_ata_schedule_reset(dev); 451 if (dev_is_sata(dev))
452 ret = TMF_RESP_FUNC_COMPLETE; 452 sas_ata_schedule_reset(dev);
453 } else { 453 else
454 /* Send the task management part of the reset. */ 454 /* Send the task management part of the reset. */
455 ret = isci_task_send_lu_reset_sas(ihost, idev, lun); 455 ret = isci_task_send_lu_reset_sas(ihost, idev, lun);
456 } 456 }
457 out: 457 out:
458 isci_put_device(idev); 458 isci_put_device(idev);
459 return ret; 459 return ret;
460 } 460 }
461 461
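The new IDEV_GONE test in isci_task_lu_reset() is the heart of this change: when the flag is set the device no longer has a usable port, so the LU-reset path skips both the SATA reset and the SSP LUN-reset TMF and falls through with ret already TMF_RESP_FUNC_COMPLETE. A minimal sketch of the guard using C11 atomics for the flag word; idev_flags, IDEV_GONE_BIT and the helpers are illustrative names, not the driver's.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define IDEV_GONE_BIT 0x1UL
static atomic_ulong idev_flags;		/* models idev->flags */

static bool idev_gone(void)
{
	return atomic_load(&idev_flags) & IDEV_GONE_BIT;	/* test_bit() */
}

static void lu_reset(bool is_sata)
{
	/* outstanding I/O has already been suspended and terminated */
	if (idev_gone()) {
		puts("domain failed: skip the reset");
		return;
	}
	if (is_sata)
		puts("schedule ATA reset");	/* sas_ata_schedule_reset() */
	else
		puts("send SSP LUN-reset TMF");	/* isci_task_send_lu_reset_sas() */
}

int main(void)
{
	lu_reset(true);					/* reset is scheduled */
	atomic_fetch_or(&idev_flags, IDEV_GONE_BIT);	/* link-fail path */
	lu_reset(true);					/* reset is skipped */
	return 0;
}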
462 462
463 /* int (*lldd_clear_nexus_port)(struct asd_sas_port *); */ 463 /* int (*lldd_clear_nexus_port)(struct asd_sas_port *); */
464 int isci_task_clear_nexus_port(struct asd_sas_port *port) 464 int isci_task_clear_nexus_port(struct asd_sas_port *port)
465 { 465 {
466 return TMF_RESP_FUNC_FAILED; 466 return TMF_RESP_FUNC_FAILED;
467 } 467 }
468 468
469 469
470 470
471 int isci_task_clear_nexus_ha(struct sas_ha_struct *ha) 471 int isci_task_clear_nexus_ha(struct sas_ha_struct *ha)
472 { 472 {
473 return TMF_RESP_FUNC_FAILED; 473 return TMF_RESP_FUNC_FAILED;
474 } 474 }
475 475
476 /* Task Management Functions. Must be called from process context. */ 476 /* Task Management Functions. Must be called from process context. */
477 477
478 /** 478 /**
479 * isci_task_abort_task() - This function is one of the SAS Domain Template 479 * isci_task_abort_task() - This function is one of the SAS Domain Template
480 * functions. This function is called by libsas to abort a specified task. 480 * functions. This function is called by libsas to abort a specified task.
481 * @task: This parameter specifies the SAS task to abort. 481 * @task: This parameter specifies the SAS task to abort.
482 * 482 *
483 * status, zero indicates success. 483 * status, zero indicates success.
484 */ 484 */
485 int isci_task_abort_task(struct sas_task *task) 485 int isci_task_abort_task(struct sas_task *task)
486 { 486 {
487 struct isci_host *ihost = dev_to_ihost(task->dev); 487 struct isci_host *ihost = dev_to_ihost(task->dev);
488 DECLARE_COMPLETION_ONSTACK(aborted_io_completion); 488 DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
489 struct isci_request *old_request = NULL; 489 struct isci_request *old_request = NULL;
490 struct isci_remote_device *idev = NULL; 490 struct isci_remote_device *idev = NULL;
491 struct isci_tmf tmf; 491 struct isci_tmf tmf;
492 int ret = TMF_RESP_FUNC_FAILED; 492 int ret = TMF_RESP_FUNC_FAILED;
493 unsigned long flags; 493 unsigned long flags;
494 494
495 /* Get the isci_request reference from the task. Note that 495 /* Get the isci_request reference from the task. Note that
496 * this check does not depend on the pending request list 496 * this check does not depend on the pending request list
497 * in the device, because tasks driving resets may land here 497 * in the device, because tasks driving resets may land here
498 * after completion in the core. 498 * after completion in the core.
499 */ 499 */
500 spin_lock_irqsave(&ihost->scic_lock, flags); 500 spin_lock_irqsave(&ihost->scic_lock, flags);
501 spin_lock(&task->task_state_lock); 501 spin_lock(&task->task_state_lock);
502 502
503 old_request = task->lldd_task; 503 old_request = task->lldd_task;
504 504
505 /* If task is already done, the request isn't valid */ 505 /* If task is already done, the request isn't valid */
506 if (!(task->task_state_flags & SAS_TASK_STATE_DONE) && 506 if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
507 (task->task_state_flags & SAS_TASK_AT_INITIATOR) && 507 (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
508 old_request) 508 old_request)
509 idev = isci_get_device(task->dev->lldd_dev); 509 idev = isci_get_device(task->dev->lldd_dev);
510 510
511 spin_unlock(&task->task_state_lock); 511 spin_unlock(&task->task_state_lock);
512 spin_unlock_irqrestore(&ihost->scic_lock, flags); 512 spin_unlock_irqrestore(&ihost->scic_lock, flags);
513 513
514 dev_warn(&ihost->pdev->dev, 514 dev_warn(&ihost->pdev->dev,
515 "%s: dev = %p, task = %p, old_request == %p\n", 515 "%s: dev = %p (%s%s), task = %p, old_request == %p\n",
516 __func__, idev, task, old_request); 516 __func__, idev,
517 (dev_is_sata(task->dev) ? "STP/SATA"
518 : ((dev_is_expander(task->dev))
519 ? "SMP"
520 : "SSP")),
521 ((idev) ? ((test_bit(IDEV_GONE, &idev->flags))
522 ? " IDEV_GONE"
523 : "")
524 : " <NULL>"),
525 task, old_request);
517 526
518 /* Device reset conditions signalled in task_state_flags are the 527 /* Device reset conditions signalled in task_state_flags are the
519 * responsibility of libsas to observe at the start of the error 528 * responsibility of libsas to observe at the start of the error
520 * handler thread. 529 * handler thread.
521 */ 530 */
522 if (!idev || !old_request) { 531 if (!idev || !old_request) {
523 /* The request has already completed and there 532 /* The request has already completed and there
524 * is nothing to do here other than to set the task 533 * is nothing to do here other than to set the task
525 * done bit, and indicate that the task abort function 534 * done bit, and indicate that the task abort function
526 * was successful. 535 * was successful.
527 */ 536 */
528 spin_lock_irqsave(&task->task_state_lock, flags); 537 spin_lock_irqsave(&task->task_state_lock, flags);
529 task->task_state_flags |= SAS_TASK_STATE_DONE; 538 task->task_state_flags |= SAS_TASK_STATE_DONE;
530 task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | 539 task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
531 SAS_TASK_STATE_PENDING); 540 SAS_TASK_STATE_PENDING);
532 spin_unlock_irqrestore(&task->task_state_lock, flags); 541 spin_unlock_irqrestore(&task->task_state_lock, flags);
533 542
534 ret = TMF_RESP_FUNC_COMPLETE; 543 ret = TMF_RESP_FUNC_COMPLETE;
535 544
536 dev_warn(&ihost->pdev->dev, 545 dev_warn(&ihost->pdev->dev,
537 "%s: abort task not needed for %p\n", 546 "%s: abort task not needed for %p\n",
538 __func__, task); 547 __func__, task);
539 goto out; 548 goto out;
540 } 549 }
541 /* Suspend the RNC, kill the TC */ 550 /* Suspend the RNC, kill the TC */
542 if (isci_remote_device_suspend_terminate(ihost, idev, old_request) 551 if (isci_remote_device_suspend_terminate(ihost, idev, old_request)
543 != SCI_SUCCESS) { 552 != SCI_SUCCESS) {
544 dev_warn(&ihost->pdev->dev, 553 dev_warn(&ihost->pdev->dev,
545 "%s: isci_remote_device_reset_terminate(dev=%p, " 554 "%s: isci_remote_device_reset_terminate(dev=%p, "
546 "req=%p, task=%p) failed\n", 555 "req=%p, task=%p) failed\n",
547 __func__, idev, old_request, task); 556 __func__, idev, old_request, task);
548 ret = TMF_RESP_FUNC_FAILED; 557 ret = TMF_RESP_FUNC_FAILED;
549 goto out; 558 goto out;
550 } 559 }
551 spin_lock_irqsave(&ihost->scic_lock, flags); 560 spin_lock_irqsave(&ihost->scic_lock, flags);
552 561
553 if (task->task_proto == SAS_PROTOCOL_SMP || 562 if (task->task_proto == SAS_PROTOCOL_SMP ||
554 sas_protocol_ata(task->task_proto) || 563 sas_protocol_ata(task->task_proto) ||
555 test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) { 564 test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags) ||
565 test_bit(IDEV_GONE, &idev->flags)) {
556 566
557 spin_unlock_irqrestore(&ihost->scic_lock, flags); 567 spin_unlock_irqrestore(&ihost->scic_lock, flags);
558 568
559 /* No task to send, so explicitly resume the device here */ 569 /* No task to send, so explicitly resume the device here */
560 isci_remote_device_resume_from_abort(ihost, idev); 570 isci_remote_device_resume_from_abort(ihost, idev);
561 571
562 dev_warn(&ihost->pdev->dev, 572 dev_warn(&ihost->pdev->dev,
563 "%s: %s request" 573 "%s: %s request"
564 " or complete_in_target (%d), thus no TMF\n", 574 " or complete_in_target (%d), "
575 "or IDEV_GONE (%d), thus no TMF\n",
565 __func__, 576 __func__,
566 ((task->task_proto == SAS_PROTOCOL_SMP) 577 ((task->task_proto == SAS_PROTOCOL_SMP)
567 ? "SMP" 578 ? "SMP"
568 : (sas_protocol_ata(task->task_proto) 579 : (sas_protocol_ata(task->task_proto)
569 ? "SATA/STP" 580 ? "SATA/STP"
570 : "<other>") 581 : "<other>")
571 ), 582 ),
572 test_bit(IREQ_COMPLETE_IN_TARGET, 583 test_bit(IREQ_COMPLETE_IN_TARGET,
573 &old_request->flags)); 584 &old_request->flags),
585 test_bit(IDEV_GONE, &idev->flags));
574 586
575 spin_lock_irqsave(&task->task_state_lock, flags); 587 spin_lock_irqsave(&task->task_state_lock, flags);
576 task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | 588 task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
577 SAS_TASK_STATE_PENDING); 589 SAS_TASK_STATE_PENDING);
578 task->task_state_flags |= SAS_TASK_STATE_DONE; 590 task->task_state_flags |= SAS_TASK_STATE_DONE;
579 spin_unlock_irqrestore(&task->task_state_lock, flags); 591 spin_unlock_irqrestore(&task->task_state_lock, flags);
580 592
581 ret = TMF_RESP_FUNC_COMPLETE; 593 ret = TMF_RESP_FUNC_COMPLETE;
582 } else { 594 } else {
583 /* Fill in the tmf structure */ 595 /* Fill in the tmf structure */
584 isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort, 596 isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
585 old_request); 597 old_request);
586 598
587 spin_unlock_irqrestore(&ihost->scic_lock, flags); 599 spin_unlock_irqrestore(&ihost->scic_lock, flags);
588 600
589 /* Send the task management request. */ 601 /* Send the task management request. */
590 #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* 1/2 second timeout */ 602 #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* 1/2 second timeout */
591 ret = isci_task_execute_tmf(ihost, idev, &tmf, 603 ret = isci_task_execute_tmf(ihost, idev, &tmf,
592 ISCI_ABORT_TASK_TIMEOUT_MS); 604 ISCI_ABORT_TASK_TIMEOUT_MS);
593 } 605 }
594 out: 606 out:
595 dev_warn(&ihost->pdev->dev, 607 dev_warn(&ihost->pdev->dev,
596 "%s: Done; dev = %p, task = %p , old_request == %p\n", 608 "%s: Done; dev = %p, task = %p , old_request == %p\n",
597 __func__, idev, task, old_request); 609 __func__, idev, task, old_request);
598 isci_put_device(idev); 610 isci_put_device(idev);
599 return ret; 611 return ret;
600 } 612 }
601 613
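Once the terminate succeeds, the abort path has four reasons to skip sending an ABORT TASK TMF and finish the sas_task locally instead: the task is SMP, it is STP/SATA, the request already completed in the target (IREQ_COMPLETE_IN_TARGET), or the device is marked IDEV_GONE, the case this patch adds. A tiny predicate capturing that test; the parameter names are illustrative.

#include <stdbool.h>
#include <stdio.h>

/* Returns true only when an SSP ABORT TASK TMF should actually be sent;
 * the four skip conditions mirror the test in isci_task_abort_task(). */
static bool abort_needs_tmf(bool proto_smp, bool proto_ata,
			    bool complete_in_target, bool idev_gone)
{
	return !(proto_smp || proto_ata || complete_in_target || idev_gone);
}

int main(void)
{
	printf("ssp, live device: %d\n", abort_needs_tmf(false, false, false, false));
	printf("device gone:      %d\n", abort_needs_tmf(false, false, false, true));
	printf("stp/sata:         %d\n", abort_needs_tmf(false, true, false, false));
	return 0;
}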
602 /** 614 /**
603 * isci_task_abort_task_set() - This function is one of the SAS Domain Template 615 * isci_task_abort_task_set() - This function is one of the SAS Domain Template
604 * functions. This is one of the Task Management functions called by libsas, 616 * functions. This is one of the Task Management functions called by libsas,
605 * to abort all tasks for the given lun. 617 * to abort all tasks for the given lun.
606 * @d_device: This parameter specifies the domain device associated with this 618 * @d_device: This parameter specifies the domain device associated with this
607 * request. 619 * request.
608 * @lun: This parameter specifies the lun associated with this request. 620 * @lun: This parameter specifies the lun associated with this request.
609 * 621 *
610 * status, zero indicates success. 622 * status, zero indicates success.
611 */ 623 */
612 int isci_task_abort_task_set( 624 int isci_task_abort_task_set(
613 struct domain_device *d_device, 625 struct domain_device *d_device,
614 u8 *lun) 626 u8 *lun)
615 { 627 {
616 return TMF_RESP_FUNC_FAILED; 628 return TMF_RESP_FUNC_FAILED;
617 } 629 }
618 630
619 631
620 /** 632 /**
621 * isci_task_clear_aca() - This function is one of the SAS Domain Template 633 * isci_task_clear_aca() - This function is one of the SAS Domain Template
622 * functions. This is one of the Task Management functions called by libsas. 634 * functions. This is one of the Task Management functions called by libsas.
623 * @d_device: This parameter specifies the domain device associated with this 635 * @d_device: This parameter specifies the domain device associated with this
624 * request. 636 * request.
625 * @lun: This parameter specifies the lun associated with this request. 637 * @lun: This parameter specifies the lun associated with this request.
626 * 638 *
627 * status, zero indicates success. 639 * status, zero indicates success.
628 */ 640 */
629 int isci_task_clear_aca( 641 int isci_task_clear_aca(
630 struct domain_device *d_device, 642 struct domain_device *d_device,
631 u8 *lun) 643 u8 *lun)
632 { 644 {
633 return TMF_RESP_FUNC_FAILED; 645 return TMF_RESP_FUNC_FAILED;
634 } 646 }
635 647
636 648
637 649
638 /** 650 /**
639 * isci_task_clear_task_set() - This function is one of the SAS Domain Template 651 * isci_task_clear_task_set() - This function is one of the SAS Domain Template
640 * functions. This is one of the Task Management functions called by libsas. 652 * functions. This is one of the Task Management functions called by libsas.
641 * @d_device: This parameter specifies the domain device associated with this 653 * @d_device: This parameter specifies the domain device associated with this
642 * request. 654 * request.
643 * @lun: This parameter specifies the lun associated with this request. 655 * @lun: This parameter specifies the lun associated with this request.
644 * 656 *
645 * status, zero indicates success. 657 * status, zero indicates success.
646 */ 658 */
647 int isci_task_clear_task_set( 659 int isci_task_clear_task_set(
648 struct domain_device *d_device, 660 struct domain_device *d_device,
649 u8 *lun) 661 u8 *lun)
650 { 662 {
651 return TMF_RESP_FUNC_FAILED; 663 return TMF_RESP_FUNC_FAILED;
652 } 664 }
653 665
654 666
655 /** 667 /**
656 * isci_task_query_task() - This function is implemented to cause libsas to 668 * isci_task_query_task() - This function is implemented to cause libsas to
657 * correctly escalate the failed abort to a LUN or target reset (this is 669 * correctly escalate the failed abort to a LUN or target reset (this is
658 * because the sas_scsi_find_task() libsas function does not correctly interpret 670 * because the sas_scsi_find_task() libsas function does not correctly interpret
659 * all return codes from the abort task call). When TMF_RESP_FUNC_SUCC is 671 * all return codes from the abort task call). When TMF_RESP_FUNC_SUCC is
660 * returned, libsas turns this into a LUN reset; when FUNC_FAILED is 672 * returned, libsas turns this into a LUN reset; when FUNC_FAILED is
661 * returned, libsas will turn this into a target reset 673 * returned, libsas will turn this into a target reset
662 * @task: This parameter specifies the sas task being queried. 674 * @task: This parameter specifies the sas task being queried.
663 * @lun: This parameter specifies the lun associated with this request. 675 * @lun: This parameter specifies the lun associated with this request.
664 * 676 *
665 * status, zero indicates success. 677 * status, zero indicates success.
666 */ 678 */
667 int isci_task_query_task( 679 int isci_task_query_task(
668 struct sas_task *task) 680 struct sas_task *task)
669 { 681 {
670 /* See if there is a pending device reset for this device. */ 682 /* See if there is a pending device reset for this device. */
671 if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) 683 if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
672 return TMF_RESP_FUNC_FAILED; 684 return TMF_RESP_FUNC_FAILED;
673 else 685 else
674 return TMF_RESP_FUNC_SUCC; 686 return TMF_RESP_FUNC_SUCC;
675 } 687 }
676 688
677 /* 689 /*
678 * isci_task_request_complete() - This function is called by the sci core when 690 * isci_task_request_complete() - This function is called by the sci core when
679 * a task request completes. 691 * a task request completes.
680 * @ihost: This parameter specifies the ISCI host object 692 * @ihost: This parameter specifies the ISCI host object
681 * @ireq: This parameter is the completed isci_request object. 693 * @ireq: This parameter is the completed isci_request object.
682 * @completion_status: This parameter specifies the completion status from the 694 * @completion_status: This parameter specifies the completion status from the
683 * sci core. 695 * sci core.
684 * 696 *
685 * none. 697 * none.
686 */ 698 */
687 void 699 void
688 isci_task_request_complete(struct isci_host *ihost, 700 isci_task_request_complete(struct isci_host *ihost,
689 struct isci_request *ireq, 701 struct isci_request *ireq,
690 enum sci_task_status completion_status) 702 enum sci_task_status completion_status)
691 { 703 {
692 struct isci_tmf *tmf = isci_request_access_tmf(ireq); 704 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
693 struct completion *tmf_complete = NULL; 705 struct completion *tmf_complete = NULL;
694 706
695 dev_dbg(&ihost->pdev->dev, 707 dev_dbg(&ihost->pdev->dev,
696 "%s: request = %p, status=%d\n", 708 "%s: request = %p, status=%d\n",
697 __func__, ireq, completion_status); 709 __func__, ireq, completion_status);
698 710
699 set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags); 711 set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);
700 712
701 if (tmf) { 713 if (tmf) {
702 tmf->status = completion_status; 714 tmf->status = completion_status;
703 715
704 if (tmf->proto == SAS_PROTOCOL_SSP) { 716 if (tmf->proto == SAS_PROTOCOL_SSP) {
705 memcpy(&tmf->resp.resp_iu, 717 memcpy(&tmf->resp.resp_iu,
706 &ireq->ssp.rsp, 718 &ireq->ssp.rsp,
707 SSP_RESP_IU_MAX_SIZE); 719 SSP_RESP_IU_MAX_SIZE);
708 } else if (tmf->proto == SAS_PROTOCOL_SATA) { 720 } else if (tmf->proto == SAS_PROTOCOL_SATA) {
709 memcpy(&tmf->resp.d2h_fis, 721 memcpy(&tmf->resp.d2h_fis,
710 &ireq->stp.rsp, 722 &ireq->stp.rsp,
711 sizeof(struct dev_to_host_fis)); 723 sizeof(struct dev_to_host_fis));
712 } 724 }
713 /* PRINT_TMF( ((struct isci_tmf *)request->task)); */ 725 /* PRINT_TMF( ((struct isci_tmf *)request->task)); */
714 tmf_complete = tmf->complete; 726 tmf_complete = tmf->complete;
715 } 727 }
716 sci_controller_complete_io(ihost, ireq->target_device, ireq); 728 sci_controller_complete_io(ihost, ireq->target_device, ireq);
717 /* set the 'terminated' flag to make sure it cannot be terminated 729 /* set the 'terminated' flag to make sure it cannot be terminated
718 * or completed again. 730 * or completed again.
719 */ 731 */
720 set_bit(IREQ_TERMINATED, &ireq->flags); 732 set_bit(IREQ_TERMINATED, &ireq->flags);
721 733
722 if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags)) 734 if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
723 wake_up_all(&ihost->eventq); 735 wake_up_all(&ihost->eventq);
724 736
725 if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags)) 737 if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags))
726 isci_free_tag(ihost, ireq->io_tag); 738 isci_free_tag(ihost, ireq->io_tag);
727 739
728 /* The task management part completes last. */ 740 /* The task management part completes last. */
729 if (tmf_complete) 741 if (tmf_complete)
730 complete(tmf_complete); 742 complete(tmf_complete);
731 } 743 }
732 744
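Ordering matters in isci_task_request_complete(): the response payload is copied out of the request and the request bookkeeping (IREQ_TERMINATED, tag free, abort-path wakeup) is finished before tmf->complete is signalled, because the completion and the isci_tmf live on the waiter's stack; once complete() runs, the waiter may return and free them. A sketch of the same copy-then-signal-last discipline; the struct and names are illustrative.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct tmf_wait {			/* lives on the waiter's stack */
	pthread_mutex_t lock;
	pthread_cond_t cv;
	bool done;
	char resp[32];			/* models tmf->resp */
};

static void tmf_complete(struct tmf_wait *w, const char *resp)
{
	pthread_mutex_lock(&w->lock);
	/* copy the response before signalling: once the waiter wakes and
	 * returns, the on-stack structure ceases to exist */
	snprintf(w->resp, sizeof(w->resp), "%s", resp);
	w->done = true;
	pthread_cond_signal(&w->cv);
	pthread_mutex_unlock(&w->lock);
}

static void *completer(void *arg)
{
	tmf_complete(arg, "TMF ok");
	return NULL;
}

int main(void)
{
	struct tmf_wait w = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cv = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	pthread_create(&t, NULL, completer, &w);
	pthread_mutex_lock(&w.lock);
	while (!w.done)
		pthread_cond_wait(&w.cv, &w.lock);
	pthread_mutex_unlock(&w.lock);
	pthread_join(t, NULL);
	printf("resp=%s\n", w.resp);
	return 0;
}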
733 static int isci_reset_device(struct isci_host *ihost, 745 static int isci_reset_device(struct isci_host *ihost,
734 struct domain_device *dev, 746 struct domain_device *dev,
735 struct isci_remote_device *idev) 747 struct isci_remote_device *idev)
736 { 748 {
737 int rc = TMF_RESP_FUNC_COMPLETE, reset_stat; 749 int rc = TMF_RESP_FUNC_COMPLETE, reset_stat = -1;
738 struct sas_phy *phy = sas_get_local_phy(dev); 750 struct sas_phy *phy = sas_get_local_phy(dev);
739 struct isci_port *iport = dev->port->lldd_port; 751 struct isci_port *iport = dev->port->lldd_port;
740 752
741 dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev); 753 dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
742 754
743 /* Suspend the RNC, terminate all outstanding TCs. */ 755 /* Suspend the RNC, terminate all outstanding TCs. */
744 if (isci_remote_device_suspend_terminate(ihost, idev, NULL) 756 if (isci_remote_device_suspend_terminate(ihost, idev, NULL)
745 != SCI_SUCCESS) { 757 != SCI_SUCCESS) {
746 rc = TMF_RESP_FUNC_FAILED; 758 rc = TMF_RESP_FUNC_FAILED;
747 goto out; 759 goto out;
748 } 760 }
749 /* Note that since the termination for outstanding requests succeeded, 761 /* Note that since the termination for outstanding requests succeeded,
750 * this function will return success. This is because the resets will 762 * this function will return success. This is because the resets will
751 * only fail if the device has been removed (i.e. hotplug), and the 763 * only fail if the device has been removed (i.e. hotplug), and the
752 * primary duty of this function is to clean up tasks, so that is the 764 * primary duty of this function is to clean up tasks, so that is the
753 * relevant status. 765 * relevant status.
754 */ 766 */
767 if (!test_bit(IDEV_GONE, &idev->flags)) {
768 if (scsi_is_sas_phy_local(phy)) {
769 struct isci_phy *iphy = &ihost->phys[phy->number];
755 770
756 if (scsi_is_sas_phy_local(phy)) { 771 reset_stat = isci_port_perform_hard_reset(ihost, iport,
757 struct isci_phy *iphy = &ihost->phys[phy->number]; 772 iphy);
758 773 } else
759 reset_stat = isci_port_perform_hard_reset(ihost, iport, iphy); 774 reset_stat = sas_phy_reset(phy, !dev_is_sata(dev));
760 } else 775 }
761 reset_stat = sas_phy_reset(phy, !dev_is_sata(dev));
762
763 /* Explicitly resume the RNC here, since there was no task sent. */ 776 /* Explicitly resume the RNC here, since there was no task sent. */
764 isci_remote_device_resume_from_abort(ihost, idev); 777 isci_remote_device_resume_from_abort(ihost, idev);
765 778
766 dev_dbg(&ihost->pdev->dev, "%s: idev %p complete, reset_stat=%d.\n", 779 dev_dbg(&ihost->pdev->dev, "%s: idev %p complete, reset_stat=%d.\n",
767 __func__, idev, reset_stat); 780 __func__, idev, reset_stat);
768 out: 781 out:
769 sas_put_local_phy(phy); 782 sas_put_local_phy(phy);
770 return rc; 783 return rc;
771 } 784 }
772 785
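isci_reset_device() now performs the phy-level reset only when IDEV_GONE is clear, and with reset_stat pre-initialized to -1 a skipped reset is still visible in the final debug message, while the return value continues to reflect only whether outstanding I/O was terminated. A compact model of the branch; local_hard_reset() and expander_phy_reset() are illustrative stand-ins for isci_port_perform_hard_reset() and sas_phy_reset().

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for isci_port_perform_hard_reset() and sas_phy_reset(). */
static int local_hard_reset(void)		{ return 0; }
static int expander_phy_reset(bool hard)	{ return 0; }

static int reset_device(bool dev_gone, bool phy_is_local, bool is_sata)
{
	int reset_stat = -1;	/* stays -1 when the domain is gone */

	if (!dev_gone) {
		if (phy_is_local)
			reset_stat = local_hard_reset();
		else
			/* hard reset for SSP, link reset for SATA */
			reset_stat = expander_phy_reset(!is_sata);
	}
	printf("reset_stat=%d\n", reset_stat);
	return 0;		/* success: I/O was already terminated */
}

int main(void)
{
	reset_device(false, true, false);	/* reset runs */
	reset_device(true, false, true);	/* IDEV_GONE: skipped */
	return 0;
}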
773 int isci_task_I_T_nexus_reset(struct domain_device *dev) 786 int isci_task_I_T_nexus_reset(struct domain_device *dev)
774 { 787 {
775 struct isci_host *ihost = dev_to_ihost(dev); 788 struct isci_host *ihost = dev_to_ihost(dev);
776 struct isci_remote_device *idev; 789 struct isci_remote_device *idev;
777 unsigned long flags; 790 unsigned long flags;
778 int ret; 791 int ret;
779 792
780 spin_lock_irqsave(&ihost->scic_lock, flags); 793 spin_lock_irqsave(&ihost->scic_lock, flags);
781 idev = isci_get_device(dev->lldd_dev); 794 idev = isci_get_device(dev->lldd_dev);
782 spin_unlock_irqrestore(&ihost->scic_lock, flags); 795 spin_unlock_irqrestore(&ihost->scic_lock, flags);
783 796
784 if (!idev) { 797 if (!idev) {
785 /* XXX: need to clean up any ireqs targeting this 798 /* XXX: need to clean up any ireqs targeting this
786 * domain_device 799 * domain_device
787 */ 800 */
788 ret = TMF_RESP_FUNC_COMPLETE; 801 ret = TMF_RESP_FUNC_COMPLETE;
789 goto out; 802 goto out;
790 } 803 }
791 804
792 ret = isci_reset_device(ihost, dev, idev); 805 ret = isci_reset_device(ihost, dev, idev);
793 out: 806 out:
794 isci_put_device(idev); 807 isci_put_device(idev);
795 return ret; 808 return ret;