Commit 670ffb50413f386f92e59511c60b679f46e5cace

Authored by Nicholas Bellinger
Committed by Greg Kroah-Hartman
1 parent e75fa31d0f

target: Allow READ_CAPACITY opcode in ALUA Standby access state

commit e7810c2d2c37fa8e58dda74b00790dab60fe6fba upstream.

This patch allows READ_CAPACITY and SAI_READ_CAPACITY_16 opcode
processing to occur while the associated ALUA target port group is
in the Standby access state.

This is required to avoid host-side LUN probe failures during the
initial scan if an ALUA target port group has already implicitly
changed into the Standby access state.

This addresses a bug reported by Chris and Philip, seen with
dm-multipath and ESX hosts configured for ALUA multipath.

(Drop v3.15 specific set_ascq usage - nab)

Reported-by: Chris Boot <crb@tiger-computing.co.uk>
Reported-by: Philip Gaw <pgaw@darktech.org.uk>
Cc: Chris Boot <crb@tiger-computing.co.uk>
Cc: Philip Gaw <pgaw@darktech.org.uk>
Cc: Hannes Reinecke <hare@suse.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Showing 1 changed file with 9 additions and 0 deletions
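The nine added lines (marked with '+' in the listing below) extend the Standby
CDB filter in core_alua_state_standby(): READ CAPACITY(10) is whitelisted
outright, and READ CAPACITY(16), which arrives as SERVICE ACTION IN(16) with
the service action in the low five bits of CDB byte 1, is matched against
SAI_READ_CAPACITY_16. As a rough user-space sketch of the classification being
added (illustrative only, not kernel code; the raw opcode values 0x25, 0x9e and
0x10 come from SPC/SBC rather than the kernel's <scsi/scsi.h> constants):

#include <stdio.h>

/* Mirrors the new Standby-state check for the two READ CAPACITY
 * variants: 0x25 = READ CAPACITY(10), 0x9e = SERVICE ACTION IN(16),
 * 0x10 = READ CAPACITY(16) service action.
 */
static int standby_allows_read_capacity(const unsigned char *cdb)
{
	switch (cdb[0]) {
	case 0x25:				/* READ_CAPACITY */
		return 1;
	case 0x9e:				/* SERVICE_ACTION_IN */
		/* service action lives in the low 5 bits of byte 1 */
		return (cdb[1] & 0x1f) == 0x10;	/* SAI_READ_CAPACITY_16 */
	default:
		return 0;
	}
}

int main(void)
{
	unsigned char rc10[6] = { 0x25 };
	unsigned char rc16[16] = { 0x9e, 0x10 };
	unsigned char write10[10] = { 0x2a };	/* still rejected in Standby */

	printf("READ CAPACITY(10) allowed: %d\n", standby_allows_read_capacity(rc10));
	printf("READ CAPACITY(16) allowed: %d\n", standby_allows_read_capacity(rc16));
	printf("WRITE(10) allowed:         %d\n", standby_allows_read_capacity(write10));
	return 0;
}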

drivers/target/target_core_alua.c
/*******************************************************************************
 * Filename:  target_core_alua.c
 *
 * This file contains SPC-3 compliant asymmetric logical unit assigntment (ALUA)
 *
 * (c) Copyright 2009-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/configfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_ua.h"

static sense_reason_t core_alua_check_transition(int state, int valid,
						 int *primary);
static int core_alua_set_tg_pt_secondary_state(
		struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
		struct se_port *port, int explicit, int offline);

static char *core_alua_dump_state(int state);

static u16 alua_lu_gps_counter;
static u32 alua_lu_gps_count;

static DEFINE_SPINLOCK(lu_gps_lock);
static LIST_HEAD(lu_gps_list);

struct t10_alua_lu_gp *default_lu_gp;

/*
 * REPORT REFERRALS
 *
 * See sbc3r35 section 5.23
 */
sense_reason_t
target_emulate_report_referrals(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct t10_alua_lba_map *map;
	struct t10_alua_lba_map_member *map_mem;
	unsigned char *buf;
	u32 rd_len = 0, off;

	if (cmd->data_length < 4) {
		pr_warn("REPORT REFERRALS allocation length %u too"
			" small\n", cmd->data_length);
		return TCM_INVALID_CDB_FIELD;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	off = 4;
	spin_lock(&dev->t10_alua.lba_map_lock);
	if (list_empty(&dev->t10_alua.lba_map_list)) {
		spin_unlock(&dev->t10_alua.lba_map_lock);
		transport_kunmap_data_sg(cmd);

		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	list_for_each_entry(map, &dev->t10_alua.lba_map_list,
			    lba_map_list) {
		int desc_num = off + 3;
		int pg_num;

		off += 4;
		if (cmd->data_length > off)
			put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
		off += 8;
		if (cmd->data_length > off)
			put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
		off += 8;
		rd_len += 20;
		pg_num = 0;
		list_for_each_entry(map_mem, &map->lba_map_mem_list,
				    lba_map_mem_list) {
			int alua_state = map_mem->lba_map_mem_alua_state;
			int alua_pg_id = map_mem->lba_map_mem_alua_pg_id;

			if (cmd->data_length > off)
				buf[off] = alua_state & 0x0f;
			off += 2;
			if (cmd->data_length > off)
				buf[off] = (alua_pg_id >> 8) & 0xff;
			off++;
			if (cmd->data_length > off)
				buf[off] = (alua_pg_id & 0xff);
			off++;
			rd_len += 4;
			pg_num++;
		}
		if (cmd->data_length > desc_num)
			buf[desc_num] = pg_num;
	}
	spin_unlock(&dev->t10_alua.lba_map_lock);

	/*
	 * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
	 */
	put_unaligned_be16(rd_len, &buf[2]);

	transport_kunmap_data_sg(cmd);

	target_complete_cmd(cmd, GOOD);
	return 0;
}

/*
 * REPORT_TARGET_PORT_GROUPS
 *
 * See spc4r17 section 6.27
 */
sense_reason_t
target_emulate_report_target_port_groups(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_port *port;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	unsigned char *buf;
	u32 rd_len = 0, off;
	int ext_hdr = (cmd->t_task_cdb[1] & 0x20);

	/*
	 * Skip over RESERVED area to first Target port group descriptor
	 * depending on the PARAMETER DATA FORMAT type..
	 */
	if (ext_hdr != 0)
		off = 8;
	else
		off = 4;

	if (cmd->data_length < off) {
		pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
			" small for %s header\n", cmd->data_length,
			(ext_hdr) ? "extended" : "normal");
		return TCM_INVALID_CDB_FIELD;
	}
	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
			tg_pt_gp_list) {
		/*
		 * Check if the Target port group and Target port descriptor list
		 * based on tg_pt_gp_members count will fit into the response payload.
		 * Otherwise, bump rd_len to let the initiator know we have exceeded
		 * the allocation length and the response is truncated.
		 */
		if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
		     cmd->data_length) {
			rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
			continue;
		}
		/*
		 * PREF: Preferred target port bit, determine if this
		 * bit should be set for port group.
		 */
		if (tg_pt_gp->tg_pt_gp_pref)
			buf[off] = 0x80;
		/*
		 * Set the ASYMMETRIC ACCESS State
		 */
		buf[off++] |= (atomic_read(
			&tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
		/*
		 * Set supported ASYMMETRIC ACCESS State bits
		 */
		buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states;
		/*
		 * TARGET PORT GROUP
		 */
		buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
		buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);

		off++; /* Skip over Reserved */
		/*
		 * STATUS CODE
		 */
		buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
		/*
		 * Vendor Specific field
		 */
		buf[off++] = 0x00;
		/*
		 * TARGET PORT COUNT
		 */
		buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
		rd_len += 8;

		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
		list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
				tg_pt_gp_mem_list) {
			port = tg_pt_gp_mem->tg_pt;
			/*
			 * Start Target Port descriptor format
			 *
			 * See spc4r17 section 6.2.7 Table 247
			 */
			off += 2; /* Skip over Obsolete */
			/*
			 * Set RELATIVE TARGET PORT IDENTIFIER
			 */
			buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
			buf[off++] = (port->sep_rtpi & 0xff);
			rd_len += 4;
		}
		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
	}
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
	/*
	 * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
	 */
	put_unaligned_be32(rd_len, &buf[0]);

	/*
	 * Fill in the Extended header parameter data format if requested
	 */
	if (ext_hdr != 0) {
		buf[4] = 0x10;
		/*
		 * Set the implicit transition time (in seconds) for the application
		 * client to use as a base for it's transition timeout value.
		 *
		 * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN
		 * this CDB was received upon to determine this value individually
		 * for ALUA target port group.
		 */
		port = cmd->se_lun->lun_sep;
		tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
		if (tg_pt_gp_mem) {
			spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
			tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
			if (tg_pt_gp)
				buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		}
	}
	transport_kunmap_data_sg(cmd);

	target_complete_cmd(cmd, GOOD);
	return 0;
}

/*
 * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
 *
 * See spc4r17 section 6.35
 */
sense_reason_t
target_emulate_set_target_port_groups(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_port *port, *l_port = cmd->se_lun->lun_sep;
	struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
	unsigned char *buf;
	unsigned char *ptr;
	sense_reason_t rc = TCM_NO_SENSE;
	u32 len = 4; /* Skip over RESERVED area in header */
	int alua_access_state, primary = 0, valid_states;
	u16 tg_pt_id, rtpi;

	if (!l_port)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	if (cmd->data_length < 4) {
		pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
			" small\n", cmd->data_length);
		return TCM_INVALID_PARAMETER_LIST;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/*
	 * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
	 * for the local tg_pt_gp.
	 */
	l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
	if (!l_tg_pt_gp_mem) {
		pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
		goto out;
	}
	spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
	l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
	if (!l_tg_pt_gp) {
		spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
		goto out;
	}
	spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);

	if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
		pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
			" while TPGS_EXPLICIT_ALUA is disabled\n");
		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
		goto out;
	}
	valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;

	ptr = &buf[4]; /* Skip over RESERVED area in header */

	while (len < cmd->data_length) {
		bool found = false;
		alua_access_state = (ptr[0] & 0x0f);
		/*
		 * Check the received ALUA access state, and determine if
		 * the state is a primary or secondary target port asymmetric
		 * access state.
		 */
		rc = core_alua_check_transition(alua_access_state,
						valid_states, &primary);
		if (rc) {
			/*
			 * If the SET TARGET PORT GROUPS attempts to establish
			 * an invalid combination of target port asymmetric
			 * access states or attempts to establish an
			 * unsupported target port asymmetric access state,
			 * then the command shall be terminated with CHECK
			 * CONDITION status, with the sense key set to ILLEGAL
			 * REQUEST, and the additional sense code set to INVALID
			 * FIELD IN PARAMETER LIST.
			 */
			goto out;
		}

		/*
		 * If the ASYMMETRIC ACCESS STATE field (see table 267)
		 * specifies a primary target port asymmetric access state,
		 * then the TARGET PORT GROUP OR TARGET PORT field specifies
		 * a primary target port group for which the primary target
		 * port asymmetric access state shall be changed. If the
		 * ASYMMETRIC ACCESS STATE field specifies a secondary target
		 * port asymmetric access state, then the TARGET PORT GROUP OR
		 * TARGET PORT field specifies the relative target port
		 * identifier (see 3.1.120) of the target port for which the
		 * secondary target port asymmetric access state shall be
		 * changed.
		 */
		if (primary) {
			tg_pt_id = get_unaligned_be16(ptr + 2);
			/*
			 * Locate the matching target port group ID from
			 * the global tg_pt_gp list
			 */
			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
			list_for_each_entry(tg_pt_gp,
					&dev->t10_alua.tg_pt_gps_list,
					tg_pt_gp_list) {
				if (!tg_pt_gp->tg_pt_gp_valid_id)
					continue;

				if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
					continue;

				atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
				smp_mb__after_atomic_inc();

				spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

				if (!core_alua_do_port_transition(tg_pt_gp,
						dev, l_port, nacl,
						alua_access_state, 1))
					found = true;

				spin_lock(&dev->t10_alua.tg_pt_gps_lock);
				atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
				smp_mb__after_atomic_dec();
				break;
			}
			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
		} else {
			/*
			 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
			 * the Target Port in question for the the incoming
			 * SET_TARGET_PORT_GROUPS op.
			 */
			rtpi = get_unaligned_be16(ptr + 2);
			/*
			 * Locate the matching relative target port identifier
			 * for the struct se_device storage object.
			 */
			spin_lock(&dev->se_port_lock);
			list_for_each_entry(port, &dev->dev_sep_list,
					sep_list) {
				if (port->sep_rtpi != rtpi)
					continue;

				tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;

				spin_unlock(&dev->se_port_lock);

				if (!core_alua_set_tg_pt_secondary_state(
						tg_pt_gp_mem, port, 1, 1))
					found = true;

				spin_lock(&dev->se_port_lock);
				break;
			}
			spin_unlock(&dev->se_port_lock);
		}

		if (!found) {
			rc = TCM_INVALID_PARAMETER_LIST;
			goto out;
		}

		ptr += 4;
		len += 4;
	}

out:
	transport_kunmap_data_sg(cmd);
	if (!rc)
		target_complete_cmd(cmd, GOOD);
	return rc;
}

static inline int core_alua_state_nonoptimized(
	struct se_cmd *cmd,
	unsigned char *cdb,
	int nonop_delay_msecs,
	u8 *alua_ascq)
{
	/*
	 * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
	 * later to determine if processing of this cmd needs to be
	 * temporarily delayed for the Active/NonOptimized primary access state.
	 */
	cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
	cmd->alua_nonop_delay = nonop_delay_msecs;
	return 0;
}

static inline int core_alua_state_lba_dependent(
	struct se_cmd *cmd,
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	u8 *alua_ascq)
{
	struct se_device *dev = cmd->se_dev;
	u64 segment_size, segment_mult, sectors, lba;

	/* Only need to check for cdb actually containing LBAs */
	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
		return 0;

	spin_lock(&dev->t10_alua.lba_map_lock);
	segment_size = dev->t10_alua.lba_map_segment_size;
	segment_mult = dev->t10_alua.lba_map_segment_multiplier;
	sectors = cmd->data_length / dev->dev_attrib.block_size;

	lba = cmd->t_task_lba;
	while (lba < cmd->t_task_lba + sectors) {
		struct t10_alua_lba_map *cur_map = NULL, *map;
		struct t10_alua_lba_map_member *map_mem;

		list_for_each_entry(map, &dev->t10_alua.lba_map_list,
				    lba_map_list) {
			u64 start_lba, last_lba;
			u64 first_lba = map->lba_map_first_lba;

			if (segment_mult) {
				u64 tmp = lba;
				start_lba = do_div(tmp, segment_size * segment_mult);

				last_lba = first_lba + segment_size - 1;
				if (start_lba >= first_lba &&
				    start_lba <= last_lba) {
					lba += segment_size;
					cur_map = map;
					break;
				}
			} else {
				last_lba = map->lba_map_last_lba;
				if (lba >= first_lba && lba <= last_lba) {
					lba = last_lba + 1;
					cur_map = map;
					break;
				}
			}
		}
		if (!cur_map) {
			spin_unlock(&dev->t10_alua.lba_map_lock);
			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
			return 1;
		}
		list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
				    lba_map_mem_list) {
			if (map_mem->lba_map_mem_alua_pg_id !=
			    tg_pt_gp->tg_pt_gp_id)
				continue;
			switch(map_mem->lba_map_mem_alua_state) {
			case ALUA_ACCESS_STATE_STANDBY:
				spin_unlock(&dev->t10_alua.lba_map_lock);
				*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
				return 1;
			case ALUA_ACCESS_STATE_UNAVAILABLE:
				spin_unlock(&dev->t10_alua.lba_map_lock);
				*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
				return 1;
			default:
				break;
			}
		}
	}
	spin_unlock(&dev->t10_alua.lba_map_lock);
	return 0;
}

static inline int core_alua_state_standby(
	struct se_cmd *cmd,
	unsigned char *cdb,
	u8 *alua_ascq)
{
	/*
	 * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
	 * spc4r17 section 5.9.2.4.4
	 */
	switch (cdb[0]) {
	case INQUIRY:
	case LOG_SELECT:
	case LOG_SENSE:
	case MODE_SELECT:
	case MODE_SENSE:
	case REPORT_LUNS:
	case RECEIVE_DIAGNOSTIC:
	case SEND_DIAGNOSTIC:
+	case READ_CAPACITY:
		return 0;
+	case SERVICE_ACTION_IN:
+		switch (cdb[1] & 0x1f) {
+		case SAI_READ_CAPACITY_16:
+			return 0;
+		default:
+			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+			return 1;
+		}
	case MAINTENANCE_IN:
		switch (cdb[1] & 0x1f) {
		case MI_REPORT_TARGET_PGS:
			return 0;
		default:
			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
			return 1;
		}
	case MAINTENANCE_OUT:
		switch (cdb[1]) {
		case MO_SET_TARGET_PGS:
			return 0;
		default:
			*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
			return 1;
		}
	case REQUEST_SENSE:
	case PERSISTENT_RESERVE_IN:
	case PERSISTENT_RESERVE_OUT:
	case READ_BUFFER:
	case WRITE_BUFFER:
		return 0;
	default:
		*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
		return 1;
	}

	return 0;
}

static inline int core_alua_state_unavailable(
	struct se_cmd *cmd,
	unsigned char *cdb,
	u8 *alua_ascq)
{
	/*
	 * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
	 * spc4r17 section 5.9.2.4.5
	 */
	switch (cdb[0]) {
	case INQUIRY:
	case REPORT_LUNS:
		return 0;
	case MAINTENANCE_IN:
		switch (cdb[1] & 0x1f) {
		case MI_REPORT_TARGET_PGS:
			return 0;
		default:
			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
			return 1;
		}
	case MAINTENANCE_OUT:
		switch (cdb[1]) {
		case MO_SET_TARGET_PGS:
			return 0;
		default:
			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
			return 1;
		}
	case REQUEST_SENSE:
	case READ_BUFFER:
	case WRITE_BUFFER:
		return 0;
	default:
		*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
		return 1;
	}

	return 0;
}

static inline int core_alua_state_transition(
	struct se_cmd *cmd,
	unsigned char *cdb,
	u8 *alua_ascq)
{
	/*
	 * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
	 * spc4r17 section 5.9.2.5
	 */
	switch (cdb[0]) {
	case INQUIRY:
	case REPORT_LUNS:
		return 0;
	case MAINTENANCE_IN:
		switch (cdb[1] & 0x1f) {
		case MI_REPORT_TARGET_PGS:
			return 0;
		default:
			*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
			return 1;
		}
	case REQUEST_SENSE:
	case READ_BUFFER:
	case WRITE_BUFFER:
		return 0;
	default:
		*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
		return 1;
	}

	return 0;
}

/*
 * return 1: Is used to signal LUN not accessible, and check condition/not ready
 * return 0: Used to signal success
 * return -1: Used to signal failure, and invalid cdb field
 */
sense_reason_t
target_alua_state_check(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	struct se_lun *lun = cmd->se_lun;
	struct se_port *port = lun->lun_sep;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	int out_alua_state, nonop_delay_msecs;
	u8 alua_ascq;
	int ret;

	if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
		return 0;
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
		return 0;

	if (!port)
		return 0;
	/*
	 * First, check for a struct se_port specific secondary ALUA target port
	 * access state: OFFLINE
	 */
	if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
		pr_debug("ALUA: Got secondary offline status for local"
				" target port\n");
		alua_ascq = ASCQ_04H_ALUA_OFFLINE;
		ret = 1;
		goto out;
	}
	/*
	 * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
	 * ALUA target port group, to obtain current ALUA access state.
	 * Otherwise look for the underlying struct se_device association with
	 * a ALUA logical unit group.
	 */
	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
	if (!tg_pt_gp_mem)
		return 0;

	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
	nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	/*
	 * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
	 * statement so the compiler knows explicitly to check this case first.
	 * For the Optimized ALUA access state case, we want to process the
	 * incoming fabric cmd ASAP..
	 */
	if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED)
		return 0;

	switch (out_alua_state) {
	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
		ret = core_alua_state_nonoptimized(cmd, cdb,
					nonop_delay_msecs, &alua_ascq);
		break;
	case ALUA_ACCESS_STATE_STANDBY:
		ret = core_alua_state_standby(cmd, cdb, &alua_ascq);
		break;
	case ALUA_ACCESS_STATE_UNAVAILABLE:
		ret = core_alua_state_unavailable(cmd, cdb, &alua_ascq);
		break;
	case ALUA_ACCESS_STATE_TRANSITION:
		ret = core_alua_state_transition(cmd, cdb, &alua_ascq);
		break;
	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
		ret = core_alua_state_lba_dependent(cmd, tg_pt_gp, &alua_ascq);
		break;
	/*
	 * OFFLINE is a secondary ALUA target port group access state, that is
	 * handled above with struct se_port->sep_tg_pt_secondary_offline=1
	 */
	case ALUA_ACCESS_STATE_OFFLINE:
	default:
		pr_err("Unknown ALUA access state: 0x%02x\n",
				out_alua_state);
		return TCM_INVALID_CDB_FIELD;
	}

out:
	if (ret > 0) {
		/*
		 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
		 * The ALUA additional sense code qualifier (ASCQ) is determined
		 * by the ALUA primary or secondary access state..
		 */
		pr_debug("[%s]: ALUA TG Port not available, "
			"SenseKey: NOT_READY, ASC/ASCQ: "
			"0x04/0x%02x\n",
			cmd->se_tfo->get_fabric_name(), alua_ascq);

		cmd->scsi_asc = 0x04;
		cmd->scsi_ascq = alua_ascq;
		return TCM_CHECK_CONDITION_NOT_READY;
	}

	return 0;
}

/*
 * Check implicit and explicit ALUA state change request.
 */
static sense_reason_t
core_alua_check_transition(int state, int valid, int *primary)
{
	/*
	 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
	 * defined as primary target port asymmetric access states.
	 */
	switch (state) {
	case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
		if (!(valid & ALUA_AO_SUP))
			goto not_supported;
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
		if (!(valid & ALUA_AN_SUP))
			goto not_supported;
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_STANDBY:
		if (!(valid & ALUA_S_SUP))
			goto not_supported;
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_UNAVAILABLE:
		if (!(valid & ALUA_U_SUP))
			goto not_supported;
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
		if (!(valid & ALUA_LBD_SUP))
			goto not_supported;
		*primary = 1;
		break;
	case ALUA_ACCESS_STATE_OFFLINE:
		/*
		 * OFFLINE state is defined as a secondary target port
		 * asymmetric access state.
		 */
		if (!(valid & ALUA_O_SUP))
			goto not_supported;
		*primary = 0;
		break;
	case ALUA_ACCESS_STATE_TRANSITION:
		/*
		 * Transitioning is set internally, and
		 * cannot be selected manually.
		 */
		goto not_supported;
	default:
		pr_err("Unknown ALUA access state: 0x%02x\n", state);
		return TCM_INVALID_PARAMETER_LIST;
	}

	return 0;

not_supported:
	pr_err("ALUA access state %s not supported",
	       core_alua_dump_state(state));
	return TCM_INVALID_PARAMETER_LIST;
}

static char *core_alua_dump_state(int state)
{
	switch (state) {
	case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
		return "Active/Optimized";
	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
		return "Active/NonOptimized";
	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
		return "LBA Dependent";
	case ALUA_ACCESS_STATE_STANDBY:
		return "Standby";
	case ALUA_ACCESS_STATE_UNAVAILABLE:
		return "Unavailable";
	case ALUA_ACCESS_STATE_OFFLINE:
		return "Offline";
	case ALUA_ACCESS_STATE_TRANSITION:
		return "Transitioning";
	default:
		return "Unknown";
	}

	return NULL;
}

char *core_alua_dump_status(int status)
{
	switch (status) {
	case ALUA_STATUS_NONE:
		return "None";
	case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG:
		return "Altered by Explicit STPG";
	case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA:
		return "Altered by Implicit ALUA";
	default:
		return "Unknown";
	}

	return NULL;
}

/*
 * Used by fabric modules to determine when we need to delay processing
 * for the Active/NonOptimized paths..
 */
int core_alua_check_nonop_delay(
	struct se_cmd *cmd)
{
	if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
		return 0;
	if (in_interrupt())
		return 0;
	/*
	 * The ALUA Active/NonOptimized access state delay can be disabled
	 * in via configfs with a value of zero
	 */
	if (!cmd->alua_nonop_delay)
		return 0;
	/*
	 * struct se_cmd->alua_nonop_delay gets set by a target port group
	 * defined interval in core_alua_state_nonoptimized()
	 */
	msleep_interruptible(cmd->alua_nonop_delay);
	return 0;
}
EXPORT_SYMBOL(core_alua_check_nonop_delay);

/*
 * Called with tg_pt_gp->tg_pt_gp_md_mutex or tg_pt_gp_mem->sep_tg_pt_md_mutex
 *
 */
static int core_alua_write_tpg_metadata(
	const char *path,
	unsigned char *md_buf,
	u32 md_buf_len)
{
	struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
	int ret;

	if (IS_ERR(file)) {
		pr_err("filp_open(%s) for ALUA metadata failed\n", path);
		return -ENODEV;
	}
	ret = kernel_write(file, md_buf, md_buf_len, 0);
	if (ret < 0)
		pr_err("Error writing ALUA metadata file: %s\n", path);
	fput(file);
	return (ret < 0) ? -EIO : 0;
}

/*
 * Called with tg_pt_gp->tg_pt_gp_md_mutex held
 */
static int core_alua_update_tpg_primary_metadata(
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	unsigned char *md_buf;
	struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
	char path[ALUA_METADATA_PATH_LEN];
	int len, rc;

	md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
	if (!md_buf) {
		pr_err("Unable to allocate buf for ALUA metadata\n");
		return -ENOMEM;
	}

	memset(path, 0, ALUA_METADATA_PATH_LEN);

	len = snprintf(md_buf, ALUA_MD_BUF_LEN,
			"tg_pt_gp_id=%hu\n"
			"alua_access_state=0x%02x\n"
			"alua_access_status=0x%02x\n",
			tg_pt_gp->tg_pt_gp_id,
			tg_pt_gp->tg_pt_gp_alua_pending_state,
			tg_pt_gp->tg_pt_gp_alua_access_status);

	snprintf(path, ALUA_METADATA_PATH_LEN,
		"/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
		config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));

	rc = core_alua_write_tpg_metadata(path, md_buf, len);
	kfree(md_buf);
	return rc;
}

969 static void core_alua_do_transition_tg_pt_work(struct work_struct *work) 978 static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
970 { 979 {
971 struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work, 980 struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
972 struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work); 981 struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
973 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; 982 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
974 struct se_dev_entry *se_deve; 983 struct se_dev_entry *se_deve;
975 struct se_lun_acl *lacl; 984 struct se_lun_acl *lacl;
976 struct se_port *port; 985 struct se_port *port;
977 struct t10_alua_tg_pt_gp_member *mem; 986 struct t10_alua_tg_pt_gp_member *mem;
978 bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status == 987 bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
979 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG); 988 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
980 989
981 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 990 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
982 list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list, 991 list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
983 tg_pt_gp_mem_list) { 992 tg_pt_gp_mem_list) {
984 port = mem->tg_pt; 993 port = mem->tg_pt;
985 /* 994 /*
986 * After an implicit target port asymmetric access state 995 * After an implicit target port asymmetric access state
987 * change, a device server shall establish a unit attention 996 * change, a device server shall establish a unit attention
988 * condition for the initiator port associated with every I_T 997 * condition for the initiator port associated with every I_T
989 * nexus with the additional sense code set to ASYMMETRIC 998 * nexus with the additional sense code set to ASYMMETRIC
990 * ACCESS STATE CHANGED. 999 * ACCESS STATE CHANGED.
991 * 1000 *
992 * After an explicit target port asymmetric access state 1001 * After an explicit target port asymmetric access state
993 * change, a device server shall establish a unit attention 1002 * change, a device server shall establish a unit attention
994 * condition with the additional sense code set to ASYMMETRIC 1003 * condition with the additional sense code set to ASYMMETRIC
995 * ACCESS STATE CHANGED for the initiator port associated with 1004 * ACCESS STATE CHANGED for the initiator port associated with
996 * every I_T nexus other than the I_T nexus on which the SET 1005 * every I_T nexus other than the I_T nexus on which the SET
		 * TARGET PORT GROUPS command was received.
		 */
		atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
		smp_mb__after_atomic_inc();
		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

		spin_lock_bh(&port->sep_alua_lock);
		list_for_each_entry(se_deve, &port->sep_alua_list,
					alua_port_list) {
			lacl = se_deve->se_lun_acl;
			/*
			 * se_deve->se_lun_acl pointer may be NULL for an
			 * entry created without explicit Node+MappedLUN ACLs
			 */
			if (!lacl)
				continue;

			if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
			     ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
			    (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
			    (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl) &&
			    (tg_pt_gp->tg_pt_gp_alua_port != NULL) &&
			    (tg_pt_gp->tg_pt_gp_alua_port == port))
				continue;

			core_scsi3_ua_allocate(lacl->se_lun_nacl,
				se_deve->mapped_lun, 0x2A,
				ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
		}
		spin_unlock_bh(&port->sep_alua_lock);

		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
		atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
		smp_mb__after_atomic_dec();
	}
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
	/*
	 * Update the ALUA metadata buf that has been allocated in
	 * core_alua_do_port_transition(); this metadata will be written
	 * to struct file.
	 *
	 * Note that there is a case where we do not want to update the
	 * metadata: when the saved metadata is being parsed in userspace
	 * to set the existing port access state and access status.
	 *
	 * Also note that failure to write out the ALUA metadata to
	 * struct file does NOT affect the actual ALUA transition.
	 */
	if (tg_pt_gp->tg_pt_gp_write_metadata) {
		mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
		core_alua_update_tpg_primary_metadata(tg_pt_gp);
		mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
	}
	/*
	 * Set the current primary ALUA access state to the requested new state
	 */
	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
		   tg_pt_gp->tg_pt_gp_alua_pending_state);

	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
		" from primary access state %s to %s\n", (explicit) ? "explicit" :
		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
		tg_pt_gp->tg_pt_gp_id,
		core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
		core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
	smp_mb__after_atomic_dec();
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

	if (tg_pt_gp->tg_pt_gp_transition_complete)
		complete(tg_pt_gp->tg_pt_gp_transition_complete);
}
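
The handler above is the classic delayed-work shape: the struct delayed_work is embedded in the state object, and the handler recovers the object with container_of(). A stripped-down sketch of the same idiom, with hypothetical names (transition_state, transition_work_fn):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct transition_state {			/* hypothetical example type */
	int pending_state;
	struct delayed_work work;
};

static void transition_work_fn(struct work_struct *work)
{
	struct transition_state *st = container_of(work,
			struct transition_state, work.work);

	/* apply st->pending_state here, outside the caller's locks */
}

/*
 * Setup: INIT_DELAYED_WORK(&st->work, transition_work_fn);
 * Arm:   schedule_delayed_work(&st->work, msecs_to_jiffies(100));
 */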

static int core_alua_do_transition_tg_pt(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	int new_state,
	int explicit)
{
	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
	DECLARE_COMPLETION_ONSTACK(wait);

	/* Nothing to be done here */
	if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
		return 0;

	if (new_state == ALUA_ACCESS_STATE_TRANSITION)
		return -EAGAIN;

	/*
	 * Flush any pending transitions
	 */
	if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs &&
	    atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) ==
	    ALUA_ACCESS_STATE_TRANSITION) {
		/* Just in case */
		tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
		tg_pt_gp->tg_pt_gp_transition_complete = &wait;
		flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
		wait_for_completion(&wait);
		tg_pt_gp->tg_pt_gp_transition_complete = NULL;
		return 0;
	}

	/*
	 * Save the old primary ALUA access state, and set the current state
	 * to ALUA_ACCESS_STATE_TRANSITION.
	 */
	tg_pt_gp->tg_pt_gp_alua_previous_state =
		atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
	tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;

	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
		   ALUA_ACCESS_STATE_TRANSITION);
	tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
		ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
		ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;

	/*
	 * Check for the optional ALUA primary state transition delay
	 */
	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);

	/*
	 * Take a reference for workqueue item
	 */
	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
	smp_mb__after_atomic_inc();
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

	if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
		unsigned long transition_tmo;

		transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
		queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
				   &tg_pt_gp->tg_pt_gp_transition_work,
				   transition_tmo);
	} else {
		tg_pt_gp->tg_pt_gp_transition_complete = &wait;
		queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
				   &tg_pt_gp->tg_pt_gp_transition_work, 0);
		wait_for_completion(&wait);
		tg_pt_gp->tg_pt_gp_transition_complete = NULL;
	}

	return 0;
}
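
The else branch above is a common way to make an asynchronous work item synchronous: hand the worker an on-stack completion, queue with zero delay, and sleep until the handler signals it. A sketch of just that shape, with hypothetical names (sync_state, wait_for_transition):

#include <linux/completion.h>
#include <linux/workqueue.h>

struct sync_state {				/* hypothetical example type */
	struct delayed_work work;
	struct completion *done;		/* handler calls complete(done) */
};

static void wait_for_transition(struct sync_state *st,
				struct workqueue_struct *wq)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	st->done = &wait;
	queue_delayed_work(wq, &st->work, 0);
	wait_for_completion(&wait);		/* sleeps until the handler runs */
	st->done = NULL;
}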

int core_alua_do_port_transition(
	struct t10_alua_tg_pt_gp *l_tg_pt_gp,
	struct se_device *l_dev,
	struct se_port *l_port,
	struct se_node_acl *l_nacl,
	int new_state,
	int explicit)
{
	struct se_device *dev;
	struct t10_alua_lu_gp *lu_gp;
	struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	int primary, valid_states, rc = 0;

	valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
	if (core_alua_check_transition(new_state, valid_states, &primary) != 0)
		return -EINVAL;

	local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
	spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
	lu_gp = local_lu_gp_mem->lu_gp;
	atomic_inc(&lu_gp->lu_gp_ref_cnt);
	smp_mb__after_atomic_inc();
	spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
	/*
	 * For storage objects that are members of the 'default_lu_gp',
	 * we only do transition on the passed *l_tg_pt_gp, and not
	 * on all of the matching target port groups IDs in default_lu_gp.
	 */
	if (!lu_gp->lu_gp_id) {
		/*
		 * core_alua_do_transition_tg_pt() will always return
		 * success.
		 */
		l_tg_pt_gp->tg_pt_gp_alua_port = l_port;
		l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
		rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
						   new_state, explicit);
		atomic_dec(&lu_gp->lu_gp_ref_cnt);
		smp_mb__after_atomic_dec();
		return rc;
	}
	/*
	 * For all other LU groups aside from 'default_lu_gp', walk all of
	 * the associated storage objects looking for a matching target port
	 * group ID from the local target port group.
	 */
	spin_lock(&lu_gp->lu_gp_lock);
	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
				lu_gp_mem_list) {

		dev = lu_gp_mem->lu_gp_mem_dev;
		atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
		smp_mb__after_atomic_inc();
		spin_unlock(&lu_gp->lu_gp_lock);

		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
		list_for_each_entry(tg_pt_gp,
				&dev->t10_alua.tg_pt_gps_list,
				tg_pt_gp_list) {

			if (!tg_pt_gp->tg_pt_gp_valid_id)
				continue;
			/*
			 * If the target port asymmetric access state is
			 * changed for any target port group accessible via
			 * a logical unit within a LU group, the asymmetric
			 * access state for the same target port group
			 * accessible via other logical units in that LU
			 * group will also change.
			 */
			if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
				continue;

			if (l_tg_pt_gp == tg_pt_gp) {
				tg_pt_gp->tg_pt_gp_alua_port = l_port;
				tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
			} else {
				tg_pt_gp->tg_pt_gp_alua_port = NULL;
				tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
			}
			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
			smp_mb__after_atomic_inc();
			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
			/*
			 * core_alua_do_transition_tg_pt() will always return
			 * success.
			 */
			rc = core_alua_do_transition_tg_pt(tg_pt_gp,
							   new_state, explicit);

			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
			smp_mb__after_atomic_dec();
			if (rc)
				break;
		}
		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

		spin_lock(&lu_gp->lu_gp_lock);
		atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
		smp_mb__after_atomic_dec();
	}
	spin_unlock(&lu_gp->lu_gp_lock);

	if (!rc) {
		pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
			" Group IDs: %hu %s transition to primary state: %s\n",
			config_item_name(&lu_gp->lu_gp_group.cg_item),
			l_tg_pt_gp->tg_pt_gp_id,
			(explicit) ? "explicit" : "implicit",
			core_alua_dump_state(new_state));
	}

	atomic_dec(&lu_gp->lu_gp_ref_cnt);
	smp_mb__after_atomic_dec();
	return rc;
}
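
Both loops in this function pin the current element with a per-member reference count before dropping the list lock, so the element cannot be unlinked while core_alua_do_transition_tg_pt() sleeps; the free paths later in this file spin on the same counters before releasing memory. A generic sketch of the idiom, with a hypothetical item type:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

struct item {					/* hypothetical example type */
	struct list_head node;
	atomic_t ref;				/* nonzero: may not be unlinked */
};

static void walk_and_sleep(struct list_head *head, spinlock_t *lock)
{
	struct item *it;

	spin_lock(lock);
	list_for_each_entry(it, head, node) {
		atomic_inc(&it->ref);		/* pin 'it' across the unlock */
		smp_mb__after_atomic_inc();
		spin_unlock(lock);

		/* sleeping work on 'it' goes here */

		spin_lock(lock);
		atomic_dec(&it->ref);
		smp_mb__after_atomic_dec();
	}
	spin_unlock(lock);
}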

/*
 * Called with tg_pt_gp_mem->sep_tg_pt_md_mutex held
 */
static int core_alua_update_tpg_secondary_metadata(
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
	struct se_port *port)
{
	unsigned char *md_buf;
	struct se_portal_group *se_tpg = port->sep_tpg;
	char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
	int len, rc;

	md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
	if (!md_buf) {
		pr_err("Unable to allocate buf for ALUA metadata\n");
		return -ENOMEM;
	}

	memset(path, 0, ALUA_METADATA_PATH_LEN);
	memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);

	len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
			se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));

	if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
		snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
			"alua_tg_pt_status=0x%02x\n",
			atomic_read(&port->sep_tg_pt_secondary_offline),
			port->sep_tg_pt_secondary_stat);

	snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
			se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
			port->sep_lun->unpacked_lun);

	rc = core_alua_write_tpg_metadata(path, md_buf, len);
	kfree(md_buf);

	return rc;
}
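
Analogous to the primary case, the secondary write lands in a per-LUN file keyed by fabric name, WWN (plus TPG tag when the fabric provides one), and unpacked LUN. An illustrative result for a port taken offline with an implicitly altered status (0x2), with placeholder path components:

# /var/target/alua/<fabric>/<wwn>+<tpgt>/lun_0
alua_tg_pt_offline=1
alua_tg_pt_status=0x02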

static int core_alua_set_tg_pt_secondary_state(
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
	struct se_port *port,
	int explicit,
	int offline)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	int trans_delay_msecs;

	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	if (!tg_pt_gp) {
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_err("Unable to complete secondary state"
			" transition\n");
		return -EINVAL;
	}
	trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
	/*
	 * Set the secondary ALUA target port access state to OFFLINE
	 * or release the previously set secondary state for struct se_port
	 */
	if (offline)
		atomic_set(&port->sep_tg_pt_secondary_offline, 1);
	else
		atomic_set(&port->sep_tg_pt_secondary_offline, 0);

	port->sep_tg_pt_secondary_stat = (explicit) ?
		ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
		ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;

	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
		" to secondary access state: %s\n", (explicit) ? "explicit" :
		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
		tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");

	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	/*
	 * Do the optional transition delay after we set the secondary
	 * ALUA access state.
	 */
	if (trans_delay_msecs != 0)
		msleep_interruptible(trans_delay_msecs);
	/*
	 * See if we need to update the ALUA fabric port metadata for
	 * secondary state and status
	 */
	if (port->sep_tg_pt_secondary_write_md) {
		mutex_lock(&port->sep_tg_pt_md_mutex);
		core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port);
		mutex_unlock(&port->sep_tg_pt_md_mutex);
	}

	return 0;
}

struct t10_alua_lba_map *
core_alua_allocate_lba_map(struct list_head *list,
			   u64 first_lba, u64 last_lba)
{
	struct t10_alua_lba_map *lba_map;

	lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
	if (!lba_map) {
		pr_err("Unable to allocate struct t10_alua_lba_map\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
	lba_map->lba_map_first_lba = first_lba;
	lba_map->lba_map_last_lba = last_lba;

	list_add_tail(&lba_map->lba_map_list, list);
	return lba_map;
}

int
core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
			       int pg_id, int state)
{
	struct t10_alua_lba_map_member *lba_map_mem;

	list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
			    lba_map_mem_list) {
		if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
			pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
			return -EINVAL;
		}
	}

	lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
	if (!lba_map_mem) {
		pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
		return -ENOMEM;
	}
	lba_map_mem->lba_map_mem_alua_state = state;
	lba_map_mem->lba_map_mem_alua_pg_id = pg_id;

	list_add_tail(&lba_map_mem->lba_map_mem_list,
		      &lba_map->lba_map_mem_list);
	return 0;
}
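
Taken together, the two allocators above let a caller describe a referral layout one LBA range at a time. A minimal sketch of building a two-segment map on those helpers, assuming the usual ALUA_ACCESS_STATE_ACTIVE_* constants; the ranges and group IDs are illustrative, and error unwinding of already-allocated entries is elided:

static int build_example_lba_map(struct list_head *list)
{
	struct t10_alua_lba_map *map;
	int rc;

	/* LBAs 0x0-0xfffff served optimized by group 1 */
	map = core_alua_allocate_lba_map(list, 0, 0xfffff);
	if (IS_ERR(map))
		return PTR_ERR(map);
	rc = core_alua_allocate_lba_map_mem(map, 1,
			ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
	if (rc)
		return rc;

	/* the next range is non-optimized via group 2 */
	map = core_alua_allocate_lba_map(list, 0x100000, 0x1fffff);
	if (IS_ERR(map))
		return PTR_ERR(map);
	return core_alua_allocate_lba_map_mem(map, 2,
			ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED);
}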

void
core_alua_free_lba_map(struct list_head *lba_list)
{
	struct t10_alua_lba_map *lba_map, *lba_map_tmp;
	struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;

	list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
				 lba_map_list) {
		list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
					 &lba_map->lba_map_mem_list,
					 lba_map_mem_list) {
			list_del(&lba_map_mem->lba_map_mem_list);
			kmem_cache_free(t10_alua_lba_map_mem_cache,
					lba_map_mem);
		}
		list_del(&lba_map->lba_map_list);
		kmem_cache_free(t10_alua_lba_map_cache, lba_map);
	}
}

void
core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
		      int segment_size, int segment_mult)
{
	struct list_head old_lba_map_list;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	int activate = 0, supported;

	INIT_LIST_HEAD(&old_lba_map_list);
	spin_lock(&dev->t10_alua.lba_map_lock);
	dev->t10_alua.lba_map_segment_size = segment_size;
	dev->t10_alua.lba_map_segment_multiplier = segment_mult;
	list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
	if (lba_map_list) {
		list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
		activate = 1;
	}
	spin_unlock(&dev->t10_alua.lba_map_lock);
	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
			    tg_pt_gp_list) {

		if (!tg_pt_gp->tg_pt_gp_valid_id)
			continue;
		supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
		if (activate)
			supported |= ALUA_LBD_SUP;
		else
			supported &= ~ALUA_LBD_SUP;
		tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
	}
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
	core_alua_free_lba_map(&old_lba_map_list);
}

struct t10_alua_lu_gp *
core_alua_allocate_lu_gp(const char *name, int def_group)
{
	struct t10_alua_lu_gp *lu_gp;

	lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
	if (!lu_gp) {
		pr_err("Unable to allocate struct t10_alua_lu_gp\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&lu_gp->lu_gp_node);
	INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
	spin_lock_init(&lu_gp->lu_gp_lock);
	atomic_set(&lu_gp->lu_gp_ref_cnt, 0);

	if (def_group) {
		lu_gp->lu_gp_id = alua_lu_gps_counter++;
		lu_gp->lu_gp_valid_id = 1;
		alua_lu_gps_count++;
	}

	return lu_gp;
}

int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
{
	struct t10_alua_lu_gp *lu_gp_tmp;
	u16 lu_gp_id_tmp;
	/*
	 * The lu_gp->lu_gp_id may only be set once.
	 */
	if (lu_gp->lu_gp_valid_id) {
		pr_warn("ALUA LU Group already has a valid ID,"
			" ignoring request\n");
		return -EINVAL;
	}

	spin_lock(&lu_gps_lock);
	if (alua_lu_gps_count == 0x0000ffff) {
		pr_err("Maximum ALUA alua_lu_gps_count:"
			" 0x0000ffff reached\n");
		spin_unlock(&lu_gps_lock);
		kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
		return -ENOSPC;
	}
again:
	lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
			alua_lu_gps_counter++;

	list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
		if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
			if (!lu_gp_id)
				goto again;

			pr_warn("ALUA Logical Unit Group ID: %hu"
				" already exists, ignoring request\n",
				lu_gp_id);
			spin_unlock(&lu_gps_lock);
			return -EINVAL;
		}
	}

	lu_gp->lu_gp_id = lu_gp_id_tmp;
	lu_gp->lu_gp_valid_id = 1;
	list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
	alua_lu_gps_count++;
	spin_unlock(&lu_gps_lock);

	return 0;
}

static struct t10_alua_lu_gp_member *
core_alua_allocate_lu_gp_mem(struct se_device *dev)
{
	struct t10_alua_lu_gp_member *lu_gp_mem;

	lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
	if (!lu_gp_mem) {
		pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
	spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
	atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);

	lu_gp_mem->lu_gp_mem_dev = dev;
	dev->dev_alua_lu_gp_mem = lu_gp_mem;

	return lu_gp_mem;
}

void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
{
	struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
	/*
	 * Once we have reached this point, config_item_put() has
	 * already been called from target_core_alua_drop_lu_gp().
	 *
	 * Here, we remove the *lu_gp from the global list so that
	 * no associations can be made while we are releasing
	 * struct t10_alua_lu_gp.
	 */
	spin_lock(&lu_gps_lock);
	list_del(&lu_gp->lu_gp_node);
	alua_lu_gps_count--;
	spin_unlock(&lu_gps_lock);
	/*
	 * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
	 * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
	 * released with core_alua_put_lu_gp_from_name()
	 */
	while (atomic_read(&lu_gp->lu_gp_ref_cnt))
		cpu_relax();
	/*
	 * Release reference to struct t10_alua_lu_gp * from all associated
	 * struct se_device.
	 */
	spin_lock(&lu_gp->lu_gp_lock);
	list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
			&lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
		if (lu_gp_mem->lu_gp_assoc) {
			list_del(&lu_gp_mem->lu_gp_mem_list);
			lu_gp->lu_gp_members--;
			lu_gp_mem->lu_gp_assoc = 0;
		}
		spin_unlock(&lu_gp->lu_gp_lock);
		/*
		 * lu_gp_mem is associated with a single
		 * struct se_device->dev_alua_lu_gp_mem, and is released when
		 * struct se_device is released via core_alua_free_lu_gp_mem().
		 *
		 * If the passed lu_gp does NOT match the default_lu_gp, assume
		 * we want to re-associate a given lu_gp_mem with default_lu_gp.
		 */
		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		if (lu_gp != default_lu_gp)
			__core_alua_attach_lu_gp_mem(lu_gp_mem,
					default_lu_gp);
		else
			lu_gp_mem->lu_gp = NULL;
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		spin_lock(&lu_gp->lu_gp_lock);
	}
	spin_unlock(&lu_gp->lu_gp_lock);

	kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
}

void core_alua_free_lu_gp_mem(struct se_device *dev)
{
	struct t10_alua_lu_gp *lu_gp;
	struct t10_alua_lu_gp_member *lu_gp_mem;

	lu_gp_mem = dev->dev_alua_lu_gp_mem;
	if (!lu_gp_mem)
		return;

	while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
		cpu_relax();

	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
	lu_gp = lu_gp_mem->lu_gp;
	if (lu_gp) {
		spin_lock(&lu_gp->lu_gp_lock);
		if (lu_gp_mem->lu_gp_assoc) {
			list_del(&lu_gp_mem->lu_gp_mem_list);
			lu_gp->lu_gp_members--;
			lu_gp_mem->lu_gp_assoc = 0;
		}
		spin_unlock(&lu_gp->lu_gp_lock);
		lu_gp_mem->lu_gp = NULL;
	}
	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

	kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
}

struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
{
	struct t10_alua_lu_gp *lu_gp;
	struct config_item *ci;

	spin_lock(&lu_gps_lock);
	list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
		if (!lu_gp->lu_gp_valid_id)
			continue;
		ci = &lu_gp->lu_gp_group.cg_item;
		if (!strcmp(config_item_name(ci), name)) {
			atomic_inc(&lu_gp->lu_gp_ref_cnt);
			spin_unlock(&lu_gps_lock);
			return lu_gp;
		}
	}
	spin_unlock(&lu_gps_lock);

	return NULL;
}

void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
{
	spin_lock(&lu_gps_lock);
	atomic_dec(&lu_gp->lu_gp_ref_cnt);
	spin_unlock(&lu_gps_lock);
}
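
The lookup takes a reference under lu_gps_lock that core_alua_free_lu_gp() spins on before tearing the group down, so every successful lookup must be paired with the put above. A sketch of the pairing; the group name is illustrative:

static void example_lookup(void)
{
	struct t10_alua_lu_gp *lu_gp;

	lu_gp = core_alua_get_lu_gp_by_name("some_lu_gp");
	if (!lu_gp)
		return;
	/* lu_gp cannot be freed while this reference is held */
	core_alua_put_lu_gp_from_name(lu_gp);
}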

/*
 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
 */
void __core_alua_attach_lu_gp_mem(
	struct t10_alua_lu_gp_member *lu_gp_mem,
	struct t10_alua_lu_gp *lu_gp)
{
	spin_lock(&lu_gp->lu_gp_lock);
	lu_gp_mem->lu_gp = lu_gp;
	lu_gp_mem->lu_gp_assoc = 1;
	list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
	lu_gp->lu_gp_members++;
	spin_unlock(&lu_gp->lu_gp_lock);
}

/*
 * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock held
 */
void __core_alua_drop_lu_gp_mem(
	struct t10_alua_lu_gp_member *lu_gp_mem,
	struct t10_alua_lu_gp *lu_gp)
{
	spin_lock(&lu_gp->lu_gp_lock);
	list_del(&lu_gp_mem->lu_gp_mem_list);
	lu_gp_mem->lu_gp = NULL;
	lu_gp_mem->lu_gp_assoc = 0;
	lu_gp->lu_gp_members--;
	spin_unlock(&lu_gp->lu_gp_lock);
}
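
Per the comments, both helpers assume the member's lu_gp_mem_lock is already held; a caller moving a member from one group to another would bracket the pair roughly like this (a sketch, with a hypothetical function name):

static void example_reparent(struct t10_alua_lu_gp_member *lu_gp_mem,
			     struct t10_alua_lu_gp *new_lu_gp)
{
	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
	if (lu_gp_mem->lu_gp)
		__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp_mem->lu_gp);
	__core_alua_attach_lu_gp_mem(lu_gp_mem, new_lu_gp);
	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
}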

struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
		const char *name, int def_group)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;

	tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
	if (!tg_pt_gp) {
		pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
		return NULL;
	}
	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list);
	mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
	INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
			  core_alua_do_transition_tg_pt_work);
	tg_pt_gp->tg_pt_gp_dev = dev;
	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
		   ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
	/*
	 * Enable both explicit and implicit ALUA support by default
	 */
	tg_pt_gp->tg_pt_gp_alua_access_type =
		TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA;
	/*
	 * Set the default Active/NonOptimized Delay in milliseconds
	 */
	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
	tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
	tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS;

	/*
	 * Enable all supported states
	 */
	tg_pt_gp->tg_pt_gp_alua_supported_states =
		ALUA_T_SUP | ALUA_O_SUP |
		ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;

	if (def_group) {
		spin_lock(&dev->t10_alua.tg_pt_gps_lock);
		tg_pt_gp->tg_pt_gp_id =
			dev->t10_alua.alua_tg_pt_gps_counter++;
		tg_pt_gp->tg_pt_gp_valid_id = 1;
		dev->t10_alua.alua_tg_pt_gps_count++;
		list_add_tail(&tg_pt_gp->tg_pt_gp_list,
			      &dev->t10_alua.tg_pt_gps_list);
		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
	}

	return tg_pt_gp;
}

int core_alua_set_tg_pt_gp_id(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	u16 tg_pt_gp_id)
{
	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
	struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
	u16 tg_pt_gp_id_tmp;

	/*
	 * The tg_pt_gp->tg_pt_gp_id may only be set once.
	 */
	if (tg_pt_gp->tg_pt_gp_valid_id) {
		pr_warn("ALUA TG PT Group already has a valid ID,"
			" ignoring request\n");
		return -EINVAL;
	}

	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
		pr_err("Maximum ALUA alua_tg_pt_gps_count:"
			" 0x0000ffff reached\n");
		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
		kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
		return -ENOSPC;
	}
again:
	tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
			dev->t10_alua.alua_tg_pt_gps_counter++;

	list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list,
			tg_pt_gp_list) {
		if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
			if (!tg_pt_gp_id)
				goto again;

			pr_err("ALUA Target Port Group ID: %hu already"
				" exists, ignoring request\n", tg_pt_gp_id);
			spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
			return -EINVAL;
		}
	}

	tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
	tg_pt_gp->tg_pt_gp_valid_id = 1;
	list_add_tail(&tg_pt_gp->tg_pt_gp_list,
		      &dev->t10_alua.tg_pt_gps_list);
	dev->t10_alua.alua_tg_pt_gps_count++;
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

	return 0;
}
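
The configfs interface performs these two steps separately (mkdir allocates the group, writing the tg_pt_gp_id attribute binds the ID), but the flow composes as below. Passing an ID of 0 requests auto-assignment from the per-device counter; a sketch with hypothetical names:

static struct t10_alua_tg_pt_gp *example_create_gp(struct se_device *dev)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;

	tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "example_gp", 0);
	if (!tg_pt_gp)
		return NULL;
	/*
	 * With id 0 the only failure is -ENOSPC, in which case
	 * core_alua_set_tg_pt_gp_id() frees the group itself.
	 */
	if (core_alua_set_tg_pt_gp_id(tg_pt_gp, 0) < 0)
		return NULL;
	return tg_pt_gp;
}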

struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
	struct se_port *port)
{
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;

	tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
				GFP_KERNEL);
	if (!tg_pt_gp_mem) {
		pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
	spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0);

	tg_pt_gp_mem->tg_pt = port;
	port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;

	return tg_pt_gp_mem;
}

void core_alua_free_tg_pt_gp(
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;

	/*
	 * Once we have reached this point, config_item_put() has already
	 * been called from target_core_alua_drop_tg_pt_gp().
	 *
	 * Here we remove *tg_pt_gp from the global list so that
	 * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
	 * can be made while we are releasing struct t10_alua_tg_pt_gp.
	 */
	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
	list_del(&tg_pt_gp->tg_pt_gp_list);
	dev->t10_alua.alua_tg_pt_gps_counter--;
	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);

	flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);

	/*
	 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
	 * core_alua_get_tg_pt_gp_by_name() in
	 * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
	 * to be released with core_alua_put_tg_pt_gp_from_name().
	 */
	while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
		cpu_relax();

	/*
	 * Release reference to struct t10_alua_tg_pt_gp from all associated
	 * struct se_port.
	 */
	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp,
			&tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) {
		if (tg_pt_gp_mem->tg_pt_gp_assoc) {
			list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
			tg_pt_gp->tg_pt_gp_members--;
			tg_pt_gp_mem->tg_pt_gp_assoc = 0;
		}
		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
		/*
		 * tg_pt_gp_mem is associated with a single
		 * se_port->sep_alua_tg_pt_gp_mem, and is released via
		 * core_alua_free_tg_pt_gp_mem().
		 *
		 * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
		 * assume we want to re-associate a given tg_pt_gp_mem with
		 * default_tg_pt_gp.
		 */
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
			__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
					dev->t10_alua.default_tg_pt_gp);
		} else
			tg_pt_gp_mem->tg_pt_gp = NULL;
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	}
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

	kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
}
1895 1904
1896 void core_alua_free_tg_pt_gp_mem(struct se_port *port) 1905 void core_alua_free_tg_pt_gp_mem(struct se_port *port)
1897 { 1906 {
1898 struct t10_alua_tg_pt_gp *tg_pt_gp; 1907 struct t10_alua_tg_pt_gp *tg_pt_gp;
1899 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; 1908 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1900 1909
1901 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; 1910 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
1902 if (!tg_pt_gp_mem) 1911 if (!tg_pt_gp_mem)
1903 return; 1912 return;
1904 1913
1905 while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt)) 1914 while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
1906 cpu_relax(); 1915 cpu_relax();
1907 1916
1908 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1917 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1909 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; 1918 tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1910 if (tg_pt_gp) { 1919 if (tg_pt_gp) {
1911 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1920 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1912 if (tg_pt_gp_mem->tg_pt_gp_assoc) { 1921 if (tg_pt_gp_mem->tg_pt_gp_assoc) {
1913 list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list); 1922 list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1914 tg_pt_gp->tg_pt_gp_members--; 1923 tg_pt_gp->tg_pt_gp_members--;
1915 tg_pt_gp_mem->tg_pt_gp_assoc = 0; 1924 tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1916 } 1925 }
1917 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1926 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1918 tg_pt_gp_mem->tg_pt_gp = NULL; 1927 tg_pt_gp_mem->tg_pt_gp = NULL;
1919 } 1928 }
1920 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); 1929 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1921 1930
1922 kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem); 1931 kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem);
1923 } 1932 }
1924 1933
1925 static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name( 1934 static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
1926 struct se_device *dev, const char *name) 1935 struct se_device *dev, const char *name)
1927 { 1936 {
1928 struct t10_alua_tg_pt_gp *tg_pt_gp; 1937 struct t10_alua_tg_pt_gp *tg_pt_gp;
1929 struct config_item *ci; 1938 struct config_item *ci;
1930 1939
1931 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1940 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1932 list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list, 1941 list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
1933 tg_pt_gp_list) { 1942 tg_pt_gp_list) {
1934 if (!tg_pt_gp->tg_pt_gp_valid_id) 1943 if (!tg_pt_gp->tg_pt_gp_valid_id)
1935 continue; 1944 continue;
1936 ci = &tg_pt_gp->tg_pt_gp_group.cg_item; 1945 ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1937 if (!strcmp(config_item_name(ci), name)) { 1946 if (!strcmp(config_item_name(ci), name)) {
1938 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); 1947 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1939 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1948 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1940 return tg_pt_gp; 1949 return tg_pt_gp;
1941 } 1950 }
1942 } 1951 }
1943 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1952 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1944 1953
1945 return NULL; 1954 return NULL;
1946 } 1955 }
1947 1956
1948 static void core_alua_put_tg_pt_gp_from_name( 1957 static void core_alua_put_tg_pt_gp_from_name(
1949 struct t10_alua_tg_pt_gp *tg_pt_gp) 1958 struct t10_alua_tg_pt_gp *tg_pt_gp)
1950 { 1959 {
1951 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; 1960 struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1952 1961
1953 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1962 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1954 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); 1963 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1955 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1964 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1956 } 1965 }
1957 1966
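These two static helpers form the get/put pair that the tg_pt_gp_ref_cnt busy-wait in core_alua_free_tg_pt_gp() synchronizes against: a group found by name holds the count until the matching put, so the group cannot be freed underneath its user. A minimal sketch of the pairing, assuming a hypothetical in-file helper (example_lookup_and_release is illustrative):

/*
 * Illustrative only: every successful lookup must be balanced by a put,
 * or core_alua_free_tg_pt_gp() will spin forever on tg_pt_gp_ref_cnt.
 */
static int example_lookup_and_release(struct se_device *dev, const char *name)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;

	tg_pt_gp = core_alua_get_tg_pt_gp_by_name(dev, name);
	if (!tg_pt_gp)
		return -ENODEV;

	/* ... use tg_pt_gp; it is pinned against release here ... */

	core_alua_put_tg_pt_gp_from_name(tg_pt_gp);
	return 0;
}
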
/*
 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
 */
void __core_alua_attach_tg_pt_gp_mem(
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	tg_pt_gp_mem->tg_pt_gp = tg_pt_gp;
	tg_pt_gp_mem->tg_pt_gp_assoc = 1;
	list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list,
			&tg_pt_gp->tg_pt_gp_mem_list);
	tg_pt_gp->tg_pt_gp_members++;
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}

/*
 * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
 */
static void __core_alua_drop_tg_pt_gp_mem(
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
	struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
	tg_pt_gp_mem->tg_pt_gp = NULL;
	tg_pt_gp_mem->tg_pt_gp_assoc = 0;
	tg_pt_gp->tg_pt_gp_members--;
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}

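Together these helpers implement a member "move": drop from the old group, then attach to the new one, both under the member's tg_pt_gp_mem_lock, while each helper takes the per-group tg_pt_gp_lock internally, so the member lock is always the outer lock. core_alua_store_tg_pt_gp_info() below is the in-tree user of exactly this sequence; here is a condensed sketch (example_move_member is hypothetical):

/*
 * Illustrative only: re-associate one member from old_gp to new_gp.
 * Lock order is mem lock (outer), then per-group lock (inner, taken by
 * the helpers themselves).
 */
static void example_move_member(struct t10_alua_tg_pt_gp_member *mem,
				struct t10_alua_tg_pt_gp *old_gp,
				struct t10_alua_tg_pt_gp *new_gp)
{
	spin_lock(&mem->tg_pt_gp_mem_lock);
	__core_alua_drop_tg_pt_gp_mem(mem, old_gp);
	__core_alua_attach_tg_pt_gp_mem(mem, new_gp);
	spin_unlock(&mem->tg_pt_gp_mem_lock);
}
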
ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
{
	struct config_item *tg_pt_ci;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	ssize_t len = 0;

	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
	if (!tg_pt_gp_mem)
		return len;

	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	if (tg_pt_gp) {
		tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
		len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
			" %hu\nTG Port Primary Access State: %s\nTG Port "
			"Primary Access Status: %s\nTG Port Secondary Access"
			" State: %s\nTG Port Secondary Access Status: %s\n",
			config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
			core_alua_dump_state(atomic_read(
					&tg_pt_gp->tg_pt_gp_alua_access_state)),
			core_alua_dump_status(
				tg_pt_gp->tg_pt_gp_alua_access_status),
			(atomic_read(&port->sep_tg_pt_secondary_offline)) ?
			"Offline" : "None",
			core_alua_dump_status(port->sep_tg_pt_secondary_stat));
	}
	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

	return len;
}

ssize_t core_alua_store_tg_pt_gp_info(
	struct se_port *port,
	const char *page,
	size_t count)
{
	struct se_portal_group *tpg;
	struct se_lun *lun;
	struct se_device *dev = port->sep_lun->lun_se_dev;
	struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	unsigned char buf[TG_PT_GROUP_NAME_BUF];
	int move = 0;

	tpg = port->sep_tpg;
	lun = port->sep_lun;

	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
	if (!tg_pt_gp_mem)
		return 0;

	if (count > TG_PT_GROUP_NAME_BUF) {
		pr_err("ALUA Target Port Group alias too large!\n");
		return -EINVAL;
	}
	memset(buf, 0, TG_PT_GROUP_NAME_BUF);
	memcpy(buf, page, count);
	/*
	 * Any ALUA target port group alias besides "NULL" means we will be
	 * making a new group association.
	 */
	if (strcmp(strstrip(buf), "NULL")) {
		/*
		 * core_alua_get_tg_pt_gp_by_name() will increment reference to
		 * struct t10_alua_tg_pt_gp.  This reference is released with
		 * core_alua_put_tg_pt_gp_from_name() below.
		 */
		tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev,
					strstrip(buf));
		if (!tg_pt_gp_new)
			return -ENODEV;
	}

	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	if (tg_pt_gp) {
		/*
		 * Clearing an existing tg_pt_gp association, and replacing
		 * with the default_tg_pt_gp.
		 */
		if (!tg_pt_gp_new) {
			pr_debug("Target_Core_ConfigFS: Moving"
				" %s/tpgt_%hu/%s from ALUA Target Port Group:"
				" alua/%s, ID: %hu back to"
				" default_tg_pt_gp\n",
				tpg->se_tpg_tfo->tpg_get_wwn(tpg),
				tpg->se_tpg_tfo->tpg_get_tag(tpg),
				config_item_name(&lun->lun_group.cg_item),
				config_item_name(
					&tg_pt_gp->tg_pt_gp_group.cg_item),
				tg_pt_gp->tg_pt_gp_id);

			__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
			__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
					dev->t10_alua.default_tg_pt_gp);
			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

			return count;
		}
		/*
		 * Removing existing association of tg_pt_gp_mem with tg_pt_gp
		 */
		__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
		move = 1;
	}
	/*
	 * Associate tg_pt_gp_mem with tg_pt_gp_new.
	 */
	__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
		" Target Port Group: alua/%s, ID: %hu\n", (move) ?
		"Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		config_item_name(&lun->lun_group.cg_item),
		config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
		tg_pt_gp_new->tg_pt_gp_id);

	core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
	return count;
}

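A userspace C sketch of driving the show/store pair above through configfs. Everything path-related here is an assumption: the mount point, the hypothetical iSCSI fabric path, and the attribute file name alua_tg_pt_gp all depend on how these handlers are wired into configfs and which fabric module is in use. Note that writing the literal string "NULL" moves the port back to default_tg_pt_gp, per the strcmp() in the store handler.

/*
 * Illustrative userspace program (not kernel code).  Assumed paths --
 * adjust for the actual fabric and group names on the target.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical LUN attribute backed by the handlers above */
	const char *attr = "/sys/kernel/config/target/iscsi/"
		"iqn.2014-01.example:tgt/tpgt_1/lun/lun_0/alua_tg_pt_gp";
	char buf[512];
	ssize_t n;
	int fd;

	fd = open(attr, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* store: associate this port with a named TG port group */
	if (write(fd, "my_tg_pt_gp\n", strlen("my_tg_pt_gp\n")) < 0)
		perror("write");

	/* show: dump alias, ID, and primary/secondary access state */
	n = pread(fd, buf, sizeof(buf) - 1, 0);
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	close(fd);
	return 0;
}
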
ssize_t core_alua_show_access_type(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) &&
	    (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA))
		return sprintf(page, "Implicit and Explicit\n");
	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)
		return sprintf(page, "Implicit\n");
	else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)
		return sprintf(page, "Explicit\n");
	else
		return sprintf(page, "None\n");
}

ssize_t core_alua_store_access_type(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_access_type\n");
		return ret;
	}
	if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
		pr_err("Illegal value for alua_access_type:"
				" %lu\n", tmp);
		return -EINVAL;
	}
	if (tmp == 3)
		tg_pt_gp->tg_pt_gp_alua_access_type =
			TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;
	else if (tmp == 2)
		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA;
	else if (tmp == 1)
		tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA;
	else
		tg_pt_gp->tg_pt_gp_alua_access_type = 0;

	return count;
}

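The stored value packs into tg_pt_gp_alua_access_type as: 0 clears both bits, 1 sets TPGS_IMPLICIT_ALUA, 2 sets TPGS_EXPLICIT_ALUA, 3 sets both, as the store handler above shows. A minimal sketch of how a consumer might gate explicit SET_TARGET_PORT_GROUPS handling on those bits (example_explicit_alua_enabled is a hypothetical in-file predicate, not an in-tree function):

/*
 * Illustrative only: values 2 and 3 written above enable explicit ALUA.
 */
static bool example_explicit_alua_enabled(struct t10_alua_tg_pt_gp *tg_pt_gp)
{
	return (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) != 0;
}
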
ssize_t core_alua_show_nonop_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
}

ssize_t core_alua_store_nonop_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract nonop_delay_msecs\n");
		return ret;
	}
	if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
		pr_err("Passed nonop_delay_msecs: %lu, exceeds"
			" ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
			ALUA_MAX_NONOP_DELAY_MSECS);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;

	return count;
}

ssize_t core_alua_show_trans_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
}

ssize_t core_alua_store_trans_delay_msecs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract trans_delay_msecs\n");
		return ret;
	}
	if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
		pr_err("Passed trans_delay_msecs: %lu, exceeds"
			" ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
			ALUA_MAX_TRANS_DELAY_MSECS);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;

	return count;
}

ssize_t core_alua_show_implicit_trans_secs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs);
}

ssize_t core_alua_store_implicit_trans_secs(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract implicit_trans_secs\n");
		return ret;
	}
	if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) {
		pr_err("Passed implicit_trans_secs: %lu, exceeds"
			" ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp,
			ALUA_MAX_IMPLICIT_TRANS_SECS);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp;

	return count;
}

ssize_t core_alua_show_preferred_bit(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	char *page)
{
	return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
}

ssize_t core_alua_store_preferred_bit(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract preferred ALUA value\n");
		return ret;
	}
	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_pref = (int)tmp;

	return count;
}

ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
{
	if (!lun->lun_sep)
		return -ENODEV;

	return sprintf(page, "%d\n",
		atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline));
}

ssize_t core_alua_store_offline_bit(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	unsigned long tmp;
	int ret;

	if (!lun->lun_sep)
		return -ENODEV;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_tg_pt_offline value\n");
		return ret;
	}
	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
				tmp);
		return -EINVAL;
	}
	tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
	if (!tg_pt_gp_mem) {
		pr_err("Unable to locate *tg_pt_gp_mem\n");
		return -EINVAL;
	}

	ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem,
			lun->lun_sep, 0, (int)tmp);
	if (ret < 0)
		return -EINVAL;

	return count;
}

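Writing the offline bit funnels into core_alua_set_tg_pt_secondary_state() with explicit == 0, as the call in the store handler above shows. A minimal sketch of a hypothetical in-kernel caller forcing a port's secondary access state to Offline the same way (example_force_port_offline is illustrative; the argument order is taken from the call above):

/*
 * Illustrative only: drive the same secondary-state transition that
 * writing '1' to the offline bit triggers.
 */
static int example_force_port_offline(struct se_lun *lun)
{
	struct t10_alua_tg_pt_gp_member *mem;

	if (!lun->lun_sep)
		return -ENODEV;

	mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
	if (!mem)
		return -EINVAL;

	/* explicit == 0, offline == 1 */
	return core_alua_set_tg_pt_secondary_state(mem, lun->lun_sep, 0, 1);
}
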
ssize_t core_alua_show_secondary_status(
	struct se_lun *lun,
	char *page)
{
	return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat);
}

ssize_t core_alua_store_secondary_status(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_tg_pt_status\n");
		return ret;
	}
	if ((tmp != ALUA_STATUS_NONE) &&
	    (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
	    (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
		pr_err("Illegal value for alua_tg_pt_status: %lu\n",
				tmp);
		return -EINVAL;
	}
	lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp;

	return count;
}

ssize_t core_alua_show_secondary_write_metadata(
	struct se_lun *lun,
	char *page)
{
	return sprintf(page, "%d\n",
			lun->lun_sep->sep_tg_pt_secondary_write_md);
}

ssize_t core_alua_store_secondary_write_metadata(
	struct se_lun *lun,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_tg_pt_write_md\n");
		return ret;
	}
	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for alua_tg_pt_write_md:"
				" %lu\n", tmp);
		return -EINVAL;
	}
	lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp;

	return count;
}

int core_setup_alua(struct se_device *dev)
{
	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
		struct t10_alua_lu_gp_member *lu_gp_mem;

		/*
		 * Associate this struct se_device with the default ALUA
		 * LUN Group.
		 */
		lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
		if (IS_ERR(lu_gp_mem))
			return PTR_ERR(lu_gp_mem);

		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		__core_alua_attach_lu_gp_mem(lu_gp_mem,
				default_lu_gp);
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		pr_debug("%s: Adding to default ALUA LU Group:"
			" core/alua/lu_gps/default_lu_gp\n",
			dev->transport->name);
	}

	return 0;
}

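core_setup_alua() runs once per struct se_device during device configuration; the guard at the top skips the default LU group association for pSCSI passthrough (TRANSPORT_PLUGIN_PHBA_PDEV) and internal-use HBAs, where ALUA emulation is not layered on top. A minimal sketch, assuming a hypothetical configuration path, of how a caller consumes its return value (example_configure_device_alua is illustrative, not the in-tree call site):

/*
 * Illustrative only: abort device setup if the default ALUA LU group
 * association cannot be made.
 */
static int example_configure_device_alua(struct se_device *dev)
{
	int ret;

	ret = core_setup_alua(dev);
	if (ret)
		return ret;

	/* ... continue with the rest of device configuration ... */
	return 0;
}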